diff --git a/.buildkite/solana-private.sh b/.buildkite/solana-private.sh old mode 100644 new mode 100755 index 20a510a9c9cbac..bb1ac9d5dbc774 --- a/.buildkite/solana-private.sh +++ b/.buildkite/solana-private.sh @@ -8,10 +8,8 @@ # set -e -# NAME=$(buildkite-agent meta-data get name) cd "$(dirname "$0")"/.. source ci/_ -sudo chmod 0777 ci/buildkite-solana-private.sh _ ci/buildkite-solana-private.sh pipeline.yml echo +++ pipeline diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 17cc728e68bd17..c348d69acbe4ea --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -69,7 +69,7 @@ jobs: needs: - check if: > - github.repository == 'solana-labs/solana' && + github.repository == 'anza-xyz/agave' && needs.check.outputs.continue == 1 # the name is used by .mergify.yml as well name: build & deploy docs @@ -79,9 +79,9 @@ jobs: uses: actions/checkout@v4 - name: Setup Node - uses: actions/setup-node@v3 + uses: actions/setup-node@v4 with: - node-version: 16 + node-version: 22 - name: Build working-directory: docs diff --git a/.github/workflows/downstream-project-anchor.yml b/.github/workflows/downstream-project-anchor.yml index a7768f6a1c433b..33ecc632f0b7d5 --- a/.github/workflows/downstream-project-anchor.yml +++ b/.github/workflows/downstream-project-anchor.yml @@ -13,11 +13,10 @@ on: - "**.rs" - "Cargo.toml" - "Cargo.lock" - - "cargo-build-bpf" - - "cargo-test-bpf" - "cargo-build-sbf" - "cargo-test-sbf" - "scripts/build-downstream-anchor-projects.sh" + - "scripts/patch-spl-crates-for-anchor.sh" - ".github/scripts/purge-ubuntu-runner.sh" - ".github/scripts/downstream-project-spl-install-deps.sh" - ".github/workflows/downstream-project-anchor.yml" diff --git a/.github/workflows/downstream-project-spl.yml b/.github/workflows/downstream-project-spl.yml index 8d3baf25949e99..d2065f178fd5a5 --- a/.github/workflows/downstream-project-spl.yml +++ b/.github/workflows/downstream-project-spl.yml @@ -13,8 +13,6 @@ on: - "**.rs" - "Cargo.toml" - "Cargo.lock" - - "cargo-build-bpf" - - "cargo-test-bpf" - "cargo-build-sbf" - "cargo-test-sbf" - "ci/downstream-projects/run-spl.sh" diff --git a/.gitignore b/.gitignore index 995129e50d16cc..3127645a259560 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,7 @@ target/ *.log log-*.txt log-*/ +!log-collector/ !log-analyzer/* # intellij files diff --git a/.mergify.yml b/.mergify.yml index 23469b3f055882..d50fd5e1e277ba --- a/.mergify.yml +++ b/.mergify.yml @@ -64,7 +64,7 @@ pull_request_rules: - check-success=clippy-nightly (macos-latest) - check-success=clippy-nightly (macos-latest-large) - or: - - -files~=(\.rs|Cargo\.toml|Cargo\.lock|cargo-build-bpf|cargo-test-bpf|cargo-build-sbf|cargo-test-sbf|ci/downstream-projects/run-spl\.sh|\.github/workflows/downstream-project-spl\.yml)$ + - -files~=(\.rs|Cargo\.toml|Cargo\.lock|cargo-build-sbf|cargo-test-sbf|ci/downstream-projects/run-spl\.sh|\.github/workflows/downstream-project-spl\.yml)$ - and: - status-success=cargo-test-sbf (token/program) - status-success=cargo-test-sbf (instruction-padding/program, token/program-2022, token/program-2022-test) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f08a16889b81b..1da9bd05f68b70 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,21 +14,36 @@ Release channels have their own copy of this changelog: ## [2.1.0] - Unreleased +* Breaking: + * SDK: + * `cargo-build-bpf` and `cargo-test-bpf` have been deprecated for two years and have now been definitively removed. + Use `cargo-build-sbf` and `cargo-test-sbf` instead.
+ * Stake: + * removed the unreleased `redelegate` instruction processor and CLI commands (#2213) * Changes * SDK: removed the `respan` macro. This was marked as "internal use only" and was no longer used internally. ## [2.0.0] * Breaking - * SDK: Support for Borsh v0.9 removed, please use v1 or v0.10 (#1440) - * SDK: `Copy` is no longer derived on `Rent` and `EpochSchedule`, please switch to using `clone()` (solana-labs#32767) - * SDK: deprecated SyncClient trait methods removed + * SDK: + * Support for Borsh v0.9 removed, please use v1 or v0.10 (#1440) + * `Copy` is no longer derived on `Rent` and `EpochSchedule`, please switch to using `clone()` (solana-labs#32767) + * `solana-sdk`: deprecated symbols removed + * `solana-program`: deprecated symbols removed * RPC: obsolete and deprecated v1 endpoints are removed. These endpoints are: confirmTransaction, getSignatureStatus, getSignatureConfirmation, getTotalSupply, getConfirmedSignaturesForAddress, getConfirmedBlock, getConfirmedBlocks, getConfirmedBlocksWithLimit, getConfirmedTransaction, getConfirmedSignaturesForAddress2, getRecentBlockhash, getFees, getFeeCalculatorForBlockhash, getFeeRateGovernor, getSnapshotSlot, getStakeActivation - * `--enable-rpc-obsolete_v1_7` flag removed * Deprecated methods are removed from `RpcClient` and `RpcClient::nonblocking` + * `solana-client`: deprecated re-exports removed; please import `solana-connection-cache`, `solana-quic-client`, or `solana-udp-client` directly + * Deprecated arguments removed from `agave-validator`: + * `--enable-rpc-obsolete_v1_7` (#1886) + * `--accounts-db-caching-enabled` (#2063) + * `--accounts-db-index-hashing` (#2063) + * `--no-accounts-db-index-hashing` (#2063) + * `--incremental-snapshots` (#2148) + * `--halt-on-known-validators-accounts-hash-mismatch` (#2157) * Changes * `central-scheduler` as default option for `--block-production-method` (#34891) * `solana-rpc-client-api`: `RpcFilterError` depends on `base64` version 0.22, so users may need to upgrade to `base64` version 0.22 diff --git a/Cargo.lock b/Cargo.lock index 8e13c3ccd8a785..d9175d0d9c0682 --- a/Cargo.lock +++ b/Cargo.lock @@ -186,6 +186,7 @@ dependencies = [ "solana-geyser-plugin-manager", "solana-gossip", "solana-ledger", + "solana-log-collector", "solana-logger", "solana-measure", "solana-program-runtime", @@ -216,6 +217,16 @@ dependencies = [ "solana-version", ] +[[package]] +name = "agave-transaction-view" +version = "2.1.0" +dependencies = [ + "agave-transaction-view", + "bincode", + "criterion", + "solana-sdk", +] + [[package]] name = "agave-validator" version = "2.1.0" @@ -565,9 +576,9 @@ checksum = "9ad284aeb45c13f2fb4f084de4a420ebf447423bdf9386c0540ce33cb3ef4b8c" [[package]] name = "arrayref" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" +checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" [[package]] name = "arrayvec" @@ -703,13 +714,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.80" +version = "0.1.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" +checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", ] @@ -862,7 +873,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.68", + "syn 2.0.72", ]
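The `solana-client` changelog entry above has a concrete counterpart later in this patch, where `accounts-cluster-bench` switches its `TokenAccountsFilter` import to `solana-rpc-client-api`. A minimal sketch of that import migration follows; the helper function is illustrative only, not part of the codebase:

```rust
// Before 2.0.0 this type was reachable through a solana-client re-export:
//     use solana_client::rpc_request::TokenAccountsFilter;
// With the deprecated re-exports removed, import it from the underlying
// crate directly, as accounts-cluster-bench does below.
use solana_rpc_client_api::request::TokenAccountsFilter;
use solana_sdk::pubkey::Pubkey;

// Hypothetical helper: build a filter selecting token accounts by mint.
fn accounts_by_mint_filter(mint: Pubkey) -> TokenAccountsFilter {
    TokenAccountsFilter::Mint(mint)
}
```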
[[package]] @@ -1019,7 +1030,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", "syn_derive", ] @@ -1136,9 +1147,9 @@ checksum = "5ce89b21cab1437276d2650d57e971f9d548a2d9037cc231abdc0562b97498ce" [[package]] name = "bytemuck" -version = "1.16.1" +version = "1.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b236fc92302c97ed75b38da1f4917b5cdda4984745740f153a5d3059e48d725e" +checksum = "102087e286b4677862ea56cf8fc58bb2cdfa8725c40ffb80fe3a008eb7f2fc83" dependencies = [ "bytemuck_derive", ] @@ -1151,7 +1162,7 @@ checksum = "1ee891b04274a59bd38b412188e24b849617b2e45a0fd8d057deb63e7403761b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -1162,9 +1173,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "bytesize" @@ -1768,7 +1779,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -1779,7 +1790,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -1841,7 +1852,7 @@ checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -1965,7 +1976,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -2071,7 +2082,7 @@ checksum = "03cdc46ec28bd728e67540c528013c6a10eb69a02eb31078a1bda695438cbfb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -2341,7 +2352,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -2528,17 +2539,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "goblin" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c955ab4e0ad8c843ea653a3d143048b87490d9be56bd7132a435c2407846ac8f" -dependencies = [ - "log", - "plain", - "scroll", -] - [[package]] name = "h2" version = "0.3.26" @@ -2551,7 +2551,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.2.6", + "indexmap 2.3.0", "slab", "tokio", "tokio-util 0.7.11", @@ -2744,9 +2744,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.29" +version = "0.14.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ "bytes", "futures-channel", @@ -2906,9 +2906,9 @@ dependencies = [ [[package]] name = "index_list" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb725b6505e51229de32027e0cfcd9db29da4d89156f9747b0a5195643fa3e1" +checksum = "4e6ba961c14e98151cd6416dd3685efe786a94c38bc1a535c06ceff0a1600813" [[package]] name = "indexmap" @@ 
-2922,9 +2922,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.6" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +checksum = "de3fc2e30ba82dd1b3911c8de1ffc143c74a914a14e99514d7637e3099df5ea0" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -3339,9 +3339,9 @@ dependencies = [ [[package]] name = "lz4" -version = "1.25.0" +version = "1.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6eab492fe7f8651add23237ea56dbf11b3c4ff762ab83d40a47f11433421f91" +checksum = "958b4caa893816eea05507c20cfe47574a43d9a697138a7872990bba8a0ece68" dependencies = [ "libc", "lz4-sys", @@ -3349,9 +3349,9 @@ dependencies = [ [[package]] name = "lz4-sys" -version = "1.9.5" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9764018d143cc854c9f17f0b907de70f14393b1f502da6375dce70f00514eb3" +checksum = "109de74d5d2353660401699a4174a4ff23fcc649caf553df71933c7fb45ad868" dependencies = [ "cc", "libc", @@ -3640,7 +3640,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -3697,23 +3697,23 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" +checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" dependencies = [ "num_enum_derive", ] [[package]] name = "num_enum_derive" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" +checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -3775,9 +3775,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.64" +version = "0.10.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" +checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" dependencies = [ "bitflags 2.6.0", "cfg-if 1.0.0", @@ -3807,18 +3807,18 @@ checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" [[package]] name = "openssl-src" -version = "300.1.6+3.1.4" +version = "300.3.1+3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439fac53e092cd7442a3660c85dde4643ab3b5bd39040912388dcdabf6b88085" +checksum = "7259953d42a81bf137fbbd73bd30a8e1914d6dce43c2b90ed575783a22608b91" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.101" +version = "0.9.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dda2b0f344e78efc2facf7d195d098df0dd72151b26ab98da807afc26c198dff" +checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" dependencies = [ "cc", "libc", @@ -4075,12 +4075,6 @@ version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12295df4f294471248581bc09bef3c38a5e46f1e36d6a37353621a0c6c357e1f" -[[package]] -name = "plain" -version = "0.2.3" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" - [[package]] name = "plotters" version = "0.3.4" @@ -4344,7 +4338,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -4948,20 +4942,6 @@ name = "scroll" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04c565b551bafbef4157586fa379538366e4385d42082f255bfd96e4fe8519da" -dependencies = [ - "scroll_derive", -] - -[[package]] -name = "scroll_derive" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdbda6ac5cd1321e724fa9cee216f3a61885889b896f073b8f82322789c5250e" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] [[package]] name = "sct" @@ -5034,9 +5014,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" dependencies = [ "serde_derive", ] @@ -5052,22 +5032,23 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] name = "serde_json" -version = "1.0.119" +version = "1.0.122" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8eddb61f0697cc3989c5d64b452f5488e2b8a60fd7d5076a3045076ffef8cb0" +checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] @@ -5112,7 +5093,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -5133,7 +5114,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.3.0", "itoa", "ryu", "serde", @@ -5162,7 +5143,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -5469,6 +5450,7 @@ dependencies = [ "solana-measure", "solana-net-utils", "solana-rpc-client", + "solana-rpc-client-api", "solana-runtime", "solana-sdk", "solana-streamer", @@ -5482,6 +5464,7 @@ dependencies = [ name = "solana-accounts-db" version = "2.1.0" dependencies = [ + "ahash 0.8.10", "assert_matches", "bincode", "blake3", @@ -5494,7 +5477,7 @@ dependencies = [ "dashmap", "ed25519-dalek", "index_list", - "indexmap 2.2.6", + "indexmap 2.3.0", "itertools 0.12.1", "lazy_static", "libsecp256k1", @@ -5521,6 +5504,7 @@ dependencies = [ "solana-frozen-abi", "solana-frozen-abi-macro", "solana-inline-spl", + "solana-lattice-hash", "solana-logger", "solana-measure", "solana-metrics", @@ -5528,7 +5512,7 @@ dependencies = [ "solana-rayon-threadlimit", "solana-sdk", "solana-stake-program", - "solana-svm", + "solana-svm-transaction", "solana-vote-program", "static_assertions", "strum", @@ -5549,6 +5533,7 @@ dependencies 
= [ "num-derive", "num-traits", "rustc_version 0.4.0", + "solana-log-collector", "solana-program", "solana-program-runtime", "solana-sdk", @@ -5566,6 +5551,13 @@ dependencies = [ "solana-sdk", ] +[[package]] +name = "solana-atomic-u64" +version = "2.1.0" +dependencies = [ + "parking_lot 0.12.3", +] + [[package]] name = "solana-banking-bench" version = "2.1.0" @@ -5710,6 +5702,23 @@ dependencies = [ "solana-sdk", ] +[[package]] +name = "solana-bn254" +version = "2.1.0" +dependencies = [ + "ark-bn254", + "ark-ec", + "ark-ff", + "ark-serialize", + "array-bytes", + "bytemuck", + "serde", + "serde_derive", + "serde_json", + "solana-program", + "thiserror", +] + [[package]] name = "solana-bpf-loader-program" version = "2.1.0" @@ -5722,12 +5731,16 @@ dependencies = [ "memoffset 0.9.1", "rand 0.8.5", "scopeguard", + "solana-bn254", "solana-compute-budget", "solana-curve25519", + "solana-log-collector", "solana-measure", "solana-poseidon", + "solana-program-memory", "solana-program-runtime", "solana-sdk", + "solana-timings", "solana-type-overrides", "solana-vote", "solana_rbpf", @@ -5767,11 +5780,24 @@ dependencies = [ ] [[package]] -name = "solana-cargo-build-bpf" +name = "solana-builtins-default-costs" version = "2.1.0" dependencies = [ + "ahash 0.8.10", + "lazy_static", "log", - "solana-logger", + "rand 0.8.5", + "rustc_version 0.4.0", + "solana-address-lookup-table-program", + "solana-bpf-loader-program", + "solana-compute-budget-program", + "solana-config-program", + "solana-frozen-abi", + "solana-loader-v4-program", + "solana-sdk", + "solana-stake-program", + "solana-system-program", + "solana-vote-program", ] [[package]] @@ -5795,10 +5821,6 @@ dependencies = [ "tar", ] -[[package]] -name = "solana-cargo-test-bpf" -version = "2.1.0" - [[package]] name = "solana-cargo-test-sbf" version = "2.1.0" @@ -5877,6 +5899,8 @@ dependencies = [ "solana-client", "solana-compute-budget", "solana-config-program", + "solana-connection-cache", + "solana-decode-error", "solana-faucet", "solana-loader-v4-program", "solana-logger", @@ -5956,7 +5980,7 @@ dependencies = [ "dashmap", "futures 0.3.30", "futures-util", - "indexmap 2.2.6", + "indexmap 2.3.0", "indicatif", "log", "quinn", @@ -6033,9 +6057,11 @@ dependencies = [ "chrono", "serde", "serde_derive", + "solana-log-collector", "solana-logger", "solana-program-runtime", "solana-sdk", + "solana-short-vec", ] [[package]] @@ -6046,7 +6072,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.2.6", + "indexmap 2.3.0", "indicatif", "log", "rand 0.8.5", @@ -6066,6 +6092,7 @@ name = "solana-core" version = "2.1.0" dependencies = [ "ahash 0.8.10", + "anyhow", "arrayvec", "assert_matches", "base64 0.22.1", @@ -6101,11 +6128,14 @@ dependencies = [ "serial_test", "solana-accounts-db", "solana-bloom", + "solana-builtins-default-costs", "solana-client", "solana-compute-budget", + "solana-connection-cache", "solana-core", "solana-cost-model", "solana-entry", + "solana-fee", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-geyser-plugin-manager", @@ -6123,12 +6153,15 @@ dependencies = [ "solana-rpc", "solana-rpc-client-api", "solana-runtime", + "solana-runtime-transaction", "solana-sanitize", "solana-sdk", "solana-send-transaction-service", + "solana-short-vec", "solana-stake-program", "solana-streamer", "solana-svm", + "solana-timings", "solana-tpu-client", "solana-transaction-status", "solana-turbine", @@ -6158,19 +6191,16 @@ dependencies = [ "itertools 0.12.1", "lazy_static", "log", + "rand 0.8.5", "rustc_version 0.4.0", - 
"solana-address-lookup-table-program", - "solana-bpf-loader-program", + "solana-builtins-default-costs", "solana-compute-budget", - "solana-compute-budget-program", - "solana-config-program", "solana-frozen-abi", "solana-frozen-abi-macro", - "solana-loader-v4-program", "solana-logger", "solana-metrics", + "solana-runtime-transaction", "solana-sdk", - "solana-stake-program", "solana-system-program", "solana-vote-program", "static_assertions", @@ -6188,6 +6218,14 @@ dependencies = [ "thiserror", ] +[[package]] +name = "solana-decode-error" +version = "2.1.0" +dependencies = [ + "num-derive", + "num-traits", +] + [[package]] name = "solana-define-syscall" version = "2.1.0" @@ -6205,6 +6243,7 @@ dependencies = [ "serde", "solana-bench-tps", "solana-client", + "solana-connection-cache", "solana-core", "solana-faucet", "solana-gossip", @@ -6291,6 +6330,14 @@ dependencies = [ "tokio", ] +[[package]] +name = "solana-fee" +version = "2.1.0" +dependencies = [ + "solana-sdk", + "solana-svm-transaction", +] + [[package]] name = "solana-frozen-abi" version = "2.1.0" @@ -6319,7 +6366,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -6393,7 +6440,7 @@ dependencies = [ "clap 2.33.3", "crossbeam-channel", "flate2", - "indexmap 2.2.6", + "indexmap 2.3.0", "itertools 0.12.1", "log", "lru", @@ -6421,9 +6468,11 @@ dependencies = [ "solana-net-utils", "solana-perf", "solana-rayon-threadlimit", + "solana-rpc-client", "solana-runtime", "solana-sanitize", "solana-sdk", + "solana-short-vec", "solana-streamer", "solana-tpu-client", "solana-version", @@ -6439,8 +6488,7 @@ name = "solana-inline-spl" version = "2.1.0" dependencies = [ "bytemuck", - "rustc_version 0.4.0", - "solana-sdk", + "solana-program", ] [[package]] @@ -6460,6 +6508,18 @@ dependencies = [ "tiny-bip39", ] +[[package]] +name = "solana-lattice-hash" +version = "2.1.0" +dependencies = [ + "base64 0.22.1", + "blake3", + "bytemuck", + "criterion", + "rand 0.8.5", + "rand_chacha 0.3.1", +] + [[package]] name = "solana-ledger" version = "2.1.0" @@ -6486,6 +6546,7 @@ dependencies = [ "num_cpus", "num_enum", "prost", + "qualifier_attr", "rand 0.8.5", "rand_chacha 0.3.1", "rayon", @@ -6515,6 +6576,7 @@ dependencies = [ "solana-storage-bigtable", "solana-storage-proto", "solana-svm", + "solana-timings", "solana-transaction-status", "solana-vote", "solana-vote-program", @@ -6539,6 +6601,7 @@ dependencies = [ "bincode", "log", "solana-compute-budget", + "solana-log-collector", "solana-measure", "solana-program-runtime", "solana-sdk", @@ -6599,6 +6662,13 @@ dependencies = [ "solana-version", ] +[[package]] +name = "solana-log-collector" +version = "2.1.0" +dependencies = [ + "log", +] + [[package]] name = "solana-logger" version = "2.1.0" @@ -6658,6 +6728,13 @@ dependencies = [ "thiserror", ] +[[package]] +name = "solana-msg" +version = "2.1.0" +dependencies = [ + "solana-define-syscall", +] + [[package]] name = "solana-net-shaper" version = "2.1.0" @@ -6686,7 +6763,6 @@ dependencies = [ "solana-logger", "solana-sdk", "solana-version", - "static_assertions", "tokio", "url 2.5.2", ] @@ -6713,7 +6789,7 @@ version = "2.1.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", "toml 0.8.12", ] @@ -6744,6 +6820,7 @@ dependencies = [ "solana-metrics", "solana-rayon-threadlimit", "solana-sdk", + "solana-short-vec", "solana-vote-program", "test-case", ] @@ -6802,10 +6879,6 @@ version = "2.1.0" dependencies = [ "anyhow", "arbitrary", - "ark-bn254", - "ark-ec", - "ark-ff", - 
"ark-serialize", "array-bytes", "assert_matches", "base64 0.22.1", @@ -6832,6 +6905,7 @@ dependencies = [ "num-derive", "num-traits", "parking_lot 0.12.3", + "qualifier_attr", "rand 0.8.5", "rustc_version 0.4.0", "serde", @@ -6841,17 +6915,31 @@ dependencies = [ "serial_test", "sha2 0.10.8", "sha3 0.10.8", + "solana-atomic-u64", + "solana-decode-error", "solana-define-syscall", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-logger", + "solana-msg", + "solana-program-memory", "solana-sanitize", "solana-sdk-macro", + "solana-secp256k1-recover", + "solana-short-vec", "static_assertions", "thiserror", "wasm-bindgen", ] +[[package]] +name = "solana-program-memory" +version = "2.1.0" +dependencies = [ + "num-traits", + "solana-define-syscall", +] + [[package]] name = "solana-program-runtime" version = "2.1.0" @@ -6859,7 +6947,6 @@ dependencies = [ "assert_matches", "base64 0.22.1", "bincode", - "eager", "enum-iterator", "itertools 0.12.1", "libc", @@ -6873,10 +6960,12 @@ dependencies = [ "solana-compute-budget", "solana-frozen-abi", "solana-frozen-abi-macro", + "solana-log-collector", "solana-logger", "solana-measure", "solana-metrics", "solana-sdk", + "solana-timings", "solana-type-overrides", "solana-vote", "solana_rbpf", @@ -6903,12 +6992,14 @@ dependencies = [ "solana-bpf-loader-program", "solana-compute-budget", "solana-inline-spl", + "solana-log-collector", "solana-logger", "solana-program-runtime", "solana-runtime", "solana-sdk", "solana-stake-program", "solana-svm", + "solana-timings", "solana-vote-program", "solana_rbpf", "test-case", @@ -7092,6 +7183,7 @@ dependencies = [ "anyhow", "base64 0.22.1", "bs58", + "const_format", "jsonrpc-core", "reqwest", "reqwest-middleware", @@ -7138,6 +7230,7 @@ dependencies = [ "serde_json", "solana-account-decoder", "solana-client", + "solana-connection-cache", "solana-logger", "solana-pubsub-client", "solana-rpc", @@ -7205,6 +7298,7 @@ dependencies = [ "solana-compute-budget-program", "solana-config-program", "solana-cost-model", + "solana-fee", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-inline-spl", @@ -7216,10 +7310,12 @@ dependencies = [ "solana-program-runtime", "solana-rayon-threadlimit", "solana-runtime", + "solana-runtime-transaction", "solana-sdk", "solana-stake-program", "solana-svm", "solana-system-program", + "solana-timings", "solana-transaction-status", "solana-version", "solana-vote", @@ -7256,9 +7352,6 @@ dependencies = [ [[package]] name = "solana-sanitize" version = "2.1.0" -dependencies = [ - "thiserror", -] [[package]] name = "solana-sdk" @@ -7304,13 +7397,18 @@ dependencies = [ "sha2 0.10.8", "sha3 0.10.8", "siphasher", + "solana-bn254", + "solana-decode-error", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-logger", "solana-program", + "solana-program-memory", "solana-sanitize", "solana-sdk", "solana-sdk-macro", + "solana-secp256k1-recover", + "solana-short-vec", "static_assertions", "thiserror", "tiny-bip39", @@ -7325,7 +7423,21 @@ dependencies = [ "bs58", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", +] + +[[package]] +name = "solana-secp256k1-recover" +version = "2.1.0" +dependencies = [ + "anyhow", + "borsh 1.5.1", + "libsecp256k1", + "rustc_version 0.4.0", + "solana-define-syscall", + "solana-frozen-abi", + "solana-frozen-abi-macro", + "thiserror", ] [[package]] @@ -7341,6 +7453,7 @@ dependencies = [ "crossbeam-channel", "log", "solana-client", + "solana-connection-cache", "solana-logger", "solana-measure", "solana-metrics", @@ -7349,6 +7462,19 @@ dependencies = [ "solana-tpu-client", 
] +[[package]] +name = "solana-short-vec" +version = "2.1.0" +dependencies = [ + "assert_matches", + "bincode", + "rustc_version 0.4.0", + "serde", + "serde_json", + "solana-frozen-abi", + "solana-frozen-abi-macro", +] + [[package]] name = "solana-stake-accounts" version = "2.1.0" @@ -7376,6 +7502,7 @@ dependencies = [ "rustc_version 0.4.0", "solana-compute-budget", "solana-config-program", + "solana-log-collector", "solana-logger", "solana-program-runtime", "solana-sdk", @@ -7457,7 +7584,7 @@ dependencies = [ "futures 0.3.30", "futures-util", "histogram", - "indexmap 2.2.6", + "indexmap 2.3.0", "itertools 0.12.1", "libc", "log", @@ -7492,26 +7619,34 @@ dependencies = [ "log", "percentage", "prost", + "qualifier_attr", "rand 0.8.5", "rustc_version 0.4.0", "serde", "serde_derive", + "shuttle", "solana-bpf-loader-program", "solana-compute-budget", "solana-compute-budget-program", + "solana-fee", "solana-frozen-abi", "solana-frozen-abi-macro", "solana-loader-v4-program", + "solana-log-collector", "solana-logger", "solana-measure", "solana-metrics", "solana-program-runtime", + "solana-runtime-transaction", "solana-sdk", "solana-svm", "solana-svm-conformance", + "solana-svm-transaction", "solana-system-program", + "solana-timings", "solana-type-overrides", "solana-vote", + "thiserror", ] [[package]] @@ -7523,6 +7658,13 @@ dependencies = [ "prost-types", ] +[[package]] +name = "solana-svm-transaction" +version = "2.1.0" +dependencies = [ + "solana-sdk", +] + [[package]] name = "solana-system-program" version = "2.1.0" @@ -7533,6 +7675,7 @@ dependencies = [ "serde", "serde_derive", "solana-compute-budget", + "solana-log-collector", "solana-logger", "solana-program-runtime", "solana-sdk", @@ -7551,7 +7694,6 @@ dependencies = [ "serde_json", "solana-accounts-db", "solana-cli-output", - "solana-client", "solana-compute-budget", "solana-core", "solana-geyser-plugin-manager", @@ -7562,6 +7704,7 @@ dependencies = [ "solana-program-test", "solana-rpc", "solana-rpc-client", + "solana-rpc-client-api", "solana-runtime", "solana-sdk", "solana-streamer", @@ -7583,6 +7726,15 @@ dependencies = [ "solana-sdk", ] +[[package]] +name = "solana-timings" +version = "2.1.0" +dependencies = [ + "eager", + "enum-iterator", + "solana-sdk", +] + [[package]] name = "solana-tokens" version = "2.1.0" @@ -7594,7 +7746,7 @@ dependencies = [ "console", "csv", "ctrlc", - "indexmap 2.2.6", + "indexmap 2.3.0", "indicatif", "pickledb", "serde", @@ -7645,7 +7797,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.2.6", + "indexmap 2.3.0", "indicatif", "log", "rayon", @@ -7691,7 +7843,6 @@ dependencies = [ name = "solana-transaction-metrics-tracker" version = "2.1.0" dependencies = [ - "Inflector", "base64 0.22.1", "bincode", "lazy_static", @@ -7699,6 +7850,7 @@ dependencies = [ "rand 0.8.5", "solana-perf", "solana-sdk", + "solana-short-vec", ] [[package]] @@ -7810,9 +7962,9 @@ dependencies = [ "scopeguard", "solana-ledger", "solana-logger", - "solana-program-runtime", "solana-runtime", "solana-sdk", + "solana-timings", "solana-unified-scheduler-logic", "vec_extract_if_polyfill", ] @@ -7901,10 +8053,10 @@ dependencies = [ "solana-ledger", "solana-logger", "solana-program", - "solana-program-runtime", "solana-runtime", "solana-sdk", "solana-streamer", + "solana-timings", "solana-vote-program", "tempfile", ] @@ -7916,6 +8068,7 @@ dependencies = [ "bytemuck", "num-derive", "num-traits", + "solana-log-collector", "solana-program-runtime", "solana-sdk", "solana-zk-sdk", @@ -7928,9 +8081,7 @@ dependencies = [ 
"bs58", "clap 3.2.23", "dirs-next", - "num_cpus", "solana-clap-v3-utils", - "solana-cli-config", "solana-remote-wallet", "solana-sdk", "solana-version", @@ -7977,6 +8128,7 @@ dependencies = [ "curve25519-dalek", "num-derive", "num-traits", + "solana-log-collector", "solana-program-runtime", "solana-sdk", "solana-zk-token-sdk", @@ -8026,20 +8178,20 @@ dependencies = [ [[package]] name = "solana_rbpf" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06beab07f4104d6ad70d47baa67ad1bcd501a2345a983e20c389bade7c72305e" +checksum = "381f595f78accb55aeea018a90e3acf6048f960d932002737d249e3294bd58fe" dependencies = [ "byteorder", "combine", "gdbstub", - "goblin", "hash32", "libc", "log", "rand 0.8.5", "rustc-demangle", "scroll", + "shuttle", "thiserror", "winapi 0.3.9", ] @@ -8091,7 +8243,7 @@ checksum = "d9e8418ea6269dcfb01c712f0444d2c75542c04448b480e87de59d2865edc750" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -8103,7 +8255,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.68", + "syn 2.0.72", "thiserror", ] @@ -8162,7 +8314,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -8350,9 +8502,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.68" +version = "2.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "901fa70d88b9d6c98022e23b4136f9f3e54e4662c3bc1bd1d84a42a9a0f0c1e9" +checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" dependencies = [ "proc-macro2", "quote", @@ -8368,7 +8520,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -8554,7 +8706,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -8566,7 +8718,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", "test-case-core", ] @@ -8587,22 +8739,22 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.61" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.61" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -8739,7 +8891,7 @@ source = "git+https://github.com/anza-xyz/solana-tokio.git?rev=7cf47705faacf7bf0 dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -8869,7 +9021,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.3.0", "toml_datetime", "winnow 0.5.16", ] @@ -8880,7 +9032,7 @@ version = "0.22.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3328d4f68a705b2a4498da1d580585d39a6510f98318a2cec3018a7ec61ddef" dependencies = [ - "indexmap 2.2.6", 
+ "indexmap 2.3.0", "serde", "serde_spanned", "toml_datetime", @@ -8983,7 +9135,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -9292,7 +9444,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", "wasm-bindgen-shared", ] @@ -9326,7 +9478,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9676,7 +9828,7 @@ checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] @@ -9696,7 +9848,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.72", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 02ab28a0acea10..de49b9b8ac1016 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,6 +19,7 @@ members = [ "bench-tps", "bloom", "bucket_map", + "builtins-default-costs", "cargo-registry", "clap-utils", "clap-v3-utils", @@ -37,6 +38,7 @@ members = [ "download-utils", "entry", "faucet", + "fee", "frozen-abi", "frozen-abi/macro", "genesis", @@ -47,10 +49,12 @@ members = [ "inline-spl", "install", "keygen", + "lattice-hash", "ledger", "ledger-tool", "local-cluster", "log-analyzer", + "log-collector", "logger", "measure", "memory-management", @@ -96,15 +100,18 @@ members = [ "runtime-transaction", "sanitize", "sdk", - "sdk/cargo-build-bpf", + "sdk/atomic-u64", "sdk/cargo-build-sbf", - "sdk/cargo-test-bpf", "sdk/cargo-test-sbf", + "sdk/decode-error", "sdk/gen-headers", "sdk/macro", + "sdk/msg", "sdk/package-metadata-macro", "sdk/program", + "sdk/program-memory", "send-transaction-service", + "short-vec", "stake-accounts", "storage-bigtable", "storage-bigtable/build-proto", @@ -112,14 +119,17 @@ members = [ "streamer", "svm", "svm-conformance", + "svm-transaction", "test-validator", "thin-client", + "timings", "tokens", "tps-client", "tpu-client", "transaction-dos", "transaction-metrics-tracker", "transaction-status", + "transaction-view", "turbine", "type-overrides", "udp-client", @@ -150,6 +160,7 @@ edition = "2021" [workspace.dependencies] Inflector = "0.11.4" +agave-transaction-view = { path = "transaction-view", version = "=2.1.0" } aquamarine = "0.3.3" aes-gcm-siv = "0.11.1" ahash = "0.8.10" @@ -160,13 +171,13 @@ ark-ec = "0.4.0" ark-ff = "0.4.0" ark-serialize = "0.4.0" array-bytes = "=1.4.1" -arrayref = "0.3.7" +arrayref = "0.3.8" arrayvec = "0.7.4" assert_cmd = "2.0" assert_matches = "1.5.0" async-channel = "1.9.0" async-mutex = "1.4.0" -async-trait = "0.1.80" +async-trait = "0.1.81" atty = "0.2.11" backoff = "0.4.0" base64 = "0.22.1" @@ -178,10 +189,10 @@ bs58 = "0.5.1" bv = "0.11.1" byte-unit = "4.0.19" bytecount = "0.6.8" -bytemuck = "1.16.1" +bytemuck = "1.16.3" bytemuck_derive = "1.7.0" byteorder = "1.5.0" -bytes = "1.6" +bytes = "1.7" bzip2 = "0.4.4" caps = "0.5.5" cargo_metadata = "0.15.4" @@ -231,11 +242,11 @@ histogram = "0.6.9" hmac = "0.12.1" http = "0.2.12" humantime = "2.0.1" -hyper = "0.14.29" +hyper = "0.14.30" hyper-proxy = "0.9.1" im = "15.1.0" -index_list = "0.2.12" -indexmap = "2.2.6" +index_list = "0.2.13" +indexmap = "2.3.0" indicatif = "0.17.8" itertools = "0.12.1" jemallocator = { package = "tikv-jemallocator", version = "0.4.1", features = [ @@ -260,7 +271,7 @@ 
libsecp256k1 = { version = "0.6.0", default-features = false, features = [ light-poseidon = "0.2.0" log = "0.4.22" lru = "0.7.7" -lz4 = "1.25.0" +lz4 = "1.26.0" memmap2 = "0.5.10" memoffset = "0.9" merlin = "3" @@ -272,7 +283,7 @@ num-bigint = "0.4.6" num-derive = "0.4" num-traits = "0.2" num_cpus = "1.16.0" -num_enum = "0.7.2" +num_enum = "0.7.3" openssl = "0.10" parking_lot = "0.12" pbkdf2 = { version = "0.11.0", default-features = false } @@ -307,10 +318,10 @@ rustls = { version = "0.21.12", default-features = false, features = ["quic"] } scopeguard = "1.2.0" semver = "1.0.23" seqlock = "0.2.0" -serde = "1.0.203" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 +serde = "1.0.204" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_bytes = "0.11.15" -serde_derive = "1.0.203" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 -serde_json = "1.0.119" +serde_derive = "1.0.204" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 +serde_json = "1.0.122" serde_with = { version = "2.3.3", default-features = false } serde_yaml = "0.9.34" serial_test = "2.0.0" @@ -326,13 +337,16 @@ soketto = "0.7" solana-account-decoder = { path = "account-decoder", version = "=2.1.0" } solana-accounts-db = { path = "accounts-db", version = "=2.1.0" } solana-address-lookup-table-program = { path = "programs/address-lookup-table", version = "=2.1.0" } +solana-atomic-u64 = { path = "sdk/atomic-u64", version = "=2.1.0" } solana-banks-client = { path = "banks-client", version = "=2.1.0" } solana-banks-interface = { path = "banks-interface", version = "=2.1.0" } solana-banks-server = { path = "banks-server", version = "=2.1.0" } solana-bench-tps = { path = "bench-tps", version = "=2.1.0" } solana-bloom = { path = "bloom", version = "=2.1.0" } +solana-bn254 = { path = "curves/bn254", version = "=2.1.0" } solana-bpf-loader-program = { path = "programs/bpf_loader", version = "=2.1.0" } solana-bucket-map = { path = "bucket_map", version = "=2.1.0" } +solana-builtins-default-costs = { path = "builtins-default-costs", version = "=2.1.0" } agave-cargo-registry = { path = "cargo-registry", version = "=2.1.0" } solana-clap-utils = { path = "clap-utils", version = "=2.1.0" } solana-clap-v3-utils = { path = "clap-v3-utils", version = "=2.1.0" } @@ -347,10 +361,12 @@ solana-connection-cache = { path = "connection-cache", version = "=2.1.0", defau solana-core = { path = "core", version = "=2.1.0" } solana-cost-model = { path = "cost-model", version = "=2.1.0" } solana-curve25519 = { path = "curves/curve25519", version = "=2.1.0" } +solana-decode-error = { path = "sdk/decode-error", version = "=2.1.0" } solana-define-syscall = { path = "define-syscall", version = "=2.1.0" } solana-download-utils = { path = "download-utils", version = "=2.1.0" } solana-entry = { path = "entry", version = "=2.1.0" } solana-faucet = { path = "faucet", version = "=2.1.0" } +solana-fee = { path = "fee", version = "=2.1.0" } solana-frozen-abi = { path = "frozen-abi", version = "=2.1.0" } solana-frozen-abi-macro = { path = "frozen-abi/macro", version = "=2.1.0" } solana-tps-client = { path = "tps-client", version = "=2.1.0" } @@ -360,13 +376,16 @@ agave-geyser-plugin-interface = { path = "geyser-plugin-interface", version = "= solana-geyser-plugin-manager = { path = "geyser-plugin-manager", version = "=2.1.0" } 
solana-gossip = { path = "gossip", version = "=2.1.0" } solana-inline-spl = { path = "inline-spl", version = "=2.1.0" } +solana-lattice-hash = { path = "lattice-hash", version = "=2.1.0" } solana-ledger = { path = "ledger", version = "=2.1.0" } solana-loader-v4-program = { path = "programs/loader-v4", version = "=2.1.0" } solana-local-cluster = { path = "local-cluster", version = "=2.1.0" } +solana-log-collector = { path = "log-collector", version = "=2.1.0" } solana-logger = { path = "logger", version = "=2.1.0" } solana-measure = { path = "measure", version = "=2.1.0" } solana-merkle-tree = { path = "merkle-tree", version = "=2.1.0" } solana-metrics = { path = "metrics", version = "=2.1.0" } +solana-msg = { path = "sdk/msg", version = "=2.1.0" } solana-net-utils = { path = "net-utils", version = "=2.1.0" } solana-nohash-hasher = "0.2.1" solana-notifier = { path = "notifier", version = "=2.1.0" } @@ -374,7 +393,8 @@ solana-package-metadata-macro = { path = "sdk/package-metadata-macro", version = solana-perf = { path = "perf", version = "=2.1.0" } solana-poh = { path = "poh", version = "=2.1.0" } solana-poseidon = { path = "poseidon", version = "=2.1.0" } -solana-program = { path = "sdk/program", version = "=2.1.0" } +solana-program = { path = "sdk/program", version = "=2.1.0", default-features = false } +solana-program-memory = { path = "sdk/program-memory", version = "=2.1.0" } solana-program-runtime = { path = "program-runtime", version = "=2.1.0" } solana-program-test = { path = "program-test", version = "=2.1.0" } solana-pubsub-client = { path = "pubsub-client", version = "=2.1.0" } @@ -382,6 +402,7 @@ solana-quic-client = { path = "quic-client", version = "=2.1.0" } solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=2.1.0" } solana-remote-wallet = { path = "remote-wallet", version = "=2.1.0", default-features = false } solana-sanitize = { path = "sanitize", version = "=2.1.0" } +solana-timings = { path = "timings", version = "=2.1.0" } solana-unified-scheduler-logic = { path = "unified-scheduler-logic", version = "=2.1.0" } solana-unified-scheduler-pool = { path = "unified-scheduler-pool", version = "=2.1.0" } solana-rpc = { path = "rpc", version = "=2.1.0" } @@ -392,13 +413,16 @@ solana-runtime = { path = "runtime", version = "=2.1.0" } solana-runtime-transaction = { path = "runtime-transaction", version = "=2.1.0" } solana-sdk = { path = "sdk", version = "=2.1.0" } solana-sdk-macro = { path = "sdk/macro", version = "=2.1.0" } +solana-secp256k1-recover = { path = "curves/secp256k1-recover", version = "=2.1.0", default-features = false } solana-send-transaction-service = { path = "send-transaction-service", version = "=2.1.0" } +solana-short-vec = { path = "short-vec", version = "=2.1.0" } solana-stake-program = { path = "programs/stake", version = "=2.1.0" } solana-storage-bigtable = { path = "storage-bigtable", version = "=2.1.0" } solana-storage-proto = { path = "storage-proto", version = "=2.1.0" } solana-streamer = { path = "streamer", version = "=2.1.0" } solana-svm = { path = "svm", version = "=2.1.0" } solana-svm-conformance = { path = "svm-conformance", version = "=2.1.0" } +solana-svm-transaction = { path = "svm-transaction", version = "=2.1.0" } solana-system-program = { path = "programs/system", version = "=2.1.0" } solana-test-validator = { path = "test-validator", version = "=2.1.0" } solana-thin-client = { path = "thin-client", version = "=2.1.0" } @@ -417,7 +441,7 @@ solana-zk-keygen = { path = "zk-keygen", version = "=2.1.0" } solana-zk-sdk = { path 
= "zk-sdk", version = "=2.1.0" } solana-zk-token-proof-program = { path = "programs/zk-token-proof", version = "=2.1.0" } solana-zk-token-sdk = { path = "zk-token-sdk", version = "=2.1.0" } -solana_rbpf = "=0.8.1" +solana_rbpf = "=0.8.2" spl-associated-token-account = "=4.0.0" spl-instruction-padding = "0.2" spl-memo = "=5.0.0" @@ -440,7 +464,7 @@ tar = "0.4.41" tarpc = "0.29.0" tempfile = "3.10.1" test-case = "3.3.1" -thiserror = "1.0.61" +thiserror = "1.0.63" tiny-bip39 = "0.8.2" # Update solana-tokio patch below when updating this version tokio = "1.29.1" diff --git a/RELEASE.md b/RELEASE.md index 14cc160ed8bef4..a862a0e4106ceb 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -61,6 +61,11 @@ There are three release channels that map to branches as follows: ## Steps to Create a Branch +### Major release branch +1. If the new branch will be the first branch of a new major release check that +all eligible deprecated symbols have been removed. Our policy is to deprecate +for at least one full minor version before removal. + ### Create the new branch 1. Check out the latest commit on `master` branch: ``` diff --git a/account-decoder/src/parse_token.rs b/account-decoder/src/parse_token.rs index 41a7eb44f9e14f..878d738fe03367 100644 --- a/account-decoder/src/parse_token.rs +++ b/account-decoder/src/parse_token.rs @@ -26,37 +26,6 @@ pub fn is_known_spl_token_id(program_id: &Pubkey) -> bool { *program_id == spl_token::id() || *program_id == spl_token_2022::id() } -// A helper function to convert spl_token::native_mint::id() as spl_sdk::pubkey::Pubkey to -// solana_sdk::pubkey::Pubkey -#[deprecated( - since = "1.16.0", - note = "Pubkey conversions no longer needed. Please use spl_token::native_mint::id() directly" -)] -pub fn spl_token_native_mint() -> Pubkey { - Pubkey::new_from_array(spl_token::native_mint::id().to_bytes()) -} - -// The program id of the `spl_token_native_mint` account -#[deprecated( - since = "1.16.0", - note = "Pubkey conversions no longer needed. 
Please use spl_token::id() directly" -)] -pub fn spl_token_native_mint_program_id() -> Pubkey { - spl_token::id() -} - -// A helper function to convert a solana_sdk::pubkey::Pubkey to spl_sdk::pubkey::Pubkey -#[deprecated(since = "1.16.0", note = "Pubkey conversions no longer needed")] -pub fn spl_token_pubkey(pubkey: &Pubkey) -> SplTokenPubkey { - SplTokenPubkey::new_from_array(pubkey.to_bytes()) -} - -// A helper function to convert a spl_sdk::pubkey::Pubkey to solana_sdk::pubkey::Pubkey -#[deprecated(since = "1.16.0", note = "Pubkey conversions no longer needed")] -pub fn pubkey_from_spl_token(pubkey: &SplTokenPubkey) -> Pubkey { - Pubkey::new_from_array(pubkey.to_bytes()) -} - #[deprecated(since = "2.0.0", note = "Use `parse_token_v2` instead")] pub fn parse_token( data: &[u8], diff --git a/accounts-cluster-bench/Cargo.toml b/accounts-cluster-bench/Cargo.toml index 8ff7cc12b26276..5593de62c802f9 100644 --- a/accounts-cluster-bench/Cargo.toml +++ b/accounts-cluster-bench/Cargo.toml @@ -17,13 +17,13 @@ solana-account-decoder = { workspace = true } solana-clap-utils = { workspace = true } solana-cli-config = { workspace = true } solana-client = { workspace = true } -solana-faucet = { workspace = true } solana-gossip = { workspace = true } solana-inline-spl = { workspace = true } solana-logger = { workspace = true } solana-measure = { workspace = true } solana-net-utils = { workspace = true } solana-rpc-client = { workspace = true, features = ["default"] } +solana-rpc-client-api = { workspace = true } solana-runtime = { workspace = true } solana-sdk = { workspace = true } solana-streamer = { workspace = true } @@ -34,6 +34,7 @@ spl-token = { workspace = true, features = ["no-entrypoint"] } [dev-dependencies] solana-accounts-db = { workspace = true } solana-core = { workspace = true, features = ["dev-context-only-utils"] } +solana-faucet = { workspace = true } solana-local-cluster = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } solana-test-validator = { workspace = true } diff --git a/accounts-cluster-bench/src/main.rs b/accounts-cluster-bench/src/main.rs index 9e84456011e023..6c8a15dc69db69 100644 --- a/accounts-cluster-bench/src/main.rs +++ b/accounts-cluster-bench/src/main.rs @@ -8,11 +8,12 @@ use { hidden_unless_forced, input_parsers::pubkey_of, input_validators::is_url_or_moniker, }, solana_cli_config::{ConfigInput, CONFIG_FILE}, - solana_client::{rpc_request::TokenAccountsFilter, transaction_executor::TransactionExecutor}, + solana_client::transaction_executor::TransactionExecutor, solana_gossip::gossip_service::discover, solana_inline_spl::token, solana_measure::measure::Measure, solana_rpc_client::rpc_client::RpcClient, + solana_rpc_client_api::request::TokenAccountsFilter, solana_sdk::{ commitment_config::CommitmentConfig, hash::Hash, diff --git a/accounts-db/Cargo.toml b/accounts-db/Cargo.toml index bf12964211d123..eada1ee5e3375e 100644 --- a/accounts-db/Cargo.toml +++ b/accounts-db/Cargo.toml @@ -10,6 +10,7 @@ license = { workspace = true } edition = { workspace = true } [dependencies] +ahash = { workspace = true } bincode = { workspace = true } blake3 = { workspace = true } bv = { workspace = true, features = ["serde"] } @@ -39,13 +40,14 @@ solana-bucket-map = { workspace = true } solana-frozen-abi = { workspace = true, optional = true } solana-frozen-abi-macro = { workspace = true, optional = true } solana-inline-spl = { workspace = true } +solana-lattice-hash = { workspace = true } solana-measure = { workspace = true } 
solana-metrics = { workspace = true } solana-nohash-hasher = { workspace = true } solana-rayon-threadlimit = { workspace = true } solana-sdk = { workspace = true } solana-stake-program = { workspace = true, optional = true } -solana-svm = { workspace = true } +solana-svm-transaction = { workspace = true } solana-vote-program = { workspace = true, optional = true } static_assertions = { workspace = true } tar = { workspace = true } @@ -69,7 +71,6 @@ solana-accounts-db = { path = ".", features = ["dev-context-only-utils"] } solana-compute-budget = { workspace = true } solana-logger = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } -solana-svm = { workspace = true, features = ["dev-context-only-utils"] } static_assertions = { workspace = true } strum = { workspace = true, features = ["derive"] } strum_macros = { workspace = true } @@ -82,12 +83,15 @@ targets = ["x86_64-unknown-linux-gnu"] rustc_version = { workspace = true } [features] -dev-context-only-utils = ["dep:qualifier_attr", "dep:solana-stake-program", "dep:solana-vote-program"] +dev-context-only-utils = [ + "dep:qualifier_attr", + "dep:solana-stake-program", + "dep:solana-vote-program", +] frozen-abi = [ "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-sdk/frozen-abi", - "solana-svm/frozen-abi", "solana-vote-program/frozen-abi", ] @@ -102,3 +106,7 @@ harness = false [[bench]] name = "bench_serde" harness = false + +[[bench]] +name = "bench_lock_accounts" +harness = false diff --git a/accounts-db/accounts-hash-cache-tool/src/main.rs b/accounts-db/accounts-hash-cache-tool/src/main.rs index 3f7b3db3702b9c..ed0159c3d81b92 100644 --- a/accounts-db/accounts-hash-cache-tool/src/main.rs +++ b/accounts-db/accounts-hash-cache-tool/src/main.rs @@ -7,13 +7,13 @@ use { memmap2::Mmap, solana_accounts_db::{ parse_cache_hash_data_filename, CacheHashDataFileEntry, CacheHashDataFileHeader, + ParsedCacheHashDataFilename, }, std::{ cmp::Ordering, collections::HashMap, - fs::{self, File}, + fs::{self, File, Metadata}, io::{self, BufReader, Read}, - iter, mem::size_of, num::Saturating, path::{Path, PathBuf}, @@ -303,49 +303,15 @@ fn do_diff_dirs( dir2: impl AsRef, then_diff_files: bool, ) -> Result<(), String> { - let get_files_in = |dir: &Path| { - let mut files = Vec::new(); - let entries = fs::read_dir(dir)?; - for entry in entries { - let path = entry?.path(); - let meta = fs::metadata(&path)?; - if meta.is_file() { - let path = fs::canonicalize(path)?; - files.push((path, meta)); - } - } - Ok::<_, io::Error>(files) - }; - let parse_files = |files: &[(PathBuf, _)]| { - files - .iter() - .map(|(file, _)| { - Path::file_name(file) - .and_then(parse_cache_hash_data_filename) - .ok_or_else(|| format!("failed to parse '{}'", file.display())) - }) - .collect::, String>>() - }; - let parse_and_sort_files_in = |dir: &Path| { - let files = get_files_in(dir) - .map_err(|err| format!("failed to get files in '{}': {err}", dir.display()))?; - let parsed_files = parse_files(&files)?; - let mut combined: Vec<_> = iter::zip(files, parsed_files).collect(); - combined.sort_unstable_by(|a, b| { - a.1.slot_range_start - .cmp(&b.1.slot_range_start) - .then_with(|| a.1.slot_range_end.cmp(&b.1.slot_range_end)) - }); - Ok::<_, String>(combined) - }; - let _timer = ElapsedOnDrop { message: "diffing directories took ".to_string(), start: Instant::now(), }; - let files1 = parse_and_sort_files_in(dir1.as_ref())?; - let files2 = parse_and_sort_files_in(dir2.as_ref())?; + let files1 = get_cache_files_in(dir1) + 
.map_err(|err| format!("failed to get cache files in dir1: {err}"))?; + let files2 = get_cache_files_in(dir2) + .map_err(|err| format!("failed to get cache files in dir2: {err}"))?; let mut uniques1 = Vec::new(); let mut uniques2 = Vec::new(); @@ -355,7 +321,11 @@ fn do_diff_dirs( while idx1.0 < files1.len() && idx2.0 < files2.len() { let file1 = &files1[idx1.0]; let file2 = &files2[idx2.0]; - match file1.1.slot_range_start.cmp(&file2.1.slot_range_start) { + match file1 + .parsed + .slot_range_start + .cmp(&file2.parsed.slot_range_start) + { Ordering::Less => { // file1 is an older slot range than file2, so note it and advance idx1 uniques1.push(file1); @@ -367,7 +337,11 @@ fn do_diff_dirs( idx2 += 1; } Ordering::Equal => { - match file1.1.slot_range_end.cmp(&file2.1.slot_range_end) { + match file1 + .parsed + .slot_range_end + .cmp(&file2.parsed.slot_range_end) + { Ordering::Less => { // file1 is a smaller slot range than file2, so note it and advance idx1 uniques1.push(file1); @@ -382,20 +356,20 @@ fn do_diff_dirs( // slot ranges match, so compare the files and advance both idx1 and idx2 let is_equal = || { // if the files have different sizes, they are not equal - if file1.0 .1.len() != file2.0 .1.len() { + if file1.metadata.len() != file2.metadata.len() { return false; } // if the parsed file names have different hashes, they are not equal - if file1.1.hash != file2.1.hash { + if file1.parsed.hash != file2.parsed.hash { return false; } // if the file headers have different entry counts, they are not equal - let Ok((mmap1, header1)) = map_file(&file1.0 .0, false) else { + let Ok((mmap1, header1)) = map_file(&file1.path, false) else { return false; }; - let Ok((mmap2, header2)) = map_file(&file2.0 .0, false) else { + let Ok((mmap2, header2)) = map_file(&file2.path, false) else { return false; }; if header1.count != header2.count { @@ -431,13 +405,13 @@ fn do_diff_dirs( uniques2.push(file); } - let do_print = |entries: &[&((PathBuf, _), _)]| { + let do_print = |entries: &[&CacheFileInfo]| { let count_width = (entries.len() as f64).log10().ceil() as usize; if entries.is_empty() { println!("(none)"); } else { for (i, entry) in entries.iter().enumerate() { - println!("{i:count_width$}: '{}'", entry.0 .0.display()); + println!("{i:count_width$}: '{}'", entry.path.display()); } } }; @@ -454,18 +428,18 @@ fn do_diff_dirs( for (i, (file1, file2)) in mismatches.iter().enumerate() { println!( "{i:count_width$}: '{}', '{}'", - file1.0 .0.display(), - file2.0 .0.display(), + file1.path.display(), + file2.path.display(), ); } if then_diff_files { for (file1, file2) in &mismatches { println!( "Differences between '{}' and '{}':", - file1.0 .0.display(), - file2.0 .0.display(), + file1.path.display(), + file2.path.display(), ); - if let Err(err) = do_diff_files(&file1.0 .0, &file2.0 .0) { + if let Err(err) = do_diff_files(&file1.path, &file2.path) { eprintln!("Error: failed to diff files: {err}"); } } @@ -475,6 +449,51 @@ fn do_diff_dirs( Ok(()) } +/// Returns all the cache hash data files in `dir`, sorted in ascending slot-and-bin-range order +fn get_cache_files_in(dir: impl AsRef) -> Result, io::Error> { + fn get_files_in(dir: impl AsRef) -> Result, io::Error> { + let mut files = Vec::new(); + let entries = fs::read_dir(dir)?; + for entry in entries { + let path = entry?.path(); + let meta = fs::metadata(&path)?; + if meta.is_file() { + let path = fs::canonicalize(path)?; + files.push((path, meta)); + } + } + Ok(files) + } + + let files = get_files_in(&dir).map_err(|err| { + io::Error::other(format!( 
+ "failed to get files in '{}': {err}", + dir.as_ref().display(), + )) + })?; + let mut cache_files: Vec<_> = files + .into_iter() + .filter_map(|file| { + Path::file_name(&file.0) + .and_then(parse_cache_hash_data_filename) + .map(|parsed_file_name| CacheFileInfo { + path: file.0, + metadata: file.1, + parsed: parsed_file_name, + }) + }) + .collect(); + cache_files.sort_unstable_by(|a, b| { + a.parsed + .slot_range_start + .cmp(&b.parsed.slot_range_start) + .then_with(|| a.parsed.slot_range_end.cmp(&b.parsed.slot_range_end)) + .then_with(|| a.parsed.bin_range_start.cmp(&b.parsed.bin_range_start)) + .then_with(|| a.parsed.bin_range_end.cmp(&b.parsed.bin_range_end)) + }); + Ok(cache_files) +} + /// Scan file with `reader` and apply `user_fn` to each entry /// /// NOTE: `reader`'s cursor must already be at the first entry; i.e. *past* the header. @@ -557,6 +576,14 @@ fn open_file( Ok((reader, header)) } +#[derive(Debug)] +struct CacheFileInfo { + path: PathBuf, + metadata: Metadata, + parsed: ParsedCacheHashDataFilename, +} + +#[derive(Debug)] struct ElapsedOnDrop { message: String, start: Instant, diff --git a/accounts-db/benches/bench_accounts_file.rs b/accounts-db/benches/bench_accounts_file.rs index 284bcffeadfab7..6fe87523cf18f1 100644 --- a/accounts-db/benches/bench_accounts_file.rs +++ b/accounts-db/benches/bench_accounts_file.rs @@ -1,14 +1,25 @@ #![allow(clippy::arithmetic_side_effects)] use { criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, Throughput}, + rand::{distributions::WeightedIndex, prelude::*}, + rand_chacha::ChaChaRng, solana_accounts_db::{ - append_vec::{self, AppendVec}, - tiered_storage::hot::HotStorageWriter, + accounts_file::StorageAccess, + append_vec::{self, AppendVec, SCAN_BUFFER_SIZE_WITHOUT_DATA}, + tiered_storage::{ + file::TieredReadableFile, + hot::{HotStorageReader, HotStorageWriter}, + }, }, solana_sdk::{ - account::AccountSharedData, clock::Slot, pubkey::Pubkey, + account::{AccountSharedData, ReadableAccount}, + clock::Slot, + pubkey::Pubkey, + rent::Rent, rent_collector::RENT_EXEMPT_RENT_EPOCH, + system_instruction::MAX_PERMITTED_DATA_LENGTH, }, + std::{iter, mem::ManuallyDrop}, }; const ACCOUNTS_COUNTS: [usize; 4] = [ @@ -87,5 +98,118 @@ fn bench_write_accounts_file(c: &mut Criterion) { } } -criterion_group!(benches, bench_write_accounts_file); +fn bench_scan_pubkeys(c: &mut Criterion) { + let mut group = c.benchmark_group("scan_pubkeys"); + let temp_dir = tempfile::tempdir().unwrap(); + + // distribution of account data sizes to use when creating accounts + // 3% of accounts have no data + // 75% of accounts are 165 bytes (a token account) + // 20% of accounts are 200 bytes (a stake account) + // 1% of accounts are 256 kibibytes (pathological case for the scan buffer) + // 1% of accounts are 10 mebibytes (the max size for an account) + let data_sizes = [ + 0, + 165, + 200, + SCAN_BUFFER_SIZE_WITHOUT_DATA, + MAX_PERMITTED_DATA_LENGTH as usize, + ]; + let weights = [3, 75, 20, 1, 1]; + let distribution = WeightedIndex::new(weights).unwrap(); + + let rent = Rent::default(); + let rent_minimum_balances: Vec<_> = data_sizes + .iter() + .map(|data_size| rent.minimum_balance(*data_size)) + .collect(); + + for accounts_count in ACCOUNTS_COUNTS { + group.throughput(Throughput::Elements(accounts_count as u64)); + let mut rng = ChaChaRng::seed_from_u64(accounts_count as u64); + + let pubkeys: Vec<_> = iter::repeat_with(Pubkey::new_unique) + .take(accounts_count) + .collect(); + let accounts: Vec<_> = iter::repeat_with(|| { + let 
index = distribution.sample(&mut rng); + AccountSharedData::new_rent_epoch( + rent_minimum_balances[index], + data_sizes[index], + &Pubkey::default(), + RENT_EXEMPT_RENT_EPOCH, + ) + }) + .take(pubkeys.len()) + .collect(); + let storable_accounts: Vec<_> = iter::zip(&pubkeys, &accounts).collect(); + + // create an append vec file + let append_vec_path = temp_dir.path().join(format!("append_vec_{accounts_count}")); + _ = std::fs::remove_file(&append_vec_path); + let file_size = accounts + .iter() + .map(|account| append_vec::aligned_stored_size(account.data().len())) + .sum(); + let append_vec = AppendVec::new(append_vec_path, true, file_size); + let stored_accounts_info = append_vec + .append_accounts(&(Slot::MAX, storable_accounts.as_slice()), 0) + .unwrap(); + assert_eq!(stored_accounts_info.offsets.len(), accounts_count); + append_vec.flush().unwrap(); + // Open append vecs for reading here, outside of the bench function, so we don't open lots + // of file handles and run out/crash. We also need to *not* remove the backing file in + // these new append vecs because that would cause double-free (or triple-free here). + // Wrap the append vecs in ManuallyDrop to *not* remove the backing file on drop. + let append_vec_mmap = ManuallyDrop::new( + AppendVec::new_from_file(append_vec.path(), append_vec.len(), StorageAccess::Mmap) + .unwrap() + .0, + ); + let append_vec_file = ManuallyDrop::new( + AppendVec::new_from_file(append_vec.path(), append_vec.len(), StorageAccess::File) + .unwrap() + .0, + ); + + // create a hot storage file + let hot_storage_path = temp_dir + .path() + .join(format!("hot_storage_{accounts_count}")); + _ = std::fs::remove_file(&hot_storage_path); + let mut hot_storage_writer = HotStorageWriter::new(&hot_storage_path).unwrap(); + let stored_accounts_info = hot_storage_writer + .write_accounts(&(Slot::MAX, storable_accounts.as_slice()), 0) + .unwrap(); + assert_eq!(stored_accounts_info.offsets.len(), accounts_count); + hot_storage_writer.flush().unwrap(); + // Similar to the append vec case above, open the hot storage for reading here. + let hot_storage_file = TieredReadableFile::new(&hot_storage_path).unwrap(); + let hot_storage_reader = HotStorageReader::new(hot_storage_file).unwrap(); + + group.bench_function(BenchmarkId::new("append_vec_mmap", accounts_count), |b| { + b.iter(|| { + let mut count = 0; + append_vec_mmap.scan_pubkeys(|_| count += 1); + assert_eq!(count, accounts_count); + }); + }); + group.bench_function(BenchmarkId::new("append_vec_file", accounts_count), |b| { + b.iter(|| { + let mut count = 0; + append_vec_file.scan_pubkeys(|_| count += 1); + assert_eq!(count, accounts_count); + }); + }); + group.bench_function(BenchmarkId::new("hot_storage", accounts_count), |b| { + b.iter(|| { + let mut count = 0; + hot_storage_reader.scan_pubkeys(|_| count += 1).unwrap(); + assert_eq!(count, accounts_count); + }); + }); + } +} + +criterion_group!(benches, bench_write_accounts_file, bench_scan_pubkeys); criterion_main!(benches); diff --git a/accounts-db/benches/bench_hashing.rs b/accounts-db/benches/bench_hashing.rs index 78df86a97f5168..3ee18a5a1ff09b 100644 --- a/accounts-db/benches/bench_hashing.rs +++ b/accounts-db/benches/bench_hashing.rs @@ -24,7 +24,7 @@ const DATA_SIZES: [usize; 6] = [ /// part of computing an account's hash. /// /// Ensure this constant stays in sync with the value of `META_SIZE` in -/// AccountsDb::hash_account_data(). +/// AccountsDb::hash_account_helper(). 
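Worth noting how the bench added above keeps its extra readers alive: the second and third `AppendVec` handles share one backing file, so their `Drop` must be suppressed or the file would be deleted out from under the other readers. A minimal sketch of that `ManuallyDrop` pattern, with a hypothetical `Backed` type standing in for the real `AppendVec`:

```rust
use std::mem::ManuallyDrop;

struct Backed;

impl Drop for Backed {
    fn drop(&mut self) {
        // The real AppendVec would remove its backing file here,
        // which is exactly what the bench must avoid.
        println!("backing file removed");
    }
}

fn main() {
    let plain = Backed;
    drop(plain); // Drop runs: the "backing file" is removed

    let kept = ManuallyDrop::new(Backed);
    // Dropping a ManuallyDrop skips the inner destructor, so the shared
    // backing file outlives this handle.
    drop(kept);
}
```

The leak is deliberate and bounded: the wrapped handles live only for the duration of the bench, and the OS reclaims everything at exit.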
const META_SIZE: usize = 81; fn bench_hash_account(c: &mut Criterion) { @@ -37,9 +37,12 @@ fn bench_hash_account(c: &mut Criterion) { let num_bytes = META_SIZE.checked_add(data_size).unwrap(); group.throughput(Throughput::Bytes(num_bytes as u64)); let account = AccountSharedData::new(lamports, data_size, &owner); - group.bench_function(BenchmarkId::new("data_size", data_size), |b| { + group.bench_function(BenchmarkId::new("blake3", data_size), |b| { b.iter(|| AccountsDb::hash_account(&account, &address)); }); + group.bench_function(BenchmarkId::new("lattice", data_size), |b| { + b.iter(|| AccountsDb::lt_hash_account(&account, &address)); + }); } } diff --git a/accounts-db/benches/bench_lock_accounts.rs b/accounts-db/benches/bench_lock_accounts.rs new file mode 100644 index 00000000000000..d34166fa7e7844 --- /dev/null +++ b/accounts-db/benches/bench_lock_accounts.rs @@ -0,0 +1,96 @@ +use { + criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput}, + itertools::iproduct, + solana_accounts_db::{accounts::Accounts, accounts_db::AccountsDb}, + solana_sdk::{ + instruction::{AccountMeta, Instruction}, + pubkey::Pubkey, + system_program, + transaction::{SanitizedTransaction, Transaction, MAX_TX_ACCOUNT_LOCKS}, + }, + std::sync::Arc, +}; + +// simultaneous transactions locked +const BATCH_SIZES: [usize; 3] = [1, 32, 64]; + +// locks acquired per transaction +const LOCK_COUNTS: [usize; 2] = [2, 64]; + +// total transactions per run +const TOTAL_TRANSACTIONS: usize = 1024; + +fn create_test_transactions(lock_count: usize, read_conflicts: bool) -> Vec { + // keys available to be shared between transactions, depending on mode + // currently, we test batches with no conflicts and batches with reader/reader conflicts + // in the future with SIMD83, we will also test reader/writer and writer/writer conflicts + let shared_pubkeys: Vec<_> = (0..lock_count).map(|_| Pubkey::new_unique()).collect(); + let mut transactions = vec![]; + + for _ in 0..TOTAL_TRANSACTIONS { + let mut account_metas = vec![]; + + #[allow(clippy::needless_range_loop)] + for i in 0..lock_count { + // `lock_accounts()` distinguishes writable from readonly, so give transactions an even split + // signer doesnt matter for locking but `sanitize()` expects to see at least one + let account_meta = if i == 0 { + AccountMeta::new(Pubkey::new_unique(), true) + } else if i % 2 == 0 { + AccountMeta::new(Pubkey::new_unique(), false) + } else if read_conflicts { + AccountMeta::new_readonly(shared_pubkeys[i], false) + } else { + AccountMeta::new_readonly(Pubkey::new_unique(), false) + }; + + account_metas.push(account_meta); + } + + let instruction = Instruction::new_with_bincode(system_program::id(), &(), account_metas); + let transaction = Transaction::new_with_payer(&[instruction], None); + + transactions.push(SanitizedTransaction::from_transaction_for_tests( + transaction, + )); + } + + transactions +} + +fn bench_entry_lock_accounts(c: &mut Criterion) { + let mut group = c.benchmark_group("bench_lock_accounts"); + + for (batch_size, lock_count, read_conflicts) in + iproduct!(BATCH_SIZES, LOCK_COUNTS, [false, true]) + { + let name = format!( + "batch_size_{batch_size}_locks_count_{lock_count}{}", + if read_conflicts { + "_read_conflicts" + } else { + "" + } + ); + + let accounts_db = AccountsDb::new_single_for_tests(); + let accounts = Accounts::new(Arc::new(accounts_db)); + + let transactions = create_test_transactions(lock_count, read_conflicts); + group.throughput(Throughput::Elements(transactions.len() as u64)); + let 
transaction_batches: Vec<_> = transactions.chunks(batch_size).collect(); + + group.bench_function(name.as_str(), move |b| { + b.iter(|| { + for batch in &transaction_batches { + let results = + accounts.lock_accounts(black_box(batch.iter()), MAX_TX_ACCOUNT_LOCKS); + accounts.unlock_accounts(batch.iter().zip(&results)); + } + }) + }); + } +} + +criterion_group!(benches, bench_entry_lock_accounts); +criterion_main!(benches); diff --git a/accounts-db/src/account_storage.rs b/accounts-db/src/account_storage.rs index b9852e5439d980..738d7d958e9ce2 100644 --- a/accounts-db/src/account_storage.rs +++ b/accounts-db/src/account_storage.rs @@ -61,7 +61,7 @@ impl AccountStorage { lookup_in_map() .or_else(|| { self.shrink_in_progress_map.get(&slot).and_then(|entry| { - (entry.value().append_vec_id() == store_id).then(|| Arc::clone(entry.value())) + (entry.value().id() == store_id).then(|| Arc::clone(entry.value())) }) }) .or_else(lookup_in_map) @@ -151,7 +151,7 @@ impl AccountStorage { .insert( slot, AccountStorageReference { - id: store.append_vec_id(), + id: store.id(), storage: store, } ) @@ -248,11 +248,11 @@ impl<'a> Drop for ShrinkInProgress<'a> { self.slot, AccountStorageReference { storage: Arc::clone(&self.new_store), - id: self.new_store.append_vec_id() + id: self.new_store.id() } ) .map(|store| store.id), - Some(self.old_store.append_vec_id()) + Some(self.old_store.id()) ); // The new store can be removed from 'shrink_in_progress_map' @@ -489,25 +489,19 @@ pub(crate) mod tests { ); let shrinking_in_progress = storage.shrinking_in_progress(slot, sample.clone()); assert!(storage.map.contains_key(&slot)); - assert_eq!( - id_to_shrink, - storage.map.get(&slot).unwrap().storage.append_vec_id() - ); + assert_eq!(id_to_shrink, storage.map.get(&slot).unwrap().storage.id()); assert_eq!( (slot, id_shrunk), storage .shrink_in_progress_map .iter() .next() - .map(|r| (*r.key(), r.value().append_vec_id())) + .map(|r| (*r.key(), r.value().id())) .unwrap() ); drop(shrinking_in_progress); assert!(storage.map.contains_key(&slot)); - assert_eq!( - id_shrunk, - storage.map.get(&slot).unwrap().storage.append_vec_id() - ); + assert_eq!(id_shrunk, storage.map.get(&slot).unwrap().storage.id()); assert!(storage.shrink_in_progress_map.is_empty()); storage.shrinking_in_progress(slot, sample); } @@ -536,7 +530,7 @@ pub(crate) mod tests { // verify data structures during and after shrink and then with subsequent shrink call let storage = AccountStorage::default(); let sample = storage.get_test_storage(); - let id = sample.append_vec_id(); + let id = sample.id(); let missing_id = 9999; let slot = sample.slot(); // id is missing since not in maps at all diff --git a/accounts-db/src/account_storage/meta.rs b/accounts-db/src/account_storage/meta.rs index 5a6d0ad19c22af..338f821cd9e211 100644 --- a/accounts-db/src/account_storage/meta.rs +++ b/accounts-db/src/account_storage/meta.rs @@ -66,15 +66,6 @@ impl<'storage> StoredAccountMeta<'storage> { } } - pub fn write_version(&self) -> StoredMetaWriteVersion { - match self { - Self::AppendVec(av) => av.write_version(), - // Hot account does not support this API as it does not - // use a write version. 
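The lock bench above drives `Accounts::lock_accounts` and `unlock_accounts`. As a rough mental model of the rules it exercises (writers are exclusive, readers are counted, and a transaction takes all of its locks or none), here is a simplified sketch; `Locks` and the `u64` keys are stand-ins, not the repo's `AccountLocks` API:

```rust
use std::collections::{HashMap, HashSet};

#[derive(Default)]
struct Locks {
    write_locks: HashSet<u64>,
    readonly_locks: HashMap<u64, u64>, // key -> reader count
}

impl Locks {
    /// Take all locks for one transaction, or none on any conflict.
    fn try_lock(&mut self, keys: &[(u64, bool)]) -> Result<(), &'static str> {
        // Phase 1: validate every key before touching any state.
        for &(key, writable) in keys {
            let conflict = if writable {
                self.write_locks.contains(&key) || self.readonly_locks.contains_key(&key)
            } else {
                self.write_locks.contains(&key)
            };
            if conflict {
                return Err("AccountInUse");
            }
        }
        // Phase 2: acquire, now that the whole batch is known to be clean.
        for &(key, writable) in keys {
            if writable {
                self.write_locks.insert(key);
            } else {
                *self.readonly_locks.entry(key).or_insert(0) += 1;
            }
        }
        Ok(())
    }
}

fn main() {
    let mut locks = Locks::default();
    assert!(locks.try_lock(&[(1, true), (2, false)]).is_ok());
    assert!(locks.try_lock(&[(2, false)]).is_ok()); // reader/reader is fine
    assert!(locks.try_lock(&[(1, false)]).is_err()); // reader vs. writer conflicts
}
```

The two-phase shape matters: validating every key before inserting any means a transaction that fails with `AccountInUse` leaves the lock table exactly as it found it.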
- Self::Hot(_) => StoredMetaWriteVersion::default(), - } - } - pub fn meta(&self) -> &StoredMeta { match self { Self::AppendVec(av) => av.meta(), diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 1f87be1ae86e44..bb8656648c23c9 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -8,33 +8,23 @@ use { ancestors::Ancestors, storable_accounts::StorableAccounts, }, + ahash::{AHashMap, AHashSet}, dashmap::DashMap, log::*, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, - account_utils::StateMut, address_lookup_table::{self, error::AddressLookupError, state::AddressLookupTable}, clock::{BankId, Slot}, - message::{ - v0::{LoadedAddresses, MessageAddressTableLookup}, - SanitizedMessage, - }, - nonce::{ - state::{DurableNonce, Versions as NonceVersions}, - State as NonceState, - }, + message::v0::{LoadedAddresses, MessageAddressTableLookup}, pubkey::Pubkey, slot_hashes::SlotHashes, - transaction::{Result, SanitizedTransaction, TransactionAccountLocks, TransactionError}, + transaction::{Result, SanitizedTransaction, TransactionError}, transaction_context::TransactionAccount, }, - solana_svm::{ - account_loader::TransactionLoadResult, nonce_info::NonceInfo, - rollback_accounts::RollbackAccounts, transaction_results::TransactionExecutionResult, - }, + solana_svm_transaction::svm_message::SVMMessage, std::{ cmp::Reverse, - collections::{hash_map, BinaryHeap, HashMap, HashSet}, + collections::{hash_map, BinaryHeap, HashSet}, ops::RangeBounds, sync::{ atomic::{AtomicUsize, Ordering}, @@ -45,11 +35,10 @@ use { pub type PubkeyAccountSlot = (Pubkey, AccountSharedData, Slot); -#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[derive(Debug, Default)] pub struct AccountLocks { - write_locks: HashSet, - readonly_locks: HashMap, + write_locks: AHashSet, + readonly_locks: AHashMap, } impl AccountLocks { @@ -98,8 +87,27 @@ impl AccountLocks { } } +struct TransactionAccountLocksIterator<'a, T: SVMMessage> { + transaction: &'a T, +} + +impl<'a, T: SVMMessage> TransactionAccountLocksIterator<'a, T> { + pub(crate) fn new(transaction: &'a T) -> Self { + Self { transaction } + } + + pub(crate) fn accounts_with_is_writable( + &self, + ) -> impl Iterator + Clone { + self.transaction + .account_keys() + .iter() + .enumerate() + .map(|(index, key)| (key, self.transaction.is_writable(index))) + } +} + /// This structure handles synchronization for db -#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[derive(Debug)] pub struct Accounts { /// Single global AccountsDb @@ -553,31 +561,27 @@ impl Accounts { self.accounts_db.store_uncached(slot, &[(pubkey, account)]); } - fn lock_account( + fn lock_account<'a>( &self, account_locks: &mut AccountLocks, - writable_keys: Vec<&Pubkey>, - readonly_keys: Vec<&Pubkey>, + keys: impl Iterator + Clone, ) -> Result<()> { - for k in writable_keys.iter() { - if account_locks.is_locked_write(k) || account_locks.is_locked_readonly(k) { - debug!("Writable account in use: {:?}", k); - return Err(TransactionError::AccountInUse); - } - } - for k in readonly_keys.iter() { - if account_locks.is_locked_write(k) { + for (k, writable) in keys.clone() { + if writable { + if account_locks.is_locked_write(k) || account_locks.is_locked_readonly(k) { + debug!("Writable account in use: {:?}", k); + return Err(TransactionError::AccountInUse); + } + } else if account_locks.is_locked_write(k) { debug!("Read-only account in use: {:?}", k); return Err(TransactionError::AccountInUse); } } - for k in writable_keys { - 
account_locks.write_locks.insert(*k); - } - - for k in readonly_keys { - if !account_locks.lock_readonly(k) { + for (k, writable) in keys { + if writable { + account_locks.write_locks.insert(*k); + } else if !account_locks.lock_readonly(k) { account_locks.insert_new_readonly(k); } } @@ -585,17 +589,17 @@ impl Accounts { Ok(()) } - fn unlock_account( + fn unlock_account<'a>( &self, account_locks: &mut AccountLocks, - writable_keys: Vec<&Pubkey>, - readonly_keys: Vec<&Pubkey>, + keys: impl Iterator, ) { - for k in writable_keys { - account_locks.unlock_write(k); - } - for k in readonly_keys { - account_locks.unlock_readonly(k); + for (k, writable) in keys { + if writable { + account_locks.unlock_write(k); + } else { + account_locks.unlock_readonly(k); + } } } @@ -607,8 +611,12 @@ impl Accounts { txs: impl Iterator, tx_account_lock_limit: usize, ) -> Vec> { + // Validate the account locks, then get iterator if successful validation. let tx_account_locks_results: Vec> = txs - .map(|tx| tx.get_account_locks(tx_account_lock_limit)) + .map(|tx| { + SanitizedTransaction::validate_account_locks(tx.message(), tx_account_lock_limit) + .map(|_| TransactionAccountLocksIterator::new(tx)) + }) .collect(); self.lock_accounts_inner(tx_account_locks_results) } @@ -620,10 +628,15 @@ impl Accounts { results: impl Iterator>, tx_account_lock_limit: usize, ) -> Vec> { + // Validate the account locks, then get iterator if successful validation. let tx_account_locks_results: Vec> = txs .zip(results) .map(|(tx, result)| match result { - Ok(()) => tx.get_account_locks(tx_account_lock_limit), + Ok(()) => SanitizedTransaction::validate_account_locks( + tx.message(), + tx_account_lock_limit, + ) + .map(|_| TransactionAccountLocksIterator::new(tx)), Err(err) => Err(err), }) .collect(); @@ -633,17 +646,15 @@ impl Accounts { #[must_use] fn lock_accounts_inner( &self, - tx_account_locks_results: Vec>, + tx_account_locks_results: Vec>>, ) -> Vec> { let account_locks = &mut self.account_locks.lock().unwrap(); tx_account_locks_results .into_iter() .map(|tx_account_locks_result| match tx_account_locks_result { - Ok(tx_account_locks) => self.lock_account( - account_locks, - tx_account_locks.writable, - tx_account_locks.readonly, - ), + Ok(tx_account_locks) => { + self.lock_account(account_locks, tx_account_locks.accounts_with_is_writable()) + } Err(err) => Err(err), }) .collect() @@ -652,39 +663,33 @@ impl Accounts { /// Once accounts are unlocked, new transactions that modify that state can enter the pipeline pub fn unlock_accounts<'a>( &self, - txs_and_results: impl Iterator)>, + txs_and_results: impl Iterator)> + Clone, ) { - let keys: Vec<_> = txs_and_results - .filter(|(_, res)| res.is_ok()) - .map(|(tx, _)| tx.get_account_locks_unchecked()) - .collect(); - if keys.is_empty() { + if !txs_and_results.clone().any(|(_, res)| res.is_ok()) { return; } let mut account_locks = self.account_locks.lock().unwrap(); debug!("bank unlock accounts"); - keys.into_iter().for_each(|keys| { - self.unlock_account(&mut account_locks, keys.writable, keys.readonly); - }); + for (tx, res) in txs_and_results { + if res.is_ok() { + let tx_account_locks = TransactionAccountLocksIterator::new(tx.message()); + self.unlock_account( + &mut account_locks, + tx_account_locks.accounts_with_is_writable(), + ); + } + } } /// Store the accounts into the DB - // allow(clippy) needed for various gating flags - #[allow(clippy::too_many_arguments)] - pub fn store_cached( + pub fn store_cached<'a>( &self, - slot: Slot, - txs: &[SanitizedTransaction], - res: 
&[TransactionExecutionResult], - loaded: &mut [TransactionLoadResult], - durable_nonce: &DurableNonce, - lamports_per_signature: u64, + accounts: impl StorableAccounts<'a>, + transactions: &'a [Option<&'a SanitizedTransaction>], ) { - let (accounts_to_store, transactions) = - self.collect_accounts_to_store(txs, res, loaded, durable_nonce, lamports_per_signature); self.accounts_db - .store_cached_inline_update_index((slot, &accounts_to_store[..]), Some(&transactions)); + .store_cached_inline_update_index(accounts, Some(transactions)); } pub fn store_accounts_cached<'a>(&self, accounts: impl StorableAccounts<'a>) { @@ -695,153 +700,22 @@ impl Accounts { pub fn add_root(&self, slot: Slot) -> AccountsAddRootTiming { self.accounts_db.add_root(slot) } - - #[allow(clippy::too_many_arguments)] - fn collect_accounts_to_store<'a>( - &self, - txs: &'a [SanitizedTransaction], - execution_results: &'a [TransactionExecutionResult], - load_results: &'a mut [TransactionLoadResult], - durable_nonce: &DurableNonce, - lamports_per_signature: u64, - ) -> ( - Vec<(&'a Pubkey, &'a AccountSharedData)>, - Vec>, - ) { - let mut accounts = Vec::with_capacity(load_results.len()); - let mut transactions = Vec::with_capacity(load_results.len()); - for (i, (tx_load_result, tx)) in load_results.iter_mut().zip(txs).enumerate() { - let Ok(loaded_transaction) = tx_load_result else { - // Don't store any accounts if tx failed to load - continue; - }; - - let execution_status = match &execution_results[i] { - TransactionExecutionResult::Executed { details, .. } => &details.status, - // Don't store any accounts if tx wasn't executed - TransactionExecutionResult::NotExecuted(_) => continue, - }; - - // Accounts that are invoked and also not passed as an instruction - // account to a program don't need to be stored because it's assumed - // to be impossible for a committable transaction to modify an - // invoked account if said account isn't passed to some program. - // - // Note that this assumption might not hold in the future after - // SIMD-0082 is implemented because we may decide to commit - // transactions that incorrectly attempt to invoke a fee payer or - // durable nonce account. If that happens, we would NOT want to use - // this filter because we always NEED to store those accounts even - // if they aren't passed to any programs (because they are mutated - // outside of the VM). 
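The comment above justifies the storability filter in the removed code: an account that is only invoked (used as a program id) and never passed as an instruction account cannot be modified by a committable transaction, so it is skipped when collecting accounts to store. Distilled into a hypothetical free function:

```rust
/// An account is worth storing unless it is only ever invoked
/// (i.e. used as a program id) and never passed to an instruction.
fn is_storable_account(is_invoked: bool, is_instruction_account: bool) -> bool {
    !is_invoked || is_instruction_account
}

fn main() {
    assert!(is_storable_account(false, false)); // plain account: store
    assert!(is_storable_account(true, true));   // program id also passed as account: store
    assert!(!is_storable_account(true, false)); // invoked only: skip
}
```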
- let is_storable_account = |message: &SanitizedMessage, key_index: usize| -> bool { - !message.is_invoked(key_index) || message.is_instruction_account(key_index) - }; - - let message = tx.message(); - let rollback_accounts = &loaded_transaction.rollback_accounts; - let maybe_nonce_address = rollback_accounts.nonce().map(|account| account.address()); - - for (i, (address, account)) in (0..message.account_keys().len()) - .zip(loaded_transaction.accounts.iter_mut()) - .filter(|(i, _)| is_storable_account(message, *i)) - { - if message.is_writable(i) { - let should_collect_account = match execution_status { - Ok(()) => true, - Err(_) => { - let is_fee_payer = i == 0; - let is_nonce_account = Some(&*address) == maybe_nonce_address; - post_process_failed_tx( - account, - is_fee_payer, - is_nonce_account, - rollback_accounts, - durable_nonce, - lamports_per_signature, - ); - - is_fee_payer || is_nonce_account - } - }; - - if should_collect_account { - // Add to the accounts to store - accounts.push((&*address, &*account)); - transactions.push(Some(tx)); - } - } - } - } - (accounts, transactions) - } -} - -fn post_process_failed_tx( - account: &mut AccountSharedData, - is_fee_payer: bool, - is_nonce_account: bool, - rollback_accounts: &RollbackAccounts, - &durable_nonce: &DurableNonce, - lamports_per_signature: u64, -) { - // For the case of RollbackAccounts::SameNonceAndFeePayer, it's crucial - // for `is_nonce_account` to be checked earlier than `is_fee_payer`. - if is_nonce_account { - if let Some(nonce) = rollback_accounts.nonce() { - // The transaction failed which would normally drop the account - // processing changes, since this account is now being included - // in the accounts written back to the db, roll it back to - // pre-processing state. - *account = nonce.account().clone(); - - // Advance the stored blockhash to prevent fee theft by someone - // replaying nonce transactions that have failed with an - // `InstructionError`. 
- // - // Since we know we are dealing with a valid nonce account, - // unwrap is safe here - let nonce_versions = StateMut::::state(account).unwrap(); - if let NonceState::Initialized(ref data) = nonce_versions.state() { - let nonce_state = NonceState::new_initialized( - &data.authority, - durable_nonce, - lamports_per_signature, - ); - let nonce_versions = NonceVersions::new(nonce_state); - account.set_state(&nonce_versions).unwrap(); - } - } - } else if is_fee_payer { - *account = rollback_accounts.fee_payer_account().clone(); - } } #[cfg(test)] mod tests { use { super::*, - assert_matches::assert_matches, - solana_compute_budget::compute_budget_processor::ComputeBudgetLimits, solana_sdk::{ account::{AccountSharedData, WritableAccount}, address_lookup_table::state::LookupTableMeta, - fee::FeeDetails, hash::Hash, - instruction::{CompiledInstruction, InstructionError}, + instruction::CompiledInstruction, message::{Message, MessageHeader}, native_loader, - nonce::state::Data as NonceData, - nonce_account, - rent_debits::RentDebits, - signature::{keypair_from_seed, signers::Signers, Keypair, Signer}, - system_instruction, system_program, + signature::{signers::Signers, Keypair, Signer}, transaction::{Transaction, MAX_TX_ACCOUNT_LOCKS}, }, - solana_svm::{ - account_loader::LoadedTransaction, nonce_info::NoncePartial, - transaction_results::TransactionExecutionDetails, - }, std::{ borrow::Cow, iter, @@ -862,21 +736,6 @@ mod tests { )) } - fn new_execution_result(status: Result<()>) -> TransactionExecutionResult { - TransactionExecutionResult::Executed { - details: TransactionExecutionDetails { - status, - log_messages: None, - inner_instructions: None, - fee_details: FeeDetails::default(), - return_data: None, - executed_units: 0, - accounts_data_len_delta: 0, - }, - programs_modified_by_tx: HashMap::new(), - } - } - #[test] fn test_hold_range_in_memory() { let accounts_db = AccountsDb::default_for_tests(); @@ -1523,112 +1382,6 @@ mod tests { .is_empty()); } - #[test] - fn test_collect_accounts_to_store() { - let keypair0 = Keypair::new(); - let keypair1 = Keypair::new(); - let pubkey = solana_sdk::pubkey::new_rand(); - let account0 = AccountSharedData::new(1, 0, &Pubkey::default()); - let account1 = AccountSharedData::new(2, 0, &Pubkey::default()); - let account2 = AccountSharedData::new(3, 0, &Pubkey::default()); - - let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])]; - let message = Message::new_with_compiled_instructions( - 1, - 0, - 2, - vec![keypair0.pubkey(), pubkey, native_loader::id()], - Hash::default(), - instructions, - ); - let transaction_accounts0 = vec![ - (message.account_keys[0], account0), - (message.account_keys[1], account2.clone()), - ]; - let tx0 = new_sanitized_tx(&[&keypair0], message, Hash::default()); - - let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])]; - let message = Message::new_with_compiled_instructions( - 1, - 0, - 2, - vec![keypair1.pubkey(), pubkey, native_loader::id()], - Hash::default(), - instructions, - ); - let transaction_accounts1 = vec![ - (message.account_keys[0], account1), - (message.account_keys[1], account2), - ]; - let tx1 = new_sanitized_tx(&[&keypair1], message, Hash::default()); - - let loaded0 = Ok(LoadedTransaction { - accounts: transaction_accounts0, - program_indices: vec![], - fee_details: FeeDetails::default(), - rollback_accounts: RollbackAccounts::default(), - compute_budget_limits: ComputeBudgetLimits::default(), - rent: 0, - rent_debits: RentDebits::default(), - loaded_accounts_data_size: 0, 
- }); - - let loaded1 = Ok(LoadedTransaction { - accounts: transaction_accounts1, - program_indices: vec![], - fee_details: FeeDetails::default(), - rollback_accounts: RollbackAccounts::default(), - compute_budget_limits: ComputeBudgetLimits::default(), - rent: 0, - rent_debits: RentDebits::default(), - loaded_accounts_data_size: 0, - }); - - let mut loaded = vec![loaded0, loaded1]; - - let accounts_db = AccountsDb::new_single_for_tests(); - let accounts = Accounts::new(Arc::new(accounts_db)); - { - accounts - .account_locks - .lock() - .unwrap() - .insert_new_readonly(&pubkey); - } - let txs = vec![tx0.clone(), tx1.clone()]; - let execution_results = vec![new_execution_result(Ok(())); 2]; - let (collected_accounts, transactions) = accounts.collect_accounts_to_store( - &txs, - &execution_results, - loaded.as_mut_slice(), - &DurableNonce::default(), - 0, - ); - assert_eq!(collected_accounts.len(), 2); - assert!(collected_accounts - .iter() - .any(|(pubkey, _account)| *pubkey == &keypair0.pubkey())); - assert!(collected_accounts - .iter() - .any(|(pubkey, _account)| *pubkey == &keypair1.pubkey())); - - assert_eq!(transactions.len(), 2); - assert!(transactions.iter().any(|txn| txn.unwrap().eq(&tx0))); - assert!(transactions.iter().any(|txn| txn.unwrap().eq(&tx1))); - - // Ensure readonly_lock reflects lock - assert_eq!( - *accounts - .account_locks - .lock() - .unwrap() - .readonly_locks - .get(&pubkey) - .unwrap(), - 1 - ); - } - #[test] fn huge_clean() { solana_logger::setup(); @@ -1653,324 +1406,6 @@ mod tests { accounts.accounts_db.clean_accounts_for_tests(); } - fn create_accounts_post_process_failed_tx() -> ( - Pubkey, - AccountSharedData, - AccountSharedData, - DurableNonce, - u64, - ) { - let data = NonceVersions::new(NonceState::Initialized(NonceData::default())); - let account = AccountSharedData::new_data(42, &data, &system_program::id()).unwrap(); - let mut pre_account = account.clone(); - pre_account.set_lamports(43); - let durable_nonce = DurableNonce::from_blockhash(&Hash::new(&[1u8; 32])); - (Pubkey::default(), pre_account, account, durable_nonce, 1234) - } - - fn run_post_process_failed_tx_test( - account: &mut AccountSharedData, - is_fee_payer: bool, - is_nonce_account: bool, - rollback_accounts: &RollbackAccounts, - durable_nonce: &DurableNonce, - lamports_per_signature: u64, - expect_account: &AccountSharedData, - ) -> bool { - // Verify expect_account's relationship - if !is_fee_payer { - if is_nonce_account { - assert_ne!(expect_account, rollback_accounts.nonce().unwrap().account()); - } else { - assert_eq!(expect_account, account); - } - } - - post_process_failed_tx( - account, - is_fee_payer, - is_nonce_account, - rollback_accounts, - durable_nonce, - lamports_per_signature, - ); - assert_eq!(expect_account, account); - expect_account == account - } - - #[test] - fn test_post_process_failed_tx_expected() { - let (pre_account_address, pre_account, mut post_account, blockhash, lamports_per_signature) = - create_accounts_post_process_failed_tx(); - let rollback_accounts = RollbackAccounts::SameNonceAndFeePayer { - nonce: NoncePartial::new(pre_account_address, pre_account.clone()), - }; - - let mut expect_account = pre_account; - expect_account - .set_state(&NonceVersions::new(NonceState::Initialized( - NonceData::new(Pubkey::default(), blockhash, lamports_per_signature), - ))) - .unwrap(); - - assert!(run_post_process_failed_tx_test( - &mut post_account, - false, // is_fee_payer - true, // is_nonce_account - &rollback_accounts, - &blockhash, - lamports_per_signature, - 
&expect_account, - )); - } - - #[test] - fn test_post_process_failed_tx_not_nonce_address() { - let (pre_account_address, pre_account, mut post_account, blockhash, lamports_per_signature) = - create_accounts_post_process_failed_tx(); - - let rollback_accounts = RollbackAccounts::SameNonceAndFeePayer { - nonce: NoncePartial::new(pre_account_address, pre_account.clone()), - }; - - let expect_account = post_account.clone(); - assert!(run_post_process_failed_tx_test( - &mut post_account, - false, // is_fee_payer - false, // is_nonce_account - &rollback_accounts, - &blockhash, - lamports_per_signature, - &expect_account, - )); - } - - #[test] - fn test_rollback_nonce_fee_payer() { - let nonce_account = AccountSharedData::new_data(1, &(), &system_program::id()).unwrap(); - let pre_fee_payer_account = - AccountSharedData::new_data(42, &(), &system_program::id()).unwrap(); - let post_fee_payer_account = - AccountSharedData::new_data(84, &[1, 2, 3, 4], &system_program::id()).unwrap(); - let rollback_accounts = RollbackAccounts::SeparateNonceAndFeePayer { - nonce: NoncePartial::new(Pubkey::new_unique(), nonce_account), - fee_payer_account: pre_fee_payer_account.clone(), - }; - - assert!(run_post_process_failed_tx_test( - &mut post_fee_payer_account.clone(), - false, // is_fee_payer - false, // is_nonce_account - &rollback_accounts, - &DurableNonce::default(), - 1, - &post_fee_payer_account, - )); - - assert!(run_post_process_failed_tx_test( - &mut post_fee_payer_account.clone(), - true, // is_fee_payer - false, // is_nonce_account - &rollback_accounts, - &DurableNonce::default(), - 1, - &pre_fee_payer_account, - )); - } - - #[test] - fn test_nonced_failure_accounts_rollback_from_pays() { - let nonce_address = Pubkey::new_unique(); - let nonce_authority = keypair_from_seed(&[0; 32]).unwrap(); - let from = keypair_from_seed(&[1; 32]).unwrap(); - let from_address = from.pubkey(); - let to_address = Pubkey::new_unique(); - let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); - let nonce_state = NonceVersions::new(NonceState::Initialized(NonceData::new( - nonce_authority.pubkey(), - durable_nonce, - 0, - ))); - let nonce_account_post = - AccountSharedData::new_data(43, &nonce_state, &system_program::id()).unwrap(); - let from_account_post = AccountSharedData::new(4199, 0, &Pubkey::default()); - let to_account = AccountSharedData::new(2, 0, &Pubkey::default()); - let nonce_authority_account = AccountSharedData::new(3, 0, &Pubkey::default()); - let recent_blockhashes_sysvar_account = AccountSharedData::new(4, 0, &Pubkey::default()); - - let instructions = vec![ - system_instruction::advance_nonce_account(&nonce_address, &nonce_authority.pubkey()), - system_instruction::transfer(&from_address, &to_address, 42), - ]; - let message = Message::new(&instructions, Some(&from_address)); - let blockhash = Hash::new_unique(); - let transaction_accounts = vec![ - (message.account_keys[0], from_account_post), - (message.account_keys[1], nonce_authority_account), - (message.account_keys[2], nonce_account_post), - (message.account_keys[3], to_account), - (message.account_keys[4], recent_blockhashes_sysvar_account), - ]; - let tx = new_sanitized_tx(&[&nonce_authority, &from], message, blockhash); - - let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); - let nonce_state = NonceVersions::new(NonceState::Initialized(NonceData::new( - nonce_authority.pubkey(), - durable_nonce, - 0, - ))); - let nonce_account_pre = - AccountSharedData::new_data(42, &nonce_state, 
&system_program::id()).unwrap(); - let from_account_pre = AccountSharedData::new(4242, 0, &Pubkey::default()); - - let nonce = NoncePartial::new(nonce_address, nonce_account_pre.clone()); - let loaded = Ok(LoadedTransaction { - accounts: transaction_accounts, - program_indices: vec![], - fee_details: FeeDetails::default(), - rollback_accounts: RollbackAccounts::SeparateNonceAndFeePayer { - nonce: nonce.clone(), - fee_payer_account: from_account_pre.clone(), - }, - compute_budget_limits: ComputeBudgetLimits::default(), - rent: 0, - rent_debits: RentDebits::default(), - loaded_accounts_data_size: 0, - }); - - let mut loaded = vec![loaded]; - - let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); - let accounts_db = AccountsDb::new_single_for_tests(); - let accounts = Accounts::new(Arc::new(accounts_db)); - let txs = vec![tx]; - let execution_results = vec![new_execution_result(Err( - TransactionError::InstructionError(1, InstructionError::InvalidArgument), - ))]; - let (collected_accounts, _) = accounts.collect_accounts_to_store( - &txs, - &execution_results, - loaded.as_mut_slice(), - &durable_nonce, - 0, - ); - assert_eq!(collected_accounts.len(), 2); - assert_eq!( - collected_accounts - .iter() - .find(|(pubkey, _account)| *pubkey == &from_address) - .map(|(_pubkey, account)| *account) - .cloned() - .unwrap(), - from_account_pre, - ); - let collected_nonce_account = collected_accounts - .iter() - .find(|(pubkey, _account)| *pubkey == &nonce_address) - .map(|(_pubkey, account)| *account) - .cloned() - .unwrap(); - assert_eq!( - collected_nonce_account.lamports(), - nonce_account_pre.lamports(), - ); - assert_matches!( - nonce_account::verify_nonce_account(&collected_nonce_account, durable_nonce.as_hash()), - Some(_) - ); - } - - #[test] - fn test_nonced_failure_accounts_rollback_nonce_pays() { - let nonce_authority = keypair_from_seed(&[0; 32]).unwrap(); - let nonce_address = nonce_authority.pubkey(); - let from = keypair_from_seed(&[1; 32]).unwrap(); - let from_address = from.pubkey(); - let to_address = Pubkey::new_unique(); - let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); - let nonce_state = NonceVersions::new(NonceState::Initialized(NonceData::new( - nonce_authority.pubkey(), - durable_nonce, - 0, - ))); - let nonce_account_post = - AccountSharedData::new_data(43, &nonce_state, &system_program::id()).unwrap(); - let from_account_post = AccountSharedData::new(4200, 0, &Pubkey::default()); - let to_account = AccountSharedData::new(2, 0, &Pubkey::default()); - let nonce_authority_account = AccountSharedData::new(3, 0, &Pubkey::default()); - let recent_blockhashes_sysvar_account = AccountSharedData::new(4, 0, &Pubkey::default()); - - let instructions = vec![ - system_instruction::advance_nonce_account(&nonce_address, &nonce_authority.pubkey()), - system_instruction::transfer(&from_address, &to_address, 42), - ]; - let message = Message::new(&instructions, Some(&nonce_address)); - let blockhash = Hash::new_unique(); - let transaction_accounts = vec![ - (message.account_keys[0], from_account_post), - (message.account_keys[1], nonce_authority_account), - (message.account_keys[2], nonce_account_post), - (message.account_keys[3], to_account), - (message.account_keys[4], recent_blockhashes_sysvar_account), - ]; - let tx = new_sanitized_tx(&[&nonce_authority, &from], message, blockhash); - - let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); - let nonce_state = NonceVersions::new(NonceState::Initialized(NonceData::new( - 
nonce_authority.pubkey(), - durable_nonce, - 0, - ))); - let nonce_account_pre = - AccountSharedData::new_data(42, &nonce_state, &system_program::id()).unwrap(); - - let nonce = NoncePartial::new(nonce_address, nonce_account_pre.clone()); - let loaded = Ok(LoadedTransaction { - accounts: transaction_accounts, - program_indices: vec![], - fee_details: FeeDetails::default(), - rollback_accounts: RollbackAccounts::SameNonceAndFeePayer { - nonce: nonce.clone(), - }, - compute_budget_limits: ComputeBudgetLimits::default(), - rent: 0, - rent_debits: RentDebits::default(), - loaded_accounts_data_size: 0, - }); - - let mut loaded = vec![loaded]; - - let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); - let accounts_db = AccountsDb::new_single_for_tests(); - let accounts = Accounts::new(Arc::new(accounts_db)); - let txs = vec![tx]; - let execution_results = vec![new_execution_result(Err( - TransactionError::InstructionError(1, InstructionError::InvalidArgument), - ))]; - let (collected_accounts, _) = accounts.collect_accounts_to_store( - &txs, - &execution_results, - loaded.as_mut_slice(), - &durable_nonce, - 0, - ); - assert_eq!(collected_accounts.len(), 1); - let collected_nonce_account = collected_accounts - .iter() - .find(|(pubkey, _account)| *pubkey == &nonce_address) - .map(|(_pubkey, account)| *account) - .cloned() - .unwrap(); - assert_eq!( - collected_nonce_account.lamports(), - nonce_account_pre.lamports() - ); - assert_matches!( - nonce_account::verify_nonce_account(&collected_nonce_account, durable_nonce.as_hash()), - Some(_) - ); - } - #[test] fn test_load_largest_accounts() { let accounts_db = AccountsDb::new_single_for_tests(); diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 829c74eb63e2b7..6520d4de189d64 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -19,6 +19,7 @@ //! commit for each slot entry would be indexed. 
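The rollback tests deleted above pin down the durable-nonce failure semantics: a failed nonce transaction has its nonce account restored to pre-execution lamports, while the stored durable nonce still advances so the failed transaction cannot be replayed to burn fees again. A minimal sketch with hypothetical types (the real code goes through `NonceVersions`/`NonceState`):

```rust
#[derive(Clone, Debug, PartialEq)]
struct NonceAccount {
    lamports: u64,
    durable_nonce: [u8; 32],
}

/// Roll back a failed nonce transaction: restore the balance, advance the nonce.
fn rollback_failed_nonce(pre: &NonceAccount, next_durable_nonce: [u8; 32]) -> NonceAccount {
    NonceAccount {
        lamports: pre.lamports,            // pre-execution balance is restored
        durable_nonce: next_durable_nonce, // but the blockhash moves forward,
                                           // blocking fee theft via replay
    }
}

fn main() {
    let pre = NonceAccount { lamports: 42, durable_nonce: [1; 32] };
    let rolled_back = rollback_failed_nonce(&pre, [2; 32]);
    assert_eq!(rolled_back.lamports, pre.lamports);
    assert_ne!(rolled_back.durable_nonce, pre.durable_nonce);
}
```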
mod geyser_plugin_utils; +mod scan_account_storage; #[cfg(feature = "dev-context-only-utils")] use qualifier_attr::qualifiers; @@ -34,10 +35,11 @@ use { StorageAccess, ALIGN_BOUNDARY_OFFSET, }, accounts_hash::{ - AccountHash, AccountsDeltaHash, AccountsHash, AccountsHashKind, AccountsHasher, - CalcAccountsHashConfig, CalculateHashIntermediate, HashStats, IncrementalAccountsHash, - SerdeAccountsDeltaHash, SerdeAccountsHash, SerdeIncrementalAccountsHash, - ZeroLamportAccounts, + AccountHash, AccountLtHash, AccountsDeltaHash, AccountsHash, AccountsHashKind, + AccountsHasher, CalcAccountsHashConfig, CalculateHashIntermediate, HashStats, + IncrementalAccountsHash, SerdeAccountsDeltaHash, SerdeAccountsHash, + SerdeIncrementalAccountsHash, ZeroLamportAccounts, ZERO_LAMPORT_ACCOUNT_HASH, + ZERO_LAMPORT_ACCOUNT_LT_HASH, }, accounts_index::{ in_mem_accounts_index::StartupStats, AccountMapEntry, AccountSecondaryIndexes, @@ -56,22 +58,18 @@ use { }, append_vec::{ aligned_stored_size, APPEND_VEC_MMAPPED_FILES_DIRTY, APPEND_VEC_MMAPPED_FILES_OPEN, - APPEND_VEC_REOPEN_AS_FILE_IO, STORE_META_OVERHEAD, - }, - cache_hash_data::{ - CacheHashData, CacheHashDataFileReference, DeletionPolicy as CacheHashDeletionPolicy, + APPEND_VEC_OPEN_AS_FILE_IO, STORE_META_OVERHEAD, }, + cache_hash_data::{CacheHashData, DeletionPolicy as CacheHashDeletionPolicy}, contains::Contains, epoch_accounts_hash::EpochAccountsHashManager, partitioned_rewards::{PartitionedEpochRewardsConfig, TestPartitionedEpochRewards}, - pubkey_bins::PubkeyBinCalculator24, read_only_accounts_cache::ReadOnlyAccountsCache, sorted_storages::SortedStorages, storable_accounts::{StorableAccounts, StorableAccountsBySlot}, u64_align, utils, verify_accounts_hash_in_background::VerifyAccountsHashInBackground, }, - blake3::traits::digest::Digest, crossbeam_channel::{unbounded, Receiver, Sender}, dashmap::{DashMap, DashSet}, log::*, @@ -80,6 +78,7 @@ use { seqlock::SeqLock, serde::{Deserialize, Serialize}, smallvec::SmallVec, + solana_lattice_hash::lt_hash::LtHash, solana_measure::{measure::Measure, measure_us}, solana_nohash_hasher::{IntMap, IntSet}, solana_rayon_threadlimit::get_thread_count, @@ -97,7 +96,7 @@ use { std::{ borrow::Cow, boxed::Box, - collections::{hash_map, BTreeSet, HashMap, HashSet}, + collections::{BTreeSet, HashMap, HashSet}, fs, hash::{Hash as StdHash, Hasher as StdHasher}, io::Result as IoResult, @@ -114,7 +113,6 @@ use { tempfile::TempDir, }; -const PAGE_SIZE: u64 = 4 * 1024; // when the accounts write cache exceeds this many bytes, we will flush it // this can be specified on the command line, too (--accounts-db-cache-limit-mb) const WRITE_CACHE_LIMIT_BYTES_DEFAULT: u64 = 15_000_000_000; @@ -122,7 +120,7 @@ const SCAN_SLOT_PAR_ITER_THRESHOLD: usize = 4000; const UNREF_ACCOUNTS_BATCH_SIZE: usize = 10_000; -pub const DEFAULT_FILE_SIZE: u64 = PAGE_SIZE * 1024; +pub const DEFAULT_FILE_SIZE: u64 = 4 * 1024 * 1024; pub const DEFAULT_NUM_THREADS: u32 = 8; pub const DEFAULT_NUM_DIRS: u32 = 4; @@ -165,13 +163,6 @@ impl<'a> StoreTo<'a> { } } -enum ScanAccountStorageResult { - /// this data has already been scanned and cached - CacheFileAlreadyExists(CacheHashDataFileReference), - /// this data needs to be scanned and cached - CacheFileNeedsToBeCreated((String, Range)), -} - #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub(crate) enum ScanAccountStorageData { /// callback for accounts in storage will not include `data` @@ -1011,21 +1002,22 @@ impl<'a> LoadedAccount<'a> { pub fn data_len(&self) -> usize { self.data().len() } +} +impl<'a> 
ReadableAccount for LoadedAccount<'a> { fn lamports(&self) -> u64 { match self { LoadedAccount::Stored(stored_account_meta) => stored_account_meta.lamports(), LoadedAccount::Cached(cached_account) => cached_account.account.lamports(), } } - fn data(&self) -> &[u8] { match self { LoadedAccount::Stored(stored_account_meta) => stored_account_meta.data(), LoadedAccount::Cached(cached_account) => cached_account.account.data(), } } - pub(crate) fn owner(&self) -> &Pubkey { + fn owner(&self) -> &Pubkey { match self { LoadedAccount::Stored(stored_account_meta) => stored_account_meta.owner(), LoadedAccount::Cached(cached_account) => cached_account.account.owner(), @@ -1043,6 +1035,9 @@ impl<'a> LoadedAccount<'a> { LoadedAccount::Cached(cached_account) => cached_account.account.rent_epoch(), } } + fn to_account_shared_data(&self) -> AccountSharedData { + self.take_account() + } } #[derive(Debug)] @@ -1122,7 +1117,6 @@ impl AccountStorageEntry { return None; } - APPEND_VEC_REOPEN_AS_FILE_IO.fetch_add(1, Ordering::Relaxed); let count_and_status = self.count_and_status.lock_write(); self.accounts.reopen_as_readonly().map(|accounts| Self { id: self.id, @@ -1203,7 +1197,7 @@ impl AccountStorageEntry { self.slot } - pub fn append_vec_id(&self) -> AccountsFileId { + pub fn id(&self) -> AccountsFileId { self.id } @@ -1266,7 +1260,7 @@ impl AccountStorageEntry { count >= num_accounts, "double remove of account in slot: {}/store: {}!!", self.slot(), - self.append_vec_id(), + self.id(), ); self.alive_bytes.fetch_sub(num_bytes, Ordering::SeqCst); @@ -1391,7 +1385,7 @@ pub struct AccountsDb { /// distribute the accounts across storage lists pub next_id: AtomicAccountsFileId, - /// Set of shrinkable stores organized by map of slot to append_vec_id + /// Set of shrinkable stores organized by map of slot to storage id pub shrink_candidate_slots: Mutex, pub write_version: AtomicU64, @@ -1502,6 +1496,9 @@ pub struct AccountsDb { /// Some time later (to allow for slow calculation time), the bank hash at a slot calculated using 'M' includes the full accounts hash. /// Thus, the state of all accounts on a validator is known to be correct at least once per epoch. 
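The `LoadedAccount` change above promotes its inherent accessors into a `ReadableAccount` impl, so a loaded account can be passed anywhere the trait is accepted, whether it came from a storage file or the write cache. The shape of that enum dispatch, as a hypothetical standalone sketch:

```rust
trait Readable {
    fn lamports(&self) -> u64;
}

struct Stored { lamports: u64 }
struct Cached { lamports: u64 }

enum Loaded {
    Stored(Stored),
    Cached(Cached),
}

// One impl forwards each trait method to whichever variant backs the account.
impl Readable for Loaded {
    fn lamports(&self) -> u64 {
        match self {
            Loaded::Stored(s) => s.lamports,
            Loaded::Cached(c) => c.lamports,
        }
    }
}

fn main() {
    let accounts = [
        Loaded::Stored(Stored { lamports: 1 }),
        Loaded::Cached(Cached { lamports: 2 }),
    ];
    let total: u64 = accounts.iter().map(Readable::lamports).sum();
    assert_eq!(total, 3);
}
```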
pub epoch_accounts_hash_manager: EpochAccountsHashManager, + + /// The latest full snapshot slot dictates how to handle zero lamport accounts + latest_full_snapshot_slot: SeqLock>, } #[derive(Debug, Default)] @@ -1922,7 +1919,7 @@ impl LatestAccountsIndexRootsStats { ), ( "append_vecs_open_as_file_io", - APPEND_VEC_REOPEN_AS_FILE_IO.load(Ordering::Relaxed), + APPEND_VEC_OPEN_AS_FILE_IO.load(Ordering::Relaxed), i64 ) ); @@ -1943,6 +1940,7 @@ struct CleanAccountsStats { remove_dead_accounts_remove_us: AtomicU64, remove_dead_accounts_shrink_us: AtomicU64, clean_stored_dead_slots_us: AtomicU64, + uncleaned_roots_slot_list_1: AtomicU64, } impl CleanAccountsStats { @@ -1967,6 +1965,8 @@ pub(crate) struct ShrinkAncientStats { pub(crate) many_ref_slots_skipped: AtomicU64, pub(crate) slots_cannot_move_count: AtomicU64, pub(crate) many_refs_old_alive: AtomicU64, + pub(crate) slots_eligible_to_shrink: AtomicU64, + pub(crate) total_dead_bytes: AtomicU64, } #[derive(Debug, Default)] @@ -2249,6 +2249,16 @@ impl ShrinkAncientStats { self.random_shrink.swap(0, Ordering::Relaxed) as i64, i64 ), + ( + "slots_eligible_to_shrink", + self.slots_eligible_to_shrink.swap(0, Ordering::Relaxed), + i64 + ), + ( + "total_dead_bytes", + self.total_dead_bytes.swap(0, Ordering::Relaxed), + i64 + ), ( "slots_considered", self.slots_considered.swap(0, Ordering::Relaxed) as i64, @@ -2338,88 +2348,6 @@ impl<'a> ZeroLamport for StoredAccountMeta<'a> { } } -/// called on a struct while scanning append vecs -trait AppendVecScan: Send + Sync + Clone { - /// return true if this pubkey should be included - fn filter(&mut self, pubkey: &Pubkey) -> bool; - /// set current slot of the scan - fn set_slot(&mut self, slot: Slot); - /// found `account` in the append vec - fn found_account(&mut self, account: &LoadedAccount); - /// scanning is done - fn scanning_complete(self) -> BinnedHashData; - /// initialize accumulator - fn init_accum(&mut self, count: usize); -} - -#[derive(Clone)] -/// state to keep while scanning append vec accounts for hash calculation -/// These would have been captured in a fn from within the scan function. -/// Some of these are constant across all pubkeys, some are constant across a slot. -/// Some could be unique per pubkey. -struct ScanState<'a> { - /// slot we're currently scanning - current_slot: Slot, - /// accumulated results - accum: BinnedHashData, - bin_calculator: &'a PubkeyBinCalculator24, - bin_range: &'a Range, - range: usize, - sort_time: Arc, - pubkey_to_bin_index: usize, -} - -impl<'a> AppendVecScan for ScanState<'a> { - fn set_slot(&mut self, slot: Slot) { - self.current_slot = slot; - } - fn filter(&mut self, pubkey: &Pubkey) -> bool { - self.pubkey_to_bin_index = self.bin_calculator.bin_from_pubkey(pubkey); - self.bin_range.contains(&self.pubkey_to_bin_index) - } - fn init_accum(&mut self, count: usize) { - if self.accum.is_empty() { - self.accum.append(&mut vec![Vec::new(); count]); - } - } - fn found_account(&mut self, loaded_account: &LoadedAccount) { - let pubkey = loaded_account.pubkey(); - assert!(self.bin_range.contains(&self.pubkey_to_bin_index)); // get rid of this once we have confidence - - // when we are scanning with bin ranges, we don't need to use exact bin numbers. - // Subtract to make first bin we care about at index 0. 
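The deleted comment above, together with the removed line just below it, remaps a global pubkey bin into the scan's accumulator: a scan over bins `[start, end)` stores results in a vector indexed from zero, so the global bin is shifted down by `start`. A small sketch with a hypothetical helper:

```rust
use std::ops::Range;

/// Map a global bin into the zero-based accumulator index for `bin_range`,
/// or None when the bin falls outside the range being scanned.
fn accum_index(global_bin: usize, bin_range: &Range<usize>) -> Option<usize> {
    bin_range
        .contains(&global_bin)
        .then(|| global_bin - bin_range.start)
}

fn main() {
    let range = 4..8;
    assert_eq!(accum_index(4, &range), Some(0)); // first bin of the range
    assert_eq!(accum_index(6, &range), Some(2));
    assert_eq!(accum_index(9, &range), None); // filtered out before scanning
}
```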
- self.pubkey_to_bin_index -= self.bin_range.start; - - let balance = loaded_account.lamports(); - let mut account_hash = loaded_account.loaded_hash(); - - let hash_is_missing = account_hash == AccountHash(Hash::default()); - if hash_is_missing { - let computed_hash = AccountsDb::hash_account_data( - loaded_account.lamports(), - loaded_account.owner(), - loaded_account.executable(), - loaded_account.rent_epoch(), - loaded_account.data(), - loaded_account.pubkey(), - ); - account_hash = computed_hash; - } - let source_item = CalculateHashIntermediate { - hash: account_hash, - lamports: balance, - pubkey: *pubkey, - }; - self.init_accum(self.range); - self.accum[self.pubkey_to_bin_index].push(source_item); - } - fn scanning_complete(mut self) -> BinnedHashData { - let timing = AccountsDb::sort_slot_storage_scan(&mut self.accum); - self.sort_time.fetch_add(timing, Ordering::Relaxed); - self.accum - } -} - #[derive(Clone, Debug, Eq, PartialEq)] pub struct PubkeyHashAccount { pub pubkey: Pubkey, @@ -2536,6 +2464,7 @@ impl AccountsDb { partitioned_epoch_rewards_config: PartitionedEpochRewardsConfig::default(), epoch_accounts_hash_manager: EpochAccountsHashManager::new_invalid(), test_skip_rewrites_but_include_in_bank_hash: false, + latest_full_snapshot_slot: SeqLock::new(None), } } @@ -2825,13 +2754,12 @@ impl AccountsDb { // do not match the criteria of deleting all appendvecs which contain them // then increment their storage count. let mut already_counted = IntSet::default(); - for (pubkey, (account_infos, ref_count_from_storage)) in purges.iter() { + for (pubkey, (slot_list, ref_count)) in purges.iter() { let mut failed_slot = None; - let all_stores_being_deleted = - account_infos.len() as RefCount == *ref_count_from_storage; + let all_stores_being_deleted = slot_list.len() as RefCount == *ref_count; if all_stores_being_deleted { let mut delete = true; - for (slot, _account_info) in account_infos { + for (slot, _account_info) in slot_list { if let Some(count) = store_counts.get(slot).map(|s| s.0) { debug!( "calc_delete_dependencies() @@ -2858,19 +2786,19 @@ impl AccountsDb { debug!( "calc_delete_dependencies(), pubkey: {}, - account_infos: {:?}, - account_infos_len: {}, - ref_count_from_storage: {}", + slot_list: {:?}, + slot_list_len: {}, + ref_count: {}", pubkey, - account_infos, - account_infos.len(), - ref_count_from_storage, + slot_list, + slot_list.len(), + ref_count, ); } // increment store_counts to non-zero for all stores that can not be deleted. 
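The dependency walk above hinges on a single predicate: every entry for a pubkey is deletable only when the rooted slot list being purged accounts for the pubkey's entire ref count; otherwise some live store still references it and the deletion is abandoned. As a hypothetical distillation:

```rust
type RefCount = u64;

/// All stores referencing this pubkey can be deleted only when the rooted
/// slot list covers the pubkey's full ref count.
fn all_stores_being_deleted(slot_list_len: usize, ref_count: RefCount) -> bool {
    slot_list_len as RefCount == ref_count
}

fn main() {
    assert!(all_stores_being_deleted(3, 3));
    assert!(!all_stores_being_deleted(2, 3)); // a third store still references it
}
```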
let mut pending_stores = IntSet::default(); - for (slot, _account_info) in account_infos { + for (slot, _account_info) in slot_list { if !already_counted.contains(slot) { pending_stores.insert(*slot); } @@ -2881,7 +2809,7 @@ impl AccountsDb { if let Some(failed_slot) = failed_slot.take() { info!("calc_delete_dependencies, oldest slot is not able to be deleted because of {pubkey} in slot {failed_slot}"); } else { - info!("calc_delete_dependencies, oldest slot is not able to be deleted because of {pubkey}, account infos len: {}, ref count: {ref_count_from_storage}", account_infos.len()); + info!("calc_delete_dependencies, oldest slot is not able to be deleted because of {pubkey}, slot list len: {}, ref count: {ref_count}", slot_list.len()); } } @@ -3067,7 +2995,6 @@ impl AccountsDb { &self, max_clean_root_inclusive: Option, is_startup: bool, - last_full_snapshot_slot: Option, timings: &mut CleanKeyTimings, epoch_schedule: &EpochSchedule, ) -> (Vec, Option) { @@ -3158,16 +3085,17 @@ impl AccountsDb { timings.hashset_to_vec_us += hashset_to_vec.as_us(); // Check if we should purge any of the zero_lamport_accounts_to_purge_later, based on the - // last_full_snapshot_slot. + // latest_full_snapshot_slot. + let latest_full_snapshot_slot = self.latest_full_snapshot_slot(); assert!( - last_full_snapshot_slot.is_some() || self.zero_lamport_accounts_to_purge_after_full_snapshot.is_empty(), + latest_full_snapshot_slot.is_some() || self.zero_lamport_accounts_to_purge_after_full_snapshot.is_empty(), "if snapshots are disabled, then zero_lamport_accounts_to_purge_later should always be empty" ); - if let Some(last_full_snapshot_slot) = last_full_snapshot_slot { + if let Some(latest_full_snapshot_slot) = latest_full_snapshot_slot { self.zero_lamport_accounts_to_purge_after_full_snapshot .retain(|(slot, pubkey)| { let is_candidate_for_clean = - max_slot_inclusive >= *slot && last_full_snapshot_slot >= *slot; + max_slot_inclusive >= *slot && latest_full_snapshot_slot >= *slot; if is_candidate_for_clean { pubkeys.push(*pubkey); } @@ -3180,7 +3108,7 @@ impl AccountsDb { /// Call clean_accounts() with the common parameters that tests/benches use. 
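The `retain` in `construct_candidate_clean_keys` above encodes when a zero-lamport account queued after a full snapshot finally becomes cleanable: both the clean root and the latest full snapshot must have reached its slot. A hedged one-function restatement (hypothetical helper name):

```rust
type Slot = u64;

/// A queued zero-lamport account at `slot` may be purged only after both the
/// clean root and the latest full snapshot have reached that slot.
fn is_candidate_for_clean(
    slot: Slot,
    max_slot_inclusive: Slot,
    latest_full_snapshot_slot: Slot,
) -> bool {
    max_slot_inclusive >= slot && latest_full_snapshot_slot >= slot
}

fn main() {
    assert!(is_candidate_for_clean(10, 15, 12));
    assert!(!is_candidate_for_clean(10, 15, 9)); // full snapshot hasn't caught up yet
}
```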
pub fn clean_accounts_for_tests(&self) { - self.clean_accounts(None, false, None, &EpochSchedule::default()) + self.clean_accounts(None, false, &EpochSchedule::default()) } /// called with cli argument to verify refcounts are correct on all accounts @@ -3262,7 +3190,6 @@ impl AccountsDb { &self, max_clean_root_inclusive: Option, is_startup: bool, - last_full_snapshot_slot: Option, epoch_schedule: &EpochSchedule, ) { if self.exhaustively_verify_refcounts { @@ -3279,24 +3206,23 @@ impl AccountsDb { self.report_store_stats(); let mut key_timings = CleanKeyTimings::default(); - let (mut pubkeys, min_dirty_slot) = self.construct_candidate_clean_keys( + let (mut candidates, min_dirty_slot) = self.construct_candidate_clean_keys( max_clean_root_inclusive, is_startup, - last_full_snapshot_slot, &mut key_timings, epoch_schedule, ); let mut sort = Measure::start("sort"); if is_startup { - pubkeys.par_sort_unstable(); + candidates.par_sort_unstable(); } else { self.thread_pool_clean - .install(|| pubkeys.par_sort_unstable()); + .install(|| candidates.par_sort_unstable()); } sort.stop(); - let total_keys_count = pubkeys.len(); + let num_candidates = candidates.len(); let mut accounts_scan = Measure::start("accounts_scan"); let uncleaned_roots = self.accounts_index.clone_uncleaned_roots(); let found_not_zero_accum = AtomicU64::new(0); @@ -3307,9 +3233,9 @@ impl AccountsDb { // parallel scan the index. let (mut purges_zero_lamports, purges_old_accounts) = { let do_clean_scan = || { - pubkeys + candidates .par_chunks(4096) - .map(|pubkeys: &[Pubkey]| { + .map(|candidates: &[Pubkey]| { let mut purges_zero_lamports = HashMap::new(); let mut purges_old_accounts = Vec::new(); let mut found_not_zero = 0; @@ -3317,10 +3243,11 @@ impl AccountsDb { let mut missing = 0; let mut useful = 0; self.accounts_index.scan( - pubkeys.iter(), - |pubkey, slots_refs, _entry| { + candidates.iter(), + |candidate, slot_list_and_ref_count, _entry| { let mut useless = true; - if let Some((slot_list, ref_count)) = slots_refs { + if let Some((slot_list, ref_count)) = slot_list_and_ref_count { + // find the highest rooted slot in the slot list let index_in_slot_list = self.accounts_index.latest_slot( None, slot_list, @@ -3334,9 +3261,12 @@ impl AccountsDb { &slot_list[index_in_slot_list]; if account_info.is_zero_lamport() { useless = false; + // the latest one is zero lamports. we may be able to purge it. + // so, add to purges_zero_lamports purges_zero_lamports.insert( - *pubkey, + *candidate, ( + // add all the rooted entries that contain this pubkey. we know the highest rooted entry is zero lamports self.accounts_index.get_rooted_entries( slot_list, max_clean_root_inclusive, @@ -3355,8 +3285,15 @@ impl AccountsDb { { assert!(slot <= &max_clean_root_inclusive); } - purges_old_accounts.push(*pubkey); - useless = false; + if slot_list.len() > 1 { + // no need to purge old accounts if there is only 1 slot in the slot list + purges_old_accounts.push(*candidate); + useless = false; + } else { + self.clean_accounts_stats + .uncleaned_roots_slot_list_1 + .fetch_add(1, Ordering::Relaxed); + } } } None => { @@ -3368,7 +3305,7 @@ impl AccountsDb { // touched in must be unrooted. 
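Within the scan above, the newest rooted entry in a candidate's slot list decides what clean does with it, and the new `slot_list.len() > 1` check skips pubkeys whose single rooted entry leaves nothing older to purge. A compact restatement of that triage (hypothetical names, omitting the uncleaned-roots bookkeeping):

```rust
enum CleanAction {
    PurgeZeroLamport, // newest rooted entry is zero-lamport: try to purge it all
    PurgeOldAccounts, // several rooted entries: the older ones are purgeable
    Skip,             // one non-zero rooted entry: nothing to clean
}

fn classify(newest_is_zero_lamport: bool, rooted_entries: usize) -> CleanAction {
    if newest_is_zero_lamport {
        CleanAction::PurgeZeroLamport
    } else if rooted_entries > 1 {
        CleanAction::PurgeOldAccounts
    } else {
        CleanAction::Skip
    }
}

fn main() {
    assert!(matches!(classify(true, 1), CleanAction::PurgeZeroLamport));
    assert!(matches!(classify(false, 3), CleanAction::PurgeOldAccounts));
    assert!(matches!(classify(false, 1), CleanAction::Skip));
}
```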
                                        not_found_on_fork += 1;
                                        useless = false;
-                                        purges_old_accounts.push(*pubkey);
+                                        purges_old_accounts.push(*candidate);
                                    }
                                }
                            } else {
@@ -3377,11 +3314,7 @@ impl AccountsDb {
                                if !useless {
                                    useful += 1;
                                }
-                                if useless {
-                                    AccountsIndexScanResult::OnlyKeepInMemoryIfDirty
-                                } else {
-                                    AccountsIndexScanResult::KeepInMemory
-                                }
+                                AccountsIndexScanResult::OnlyKeepInMemoryIfDirty
                            },
                            None,
                            false,
@@ -3394,11 +3327,11 @@ impl AccountsDb {
                    })
                    .reduce(
                        || (HashMap::new(), Vec::new()),
-                        |mut m1, m2| {
+                        |mut a, b| {
                            // Collapse down the hashmaps/vecs into one.
-                            m1.0.extend(m2.0);
-                            m1.1.extend(m2.1);
-                            m1
+                            a.0.extend(b.0);
+                            a.1.extend(b.1);
+                            a
                        },
                    )
            };
@@ -3427,13 +3360,13 @@ impl AccountsDb {
        // Calculate store counts as if everything was purged
        // Then purge if we can
        let mut store_counts: HashMap<Slot, (usize, HashSet<Pubkey>)> = HashMap::new();
-        for (key, (account_infos, ref_count)) in purges_zero_lamports.iter_mut() {
-            if purged_account_slots.contains_key(key) {
-                *ref_count = self.accounts_index.ref_count_from_storage(key);
+        for (pubkey, (slot_list, ref_count)) in purges_zero_lamports.iter_mut() {
+            if purged_account_slots.contains_key(pubkey) {
+                *ref_count = self.accounts_index.ref_count_from_storage(pubkey);
            }
-            account_infos.retain(|(slot, account_info)| {
+            slot_list.retain(|(slot, account_info)| {
                let was_slot_purged = purged_account_slots
-                    .get(key)
+                    .get(pubkey)
                    .map(|slots_removed| slots_removed.contains(slot))
                    .unwrap_or(false);
                if was_slot_purged {
@@ -3452,14 +3385,14 @@ impl AccountsDb {
                }
                if let Some(store_count) = store_counts.get_mut(slot) {
                    store_count.0 -= 1;
-                    store_count.1.insert(*key);
+                    store_count.1.insert(*pubkey);
                } else {
                    let mut key_set = HashSet::new();
-                    key_set.insert(*key);
+                    key_set.insert(*pubkey);
                    assert!(
                        !account_info.is_cached(),
                        "The Accounts Cache must be flushed first for this account info. pubkey: {}, slot: {}",
-                        *key,
+                        *pubkey,
                        *slot
                    );
                    let count = self
@@ -3486,7 +3419,6 @@ impl AccountsDb {
        let mut purge_filter = Measure::start("purge_filter");
        self.filter_zero_lamport_clean_for_incremental_snapshots(
            max_clean_root_inclusive,
-            last_full_snapshot_slot,
            &store_counts,
            &mut purges_zero_lamports,
        );
@@ -3561,7 +3493,7 @@ impl AccountsDb {
            ("dirty_pubkeys_count", key_timings.dirty_pubkeys_count, i64),
            ("sort_us", sort.as_us(), i64),
            ("useful_keys", useful_accum.load(Ordering::Relaxed), i64),
-            ("total_keys_count", total_keys_count, i64),
+            ("total_keys_count", num_candidates, i64),
            (
                "scan_found_not_zero",
                found_not_zero_accum.load(Ordering::Relaxed),
@@ -3574,6 +3506,13 @@ impl AccountsDb {
            ),
            ("scan_missing", missing_accum.load(Ordering::Relaxed), i64),
            ("uncleaned_roots_len", uncleaned_roots.len(), i64),
+            (
+                "uncleaned_roots_slot_list_1",
+                self.clean_accounts_stats
+                    .uncleaned_roots_slot_list_1
+                    .swap(0, Ordering::Relaxed),
+                i64
+            ),
            (
                "clean_old_root_us",
                self.clean_accounts_stats
@@ -3621,6 +3560,13 @@ impl AccountsDb {
                self.accounts_index.roots_added.swap(0, Ordering::Relaxed),
                i64
            ),
+            (
+                "purge_older_root_entries_one_slot_list",
+                self.accounts_index
+                    .purge_older_root_entries_one_slot_list
+                    .swap(0, Ordering::Relaxed),
+                i64
+            ),
            (
                "roots_removed",
                self.accounts_index.roots_removed.swap(0, Ordering::Relaxed),
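// A sketch of the gate used by filter_zero_lamport_clean_for_incremental_snapshots
// below, assuming `Slot` is u64. `None` maps to `Slot::MAX` on both sides, so the
// filter only engages when the clean root is known to be past the latest full snapshot.
fn should_filter_for_incremental_snapshots(
    max_clean_root_inclusive: Option<u64>,
    latest_full_snapshot_slot: Option<u64>,
) -> bool {
    max_clean_root_inclusive.unwrap_or(u64::MAX)
        > latest_full_snapshot_slot.unwrap_or(u64::MAX)
}

#[test]
fn filter_gate_examples() {
    // cleaning past the latest full snapshot: filtering required
    assert!(should_filter_for_incremental_snapshots(Some(200), Some(100)));
    // cleaning at or before the full snapshot slot: nothing to protect
    assert!(!should_filter_for_incremental_snapshots(Some(100), Some(100)));
    // snapshots disabled and no clean root bound: both sides are MAX
    assert!(!should_filter_for_incremental_snapshots(None, None));
}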
@@ -3718,7 +3664,7 @@ impl AccountsDb {
    /// get purged. Filter out those accounts here by removing them from 'purges_zero_lamports'
    ///
    /// When using incremental snapshots, do not purge zero-lamport accounts if the slot is higher
-    /// than the last full snapshot slot. This is to protect against the following scenario:
+    /// than the latest full snapshot slot. This is to protect against the following scenario:
    ///
    /// ```text
    /// A full snapshot is taken, including account 'alpha' with a non-zero balance. In a later slot,
@@ -3733,19 +3679,19 @@ impl AccountsDb {
    /// balance. Very bad!
    /// ```
    ///
-    /// This filtering step can be skipped if there is no `last_full_snapshot_slot`, or if the
-    /// `max_clean_root_inclusive` is less-than-or-equal-to the `last_full_snapshot_slot`.
+    /// This filtering step can be skipped if there is no `latest_full_snapshot_slot`, or if the
+    /// `max_clean_root_inclusive` is less-than-or-equal-to the `latest_full_snapshot_slot`.
    fn filter_zero_lamport_clean_for_incremental_snapshots(
        &self,
        max_clean_root_inclusive: Option<Slot>,
-        last_full_snapshot_slot: Option<Slot>,
        store_counts: &HashMap<Slot, (usize, HashSet<Pubkey>)>,
        purges_zero_lamports: &mut HashMap<Pubkey, (SlotList<AccountInfo>, RefCount)>,
    ) {
+        let latest_full_snapshot_slot = self.latest_full_snapshot_slot();
        let should_filter_for_incremental_snapshots = max_clean_root_inclusive.unwrap_or(Slot::MAX)
-            > last_full_snapshot_slot.unwrap_or(Slot::MAX);
+            > latest_full_snapshot_slot.unwrap_or(Slot::MAX);
        assert!(
-            last_full_snapshot_slot.is_some() || !should_filter_for_incremental_snapshots,
+            latest_full_snapshot_slot.is_some() || !should_filter_for_incremental_snapshots,
            "if filtering for incremental snapshots, then snapshots should be enabled",
        );

@@ -3780,7 +3726,7 @@ impl AccountsDb {
            // not get purged here are added to a list so they can be considered for purging later
            // (i.e. after the next full snapshot).
            assert!(account_info.is_zero_lamport());
-            let cannot_purge = *slot > last_full_snapshot_slot.unwrap();
+            let cannot_purge = *slot > latest_full_snapshot_slot.unwrap();
            if cannot_purge {
                self.zero_lamport_accounts_to_purge_after_full_snapshot
                    .insert((*slot, *pubkey));
@@ -3873,7 +3819,7 @@ impl AccountsDb {
                    dead += 1;
                } else {
                    // Hold onto the index entry arc so that it cannot be flushed.
-                    // Since we are shrinking these entries, we need to disambiguate append_vec_ids during this period and those only exist in the in-memory accounts index.
+                    // Since we are shrinking these entries, we need to disambiguate storage ids during this period and those only exist in the in-memory accounts index.
                    index_entries_being_shrunk.push(Arc::clone(entry.unwrap()));
                    all_are_zero_lamports &= stored_account.is_zero_lamport();
                    alive_accounts.add(ref_count, stored_account, slot_list);
@@ -4041,18 +3987,16 @@ impl AccountsDb {

        let alive_total_bytes = alive_accounts.alive_bytes();

-        let aligned_total_bytes: u64 = Self::page_align(alive_total_bytes as u64);
-
        stats
            .accounts_removed
            .fetch_add(len - alive_accounts.len(), Ordering::Relaxed);
        stats.bytes_removed.fetch_add(
-            capacity.saturating_sub(aligned_total_bytes),
+            capacity.saturating_sub(alive_total_bytes as u64),
            Ordering::Relaxed,
        );
        stats
            .bytes_written
-            .fetch_add(aligned_total_bytes, Ordering::Relaxed);
+            .fetch_add(alive_total_bytes as u64, Ordering::Relaxed);

        ShrinkCollect {
            slot,
@@ -4129,12 +4073,18 @@ impl AccountsDb {
        let shrink_collect =
            self.shrink_collect::<AliveAccounts<'_>>(store, &unique_accounts, &self.shrink_stats);

-        // This shouldn't happen if alive_bytes/approx_stored_count are accurate
+        // This shouldn't happen if alive_bytes/approx_stored_count are accurate.
+        // However, it is possible that the remaining alive bytes could be 0. In that case, the whole slot should be marked dead by clean.
if Self::should_not_shrink( shrink_collect.alive_total_bytes as u64, shrink_collect.capacity, - ) { - warn!( + ) || shrink_collect.alive_total_bytes == 0 + { + if shrink_collect.alive_total_bytes == 0 { + // clean needs to take care of this dead slot + self.accounts_index.add_uncleaned_roots([slot]); + } + info!( "Unexpected shrink for slot {} alive {} capacity {}, \ likely caused by a bug for calculating alive bytes.", slot, shrink_collect.alive_total_bytes, shrink_collect.capacity @@ -4182,39 +4132,36 @@ impl AccountsDb { let mut stats_sub = ShrinkStatsSub::default(); let mut rewrite_elapsed = Measure::start("rewrite_elapsed"); - if shrink_collect.alive_total_bytes > 0 { - let (shrink_in_progress, time_us) = measure_us!( - self.get_store_for_shrink(slot, shrink_collect.alive_total_bytes as u64) - ); - stats_sub.create_and_insert_store_elapsed_us = Saturating(time_us); - - // here, we're writing back alive_accounts. That should be an atomic operation - // without use of rather wide locks in this whole function, because we're - // mutating rooted slots; There should be no writers to them. - let accounts = [(slot, &shrink_collect.alive_accounts.alive_accounts()[..])]; - let storable_accounts = StorableAccountsBySlot::new(slot, &accounts, self); - stats_sub.store_accounts_timing = - self.store_accounts_frozen(storable_accounts, shrink_in_progress.new_storage()); - - rewrite_elapsed.stop(); - stats_sub.rewrite_elapsed_us = Saturating(rewrite_elapsed.as_us()); - - // `store_accounts_frozen()` above may have purged accounts from some - // other storage entries (the ones that were just overwritten by this - // new storage entry). This means some of those stores might have caused - // this slot to be read to `self.shrink_candidate_slots`, so delete - // those here - self.shrink_candidate_slots.lock().unwrap().remove(&slot); - - self.remove_old_stores_shrink( - &shrink_collect, - &self.shrink_stats, - Some(shrink_in_progress), - false, - ); + let (shrink_in_progress, time_us) = + measure_us!(self.get_store_for_shrink(slot, shrink_collect.alive_total_bytes as u64)); + stats_sub.create_and_insert_store_elapsed_us = Saturating(time_us); + + // here, we're writing back alive_accounts. That should be an atomic operation + // without use of rather wide locks in this whole function, because we're + // mutating rooted slots; There should be no writers to them. + let accounts = [(slot, &shrink_collect.alive_accounts.alive_accounts()[..])]; + let storable_accounts = StorableAccountsBySlot::new(slot, &accounts, self); + stats_sub.store_accounts_timing = + self.store_accounts_frozen(storable_accounts, shrink_in_progress.new_storage()); - self.reopen_storage_as_readonly_shrinking_in_progress_ok(slot); - } + rewrite_elapsed.stop(); + stats_sub.rewrite_elapsed_us = Saturating(rewrite_elapsed.as_us()); + + // `store_accounts_frozen()` above may have purged accounts from some + // other storage entries (the ones that were just overwritten by this + // new storage entry). 
This means some of those stores might have caused
+        // this slot to be re-added to `self.shrink_candidate_slots`, so delete
+        // those here
+        self.shrink_candidate_slots.lock().unwrap().remove(&slot);
+
+        self.remove_old_stores_shrink(
+            &shrink_collect,
+            &self.shrink_stats,
+            Some(shrink_in_progress),
+            false,
+        );
+
+        self.reopen_storage_as_readonly_shrinking_in_progress_ok(slot);

        Self::update_shrink_stats(&self.shrink_stats, stats_sub, true);
        self.shrink_stats.report();
@@ -4302,7 +4249,7 @@ impl AccountsDb {
            // which could return (slot, append vec id). We want the lookup for the storage to get a storage
            // that works whether the lookup occurs before or after the replace call here.
            // So, the two storages have to be exactly equivalent wrt offsets, counts, len, id, etc.
-            assert_eq!(storage.append_vec_id(), new_storage.append_vec_id());
+            assert_eq!(storage.id(), new_storage.id());
            assert_eq!(storage.accounts.len(), new_storage.accounts.len());
            self.storage
                .replace_storage_with_equivalent(slot, Arc::new(new_storage));
@@ -4310,10 +4257,9 @@ impl AccountsDb {
        }
    }

-    /// return a store that can contain 'aligned_total' bytes
-    pub fn get_store_for_shrink(&self, slot: Slot, aligned_total: u64) -> ShrinkInProgress<'_> {
-        let shrunken_store =
-            self.create_store(slot, aligned_total, "shrink", self.shrink_paths.as_slice());
+    /// return a store that can contain 'size' bytes
+    pub fn get_store_for_shrink(&self, slot: Slot, size: u64) -> ShrinkInProgress<'_> {
+        let shrunken_store = self.create_store(slot, size, "shrink", self.shrink_paths.as_slice());
        self.storage.shrinking_in_progress(slot, shrunken_store)
    }

@@ -4373,10 +4319,10 @@ impl AccountsDb {
                continue;
            };
            candidates_count += 1;
-            total_alive_bytes += Self::page_align(store.alive_bytes() as u64);
+            let alive_bytes = store.alive_bytes();
+            total_alive_bytes += alive_bytes as u64;
            total_bytes += store.capacity();
-            let alive_ratio =
-                Self::page_align(store.alive_bytes() as u64) as f64 / store.capacity() as f64;
+            let alive_ratio = alive_bytes as f64 / store.capacity() as f64;
            store_usage.push(StoreUsageInfo {
                slot: *slot,
                alive_ratio,
@@ -4398,7 +4344,7 @@ impl AccountsDb {
            let store = &usage.store;
            let alive_ratio = (total_alive_bytes as f64) / (total_bytes as f64);
            debug!("alive_ratio: {:?} store_id: {:?}, store_ratio: {:?} requirement: {:?}, total_bytes: {:?} total_alive_bytes: {:?}",
-                alive_ratio, usage.store.append_vec_id(), usage.alive_ratio, shrink_ratio, total_bytes, total_alive_bytes);
+                alive_ratio, usage.store.id(), usage.alive_ratio, shrink_ratio, total_bytes, total_alive_bytes);
            if alive_ratio > shrink_ratio {
                // we have reached our goal, stop
                debug!(
@@ -4413,7 +4359,7 @@ impl AccountsDb {
                }
            } else {
                let current_store_size = store.capacity();
-                let after_shrink_size = Self::page_align(store.alive_bytes() as u64);
+                let after_shrink_size = store.alive_bytes() as u64;
                let bytes_saved = current_store_size.saturating_sub(after_shrink_size);
                total_bytes -= bytes_saved;
                shrink_slots.insert(usage.slot, Arc::clone(store));
@@ -4872,7 +4818,8 @@ impl AccountsDb {
            return 0;
        }

-        let _guard = self.active_stats.activate(ActiveStatItem::Shrink);
+        let _guard = (!shrink_slots.is_empty())
+            .then(|| self.active_stats.activate(ActiveStatItem::Shrink));

        let mut measure_shrink_all_candidates = Measure::start("shrink_all_candidate_slots-ms");
        let num_candidates = shrink_slots.len();
@@ -4906,17 +4853,42 @@ impl AccountsDb {
        num_candidates
    }
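// Why the conditional guard above uses `bool::then` rather than `bool::then_some`:
// `then_some(v)` takes an eagerly evaluated value, so handing it a closure would
// store the closure itself without ever running it, while `then(f)` invokes the
// closure lazily when the condition holds. A std-only demonstration:
#[test]
fn then_vs_then_some() {
    let mut activated = false;
    {
        // the closure becomes the Option's payload and is never called
        let _guard = true.then_some(|| activated = true);
    }
    assert!(!activated);
    // `then` runs the closure and wraps its result
    let _ = true.then(|| activated = true);
    assert!(activated);
}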
+    /// This is only called at startup from bank when we are being extra careful, such as when we
+    /// downloaded a snapshot.
+    /// Also called from tests.
+    /// `newest_slot_skip_shrink_inclusive` is used to avoid shrinking the slot we are loading a snapshot from. If we shrink that slot, we affect
+    /// the bank hash calculation verification at startup.
    pub fn shrink_all_slots(
        &self,
        is_startup: bool,
-        last_full_snapshot_slot: Option<Slot>,
        epoch_schedule: &EpochSchedule,
+        newest_slot_skip_shrink_inclusive: Option<Slot>,
    ) {
        let _guard = self.active_stats.activate(ActiveStatItem::Shrink);
        const DIRTY_STORES_CLEANING_THRESHOLD: usize = 10_000;
        const OUTER_CHUNK_SIZE: usize = 2000;
+        let mut slots = self.all_slots_in_storage();
+        if let Some(newest_slot_skip_shrink_inclusive) = newest_slot_skip_shrink_inclusive {
+            // at startup, we cannot shrink the slot that we're about to replay and recalculate bank hash for.
+            // That storage's contents are used to verify the bank hash (and accounts delta hash) of the startup slot.
+            slots.retain(|slot| slot < &newest_slot_skip_shrink_inclusive);
+        }
+
+        // if we are restoring from incremental + full snapshot, then we cannot clean past latest_full_snapshot_slot.
+        // If we were to clean past that, then we could mark accounts prior to latest_full_snapshot_slot as dead.
+        // If we mark accounts prior to latest_full_snapshot_slot as dead, then we could shrink those accounts away.
+        // If we shrink accounts away, then when we run the full hash of all accounts calculation up to latest_full_snapshot_slot,
+        // then we will get the wrong answer, because some accounts may be GONE from the slot range up to latest_full_snapshot_slot.
+        // So, we can only clean UP TO and including latest_full_snapshot_slot.
+        // As long as we don't mark anything as dead at slots > latest_full_snapshot_slot, then shrink will have nothing to do for
+        // slots > latest_full_snapshot_slot.
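// A sketch of the slot selection described in the comments above, with `Slot`
// as u64: the startup shrink pass skips the snapshot slot and everything newer,
// since that storage backs the startup bank hash verification.
fn slots_eligible_for_startup_shrink(
    mut all_slots: Vec<u64>,
    newest_slot_skip_shrink_inclusive: Option<u64>,
) -> Vec<u64> {
    if let Some(boundary) = newest_slot_skip_shrink_inclusive {
        // keep strictly older slots; the boundary slot itself must stay untouched
        all_slots.retain(|slot| *slot < boundary);
    }
    all_slots
}

#[test]
fn startup_shrink_skips_snapshot_slot() {
    assert_eq!(
        slots_eligible_for_startup_shrink(vec![10, 20, 30], Some(30)),
        [10, 20]
    );
}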
+        let maybe_clean = || {
+            if self.dirty_stores.len() > DIRTY_STORES_CLEANING_THRESHOLD {
+                let latest_full_snapshot_slot = self.latest_full_snapshot_slot();
+                self.clean_accounts(latest_full_snapshot_slot, is_startup, epoch_schedule);
+            }
+        };
+
        if is_startup {
-            let slots = self.all_slots_in_storage();
            let threads = num_cpus::get();
            let inner_chunk_size = std::cmp::max(OUTER_CHUNK_SIZE / threads, 1);
            slots.chunks(OUTER_CHUNK_SIZE).for_each(|chunk| {
@@ -4925,16 +4897,12 @@ impl AccountsDb {
                        self.shrink_slot_forced(*slot);
                    }
                });
-                if self.dirty_stores.len() > DIRTY_STORES_CLEANING_THRESHOLD {
-                    self.clean_accounts(None, is_startup, last_full_snapshot_slot, epoch_schedule);
-                }
+                maybe_clean();
            });
        } else {
-            for slot in self.all_slots_in_storage() {
+            for slot in slots {
                self.shrink_slot_forced(slot);
-                if self.dirty_stores.len() > DIRTY_STORES_CLEANING_THRESHOLD {
-                    self.clean_accounts(None, is_startup, last_full_snapshot_slot, epoch_schedule);
-                }
+                maybe_clean();
            }
        }
    }
@@ -5763,10 +5731,6 @@ impl AccountsDb {
        store
    }

-    pub fn page_align(size: u64) -> u64 {
-        (size + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1)
-    }
-
    fn has_space_available(&self, slot: Slot, size: u64) -> bool {
        let store = self.storage.get_slot_storage_entry(slot).unwrap();
        if store.status() == AccountStorageStatus::Available
@@ -5792,7 +5756,7 @@ impl AccountsDb {

        debug!(
            "creating store: {} slot: {} len: {} size: {} from: {} path: {}",
-            store.append_vec_id(),
+            store.id(),
            slot,
            store.accounts.len(),
            store.accounts.capacity(),
@@ -6194,28 +6158,30 @@ impl AccountsDb {
        }
    }

-    pub fn hash_account<T: ReadableAccount>(account: &T, pubkey: &Pubkey) -> AccountHash {
-        Self::hash_account_data(
-            account.lamports(),
-            account.owner(),
-            account.executable(),
-            account.rent_epoch(),
-            account.data(),
-            pubkey,
-        )
+    /// Calculates the `AccountLtHash` of `account`
+    pub fn lt_hash_account(account: &impl ReadableAccount, pubkey: &Pubkey) -> AccountLtHash {
+        if account.lamports() == 0 {
+            return ZERO_LAMPORT_ACCOUNT_LT_HASH;
+        }
+
+        let hasher = Self::hash_account_helper(account, pubkey);
+        let lt_hash = LtHash::with(&hasher);
+        AccountLtHash(lt_hash)
    }

-    fn hash_account_data(
-        lamports: u64,
-        owner: &Pubkey,
-        executable: bool,
-        rent_epoch: Epoch,
-        data: &[u8],
-        pubkey: &Pubkey,
-    ) -> AccountHash {
-        if lamports == 0 {
-            return AccountHash(Hash::default());
+    /// Calculates the `AccountHash` of `account`
+    pub fn hash_account<T: ReadableAccount>(account: &T, pubkey: &Pubkey) -> AccountHash {
+        if account.lamports() == 0 {
+            return ZERO_LAMPORT_ACCOUNT_HASH;
        }
+
+        let hasher = Self::hash_account_helper(account, pubkey);
+        let hash = Hash::new_from_array(hasher.finalize().into());
+        AccountHash(hash)
+    }
+
+    /// Hashes `account` and returns the underlying Hasher
+    fn hash_account_helper(account: &impl ReadableAccount, pubkey: &Pubkey) -> blake3::Hasher {
        let mut hasher = blake3::Hasher::new();

        // allocate a buffer on the stack that's big enough
@@ -6226,9 +6192,10 @@ impl AccountsDb {
        let mut buffer = SmallVec::<[u8; BUFFER_SIZE]>::new();

        // collect lamports, rent_epoch into buffer to hash
-        buffer.extend_from_slice(&lamports.to_le_bytes());
-        buffer.extend_from_slice(&rent_epoch.to_le_bytes());
+        buffer.extend_from_slice(&account.lamports().to_le_bytes());
+        buffer.extend_from_slice(&account.rent_epoch().to_le_bytes());
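// A condensed sketch of the buffering strategy in `hash_account_helper`,
// assuming the `blake3` crate and a plain Vec in place of SmallVec: small
// fixed-size fields are batched into one buffer, while oversized account data
// is streamed into the hasher directly instead of being copied.
fn hash_account_fields_sketch(
    lamports: u64,
    rent_epoch: u64,
    data: &[u8],
    executable: bool,
    owner: &[u8; 32],
    pubkey: &[u8; 32],
) -> blake3::Hash {
    const BUFFER_CUTOFF: usize = 1024; // stand-in for the real buffer sizing
    let mut hasher = blake3::Hasher::new();
    let mut buffer = Vec::with_capacity(BUFFER_CUTOFF);
    buffer.extend_from_slice(&lamports.to_le_bytes());
    buffer.extend_from_slice(&rent_epoch.to_le_bytes());
    if data.len() > BUFFER_CUTOFF {
        // large data: flush what we have, then hash the data in place
        hasher.update(&buffer);
        buffer.clear();
        hasher.update(data);
    } else {
        buffer.extend_from_slice(data);
    }
    buffer.push(executable.into());
    buffer.extend_from_slice(owner);
    buffer.extend_from_slice(pubkey);
    hasher.update(&buffer);
    hasher.finalize()
}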
+        let data = account.data();
        if data.len() > DATA_SIZE {
            // For larger accounts whose data can't fit into the buffer, update the hash now.
            hasher.update(&buffer);
@@ -6242,12 +6209,12 @@ impl AccountsDb {
        }

        // collect exec_flag, owner, pubkey into buffer to hash
-        buffer.push(executable.into());
-        buffer.extend_from_slice(owner.as_ref());
+        buffer.push(account.executable().into());
+        buffer.extend_from_slice(account.owner().as_ref());
        buffer.extend_from_slice(pubkey.as_ref());
        hasher.update(&buffer);

-        AccountHash(Hash::new_from_array(hasher.finalize().into()))
+        hasher
    }

    fn write_accounts_to_storage<'a>(
@@ -6291,7 +6258,7 @@ impl AccountsDb {
                continue;
            };

-            let store_id = storage.append_vec_id();
+            let store_id = storage.id();
            for (i, offset) in stored_accounts_info.offsets.iter().enumerate() {
                infos.push(AccountInfo::new(
                    StorageLocation::AppendVec(store_id, *offset),
@@ -6765,7 +6732,7 @@ impl AccountsDb {

            oldest_slot = std::cmp::min(oldest_slot, slot);

-            total_alive_bytes += Self::page_align(store.alive_bytes() as u64);
+            total_alive_bytes += store.alive_bytes();
            total_bytes += store.capacity();
        }
        info!(
@@ -6874,12 +6841,8 @@ impl AccountsDb {
                        let hash_is_missing = loaded_hash == AccountHash(Hash::default());
                        if hash_is_missing {
-                            let computed_hash = Self::hash_account_data(
-                                loaded_account.lamports(),
-                                loaded_account.owner(),
-                                loaded_account.executable(),
-                                loaded_account.rent_epoch(),
-                                loaded_account.data(),
+                            let computed_hash = Self::hash_account(
+                                &loaded_account,
                                loaded_account.pubkey(),
                            );
                            loaded_hash = computed_hash;
@@ -6942,18 +6905,6 @@ impl AccountsDb {
        )
    }

-    /// iterate over a single storage, calling scanner on each item
-    fn scan_single_account_storage<S>(storage: &AccountStorageEntry, scanner: &mut S)
-    where
-        S: AppendVecScan,
-    {
-        storage.accounts.scan_accounts(|account| {
-            if scanner.filter(account.pubkey()) {
-                scanner.found_account(&LoadedAccount::Stored(account))
-            }
-        });
-    }
-
    fn update_old_slot_stats(&self, stats: &HashStats, storage: Option<&Arc<AccountStorageEntry>>) {
        if let Some(storage) = storage {
            stats.roots_older_than_epoch.fetch_add(1, Ordering::Relaxed);
@@ -7034,181 +6985,6 @@ impl AccountsDb {
        true
    }
-    /// Scan through all the account storage in parallel.
-    /// Returns a Vec of opened files.
-    /// Each file has serialized hash info, sorted by pubkey and then slot, from scanning the append vecs.
-    /// A single pubkey could be in multiple entries. The pubkey found in the latest entry is the one to use.
-    fn scan_account_storage_no_bank<S>(
-        &self,
-        cache_hash_data: &CacheHashData,
-        config: &CalcAccountsHashConfig<'_>,
-        snapshot_storages: &SortedStorages,
-        scanner: S,
-        bin_range: &Range<usize>,
-        stats: &mut HashStats,
-    ) -> Vec<CacheHashDataFileReference>
-    where
-        S: AppendVecScan,
-    {
-        let oldest_non_ancient_slot = self.get_oldest_non_ancient_slot_for_hash_calc_scan(
-            snapshot_storages.max_slot_inclusive(),
-            config,
-        );
-        let splitter = SplitAncientStorages::new(oldest_non_ancient_slot, snapshot_storages);
-
-        let slots_per_epoch = config
-            .rent_collector
-            .epoch_schedule
-            .get_slots_in_epoch(config.rent_collector.epoch);
-        let one_epoch_old = snapshot_storages
-            .range()
-            .end
-            .saturating_sub(slots_per_epoch);
-
-        stats.scan_chunks = splitter.chunk_count;
-
-        let cache_files = (0..splitter.chunk_count)
-            .into_par_iter()
-            .filter_map(|chunk| {
-                let range_this_chunk = splitter.get_slot_range(chunk)?;
-
-                let mut load_from_cache = true;
-                let mut hasher = hash_map::DefaultHasher::new();
-                bin_range.start.hash(&mut hasher);
-                bin_range.end.hash(&mut hasher);
-                let is_first_scan_pass = bin_range.start == 0;
-
-                // calculate hash representing all storages in this chunk
-                let mut empty = true;
-                for (slot, storage) in snapshot_storages.iter_range(&range_this_chunk) {
-                    empty = false;
-                    if is_first_scan_pass && slot < one_epoch_old {
-                        self.update_old_slot_stats(stats, storage);
-                    }
-                    if let Some(storage) = storage {
-                        let ok = Self::hash_storage_info(&mut hasher, storage, slot);
-                        if !ok {
-                            load_from_cache = false;
-                            break;
-                        }
-                    }
-                }
-                if empty {
-                    return None;
-                }
-                // we have a hash value for the storages in this chunk
-                // so, build a file name:
-                let hash = hasher.finish();
-                let file_name = format!(
-                    "{}.{}.{}.{}.{:016x}",
-                    range_this_chunk.start,
-                    range_this_chunk.end,
-                    bin_range.start,
-                    bin_range.end,
-                    hash
-                );
-                if load_from_cache {
-                    if let Ok(mapped_file) =
-                        cache_hash_data.get_file_reference_to_map_later(&file_name)
-                    {
-                        return Some(ScanAccountStorageResult::CacheFileAlreadyExists(
-                            mapped_file,
-                        ));
-                    }
-                }
-
-                // fall through and load normally - we failed to load from a cache file but there are storages present
-                Some(ScanAccountStorageResult::CacheFileNeedsToBeCreated((
-                    file_name,
-                    range_this_chunk,
-                )))
-            })
-            .collect::<Vec<_>>();
-
-        // Calculate the hits and misses of the hash data files cache.
-        // This is outside of the parallel loop above so that we only need to
-        // update each atomic stat value once.
-        // There are approximately 173 items in the cache files list,
-        // so should be very fast to iterate and compute.
- // (173 cache files == 432,000 slots / 2,500 slots-per-cache-file) - let mut hits = 0; - let mut misses = 0; - for cache_file in &cache_files { - match cache_file { - ScanAccountStorageResult::CacheFileAlreadyExists(_) => hits += 1, - ScanAccountStorageResult::CacheFileNeedsToBeCreated(_) => misses += 1, - }; - } - cache_hash_data - .stats - .hits - .fetch_add(hits, Ordering::Relaxed); - cache_hash_data - .stats - .misses - .fetch_add(misses, Ordering::Relaxed); - - // deletes the old files that will not be used before creating new ones - cache_hash_data.delete_old_cache_files(); - - cache_files - .into_par_iter() - .map(|chunk| { - match chunk { - ScanAccountStorageResult::CacheFileAlreadyExists(file) => Some(file), - ScanAccountStorageResult::CacheFileNeedsToBeCreated(( - file_name, - range_this_chunk, - )) => { - let mut scanner = scanner.clone(); - let mut init_accum = true; - // load from cache failed, so create the cache file for this chunk - for (slot, storage) in snapshot_storages.iter_range(&range_this_chunk) { - let ancient = - oldest_non_ancient_slot.is_some_and(|oldest_non_ancient_slot| { - slot < oldest_non_ancient_slot - }); - - let (_, scan_us) = measure_us!(if let Some(storage) = storage { - if init_accum { - let range = bin_range.end - bin_range.start; - scanner.init_accum(range); - init_accum = false; - } - scanner.set_slot(slot); - - Self::scan_single_account_storage(storage, &mut scanner); - }); - if ancient { - stats - .sum_ancient_scans_us - .fetch_add(scan_us, Ordering::Relaxed); - stats.count_ancient_scans.fetch_add(1, Ordering::Relaxed); - stats - .longest_ancient_scan_us - .fetch_max(scan_us, Ordering::Relaxed); - } - } - (!init_accum) - .then(|| { - let r = scanner.scanning_complete(); - assert!(!file_name.is_empty()); - (!r.is_empty() && r.iter().any(|b| !b.is_empty())).then(|| { - // error if we can't write this - cache_hash_data.save(&file_name, &r).unwrap(); - cache_hash_data - .get_file_reference_to_map_later(&file_name) - .unwrap() - }) - }) - .flatten() - } - } - }) - .filter_map(|x| x) - .collect() - } - /// storages are sorted by slot and have range info. /// add all stores older than slots_per_epoch to dirty_stores so clean visits these slots fn mark_old_slots_as_dirty( @@ -7453,68 +7229,19 @@ impl AccountsDb { self.incremental_accounts_hashes.lock().unwrap().clone() } - /// Purge accounts hashes that are older than `last_full_snapshot_slot` + /// Purge accounts hashes that are older than `latest_full_snapshot_slot` /// /// Should only be called by AccountsHashVerifier, since it consumes the accounts hashes and /// knows which ones are still needed. 
-    pub fn purge_old_accounts_hashes(&self, last_full_snapshot_slot: Slot) {
+    pub fn purge_old_accounts_hashes(&self, latest_full_snapshot_slot: Slot) {
        self.accounts_hashes
            .lock()
            .unwrap()
-            .retain(|&slot, _| slot >= last_full_snapshot_slot);
+            .retain(|&slot, _| slot >= latest_full_snapshot_slot);
        self.incremental_accounts_hashes
            .lock()
            .unwrap()
-            .retain(|&slot, _| slot >= last_full_snapshot_slot);
-    }
-
-    /// scan 'storages', return a vec of 'CacheHashDataFileReference', one per pass
-    fn scan_snapshot_stores_with_cache(
-        &self,
-        cache_hash_data: &CacheHashData,
-        storages: &SortedStorages,
-        stats: &mut crate::accounts_hash::HashStats,
-        bins: usize,
-        bin_range: &Range<usize>,
-        config: &CalcAccountsHashConfig<'_>,
-    ) -> Vec<CacheHashDataFileReference> {
-        assert!(bin_range.start < bins);
-        assert!(bin_range.end <= bins);
-        assert!(bin_range.start < bin_range.end);
-        let _guard = self.active_stats.activate(ActiveStatItem::HashScan);
-
-        let bin_calculator = PubkeyBinCalculator24::new(bins);
-        let mut time = Measure::start("scan all accounts");
-        stats.num_snapshot_storage = storages.storage_count();
-        stats.num_slots = storages.slot_count();
-        let range = bin_range.end - bin_range.start;
-        let sort_time = Arc::new(AtomicU64::new(0));
-
-        let scanner = ScanState {
-            current_slot: Slot::default(),
-            accum: BinnedHashData::default(),
-            bin_calculator: &bin_calculator,
-            range,
-            bin_range,
-            sort_time: sort_time.clone(),
-            pubkey_to_bin_index: 0,
-        };
-
-        let result = self.scan_account_storage_no_bank(
-            cache_hash_data,
-            config,
-            storages,
-            scanner,
-            bin_range,
-            stats,
-        );
-
-        stats.sort_time_total_us += sort_time.load(Ordering::Relaxed);
-
-        time.stop();
-        stats.scan_time_total_us += time.as_us();
-
-        result
+            .retain(|&slot, _| slot >= latest_full_snapshot_slot);
    }

    fn sort_slot_storage_scan(accum: &mut BinnedHashData) -> u64 {
@@ -7809,14 +7536,7 @@ impl AccountsDb {
            |accum: &DashMap<Pubkey, AccountHash>, loaded_account: &LoadedAccount, _data| {
                let mut loaded_hash = loaded_account.loaded_hash();
                if loaded_hash == AccountHash(Hash::default()) {
-                    loaded_hash = Self::hash_account_data(
-                        loaded_account.lamports(),
-                        loaded_account.owner(),
-                        loaded_account.executable(),
-                        loaded_account.rent_epoch(),
-                        loaded_account.data(),
-                        loaded_account.pubkey(),
-                    )
+                    loaded_hash = Self::hash_account(loaded_account, loaded_account.pubkey())
                }
                accum.insert(*loaded_account.pubkey(), loaded_hash);
            },
@@ -8048,7 +7768,7 @@ impl AccountsDb {
    }

    fn should_not_shrink(alive_bytes: u64, total_bytes: u64) -> bool {
-        alive_bytes + PAGE_SIZE > total_bytes
+        alive_bytes >= total_bytes
    }

    fn is_shrinking_productive(slot: Slot, store: &AccountStorageEntry) -> bool {
@@ -8059,10 +7779,11 @@ impl AccountsDb {

        if Self::should_not_shrink(alive_bytes, total_bytes) {
            trace!(
-                "shrink_slot_forced ({}): not able to shrink at all: alive/stored: {} ({}b / {}b) save: {}",
+                "shrink_slot_forced ({}): not able to shrink at all: alive/stored: {}/{} ({}b / {}b) save: {}",
                slot,
                alive_count,
                stored_count,
+                alive_bytes,
                total_bytes,
                total_bytes.saturating_sub(alive_bytes),
            );
@@ -8085,11 +7806,10 @@ impl AccountsDb {
        };
        match self.shrink_ratio {
            AccountShrinkThreshold::TotalSpace { shrink_ratio: _ } => {
-                Self::page_align(store.alive_bytes() as u64) < total_bytes
+                (store.alive_bytes() as u64) < total_bytes
            }
            AccountShrinkThreshold::IndividualStore { shrink_ratio } => {
-                (Self::page_align(store.alive_bytes() as u64) as f64 / total_bytes as f64)
-                    < shrink_ratio
+                (store.alive_bytes() as f64 / total_bytes as f64) < shrink_ratio
            }
        }
    }
@@ -8810,6 +8530,16 @@ impl AccountsDb {
        (result, slots)
    }

+    /// Returns the latest full snapshot slot
+    pub fn latest_full_snapshot_slot(&self) -> Option<Slot> {
+        self.latest_full_snapshot_slot.read()
+    }
+
+    /// Sets the latest full snapshot slot to `slot`
+    pub fn set_latest_full_snapshot_slot(&self, slot: Slot) {
+        *self.latest_full_snapshot_slot.lock_write() = Some(slot);
+    }
+
    /// return Some(lamports_to_top_off) if 'account' would collect rent
    fn stats_for_rent_payers(
        pubkey: &Pubkey,
@@ -9013,7 +8743,7 @@ impl AccountsDb {
                // no storage at this slot, no information to pull out
                continue;
            };
-            let store_id = storage.append_vec_id();
+            let store_id = storage.id();

            scan_time.stop();
            scan_time_sum += scan_time.as_us();
@@ -9353,7 +9083,7 @@ impl AccountsDb {
        // store count and size for each storage
        let mut storage_size_storages_time = Measure::start("storage_size_storages");
        for (_slot, store) in self.storage.iter() {
-            let id = store.append_vec_id();
+            let id = store.id();
            // Should be default at this point
            assert_eq!(store.alive_bytes(), 0);
            if let Some(entry) = stored_sizes_and_counts.get(&id) {
@@ -9417,7 +9147,7 @@ impl AccountsDb {
            info!(
                "  slot: {} id: {} count_and_status: {:?} approx_store_count: {} len: {} capacity: {}",
                slot,
-                entry.append_vec_id(),
+                entry.id(),
                entry.count_and_status.read(),
                entry.approx_store_count.load(Ordering::Relaxed),
                entry.accounts.len(),
@@ -9719,11 +9449,17 @@ pub mod test_utils {
            .get_slot_storage_entry(slot)
            .is_none()
        {
-            let bytes_required = num * aligned_stored_size(data_size);
+            // Some callers relied on old behavior where the file size was rounded up to the
+            // next page size because they append to the storage file after it was written.
+            // This behavior is not supported by a normal running validator. Since this function
+            // is only called by tests/benches, add some extra capacity to the file to not break
+            // the tests/benches. Those tests/benches should be updated though! Bypassing the
+            // write cache in general is not supported.
+            let bytes_required = num * aligned_stored_size(data_size) + 4096;
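// For reference, the page alignment this patch removes, assuming the crate's
// 4 KiB PAGE_SIZE: rounding alive bytes up to page boundaries inflated alive
// ratios, which the byte-exact accounting above no longer does.
const PAGE_SIZE: u64 = 4 * 1024;

fn page_align(size: u64) -> u64 {
    // round up to the next multiple of PAGE_SIZE (a power of two)
    (size + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1)
}

#[test]
fn page_align_examples() {
    assert_eq!(page_align(0), 0);
    assert_eq!(page_align(1), 4096); // one byte still costs a full page
    assert_eq!(page_align(4096), 4096); // already aligned
    assert_eq!(page_align(4097), 8192);
}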
            // allocate an append vec for this slot that can hold all the test accounts. This prevents us from creating more than 1 append vec for this slot.
            _ = accounts.accounts_db.create_and_insert_store(
                slot,
-                AccountsDb::page_align(bytes_required as u64),
+                bytes_required as u64,
                "create_test_accounts",
            );
        }
@@ -9763,7 +9499,6 @@ pub mod tests {
            accounts_index::{tests::*, AccountSecondaryIndexesIncludeExclude},
            ancient_append_vecs,
            append_vec::{test_utils::TempFile, AppendVec, AppendVecStoredAccountMeta},
-            cache_hash_data::CacheHashDataFile,
            storable_accounts::AccountForStorage,
        },
        assert_matches::assert_matches,
@@ -9777,6 +9512,7 @@ pub mod tests {
            pubkey::PUBKEY_BYTES,
        },
        std::{
+            hash::DefaultHasher,
            iter::FromIterator,
            str::FromStr,
            sync::{atomic::AtomicBool, RwLock},
@@ -9793,34 +9529,7 @@ pub mod tests {
        ancestors
    }

-    fn empty_storages<'a>() -> SortedStorages<'a> {
-        SortedStorages::new(&[])
-    }
-
    impl AccountsDb {
-        fn scan_snapshot_stores(
-            &self,
-            storage: &SortedStorages,
-            stats: &mut crate::accounts_hash::HashStats,
-            bins: usize,
-            bin_range: &Range<usize>,
-        ) -> Vec<CacheHashDataFile> {
-            let temp_dir = TempDir::new().unwrap();
-            let accounts_hash_cache_path = temp_dir.path().to_path_buf();
-            self.scan_snapshot_stores_with_cache(
-                &CacheHashData::new(accounts_hash_cache_path, CacheHashDeletionPolicy::AllUnused),
-                storage,
-                stats,
-                bins,
-                bin_range,
-                &CalcAccountsHashConfig::default(),
-            )
-            .iter()
-            .map(CacheHashDataFileReference::map)
-            .collect::<Result<Vec<_>, _>>()
-            .unwrap()
-        }
-
        fn get_storage_for_slot(&self, slot: Slot) -> Option<Arc<AccountStorageEntry>> {
            self.storage.get_slot_storage_entry(slot)
        }
@@ -9868,8 +9577,8 @@ pub mod tests {

    impl CurrentAncientAccountsFile {
        /// note this requires that 'slot_and_accounts_file' is Some
-        fn append_vec_id(&self) -> AccountsFileId {
-            self.accounts_file().append_vec_id()
+        fn id(&self) -> AccountsFileId {
+            self.accounts_file().id()
        }
    }

@@ -9906,6 +9615,7 @@ pub mod tests {
            }
        };
    }
+    pub(crate) use define_accounts_db_test;

    fn run_generate_index_duplicates_within_slot_test(db: AccountsDb, reverse: bool) {
        let slot0 = 0;
@@ -10363,36 +10073,7 @@ pub mod tests {
        );
    }

-    #[test]
-    #[should_panic(expected = "bin_range.start < bins")]
-    fn test_accountsdb_scan_snapshot_stores_illegal_range_start() {
-        let mut stats = HashStats::default();
-        let bounds = Range { start: 2, end: 2 };
-        let accounts_db = AccountsDb::new_single_for_tests();
-
-        accounts_db.scan_snapshot_stores(&empty_storages(), &mut stats, 2, &bounds);
-    }
-
-    #[test]
-    #[should_panic(expected = "bin_range.end <= bins")]
-    fn test_accountsdb_scan_snapshot_stores_illegal_range_end() {
-        let mut stats = HashStats::default();
-        let bounds = Range { start: 1, end: 3 };
-
-        let accounts_db = AccountsDb::new_single_for_tests();
-        accounts_db.scan_snapshot_stores(&empty_storages(), &mut stats, 2, &bounds);
-    }
-
-    #[test]
-    #[should_panic(expected = "bin_range.start < bin_range.end")]
-    fn test_accountsdb_scan_snapshot_stores_illegal_range_inverse() {
-        let mut stats = HashStats::default();
-        let bounds = Range { start: 1, end: 0 };
-
-        let accounts_db = AccountsDb::new_single_for_tests();
-        accounts_db.scan_snapshot_stores(&empty_storages(), &mut stats, 2, &bounds);
-    }
-
-    fn sample_storages_and_account_in_slot(
+    pub(crate) fn sample_storages_and_account_in_slot(
        slot: Slot,
        accounts: &AccountsDb,
    ) -> (
@@ -10467,7 +10148,7 @@ pub mod tests {
        (storages, raw_expected)
    }

-    fn sample_storages_and_accounts(
+    pub(crate) fn sample_storages_and_accounts(
        accounts: &AccountsDb,
    ) -> (
        Vec<Arc<AccountStorageEntry>>,
@@ -10476,352 +10157,10 @@ pub mod tests {
        sample_storages_and_account_in_slot(1, accounts)
    }

-    fn get_storage_refs(input: &[Arc<AccountStorageEntry>]) -> SortedStorages {
+    pub(crate) fn get_storage_refs(input: &[Arc<AccountStorageEntry>]) -> SortedStorages {
        SortedStorages::new(input)
    }

-    /// helper to compare expected binned data with scan result in cache files
-    /// result: return from scanning
-    /// expected: binned data expected
-    /// bins: # bins total to divide pubkeys into
-    /// start_bin_index: bin # that was the minimum # we were scanning for 0<=start_bin_index<bins
-    fn assert_scan(
-        result: Vec<CacheHashDataFile>,
-        expected: Vec<BinnedHashData>,
-        bins: usize,
-        start_bin_index: usize,
-        bin_range: usize,
-    ) {
-        assert_eq!(expected.len(), result.len());
-
-        for cache_file in &result {
-            let mut result2 = (0..bin_range).map(|_| Vec::default()).collect::<Vec<_>>();
-            cache_file.load_all(
-                &mut result2,
-                start_bin_index,
-                &PubkeyBinCalculator24::new(bins),
-            );
-            assert_eq!(
-                convert_to_slice(&[result2]),
-                expected,
-                "bins: {bins}, start_bin_index: {start_bin_index}"
-            );
-        }
-    }
-
-    define_accounts_db_test!(
-        test_accountsdb_scan_snapshot_stores_hash_not_stored,
-        |accounts_db| {
-            let (storages, raw_expected) = sample_storages_and_accounts(&accounts_db);
-            storages.iter().for_each(|storage| {
-                accounts_db.storage.remove(&storage.slot(), false);
-            });
-
-            // replace the sample storages, storing default hash values so that we rehash during scan
-            let storages = storages
-                .iter()
-                .map(|storage| {
-                    let slot = storage.slot();
-                    let copied_storage = accounts_db.create_and_insert_store(slot, 10000, "test");
-                    let mut all_accounts = Vec::default();
-                    storage.accounts.scan_accounts(|acct| {
-                        all_accounts.push((*acct.pubkey(), acct.to_account_shared_data()));
-                    });
-                    let accounts = all_accounts
-                        .iter()
-                        .map(|stored| (&stored.0, &stored.1))
-                        .collect::<Vec<_>>();
-                    let slice = &accounts[..];
-                    let storable_accounts = (slot, slice);
-                    copied_storage
-                        .accounts
-                        .append_accounts(&storable_accounts, 0);
-                    copied_storage
-                })
-                .collect::<Vec<_>>();
-
-            assert_test_scan(accounts_db, storages, raw_expected);
-        }
-    );
-
-    #[test]
-    fn test_accountsdb_scan_snapshot_stores_check_hash() {
-        solana_logger::setup();
-        let accounts_db = AccountsDb::new_single_for_tests();
-        let (storages, _raw_expected) = sample_storages_and_accounts(&accounts_db);
-        let max_slot = storages.iter().map(|storage| storage.slot()).max().unwrap();
-
-        // replace the sample storages, storing default hash values so that we rehash during scan
-        let storages = storages
-            .iter()
-            .map(|storage| {
-                let slot = storage.slot() + max_slot;
-                let copied_storage = accounts_db.create_and_insert_store(slot, 10000, "test");
-                let mut all_accounts = Vec::default();
-                storage.accounts.scan_accounts(|acct| {
-                    all_accounts.push((*acct.pubkey(), acct.to_account_shared_data()));
-                });
-                let accounts = all_accounts
-                    .iter()
-                    .map(|stored| (&stored.0, &stored.1))
-                    .collect::<Vec<_>>();
-                let slice = &accounts[..];
-                let storable_accounts = (slot, slice);
-                copied_storage
-                    .accounts
-                    .append_accounts(&storable_accounts, 0);
-                copied_storage
-            })
-            .collect::<Vec<_>>();
-
-        let bins = 1;
-        let mut stats = HashStats::default();
-
-        accounts_db.scan_snapshot_stores(
-            &get_storage_refs(&storages),
-            &mut stats,
-            bins,
-            &Range {
-                start: 0,
-                end: bins,
-            },
-        );
-    }
-
-    define_accounts_db_test!(test_accountsdb_scan_snapshot_stores, |accounts_db| {
-        let (storages, raw_expected) = sample_storages_and_accounts(&accounts_db);
-
-        assert_test_scan(accounts_db, storages, raw_expected);
-    });
-
-    fn assert_test_scan(
-        accounts_db: AccountsDb,
-        storages: Vec<Arc<AccountStorageEntry>>,
-        raw_expected: Vec<CalculateHashIntermediate>,
-    ) {
-        let bins = 1;
-        let mut stats = HashStats::default();
-
-        let result = accounts_db.scan_snapshot_stores(
-            &get_storage_refs(&storages),
-            &mut stats,
-            bins,
-            &Range
{ - start: 0, - end: bins, - }, - ); - assert_scan(result, vec![vec![raw_expected.clone()]], bins, 0, bins); - - let bins = 2; - let accounts_db = AccountsDb::new_single_for_tests(); - let result = accounts_db.scan_snapshot_stores( - &get_storage_refs(&storages), - &mut stats, - bins, - &Range { - start: 0, - end: bins, - }, - ); - let mut expected = vec![Vec::new(); bins]; - expected[0].push(raw_expected[0]); - expected[0].push(raw_expected[1]); - expected[bins - 1].push(raw_expected[2]); - expected[bins - 1].push(raw_expected[3]); - assert_scan(result, vec![expected], bins, 0, bins); - - let bins = 4; - let accounts_db = AccountsDb::new_single_for_tests(); - let result = accounts_db.scan_snapshot_stores( - &get_storage_refs(&storages), - &mut stats, - bins, - &Range { - start: 0, - end: bins, - }, - ); - let mut expected = vec![Vec::new(); bins]; - expected[0].push(raw_expected[0]); - expected[1].push(raw_expected[1]); - expected[2].push(raw_expected[2]); - expected[bins - 1].push(raw_expected[3]); - assert_scan(result, vec![expected], bins, 0, bins); - - let bins = 256; - let accounts_db = AccountsDb::new_single_for_tests(); - let result = accounts_db.scan_snapshot_stores( - &get_storage_refs(&storages), - &mut stats, - bins, - &Range { - start: 0, - end: bins, - }, - ); - let mut expected = vec![Vec::new(); bins]; - expected[0].push(raw_expected[0]); - expected[127].push(raw_expected[1]); - expected[128].push(raw_expected[2]); - expected[bins - 1].push(*raw_expected.last().unwrap()); - assert_scan(result, vec![expected], bins, 0, bins); - } - - define_accounts_db_test!( - test_accountsdb_scan_snapshot_stores_2nd_chunk, - |accounts_db| { - // enough stores to get to 2nd chunk - let bins = 1; - let slot = MAX_ITEMS_PER_CHUNK as Slot; - let (storages, raw_expected) = sample_storages_and_account_in_slot(slot, &accounts_db); - let storage_data = [(&storages[0], slot)]; - - let sorted_storages = - SortedStorages::new_debug(&storage_data[..], 0, MAX_ITEMS_PER_CHUNK as usize + 1); - - let mut stats = HashStats::default(); - let result = accounts_db.scan_snapshot_stores( - &sorted_storages, - &mut stats, - bins, - &Range { - start: 0, - end: bins, - }, - ); - - assert_scan(result, vec![vec![raw_expected]], bins, 0, bins); - } - ); - - define_accounts_db_test!( - test_accountsdb_scan_snapshot_stores_binning, - |accounts_db| { - let mut stats = HashStats::default(); - let (storages, raw_expected) = sample_storages_and_accounts(&accounts_db); - - // just the first bin of 2 - let bins = 2; - let half_bins = bins / 2; - let result = accounts_db.scan_snapshot_stores( - &get_storage_refs(&storages), - &mut stats, - bins, - &Range { - start: 0, - end: half_bins, - }, - ); - let mut expected = vec![Vec::new(); half_bins]; - expected[0].push(raw_expected[0]); - expected[0].push(raw_expected[1]); - assert_scan(result, vec![expected], bins, 0, half_bins); - - // just the second bin of 2 - let accounts_db = AccountsDb::new_single_for_tests(); - let result = accounts_db.scan_snapshot_stores( - &get_storage_refs(&storages), - &mut stats, - bins, - &Range { - start: 1, - end: bins, - }, - ); - - let mut expected = vec![Vec::new(); half_bins]; - let starting_bin_index = 0; - expected[starting_bin_index].push(raw_expected[2]); - expected[starting_bin_index].push(raw_expected[3]); - assert_scan(result, vec![expected], bins, 1, bins - 1); - - // 1 bin at a time of 4 - let bins = 4; - let accounts_db = AccountsDb::new_single_for_tests(); - - for (bin, expected_item) in raw_expected.iter().enumerate().take(bins) { - 
let result = accounts_db.scan_snapshot_stores(
-                    &get_storage_refs(&storages),
-                    &mut stats,
-                    bins,
-                    &Range {
-                        start: bin,
-                        end: bin + 1,
-                    },
-                );
-                let mut expected = vec![Vec::new(); 1];
-                expected[0].push(*expected_item);
-                assert_scan(result, vec![expected], bins, bin, 1);
-            }

-            let bins = 256;
-            let bin_locations = [0, 127, 128, 255];
-            let range = 1;
-            for bin in 0..bins {
-                let accounts_db = AccountsDb::new_single_for_tests();
-                let result = accounts_db.scan_snapshot_stores(
-                    &get_storage_refs(&storages),
-                    &mut stats,
-                    bins,
-                    &Range {
-                        start: bin,
-                        end: bin + range,
-                    },
-                );
-                let mut expected = vec![];
-                if let Some(index) = bin_locations.iter().position(|&r| r == bin) {
-                    expected = vec![Vec::new(); range];
-                    expected[0].push(raw_expected[index]);
-                }
-                let mut result2 = (0..range).map(|_| Vec::default()).collect::<Vec<_>>();
-                if let Some(m) = result.first() {
-                    m.load_all(&mut result2, bin, &PubkeyBinCalculator24::new(bins));
-                } else {
-                    result2 = vec![];
-                }
-
-                assert_eq!(result2, expected);
-            }
-        }
-    );
-
-    define_accounts_db_test!(
-        test_accountsdb_scan_snapshot_stores_binning_2nd_chunk,
-        |accounts_db| {
-            // enough stores to get to 2nd chunk
-            // range is for only 1 bin out of 256.
-            let bins = 256;
-            let slot = MAX_ITEMS_PER_CHUNK as Slot;
-            let (storages, raw_expected) = sample_storages_and_account_in_slot(slot, &accounts_db);
-            let storage_data = [(&storages[0], slot)];
-
-            let sorted_storages =
-                SortedStorages::new_debug(&storage_data[..], 0, MAX_ITEMS_PER_CHUNK as usize + 1);
-
-            let mut stats = HashStats::default();
-            let range = 1;
-            let start = 127;
-            let result = accounts_db.scan_snapshot_stores(
-                &sorted_storages,
-                &mut stats,
-                bins,
-                &Range {
-                    start,
-                    end: start + range,
-                },
-            );
-            assert_eq!(result.len(), 1); // 2 chunks, but 1 is empty so not included
-            let mut expected = vec![Vec::new(); range];
-            expected[0].push(raw_expected[1]);
-            let mut result2 = (0..range).map(|_| Vec::default()).collect::<Vec<_>>();
-            result[0].load_all(&mut result2, 0, &PubkeyBinCalculator24::new(range));
-            assert_eq!(result2.len(), 1);
-            assert_eq!(result2, expected);
-        }
-    );
-
    define_accounts_db_test!(
        test_accountsdb_calculate_accounts_hash_from_storages_simple,
        |db| {
@@ -10877,116 +10216,6 @@ pub mod tests {
        (storages, size, slot_expected)
    }

-    #[derive(Clone)]
-    struct TestScan {
-        calls: Arc<AtomicU64>,
-        pubkey: Pubkey,
-        slot_expected: Slot,
-        accum: BinnedHashData,
-        current_slot: Slot,
-        value_to_use_for_lamports: u64,
-    }
-
-    impl AppendVecScan for TestScan {
-        fn filter(&mut self, _pubkey: &Pubkey) -> bool {
-            true
-        }
-        fn set_slot(&mut self, slot: Slot) {
-            self.current_slot = slot;
-        }
-        fn init_accum(&mut self, _count: usize) {}
-        fn found_account(&mut self, loaded_account: &LoadedAccount) {
-            self.calls.fetch_add(1, Ordering::Relaxed);
-            assert_eq!(loaded_account.pubkey(), &self.pubkey);
-            assert_eq!(self.slot_expected, self.current_slot);
-            self.accum.push(vec![CalculateHashIntermediate {
-                hash: AccountHash(Hash::default()),
-                lamports: self.value_to_use_for_lamports,
-                pubkey: self.pubkey,
-            }]);
-        }
-        fn scanning_complete(self) -> BinnedHashData {
-            self.accum
-        }
-    }
-
-    #[test_case(AccountsFileProvider::AppendVec)]
-    #[test_case(AccountsFileProvider::HotStorage)]
-    fn test_accountsdb_scan_account_storage_no_bank(accounts_file_provider: AccountsFileProvider) {
-        solana_logger::setup();
-
-        let expected = 1;
-        let tf = crate::append_vec::test_utils::get_append_vec_path(
-            "test_accountsdb_scan_account_storage_no_bank",
-        );
-        let (_temp_dirs, paths) =
get_temp_accounts_paths(1).unwrap();
-        let slot_expected: Slot = 0;
-        let size: usize = 123;
-        let mut data = AccountStorageEntry::new(
-            &paths[0],
-            slot_expected,
-            0,
-            size as u64,
-            accounts_file_provider,
-        );
-        let av = AccountsFile::AppendVec(AppendVec::new(&tf.path, true, 1024 * 1024));
-        data.accounts = av;
-
-        let storage = Arc::new(data);
-        let pubkey = solana_sdk::pubkey::new_rand();
-        let acc = AccountSharedData::new(1, 48, AccountSharedData::default().owner());
-        let mark_alive = false;
-        append_single_account_with_default_hash(&storage, &pubkey, &acc, mark_alive, None);
-
-        let calls = Arc::new(AtomicU64::new(0));
-        let temp_dir = TempDir::new().unwrap();
-        let accounts_hash_cache_path = temp_dir.path().to_path_buf();
-        let accounts_db = AccountsDb::new_single_for_tests();
-
-        let test_scan = TestScan {
-            calls: calls.clone(),
-            pubkey,
-            slot_expected,
-            accum: Vec::default(),
-            current_slot: 0,
-            value_to_use_for_lamports: expected,
-        };
-
-        let result = accounts_db.scan_account_storage_no_bank(
-            &CacheHashData::new(accounts_hash_cache_path, CacheHashDeletionPolicy::AllUnused),
-            &CalcAccountsHashConfig::default(),
-            &get_storage_refs(&[storage]),
-            test_scan,
-            &Range { start: 0, end: 1 },
-            &mut HashStats::default(),
-        );
-        let result2 = result
-            .iter()
-            .map(|file| file.map().unwrap())
-            .collect::<Vec<_>>();
-        assert_eq!(calls.load(Ordering::Relaxed), 1);
-        assert_scan(
-            result2,
-            vec![vec![vec![CalculateHashIntermediate {
-                hash: AccountHash(Hash::default()),
-                lamports: expected,
-                pubkey,
-            }]]],
-            1,
-            0,
-            1,
-        );
-    }
-
-    fn convert_to_slice(
-        input: &[Vec<Vec<CalculateHashIntermediate>>],
-    ) -> Vec<Vec<&[CalculateHashIntermediate]>> {
-        input
-            .iter()
-            .map(|v| v.iter().map(|v| &v[..]).collect::<Vec<_>>())
-            .collect::<Vec<_>>()
-    }
-
    pub(crate) fn append_single_account_with_default_hash(
        storage: &AccountStorageEntry,
        pubkey: &Pubkey,
@@ -11009,10 +10238,7 @@ pub mod tests {

        if let Some(index) = add_to_index {
            let account_info = AccountInfo::new(
-                StorageLocation::AppendVec(
-                    storage.append_vec_id(),
-                    stored_accounts_info.offsets[0],
-                ),
+                StorageLocation::AppendVec(storage.id(), stored_accounts_info.offsets[0]),
                account.lamports(),
            );
            index.upsert(
@@ -11028,53 +10254,6 @@ pub mod tests {
        }
    }

-    define_accounts_db_test!(
-        test_accountsdb_scan_account_storage_no_bank_one_slot,
-        |db| {
-            solana_logger::setup();
-            let accounts_file_provider = db.accounts_file_provider;
-
-            let expected = 1;
-            let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap();
-            let slot_expected: Slot = 0;
-            let data = AccountStorageEntry::new(
-                &paths[0],
-                slot_expected,
-                0,
-                1024 * 1024,
-                accounts_file_provider,
-            );
-            let storage = Arc::new(data);
-            let pubkey = solana_sdk::pubkey::new_rand();
-            let acc = AccountSharedData::new(1, 48, AccountSharedData::default().owner());
-            let mark_alive = false;
-            append_single_account_with_default_hash(&storage, &pubkey, &acc, mark_alive, None);
-
-            let calls = Arc::new(AtomicU64::new(0));
-
-            let mut test_scan = TestScan {
-                calls: calls.clone(),
-                pubkey,
-                slot_expected,
-                accum: Vec::default(),
-                current_slot: 0,
-                value_to_use_for_lamports: expected,
-            };
-
-            AccountsDb::scan_single_account_storage(&storage, &mut test_scan);
-            let accum = test_scan.scanning_complete();
-            assert_eq!(calls.load(Ordering::Relaxed), 1);
-            assert_eq!(
-                accum
-                    .iter()
-                    .flatten()
-                    .map(|a| a.lamports)
-                    .collect::<Vec<_>>(),
-                vec![expected]
-            );
-        }
-    );
-
    fn append_sample_data_to_storage(
        storage: &AccountStorageEntry,
        pubkey: &Pubkey,
@@ -11089,7 +10268,7 @@ pub mod tests {
        append_single_account_with_default_hash(storage,
pubkey, &acc, mark_alive, None);
    }

-    fn sample_storage_with_entries(
+    pub(crate) fn sample_storage_with_entries(
        tf: &TempFile,
        slot: Slot,
        pubkey: &Pubkey,
@@ -11148,84 +10327,6 @@ pub mod tests {
        )
    }

-    #[test]
-    fn test_accountsdb_scan_multiple_account_storage_no_bank_one_slot() {
-        solana_logger::setup();
-
-        let slot_expected: Slot = 0;
-        let tf = crate::append_vec::test_utils::get_append_vec_path(
-            "test_accountsdb_scan_account_storage_no_bank",
-        );
-        let pubkey1 = solana_sdk::pubkey::new_rand();
-        let pubkey2 = solana_sdk::pubkey::new_rand();
-        let mark_alive = false;
-        let storage = sample_storage_with_entries(&tf, slot_expected, &pubkey1, mark_alive);
-        let lamports = storage
-            .accounts
-            .get_account_shared_data(0)
-            .unwrap()
-            .lamports();
-        let calls = Arc::new(AtomicU64::new(0));
-        let mut scanner = TestScanSimple {
-            current_slot: 0,
-            slot_expected,
-            pubkey1,
-            pubkey2,
-            accum: Vec::default(),
-            calls: calls.clone(),
-        };
-        AccountsDb::scan_single_account_storage(&storage, &mut scanner);
-        let accum = scanner.scanning_complete();
-        assert_eq!(calls.load(Ordering::Relaxed), 1);
-        assert_eq!(
-            accum
-                .iter()
-                .flatten()
-                .map(|a| a.lamports)
-                .collect::<Vec<_>>(),
-            vec![lamports]
-        );
-    }
-
-    #[derive(Clone)]
-    struct TestScanSimple {
-        current_slot: Slot,
-        slot_expected: Slot,
-        calls: Arc<AtomicU64>,
-        accum: BinnedHashData,
-        pubkey1: Pubkey,
-        pubkey2: Pubkey,
-    }
-
-    impl AppendVecScan for TestScanSimple {
-        fn set_slot(&mut self, slot: Slot) {
-            self.current_slot = slot;
-        }
-        fn filter(&mut self, _pubkey: &Pubkey) -> bool {
-            true
-        }
-        fn init_accum(&mut self, _count: usize) {}
-        fn found_account(&mut self, loaded_account: &LoadedAccount) {
-            self.calls.fetch_add(1, Ordering::Relaxed);
-            let first = loaded_account.pubkey() == &self.pubkey1;
-            assert!(first || loaded_account.pubkey() == &self.pubkey2);
-            assert_eq!(self.slot_expected, self.current_slot);
-            if first {
-                assert!(self.accum.is_empty());
-            } else {
-                assert_eq!(self.accum.len(), 1);
-            }
-            self.accum.push(vec![CalculateHashIntermediate {
-                hash: AccountHash(Hash::default()),
-                lamports: loaded_account.lamports(),
-                pubkey: Pubkey::default(),
-            }]);
-        }
-        fn scanning_complete(self) -> BinnedHashData {
-            self.accum
-        }
-    }
-
    define_accounts_db_test!(test_accountsdb_add_root, |db| {
        let key = Pubkey::default();
        let account0 = AccountSharedData::new(1, 0, &key);
@@ -11686,14 +10787,7 @@ pub mod tests {
        accounts.calculate_accounts_delta_hash(0);

        //slot is still there, since gc is lazy
-        assert_eq!(
-            accounts
-                .storage
-                .get_slot_storage_entry(0)
-                .unwrap()
-                .append_vec_id(),
-            id
-        );
+        assert_eq!(accounts.storage.get_slot_storage_entry(0).unwrap().id(), id);

        //store causes clean
        accounts.store_for_tests(1, &[(&pubkey, &account)]);
@@ -12105,13 +11199,13 @@ pub mod tests {
        // updates in later slots in slot 1
        assert_eq!(accounts.alive_account_count_in_slot(0), 1);
        assert_eq!(accounts.alive_account_count_in_slot(1), 1);
-        accounts.clean_accounts(Some(0), false, None, &EpochSchedule::default());
+        accounts.clean_accounts(Some(0), false, &EpochSchedule::default());
        assert_eq!(accounts.alive_account_count_in_slot(0), 1);
        assert_eq!(accounts.alive_account_count_in_slot(1), 1);
        assert!(accounts.accounts_index.contains_with(&pubkey, None, None));

        // Now the account can be cleaned up
-        accounts.clean_accounts(Some(1), false, None, &EpochSchedule::default());
+        accounts.clean_accounts(Some(1), false, &EpochSchedule::default());
        assert_eq!(accounts.alive_account_count_in_slot(0), 0);
assert_eq!(accounts.alive_account_count_in_slot(1), 0); @@ -12569,7 +11663,7 @@ pub mod tests { } impl<'a> CalcAccountsHashConfig<'a> { - fn default() -> Self { + pub(crate) fn default() -> Self { Self { use_bg_thread_pool: false, ancestors: None, @@ -13014,7 +12108,7 @@ pub mod tests { accounts.shrink_candidate_slots(&epoch_schedule); } - accounts.shrink_all_slots(*startup, None, &EpochSchedule::default()); + accounts.shrink_all_slots(*startup, &EpochSchedule::default(), None); } } @@ -13072,7 +12166,7 @@ pub mod tests { ); // Now, do full-shrink. - accounts.shrink_all_slots(false, None, &EpochSchedule::default()); + accounts.shrink_all_slots(false, &EpochSchedule::default(), None); assert_eq!( pubkey_count_after_shrink, accounts.all_account_count_in_accounts_file(shrink_slot) @@ -13101,72 +12195,59 @@ pub mod tests { let db = AccountsDb::new_single_for_tests(); let common_store_path = Path::new(""); - let slot_id_1 = 12; - let store_file_size = 2 * PAGE_SIZE; + let store_file_size = 100; - let store1_id = 22; + let store1_slot = 11; let store1 = Arc::new(AccountStorageEntry::new( common_store_path, - slot_id_1, - store1_id, + store1_slot, + store1_slot as AccountsFileId, store_file_size, AccountsFileProvider::AppendVec, )); + db.storage.insert(store1_slot, Arc::clone(&store1)); store1.alive_bytes.store(0, Ordering::Release); + candidates.insert(store1_slot); - candidates.insert(slot_id_1); - - let slot_id_2 = 13; - - let store2_id = 44; + let store2_slot = 22; let store2 = Arc::new(AccountStorageEntry::new( common_store_path, - slot_id_2, - store2_id, + store2_slot, + store2_slot as AccountsFileId, store_file_size, AccountsFileProvider::AppendVec, )); - - // The store2's alive_ratio is 0.5: as its page aligned alive size is 1 page. - let store2_alive_bytes = (PAGE_SIZE - 1) as usize; + db.storage.insert(store2_slot, Arc::clone(&store2)); store2 .alive_bytes - .store(store2_alive_bytes, Ordering::Release); - candidates.insert(slot_id_2); + .store(store_file_size as usize / 2, Ordering::Release); + candidates.insert(store2_slot); - let slot_id_3 = 14; - let store3_id = 55; - let entry3 = Arc::new(AccountStorageEntry::new( + let store3_slot = 33; + let store3 = Arc::new(AccountStorageEntry::new( common_store_path, - slot_id_3, - store3_id, + store3_slot, + store3_slot as AccountsFileId, store_file_size, AccountsFileProvider::AppendVec, )); - - db.storage.insert(slot_id_1, Arc::clone(&store1)); - db.storage.insert(slot_id_2, Arc::clone(&store2)); - db.storage.insert(slot_id_3, Arc::clone(&entry3)); - - // The store3's alive ratio is 1.0 as its page-aligned alive size is 2 pages - let store3_alive_bytes = (PAGE_SIZE + 1) as usize; - entry3 + db.storage.insert(store3_slot, Arc::clone(&store3)); + store3 .alive_bytes - .store(store3_alive_bytes, Ordering::Release); - - candidates.insert(slot_id_3); + .store(store_file_size as usize, Ordering::Release); + candidates.insert(store3_slot); // Set the target alive ratio to 0.6 so that we can just get rid of store1, the remaining two stores - // alive ratio can be > the target ratio: the actual ratio is 0.75 because of 3 alive pages / 4 total pages. + // alive ratio can be > the target ratio: the actual ratio is 0.75 because of 150 alive bytes / 200 total bytes. // The target ratio is also set to larger than store2's alive ratio: 0.5 so that it would be added // to the candidates list for next round. 
let target_alive_ratio = 0.6; let (selected_candidates, next_candidates) = db.select_candidates_by_total_usage(&candidates, target_alive_ratio, None); assert_eq!(1, selected_candidates.len()); - assert!(selected_candidates.contains(&slot_id_1)); + assert!(selected_candidates.contains(&store1_slot)); assert_eq!(1, next_candidates.len()); - assert!(next_candidates.contains(&slot_id_2)); + assert!(next_candidates.contains(&store2_slot)); } #[test] @@ -13177,64 +12258,55 @@ pub mod tests { let mut candidates = ShrinkCandidates::default(); let common_store_path = Path::new(""); - let slot_id_1 = 12; - let store_file_size = 2 * PAGE_SIZE; + let store_file_size = 100; - let store1_id = 22; + let store1_slot = 11; let store1 = Arc::new(AccountStorageEntry::new( common_store_path, - slot_id_1, - store1_id, + store1_slot, + store1_slot as AccountsFileId, store_file_size, AccountsFileProvider::AppendVec, )); + db.storage.insert(store1_slot, Arc::clone(&store1)); store1.alive_bytes.store(0, Ordering::Release); - db.storage.insert(slot_id_1, Arc::clone(&store1)); - candidates.insert(slot_id_1); + candidates.insert(store1_slot); - let slot_id_2 = 13; - let store2_id = 44; + let store2_slot = 22; let store2 = Arc::new(AccountStorageEntry::new( common_store_path, - slot_id_2, - store2_id, + store2_slot, + store2_slot as AccountsFileId, store_file_size, AccountsFileProvider::AppendVec, )); - db.storage.insert(slot_id_2, Arc::clone(&store2)); - - // The store2's alive_ratio is 0.5: as its page aligned alive size is 1 page. - let store2_alive_bytes = (PAGE_SIZE - 1) as usize; + db.storage.insert(store2_slot, Arc::clone(&store2)); store2 .alive_bytes - .store(store2_alive_bytes, Ordering::Release); - candidates.insert(slot_id_2); + .store(store_file_size as usize / 2, Ordering::Release); + candidates.insert(store2_slot); - let slot_id_3 = 14; - let store3_id = 55; - let entry3 = Arc::new(AccountStorageEntry::new( + let store3_slot = 33; + let store3 = Arc::new(AccountStorageEntry::new( common_store_path, - slot_id_3, - store3_id, + store3_slot, + store3_slot as AccountsFileId, store_file_size, AccountsFileProvider::AppendVec, )); - - // The store3's alive ratio is 1.0 as its page-aligned alive size is 2 pages - let store3_alive_bytes = (PAGE_SIZE + 1) as usize; - entry3 + db.storage.insert(store3_slot, Arc::clone(&store3)); + store3 .alive_bytes - .store(store3_alive_bytes, Ordering::Release); - - candidates.insert(slot_id_3); + .store(store_file_size as usize, Ordering::Release); + candidates.insert(store3_slot); // Set the target ratio to default (0.8), both store1 and store2 must be selected and store3 is ignored. 
let target_alive_ratio = DEFAULT_ACCOUNTS_SHRINK_RATIO; let (selected_candidates, next_candidates) = db.select_candidates_by_total_usage(&candidates, target_alive_ratio, None); assert_eq!(2, selected_candidates.len()); - assert!(selected_candidates.contains(&slot_id_1)); - assert!(selected_candidates.contains(&slot_id_2)); + assert!(selected_candidates.contains(&store1_slot)); + assert!(selected_candidates.contains(&store2_slot)); assert_eq!(0, next_candidates.len()); } @@ -13245,48 +12317,38 @@ pub mod tests { let db = AccountsDb::new_single_for_tests(); let mut candidates = ShrinkCandidates::default(); - let slot1 = 12; let common_store_path = Path::new(""); + let store_file_size = 100; - let store_file_size = 4 * PAGE_SIZE; - let store1_id = 22; + let store1_slot = 11; let store1 = Arc::new(AccountStorageEntry::new( common_store_path, - slot1, - store1_id, + store1_slot, + store1_slot as AccountsFileId, store_file_size, AccountsFileProvider::AppendVec, )); - - // store1 has 1 page-aligned alive bytes, its alive ratio is 1/4: 0.25 - let store1_alive_bytes = (PAGE_SIZE - 1) as usize; + db.storage.insert(store1_slot, Arc::clone(&store1)); store1 .alive_bytes - .store(store1_alive_bytes, Ordering::Release); - - candidates.insert(slot1); - db.storage.insert(slot1, Arc::clone(&store1)); + .store(store_file_size as usize / 4, Ordering::Release); + candidates.insert(store1_slot); - let store2_id = 44; - let slot2 = 44; + let store2_slot = 22; let store2 = Arc::new(AccountStorageEntry::new( common_store_path, - slot2, - store2_id, + store2_slot, + store2_slot as AccountsFileId, store_file_size, AccountsFileProvider::AppendVec, )); - - // store2 has 2 page-aligned bytes, its alive ratio is 2/4: 0.5 - let store2_alive_bytes = (PAGE_SIZE + 1) as usize; + db.storage.insert(store2_slot, Arc::clone(&store2)); store2 .alive_bytes - .store(store2_alive_bytes, Ordering::Release); - - candidates.insert(slot2); - db.storage.insert(slot2, Arc::clone(&store2)); + .store(store_file_size as usize / 2, Ordering::Release); + candidates.insert(store2_slot); - for newest_ancient_slot in [None, Some(slot1), Some(slot2)] { + for newest_ancient_slot in [None, Some(store1_slot), Some(store2_slot)] { // Set the target ratio to default (0.8), both stores from the two different slots must be selected. 
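// Sketch of the expectation exercised below (assuming candidates at or below the newest
// ancient slot are excluded): passing oldest_non_ancient_slot = newest_ancient_slot + 1
// drops store1 (slot 11) once newest_ancient_slot >= 11, and both stores once it reaches 22.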
let target_alive_ratio = DEFAULT_ACCOUNTS_SHRINK_RATIO; let (selected_candidates, next_candidates) = db.select_candidates_by_total_usage( @@ -13295,9 +12357,9 @@ pub mod tests { newest_ancient_slot.map(|newest_ancient_slot| newest_ancient_slot + 1), ); assert_eq!( - if newest_ancient_slot == Some(slot1) { + if newest_ancient_slot == Some(store1_slot) { 1 - } else if newest_ancient_slot == Some(slot2) { + } else if newest_ancient_slot == Some(store2_slot) { 0 } else { 2 @@ -13306,11 +12368,11 @@ pub mod tests { ); assert_eq!( newest_ancient_slot.is_none(), - selected_candidates.contains(&slot1) + selected_candidates.contains(&store1_slot) ); - if newest_ancient_slot != Some(slot2) { - assert!(selected_candidates.contains(&slot2)); + if newest_ancient_slot != Some(store2_slot) { + assert!(selected_candidates.contains(&store2_slot)); } assert_eq!(0, next_candidates.len()); } @@ -13525,7 +12587,7 @@ pub mod tests { #[test] #[should_panic(expected = "We've run out of storage ids!")] - fn test_wrapping_append_vec_id() { + fn test_wrapping_storage_id() { let db = AccountsDb::new_single_for_tests(); let zero_lamport_account = @@ -13551,7 +12613,7 @@ pub mod tests { #[test] #[should_panic(expected = "We've run out of storage ids!")] - fn test_reuse_append_vec_id() { + fn test_reuse_storage_id() { solana_logger::setup(); let db = AccountsDb::new_single_for_tests(); @@ -13593,7 +12655,7 @@ pub mod tests { db.add_root_and_flush_write_cache(1); // Only clean zero lamport accounts up to slot 0 - db.clean_accounts(Some(0), false, None, &EpochSchedule::default()); + db.clean_accounts(Some(0), false, &EpochSchedule::default()); // Should still be able to find zero lamport account in slot 1 assert_eq!( @@ -14746,7 +13808,7 @@ pub mod tests { db.calculate_accounts_delta_hash(1); // Clean to remove outdated entry from slot 0 - db.clean_accounts(Some(1), false, None, &EpochSchedule::default()); + db.clean_accounts(Some(1), false, &EpochSchedule::default()); // Shrink Slot 0 { @@ -14765,7 +13827,7 @@ pub mod tests { // Should be one store before clean for slot 0 db.get_and_assert_single_storage(0); db.calculate_accounts_delta_hash(2); - db.clean_accounts(Some(2), false, None, &EpochSchedule::default()); + db.clean_accounts(Some(2), false, &EpochSchedule::default()); // No stores should exist for slot 0 after clean assert_no_storages_at_slot(&db, 0); @@ -15349,26 +14411,34 @@ pub mod tests { #[test] fn test_shrink_productive() { solana_logger::setup(); - let s1 = - AccountStorageEntry::new(Path::new("."), 0, 0, 1024, AccountsFileProvider::AppendVec); - let store = Arc::new(s1); - assert!(!AccountsDb::is_shrinking_productive(0, &store)); + let path = Path::new(""); + let file_size = 100; + let slot = 11; - let s1 = AccountStorageEntry::new( - Path::new("."), - 0, - 0, - PAGE_SIZE * 4, + let store = Arc::new(AccountStorageEntry::new( + path, + slot, + slot as AccountsFileId, + file_size, AccountsFileProvider::AppendVec, - ); - let store = Arc::new(s1); - store.add_account((3 * PAGE_SIZE as usize) - 1); - store.add_account(10); - store.remove_accounts(10, false, 1); - assert!(AccountsDb::is_shrinking_productive(0, &store)); + )); + store.add_account(file_size as usize); + assert!(!AccountsDb::is_shrinking_productive(slot, &store)); + + let store = Arc::new(AccountStorageEntry::new( + path, + slot, + slot as AccountsFileId, + file_size, + AccountsFileProvider::AppendVec, + )); + store.add_account(file_size as usize / 2); + store.add_account(file_size as usize / 4); + store.remove_accounts(file_size as usize / 4, 
false, 1); + assert!(AccountsDb::is_shrinking_productive(slot, &store)); - store.add_account(PAGE_SIZE as usize); - assert!(!AccountsDb::is_shrinking_productive(0, &store)); + store.add_account(file_size as usize / 2); + assert!(!AccountsDb::is_shrinking_productive(slot, &store)); } #[test] @@ -15377,7 +14447,7 @@ pub mod tests { let mut accounts = AccountsDb::new_single_for_tests(); let common_store_path = Path::new(""); - let store_file_size = 2 * PAGE_SIZE; + let store_file_size = 100_000; let entry = Arc::new(AccountStorageEntry::new( common_store_path, 0, @@ -15396,14 +14466,24 @@ pub mod tests { panic!("Expect the default to be TotalSpace") } } - entry.alive_bytes.store(3000, Ordering::Release); + + entry + .alive_bytes + .store(store_file_size as usize - 1, Ordering::Release); assert!(accounts.is_candidate_for_shrink(&entry)); - entry.alive_bytes.store(5000, Ordering::Release); + entry + .alive_bytes + .store(store_file_size as usize, Ordering::Release); assert!(!accounts.is_candidate_for_shrink(&entry)); - accounts.shrink_ratio = AccountShrinkThreshold::TotalSpace { shrink_ratio: 0.3 }; - entry.alive_bytes.store(3000, Ordering::Release); + + let shrink_ratio = 0.3; + let file_size_shrink_limit = (store_file_size as f64 * shrink_ratio) as usize; + entry + .alive_bytes + .store(file_size_shrink_limit + 1, Ordering::Release); + accounts.shrink_ratio = AccountShrinkThreshold::TotalSpace { shrink_ratio }; assert!(accounts.is_candidate_for_shrink(&entry)); - accounts.shrink_ratio = AccountShrinkThreshold::IndividualStore { shrink_ratio: 0.3 }; + accounts.shrink_ratio = AccountShrinkThreshold::IndividualStore { shrink_ratio }; assert!(!accounts.is_candidate_for_shrink(&entry)); } @@ -15553,7 +14633,7 @@ pub mod tests { accounts.set_storage_count_and_alive_bytes(dashmap, &mut GenerateIndexTimings::default()); assert_eq!(accounts.storage.len(), 1); for (_, store) in accounts.storage.iter() { - assert_eq!(store.append_vec_id(), 0); + assert_eq!(store.id(), 0); assert_eq!(store.count_and_status.read().0, count); assert_eq!(store.alive_bytes.load(Ordering::Acquire), 2); } @@ -15604,8 +14684,9 @@ pub mod tests { assert!(db.storage.get_slot_storage_entry(slot).is_none()); } - // Test to make sure `clean_accounts()` works properly with the `last_full_snapshot_slot` - // parameter. 
Basically: + // Test to make sure `clean_accounts()` works properly with `latest_full_snapshot_slot` + // + // Basically: // // - slot 1: set Account1's balance to non-zero // - slot 2: set Account1's balance to a different non-zero amount @@ -15613,12 +14694,12 @@ pub mod tests { // - call `clean_accounts()` with `max_clean_root` set to 2 // - ensure Account1 has *not* been purged // - ensure the store from slot 1 is cleaned up - // - call `clean_accounts()` with `last_full_snapshot_slot` set to 2 + // - call `clean_accounts()` with `latest_full_snapshot_slot` set to 2 // - ensure Account1 has *not* been purged - // - call `clean_accounts()` with `last_full_snapshot_slot` set to 3 + // - call `clean_accounts()` with `latest_full_snapshot_slot` set to 3 // - ensure Account1 *has* been purged define_accounts_db_test!( - test_clean_accounts_with_last_full_snapshot_slot, + test_clean_accounts_with_latest_full_snapshot_slot, |accounts_db| { let pubkey = solana_sdk::pubkey::new_rand(); let owner = solana_sdk::pubkey::new_rand(); @@ -15644,13 +14725,16 @@ pub mod tests { assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 3); - accounts_db.clean_accounts(Some(slot2), false, Some(slot2), &EpochSchedule::default()); + accounts_db.set_latest_full_snapshot_slot(slot2); + accounts_db.clean_accounts(Some(slot2), false, &EpochSchedule::default()); assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 2); - accounts_db.clean_accounts(None, false, Some(slot2), &EpochSchedule::default()); + accounts_db.set_latest_full_snapshot_slot(slot2); + accounts_db.clean_accounts(None, false, &EpochSchedule::default()); assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 1); - accounts_db.clean_accounts(None, false, Some(slot3), &EpochSchedule::default()); + accounts_db.set_latest_full_snapshot_slot(slot3); + accounts_db.clean_accounts(None, false, &EpochSchedule::default()); assert_eq!(accounts_db.ref_count_for_pubkey(&pubkey), 0); } ); @@ -15661,7 +14745,7 @@ pub mod tests { let slot = 10; struct TestParameters { - last_full_snapshot_slot: Option, + latest_full_snapshot_slot: Option, max_clean_root: Option, should_contain: bool, } @@ -15678,9 +14762,11 @@ pub mod tests { purges_zero_lamports.insert(pubkey, (vec![(slot, account_info)], 1)); let accounts_db = AccountsDb::new_single_for_tests(); + if let Some(latest_full_snapshot_slot) = test_params.latest_full_snapshot_slot { + accounts_db.set_latest_full_snapshot_slot(latest_full_snapshot_slot); + } accounts_db.filter_zero_lamport_clean_for_incremental_snapshots( test_params.max_clean_root, - test_params.last_full_snapshot_slot, &store_counts, &mut purges_zero_lamports, ); @@ -15694,16 +14780,16 @@ pub mod tests { // Scenario 1: last full snapshot is NONE // In this scenario incremental snapshots are OFF, so always purge { - let last_full_snapshot_slot = None; + let latest_full_snapshot_slot = None; do_test(TestParameters { - last_full_snapshot_slot, + latest_full_snapshot_slot, max_clean_root: Some(slot), should_contain: true, }); do_test(TestParameters { - last_full_snapshot_slot, + latest_full_snapshot_slot, max_clean_root: None, should_contain: true, }); @@ -15713,22 +14799,22 @@ pub mod tests { // In this scenario always purge, and just test the various permutations of // `should_filter_for_incremental_snapshots` based on `max_clean_root`. 
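// Rough rule implied by the scenarios in this test (a hedged reading, not a spec): a
// zero-lamport account is dropped from the purge set only when it sits at a slot newer
// than latest_full_snapshot_slot and the clean root reaches past it, since purging it
// then would change the base the next incremental snapshot builds on.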
{ - let last_full_snapshot_slot = Some(slot + 1); + let latest_full_snapshot_slot = Some(slot + 1); do_test(TestParameters { - last_full_snapshot_slot, - max_clean_root: last_full_snapshot_slot, + latest_full_snapshot_slot, + max_clean_root: latest_full_snapshot_slot, should_contain: true, }); do_test(TestParameters { - last_full_snapshot_slot, - max_clean_root: last_full_snapshot_slot.map(|s| s + 1), + latest_full_snapshot_slot, + max_clean_root: latest_full_snapshot_slot.map(|s| s + 1), should_contain: true, }); do_test(TestParameters { - last_full_snapshot_slot, + latest_full_snapshot_slot, max_clean_root: None, should_contain: true, }); @@ -15737,22 +14823,22 @@ pub mod tests { // Scenario 3: last full snapshot is EQUAL TO zero lamport account slot // In this scenario always purge, as it's the same as Scenario 2. { - let last_full_snapshot_slot = Some(slot); + let latest_full_snapshot_slot = Some(slot); do_test(TestParameters { - last_full_snapshot_slot, - max_clean_root: last_full_snapshot_slot, + latest_full_snapshot_slot, + max_clean_root: latest_full_snapshot_slot, should_contain: true, }); do_test(TestParameters { - last_full_snapshot_slot, - max_clean_root: last_full_snapshot_slot.map(|s| s + 1), + latest_full_snapshot_slot, + max_clean_root: latest_full_snapshot_slot.map(|s| s + 1), should_contain: true, }); do_test(TestParameters { - last_full_snapshot_slot, + latest_full_snapshot_slot, max_clean_root: None, should_contain: true, }); @@ -15762,22 +14848,22 @@ pub mod tests { // In this scenario do *not* purge, except when `should_filter_for_incremental_snapshots` // is false { - let last_full_snapshot_slot = Some(slot - 1); + let latest_full_snapshot_slot = Some(slot - 1); do_test(TestParameters { - last_full_snapshot_slot, - max_clean_root: last_full_snapshot_slot, + latest_full_snapshot_slot, + max_clean_root: latest_full_snapshot_slot, should_contain: true, }); do_test(TestParameters { - last_full_snapshot_slot, - max_clean_root: last_full_snapshot_slot.map(|s| s + 1), + latest_full_snapshot_slot, + max_clean_root: latest_full_snapshot_slot.map(|s| s + 1), should_contain: false, }); do_test(TestParameters { - last_full_snapshot_slot, + latest_full_snapshot_slot, max_clean_root: None, should_contain: false, }); @@ -16044,15 +15130,15 @@ pub mod tests { let db = AccountsDb::new_single_for_tests(); let size = 1; let existing_store = db.create_and_insert_store(slot, size, "test"); - let old_id = existing_store.append_vec_id(); + let old_id = existing_store.id(); let dead_storages = db.mark_dirty_dead_stores(slot, add_dirty_stores, None, false); assert!(db.storage.get_slot_storage_entry(slot).is_none()); assert_eq!(dead_storages.len(), 1); - assert_eq!(dead_storages.first().unwrap().append_vec_id(), old_id); + assert_eq!(dead_storages.first().unwrap().id(), old_id); if add_dirty_stores { assert_eq!(1, db.dirty_stores.len()); let dirty_store = db.dirty_stores.get(&slot).unwrap(); - assert_eq!(dirty_store.append_vec_id(), old_id); + assert_eq!(dirty_store.id(), old_id); } else { assert!(db.dirty_stores.is_empty()); } @@ -16069,17 +15155,17 @@ pub mod tests { let db = AccountsDb::new_single_for_tests(); let size = 1; let old_store = db.create_and_insert_store(slot, size, "test"); - let old_id = old_store.append_vec_id(); + let old_id = old_store.id(); let shrink_in_progress = db.get_store_for_shrink(slot, 100); let dead_storages = db.mark_dirty_dead_stores(slot, add_dirty_stores, Some(shrink_in_progress), false); assert!(db.storage.get_slot_storage_entry(slot).is_some()); 
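// (illustrative note, assuming get_store_for_shrink installed a replacement storage for
// the slot: the lookup above still succeeds because only the pre-shrink storage is marked
// dead and returned below)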
assert_eq!(dead_storages.len(), 1); - assert_eq!(dead_storages.first().unwrap().append_vec_id(), old_id); + assert_eq!(dead_storages.first().unwrap().id(), old_id); if add_dirty_stores { assert_eq!(1, db.dirty_stores.len()); let dirty_store = db.dirty_stores.get(&slot).unwrap(); - assert_eq!(dirty_store.append_vec_id(), old_id); + assert_eq!(dirty_store.id(), old_id); } else { assert!(db.dirty_stores.is_empty()); } @@ -16468,16 +15554,14 @@ pub mod tests { #[test] fn test_hash_storage_info() { { - let hasher = hash_map::DefaultHasher::new(); + let hasher = DefaultHasher::new(); let hash = hasher.finish(); assert_eq!(15130871412783076140, hash); } { - let mut hasher = hash_map::DefaultHasher::new(); + let mut hasher = DefaultHasher::new(); let slot: Slot = 0; - let tf = crate::append_vec::test_utils::get_append_vec_path( - "test_accountsdb_scan_account_storage_no_bank", - ); + let tf = crate::append_vec::test_utils::get_append_vec_path("test_hash_storage_info"); let pubkey1 = solana_sdk::pubkey::new_rand(); let mark_alive = false; let storage = sample_storage_with_entries(&tf, slot, &pubkey1, mark_alive); @@ -16487,20 +15571,20 @@ pub mod tests { // can't assert hash here - it is a function of mod date assert!(load); let slot = 2; // changed this - let mut hasher = hash_map::DefaultHasher::new(); + let mut hasher = DefaultHasher::new(); let load = AccountsDb::hash_storage_info(&mut hasher, &storage, slot); let hash2 = hasher.finish(); assert_ne!(hash, hash2); // slot changed, these should be different // can't assert hash here - it is a function of mod date assert!(load); - let mut hasher = hash_map::DefaultHasher::new(); + let mut hasher = DefaultHasher::new(); append_sample_data_to_storage(&storage, &solana_sdk::pubkey::new_rand(), false, None); let load = AccountsDb::hash_storage_info(&mut hasher, &storage, slot); let hash3 = hasher.finish(); assert_ne!(hash2, hash3); // moddate and written size changed // can't assert hash here - it is a function of mod date assert!(load); - let mut hasher = hash_map::DefaultHasher::new(); + let mut hasher = DefaultHasher::new(); let load = AccountsDb::hash_storage_info(&mut hasher, &storage, slot); let hash4 = hasher.finish(); assert_eq!(hash4, hash3); // same @@ -16674,15 +15758,12 @@ pub mod tests { let append_vec = db.create_and_insert_store(slot, size, "test"); let mut current_ancient = CurrentAncientAccountsFile::new(slot, append_vec.clone()); assert_eq!(current_ancient.slot(), slot); - assert_eq!(current_ancient.append_vec_id(), append_vec.append_vec_id()); - assert_eq!( - current_ancient.accounts_file().append_vec_id(), - append_vec.append_vec_id() - ); + assert_eq!(current_ancient.id(), append_vec.id()); + assert_eq!(current_ancient.accounts_file().id(), append_vec.id()); let _shrink_in_progress = current_ancient.create_if_necessary(slot2, &db, 0); assert_eq!(current_ancient.slot(), slot); - assert_eq!(current_ancient.append_vec_id(), append_vec.append_vec_id()); + assert_eq!(current_ancient.id(), append_vec.id()); } { @@ -16693,14 +15774,14 @@ pub mod tests { let mut current_ancient = CurrentAncientAccountsFile::default(); let mut _shrink_in_progress = current_ancient.create_if_necessary(slot2, &db, 0); - let id = current_ancient.append_vec_id(); + let id = current_ancient.id(); assert_eq!(current_ancient.slot(), slot2); assert!(is_ancient(&current_ancient.accounts_file().accounts)); let slot3 = 3; // should do nothing let _shrink_in_progress = current_ancient.create_if_necessary(slot3, &db, 0); assert_eq!(current_ancient.slot(), slot2); -
assert_eq!(current_ancient.append_vec_id(), id); + assert_eq!(current_ancient.id(), id); assert!(is_ancient(&current_ancient.accounts_file().accounts)); } { @@ -16715,7 +15796,7 @@ pub mod tests { let _shrink_in_progress = current_ancient.create_ancient_accounts_file(slot2, &db, 0); } - let id = current_ancient.append_vec_id(); + let id = current_ancient.id(); assert_eq!(current_ancient.slot(), slot2); assert!(is_ancient(&current_ancient.accounts_file().accounts)); @@ -16726,7 +15807,7 @@ pub mod tests { current_ancient.create_ancient_accounts_file(slot3, &db, 0); assert_eq!(current_ancient.slot(), slot3); assert!(is_ancient(&current_ancient.accounts_file().accounts)); - assert_ne!(current_ancient.append_vec_id(), id); + assert_ne!(current_ancient.id(), id); } } @@ -17498,7 +16579,7 @@ pub mod tests { if let Some(storage) = db.get_storage_for_slot(slot) { storage.accounts.scan_accounts(|account| { let info = AccountInfo::new( - StorageLocation::AppendVec(storage.append_vec_id(), account.offset()), + StorageLocation::AppendVec(storage.id(), account.offset()), account.lamports(), ); db.accounts_index.upsert( @@ -17544,7 +16625,7 @@ pub mod tests { let starting_id = db .storage .iter() - .map(|storage| storage.1.append_vec_id()) + .map(|storage| storage.1.id()) .max() .unwrap_or(999); for (i, account_data_size) in account_data_sizes.iter().enumerate().take(num_slots) { @@ -17591,7 +16672,7 @@ pub mod tests { let starting_id = db .storage .iter() - .map(|storage| storage.1.append_vec_id()) + .map(|storage| storage.1.id()) .max() .unwrap_or(999); for i in 0..num_slots { @@ -17730,7 +16811,7 @@ pub mod tests { fn test_handle_dropped_roots_for_ancient_assert() { solana_logger::setup(); let common_store_path = Path::new(""); - let store_file_size = 2 * PAGE_SIZE; + let store_file_size = 10_000; let entry = Arc::new(AccountStorageEntry::new( common_store_path, 0, @@ -17777,7 +16858,7 @@ pub mod tests { // should have kept the same 'current_ancient' assert_eq!(current_ancient.slot(), slot5); assert_eq!(current_ancient.accounts_file().slot(), slot5); - assert_eq!(current_ancient.append_vec_id(), storage.append_vec_id()); + assert_eq!(current_ancient.id(), storage.id()); // slot is not ancient, so it is good to move assert!(should_move); @@ -17798,7 +16879,7 @@ pub mod tests { CAN_RANDOMLY_SHRINK_FALSE, ); assert!(!should_move); - assert_eq!(current_ancient.append_vec_id(), ancient1.append_vec_id()); + assert_eq!(current_ancient.id(), ancient1.id()); assert_eq!(current_ancient.slot(), slot1_ancient); // current is ancient1 @@ -17819,7 +16900,7 @@ pub mod tests { CAN_RANDOMLY_SHRINK_FALSE, ); assert!(!should_move); - assert_eq!(current_ancient.append_vec_id(), ancient2.append_vec_id()); + assert_eq!(current_ancient.id(), ancient2.id()); assert_eq!(current_ancient.slot(), slot2_ancient); // now try a full ancient append vec @@ -17836,10 +16917,7 @@ pub mod tests { CAN_RANDOMLY_SHRINK_FALSE, ); assert!(!should_move); - assert_eq!( - current_ancient.append_vec_id(), - full_ancient_3.new_storage().append_vec_id() - ); + assert_eq!(current_ancient.id(), full_ancient_3.new_storage().id()); assert_eq!(current_ancient.slot(), slot3_full_ancient); // now set current_ancient to something @@ -17851,10 +16929,7 @@ pub mod tests { CAN_RANDOMLY_SHRINK_FALSE, ); assert!(!should_move); - assert_eq!( - current_ancient.append_vec_id(), - full_ancient_3.new_storage().append_vec_id() - ); + assert_eq!(current_ancient.id(), full_ancient_3.new_storage().id()); assert_eq!(current_ancient.slot(), slot3_full_ancient); // now mark the full ancient
as candidate for shrink @@ -17881,7 +16956,7 @@ pub mod tests { CAN_RANDOMLY_SHRINK_FALSE, ); assert!(should_move); - assert_eq!(current_ancient.append_vec_id(), ancient1.append_vec_id()); + assert_eq!(current_ancient.id(), ancient1.id()); assert_eq!(current_ancient.slot(), slot1_ancient); } @@ -17958,7 +17033,7 @@ pub mod tests { // calculate the full accounts hash let full_accounts_hash = { - accounts_db.clean_accounts(Some(slot - 1), false, None, &EpochSchedule::default()); + accounts_db.clean_accounts(Some(slot - 1), false, &EpochSchedule::default()); let (storages, _) = accounts_db.get_snapshot_storages(..=slot); let storages = SortedStorages::new(&storages); accounts_db.calculate_accounts_hash( @@ -18023,12 +17098,8 @@ pub mod tests { // calculate the incremental accounts hash let incremental_accounts_hash = { - accounts_db.clean_accounts( - Some(slot - 1), - false, - Some(full_accounts_hash_slot), - &EpochSchedule::default(), - ); + accounts_db.set_latest_full_snapshot_slot(full_accounts_hash_slot); + accounts_db.clean_accounts(Some(slot - 1), false, &EpochSchedule::default()); let (storages, _) = accounts_db.get_snapshot_storages(full_accounts_hash_slot + 1..=slot); let storages = SortedStorages::new(&storages); diff --git a/accounts-db/src/accounts_db/scan_account_storage.rs b/accounts-db/src/accounts_db/scan_account_storage.rs new file mode 100644 index 00000000000000..5a9f0739cb0ada --- /dev/null +++ b/accounts-db/src/accounts_db/scan_account_storage.rs @@ -0,0 +1,1015 @@ +use { + super::{AccountStorageEntry, AccountsDb, BinnedHashData, LoadedAccount, SplitAncientStorages}, + crate::{ + accounts_hash::{ + AccountHash, CalcAccountsHashConfig, CalculateHashIntermediate, HashStats, + }, + active_stats::ActiveStatItem, + cache_hash_data::{CacheHashData, CacheHashDataFileReference}, + pubkey_bins::PubkeyBinCalculator24, + sorted_storages::SortedStorages, + }, + rayon::prelude::*, + solana_measure::{measure::Measure, measure_us}, + solana_sdk::{account::ReadableAccount as _, clock::Slot, hash::Hash, pubkey::Pubkey}, + std::{ + hash::{DefaultHasher, Hash as _, Hasher as _}, + ops::Range, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + }, +}; + +/// called on a struct while scanning append vecs +trait AppendVecScan: Send + Sync + Clone { + /// return true if this pubkey should be included + fn filter(&mut self, pubkey: &Pubkey) -> bool; + /// set current slot of the scan + fn set_slot(&mut self, slot: Slot, is_ancient: bool); + /// found `account` in the append vec + fn found_account(&mut self, account: &LoadedAccount); + /// scanning is done + fn scanning_complete(self) -> BinnedHashData; + /// initialize accumulator + fn init_accum(&mut self, count: usize); +} + +#[derive(Clone)] +/// state to keep while scanning append vec accounts for hash calculation +/// These would have been captured in a fn from within the scan function. +/// Some of these are constant across all pubkeys, some are constant across a slot. +/// Some could be unique per pubkey. 
+struct ScanState<'a> { + /// slot we're currently scanning + current_slot: Slot, + /// accumulated results + accum: BinnedHashData, + bin_calculator: &'a PubkeyBinCalculator24, + bin_range: &'a Range, + range: usize, + sort_time: Arc, + pubkey_to_bin_index: usize, + is_ancient: bool, + stats_num_zero_lamport_accounts_ancient: Arc, +} + +impl<'a> AppendVecScan for ScanState<'a> { + fn set_slot(&mut self, slot: Slot, is_ancient: bool) { + self.current_slot = slot; + self.is_ancient = is_ancient; + } + fn filter(&mut self, pubkey: &Pubkey) -> bool { + self.pubkey_to_bin_index = self.bin_calculator.bin_from_pubkey(pubkey); + self.bin_range.contains(&self.pubkey_to_bin_index) + } + fn init_accum(&mut self, count: usize) { + if self.accum.is_empty() { + self.accum.append(&mut vec![Vec::new(); count]); + } + } + fn found_account(&mut self, loaded_account: &LoadedAccount) { + let pubkey = loaded_account.pubkey(); + assert!(self.bin_range.contains(&self.pubkey_to_bin_index)); // get rid of this once we have confidence + + // when we are scanning with bin ranges, we don't need to use exact bin numbers. + // Subtract to make first bin we care about at index 0. + self.pubkey_to_bin_index -= self.bin_range.start; + + let balance = loaded_account.lamports(); + let mut account_hash = loaded_account.loaded_hash(); + + let hash_is_missing = account_hash == AccountHash(Hash::default()); + if hash_is_missing { + let computed_hash = AccountsDb::hash_account(loaded_account, loaded_account.pubkey()); + account_hash = computed_hash; + } + + if balance == 0 && self.is_ancient { + self.stats_num_zero_lamport_accounts_ancient + .fetch_add(1, Ordering::Relaxed); + } + + let source_item = CalculateHashIntermediate { + hash: account_hash, + lamports: balance, + pubkey: *pubkey, + }; + self.init_accum(self.range); + self.accum[self.pubkey_to_bin_index].push(source_item); + } + fn scanning_complete(mut self) -> BinnedHashData { + let timing = AccountsDb::sort_slot_storage_scan(&mut self.accum); + self.sort_time.fetch_add(timing, Ordering::Relaxed); + self.accum + } +} + +enum ScanAccountStorageResult { + /// this data has already been scanned and cached + CacheFileAlreadyExists(CacheHashDataFileReference), + /// this data needs to be scanned and cached + CacheFileNeedsToBeCreated((String, Range)), +} + +impl AccountsDb { + /// scan 'storages', return a vec of 'CacheHashDataFileReference', one per pass + pub(crate) fn scan_snapshot_stores_with_cache( + &self, + cache_hash_data: &CacheHashData, + storages: &SortedStorages, + stats: &mut HashStats, + bins: usize, + bin_range: &Range, + config: &CalcAccountsHashConfig<'_>, + ) -> Vec { + assert!(bin_range.start < bins); + assert!(bin_range.end <= bins); + assert!(bin_range.start < bin_range.end); + let _guard = self.active_stats.activate(ActiveStatItem::HashScan); + + let bin_calculator = PubkeyBinCalculator24::new(bins); + let mut time = Measure::start("scan all accounts"); + stats.num_snapshot_storage = storages.storage_count(); + stats.num_slots = storages.slot_count(); + let range = bin_range.end - bin_range.start; + let sort_time = Arc::new(AtomicU64::new(0)); + + let scanner = ScanState { + current_slot: Slot::default(), + accum: BinnedHashData::default(), + bin_calculator: &bin_calculator, + range, + bin_range, + sort_time: sort_time.clone(), + pubkey_to_bin_index: 0, + is_ancient: false, + stats_num_zero_lamport_accounts_ancient: Arc::clone( + &stats.num_zero_lamport_accounts_ancient, + ), + }; + + let result = self.scan_account_storage_no_bank( + cache_hash_data, 
+ config, + storages, + scanner, + bin_range, + stats, + ); + + stats.sort_time_total_us += sort_time.load(Ordering::Relaxed); + + time.stop(); + stats.scan_time_total_us += time.as_us(); + + result + } + + /// Scan through all the account storage in parallel. + /// Returns a Vec of opened files. + /// Each file has serialized hash info, sorted by pubkey and then slot, from scanning the append vecs. + /// A single pubkey could be in multiple entries. The pubkey found in the latest entry is the one to use. + fn scan_account_storage_no_bank( + &self, + cache_hash_data: &CacheHashData, + config: &CalcAccountsHashConfig<'_>, + snapshot_storages: &SortedStorages, + scanner: S, + bin_range: &Range, + stats: &mut HashStats, + ) -> Vec + where + S: AppendVecScan, + { + let oldest_non_ancient_slot = self.get_oldest_non_ancient_slot_for_hash_calc_scan( + snapshot_storages.max_slot_inclusive(), + config, + ); + let splitter = SplitAncientStorages::new(oldest_non_ancient_slot, snapshot_storages); + + let slots_per_epoch = config + .rent_collector + .epoch_schedule + .get_slots_in_epoch(config.rent_collector.epoch); + let one_epoch_old = snapshot_storages + .range() + .end + .saturating_sub(slots_per_epoch); + + stats.scan_chunks = splitter.chunk_count; + + let cache_files = (0..splitter.chunk_count) + .into_par_iter() + .filter_map(|chunk| { + let range_this_chunk = splitter.get_slot_range(chunk)?; + + let mut load_from_cache = true; + let mut hasher = DefaultHasher::new(); + bin_range.start.hash(&mut hasher); + bin_range.end.hash(&mut hasher); + let is_first_scan_pass = bin_range.start == 0; + + // calculate hash representing all storages in this chunk + let mut empty = true; + for (slot, storage) in snapshot_storages.iter_range(&range_this_chunk) { + empty = false; + if is_first_scan_pass && slot < one_epoch_old { + self.update_old_slot_stats(stats, storage); + } + if let Some(storage) = storage { + let ok = Self::hash_storage_info(&mut hasher, storage, slot); + if !ok { + load_from_cache = false; + break; + } + } + } + if empty { + return None; + } + // we have a hash value for the storages in this chunk + // so, build a file name: + let hash = hasher.finish(); + let file_name = format!( + "{}.{}.{}.{}.{:016x}", + range_this_chunk.start, + range_this_chunk.end, + bin_range.start, + bin_range.end, + hash + ); + if load_from_cache { + if let Ok(mapped_file) = + cache_hash_data.get_file_reference_to_map_later(&file_name) + { + return Some(ScanAccountStorageResult::CacheFileAlreadyExists( + mapped_file, + )); + } + } + + // fall through and load normally - we failed to load from a cache file but there are storages present + Some(ScanAccountStorageResult::CacheFileNeedsToBeCreated(( + file_name, + range_this_chunk, + ))) + }) + .collect::>(); + + // Calculate the hits and misses of the hash data files cache. + // This is outside of the parallel loop above so that we only need to + // update each atomic stat value once. + // There are approximately 173 items in the cache files list, + // so should be very fast to iterate and compute. 
+ // (173 cache files == 432,000 slots / 2,500 slots-per-cache-file) + let mut hits = 0; + let mut misses = 0; + for cache_file in &cache_files { + match cache_file { + ScanAccountStorageResult::CacheFileAlreadyExists(_) => hits += 1, + ScanAccountStorageResult::CacheFileNeedsToBeCreated(_) => misses += 1, + }; + } + cache_hash_data + .stats + .hits + .fetch_add(hits, Ordering::Relaxed); + cache_hash_data + .stats + .misses + .fetch_add(misses, Ordering::Relaxed); + + // deletes the old files that will not be used before creating new ones + cache_hash_data.delete_old_cache_files(); + + cache_files + .into_par_iter() + .map(|chunk| { + match chunk { + ScanAccountStorageResult::CacheFileAlreadyExists(file) => Some(file), + ScanAccountStorageResult::CacheFileNeedsToBeCreated(( + file_name, + range_this_chunk, + )) => { + let mut scanner = scanner.clone(); + let mut init_accum = true; + // load from cache failed, so create the cache file for this chunk + for (slot, storage) in snapshot_storages.iter_range(&range_this_chunk) { + let ancient = + oldest_non_ancient_slot.is_some_and(|oldest_non_ancient_slot| { + slot < oldest_non_ancient_slot + }); + + let (_, scan_us) = measure_us!(if let Some(storage) = storage { + if init_accum { + let range = bin_range.end - bin_range.start; + scanner.init_accum(range); + init_accum = false; + } + scanner.set_slot(slot, ancient); + + Self::scan_single_account_storage(storage, &mut scanner); + }); + if ancient { + stats + .sum_ancient_scans_us + .fetch_add(scan_us, Ordering::Relaxed); + stats.count_ancient_scans.fetch_add(1, Ordering::Relaxed); + stats + .longest_ancient_scan_us + .fetch_max(scan_us, Ordering::Relaxed); + } + } + (!init_accum) + .then(|| { + let r = scanner.scanning_complete(); + assert!(!file_name.is_empty()); + (!r.is_empty() && r.iter().any(|b| !b.is_empty())).then(|| { + // error if we can't write this + cache_hash_data.save(&file_name, &r).unwrap(); + cache_hash_data + .get_file_reference_to_map_later(&file_name) + .unwrap() + }) + }) + .flatten() + } + } + }) + .filter_map(|x| x) + .collect() + } + + /// iterate over a single storage, calling scanner on each item + fn scan_single_account_storage(storage: &AccountStorageEntry, scanner: &mut S) + where + S: AppendVecScan, + { + storage.accounts.scan_accounts(|account| { + if scanner.filter(account.pubkey()) { + scanner.found_account(&LoadedAccount::Stored(account)) + } + }); + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::{ + accounts_db::{ + get_temp_accounts_paths, + tests::{ + append_single_account_with_default_hash, define_accounts_db_test, + get_storage_refs, sample_storage_with_entries, + sample_storages_and_account_in_slot, sample_storages_and_accounts, + }, + MAX_ITEMS_PER_CHUNK, + }, + accounts_file::{AccountsFile, AccountsFileProvider}, + append_vec::AppendVec, + cache_hash_data::{CacheHashDataFile, DeletionPolicy as CacheHashDeletionPolicy}, + }, + solana_sdk::account::AccountSharedData, + tempfile::TempDir, + test_case::test_case, + }; + + impl AccountsDb { + fn scan_snapshot_stores( + &self, + storage: &SortedStorages, + stats: &mut crate::accounts_hash::HashStats, + bins: usize, + bin_range: &Range, + ) -> Vec { + let temp_dir = TempDir::new().unwrap(); + let accounts_hash_cache_path = temp_dir.path().to_path_buf(); + self.scan_snapshot_stores_with_cache( + &CacheHashData::new(accounts_hash_cache_path, CacheHashDeletionPolicy::AllUnused), + storage, + stats, + bins, + bin_range, + &CalcAccountsHashConfig::default(), + ) + .iter() + 
.map(CacheHashDataFileReference::map) + .collect::, _>>() + .unwrap() + } + } + + #[derive(Clone)] + struct TestScan { + calls: Arc, + pubkey: Pubkey, + slot_expected: Slot, + accum: BinnedHashData, + current_slot: Slot, + value_to_use_for_lamports: u64, + } + + impl AppendVecScan for TestScan { + fn filter(&mut self, _pubkey: &Pubkey) -> bool { + true + } + fn set_slot(&mut self, slot: Slot, _is_ancient: bool) { + self.current_slot = slot; + } + fn init_accum(&mut self, _count: usize) {} + fn found_account(&mut self, loaded_account: &LoadedAccount) { + self.calls.fetch_add(1, Ordering::Relaxed); + assert_eq!(loaded_account.pubkey(), &self.pubkey); + assert_eq!(self.slot_expected, self.current_slot); + self.accum.push(vec![CalculateHashIntermediate { + hash: AccountHash(Hash::default()), + lamports: self.value_to_use_for_lamports, + pubkey: self.pubkey, + }]); + } + fn scanning_complete(self) -> BinnedHashData { + self.accum + } + } + + #[derive(Clone)] + struct TestScanSimple { + current_slot: Slot, + slot_expected: Slot, + calls: Arc, + accum: BinnedHashData, + pubkey1: Pubkey, + pubkey2: Pubkey, + } + + impl AppendVecScan for TestScanSimple { + fn set_slot(&mut self, slot: Slot, _is_ancient: bool) { + self.current_slot = slot; + } + fn filter(&mut self, _pubkey: &Pubkey) -> bool { + true + } + fn init_accum(&mut self, _count: usize) {} + fn found_account(&mut self, loaded_account: &LoadedAccount) { + self.calls.fetch_add(1, Ordering::Relaxed); + let first = loaded_account.pubkey() == &self.pubkey1; + assert!(first || loaded_account.pubkey() == &self.pubkey2); + assert_eq!(self.slot_expected, self.current_slot); + if first { + assert!(self.accum.is_empty()); + } else { + assert_eq!(self.accum.len(), 1); + } + self.accum.push(vec![CalculateHashIntermediate { + hash: AccountHash(Hash::default()), + lamports: loaded_account.lamports(), + pubkey: Pubkey::default(), + }]); + } + fn scanning_complete(self) -> BinnedHashData { + self.accum + } + } + + #[test] + #[should_panic(expected = "bin_range.start < bins")] + fn test_accountsdb_scan_snapshot_stores_illegal_range_start() { + let mut stats = HashStats::default(); + let bounds = Range { start: 2, end: 2 }; + let accounts_db = AccountsDb::new_single_for_tests(); + + accounts_db.scan_snapshot_stores(&empty_storages(), &mut stats, 2, &bounds); + } + #[test] + #[should_panic(expected = "bin_range.end <= bins")] + fn test_accountsdb_scan_snapshot_stores_illegal_range_end() { + let mut stats = HashStats::default(); + let bounds = Range { start: 1, end: 3 }; + + let accounts_db = AccountsDb::new_single_for_tests(); + accounts_db.scan_snapshot_stores(&empty_storages(), &mut stats, 2, &bounds); + } + + #[test] + #[should_panic(expected = "bin_range.start < bin_range.end")] + fn test_accountsdb_scan_snapshot_stores_illegal_range_inverse() { + let mut stats = HashStats::default(); + let bounds = Range { start: 1, end: 0 }; + + let accounts_db = AccountsDb::new_single_for_tests(); + accounts_db.scan_snapshot_stores(&empty_storages(), &mut stats, 2, &bounds); + } + + #[test_case(AccountsFileProvider::AppendVec)] + #[test_case(AccountsFileProvider::HotStorage)] + fn test_accountsdb_scan_account_storage_no_bank(accounts_file_provider: AccountsFileProvider) { + solana_logger::setup(); + + let expected = 1; + let tf = crate::append_vec::test_utils::get_append_vec_path( + "test_accountsdb_scan_account_storage_no_bank", + ); + let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap(); + let slot_expected: Slot = 0; + let size: usize = 123; + let mut data = 
AccountStorageEntry::new( + &paths[0], + slot_expected, + 0, + size as u64, + accounts_file_provider, + ); + let av = AccountsFile::AppendVec(AppendVec::new(&tf.path, true, 1024 * 1024)); + data.accounts = av; + + let storage = Arc::new(data); + let pubkey = solana_sdk::pubkey::new_rand(); + let acc = AccountSharedData::new(1, 48, AccountSharedData::default().owner()); + let mark_alive = false; + append_single_account_with_default_hash(&storage, &pubkey, &acc, mark_alive, None); + + let calls = Arc::new(AtomicU64::new(0)); + let temp_dir = TempDir::new().unwrap(); + let accounts_hash_cache_path = temp_dir.path().to_path_buf(); + let accounts_db = AccountsDb::new_single_for_tests(); + + let test_scan = TestScan { + calls: calls.clone(), + pubkey, + slot_expected, + accum: Vec::default(), + current_slot: 0, + value_to_use_for_lamports: expected, + }; + + let result = accounts_db.scan_account_storage_no_bank( + &CacheHashData::new(accounts_hash_cache_path, CacheHashDeletionPolicy::AllUnused), + &CalcAccountsHashConfig::default(), + &get_storage_refs(&[storage]), + test_scan, + &Range { start: 0, end: 1 }, + &mut HashStats::default(), + ); + let result2 = result + .iter() + .map(|file| file.map().unwrap()) + .collect::>(); + assert_eq!(calls.load(Ordering::Relaxed), 1); + assert_scan( + result2, + vec![vec![vec![CalculateHashIntermediate { + hash: AccountHash(Hash::default()), + lamports: expected, + pubkey, + }]]], + 1, + 0, + 1, + ); + } + + #[test] + fn test_accountsdb_scan_multiple_account_storage_no_bank_one_slot() { + solana_logger::setup(); + + let slot_expected: Slot = 0; + let tf = crate::append_vec::test_utils::get_append_vec_path( + "test_accountsdb_scan_account_storage_no_bank", + ); + let pubkey1 = solana_sdk::pubkey::new_rand(); + let pubkey2 = solana_sdk::pubkey::new_rand(); + let mark_alive = false; + let storage = sample_storage_with_entries(&tf, slot_expected, &pubkey1, mark_alive); + let lamports = storage + .accounts + .get_account_shared_data(0) + .unwrap() + .lamports(); + let calls = Arc::new(AtomicU64::new(0)); + let mut scanner = TestScanSimple { + current_slot: 0, + slot_expected, + pubkey1, + pubkey2, + accum: Vec::default(), + calls: calls.clone(), + }; + AccountsDb::scan_single_account_storage(&storage, &mut scanner); + let accum = scanner.scanning_complete(); + assert_eq!(calls.load(Ordering::Relaxed), 1); + assert_eq!( + accum + .iter() + .flatten() + .map(|a| a.lamports) + .collect::>(), + vec![lamports] + ); + } + + define_accounts_db_test!( + test_accountsdb_scan_account_storage_no_bank_one_slot, + |db| { + solana_logger::setup(); + let accounts_file_provider = db.accounts_file_provider; + + let expected = 1; + let (_temp_dirs, paths) = get_temp_accounts_paths(1).unwrap(); + let slot_expected: Slot = 0; + let data = AccountStorageEntry::new( + &paths[0], + slot_expected, + 0, + 1024 * 1024, + accounts_file_provider, + ); + let storage = Arc::new(data); + let pubkey = solana_sdk::pubkey::new_rand(); + let acc = AccountSharedData::new(1, 48, AccountSharedData::default().owner()); + let mark_alive = false; + append_single_account_with_default_hash(&storage, &pubkey, &acc, mark_alive, None); + + let calls = Arc::new(AtomicU64::new(0)); + + let mut test_scan = TestScan { + calls: calls.clone(), + pubkey, + slot_expected, + accum: Vec::default(), + current_slot: 0, + value_to_use_for_lamports: expected, + }; + + AccountsDb::scan_single_account_storage(&storage, &mut test_scan); + let accum = test_scan.scanning_complete(); + assert_eq!(calls.load(Ordering::Relaxed), 
1); + assert_eq!( + accum + .iter() + .flatten() + .map(|a| a.lamports) + .collect::>(), + vec![expected] + ); + } + ); + + define_accounts_db_test!( + test_accountsdb_scan_snapshot_stores_hash_not_stored, + |accounts_db| { + let (storages, raw_expected) = sample_storages_and_accounts(&accounts_db); + storages.iter().for_each(|storage| { + accounts_db.storage.remove(&storage.slot(), false); + }); + + // replace the sample storages, storing default hash values so that we rehash during scan + let storages = storages + .iter() + .map(|storage| { + let slot = storage.slot(); + let copied_storage = accounts_db.create_and_insert_store(slot, 10000, "test"); + let mut all_accounts = Vec::default(); + storage.accounts.scan_accounts(|acct| { + all_accounts.push((*acct.pubkey(), acct.to_account_shared_data())); + }); + let accounts = all_accounts + .iter() + .map(|stored| (&stored.0, &stored.1)) + .collect::>(); + let slice = &accounts[..]; + let storable_accounts = (slot, slice); + copied_storage + .accounts + .append_accounts(&storable_accounts, 0); + copied_storage + }) + .collect::>(); + + assert_test_scan(accounts_db, storages, raw_expected); + } + ); + + #[test] + fn test_accountsdb_scan_snapshot_stores_check_hash() { + solana_logger::setup(); + let accounts_db = AccountsDb::new_single_for_tests(); + let (storages, _raw_expected) = sample_storages_and_accounts(&accounts_db); + let max_slot = storages.iter().map(|storage| storage.slot()).max().unwrap(); + + // replace the sample storages, storing default hash values so that we rehash during scan + let storages = storages + .iter() + .map(|storage| { + let slot = storage.slot() + max_slot; + let copied_storage = accounts_db.create_and_insert_store(slot, 10000, "test"); + let mut all_accounts = Vec::default(); + storage.accounts.scan_accounts(|acct| { + all_accounts.push((*acct.pubkey(), acct.to_account_shared_data())); + }); + let accounts = all_accounts + .iter() + .map(|stored| (&stored.0, &stored.1)) + .collect::>(); + let slice = &accounts[..]; + let storable_accounts = (slot, slice); + copied_storage + .accounts + .append_accounts(&storable_accounts, 0); + copied_storage + }) + .collect::>(); + + let bins = 1; + let mut stats = HashStats::default(); + + accounts_db.scan_snapshot_stores( + &get_storage_refs(&storages), + &mut stats, + bins, + &Range { + start: 0, + end: bins, + }, + ); + } + + define_accounts_db_test!(test_accountsdb_scan_snapshot_stores, |accounts_db| { + let (storages, raw_expected) = sample_storages_and_accounts(&accounts_db); + + assert_test_scan(accounts_db, storages, raw_expected); + }); + + define_accounts_db_test!( + test_accountsdb_scan_snapshot_stores_2nd_chunk, + |accounts_db| { + // enough stores to get to 2nd chunk + let bins = 1; + let slot = MAX_ITEMS_PER_CHUNK as Slot; + let (storages, raw_expected) = sample_storages_and_account_in_slot(slot, &accounts_db); + let storage_data = [(&storages[0], slot)]; + + let sorted_storages = + SortedStorages::new_debug(&storage_data[..], 0, MAX_ITEMS_PER_CHUNK as usize + 1); + + let mut stats = HashStats::default(); + let result = accounts_db.scan_snapshot_stores( + &sorted_storages, + &mut stats, + bins, + &Range { + start: 0, + end: bins, + }, + ); + + assert_scan(result, vec![vec![raw_expected]], bins, 0, bins); + } + ); + + define_accounts_db_test!( + test_accountsdb_scan_snapshot_stores_binning, + |accounts_db| { + let mut stats = HashStats::default(); + let (storages, raw_expected) = sample_storages_and_accounts(&accounts_db); + + // just the first bin of 2 + let bins 
= 2; + let half_bins = bins / 2; + let result = accounts_db.scan_snapshot_stores( + &get_storage_refs(&storages), + &mut stats, + bins, + &Range { + start: 0, + end: half_bins, + }, + ); + let mut expected = vec![Vec::new(); half_bins]; + expected[0].push(raw_expected[0]); + expected[0].push(raw_expected[1]); + assert_scan(result, vec![expected], bins, 0, half_bins); + + // just the second bin of 2 + let accounts_db = AccountsDb::new_single_for_tests(); + let result = accounts_db.scan_snapshot_stores( + &get_storage_refs(&storages), + &mut stats, + bins, + &Range { + start: 1, + end: bins, + }, + ); + + let mut expected = vec![Vec::new(); half_bins]; + let starting_bin_index = 0; + expected[starting_bin_index].push(raw_expected[2]); + expected[starting_bin_index].push(raw_expected[3]); + assert_scan(result, vec![expected], bins, 1, bins - 1); + + // 1 bin at a time of 4 + let bins = 4; + let accounts_db = AccountsDb::new_single_for_tests(); + + for (bin, expected_item) in raw_expected.iter().enumerate().take(bins) { + let result = accounts_db.scan_snapshot_stores( + &get_storage_refs(&storages), + &mut stats, + bins, + &Range { + start: bin, + end: bin + 1, + }, + ); + let mut expected = vec![Vec::new(); 1]; + expected[0].push(*expected_item); + assert_scan(result, vec![expected], bins, bin, 1); + } + + let bins = 256; + let bin_locations = [0, 127, 128, 255]; + let range = 1; + for bin in 0..bins { + let accounts_db = AccountsDb::new_single_for_tests(); + let result = accounts_db.scan_snapshot_stores( + &get_storage_refs(&storages), + &mut stats, + bins, + &Range { + start: bin, + end: bin + range, + }, + ); + let mut expected = vec![]; + if let Some(index) = bin_locations.iter().position(|&r| r == bin) { + expected = vec![Vec::new(); range]; + expected[0].push(raw_expected[index]); + } + let mut result2 = (0..range).map(|_| Vec::default()).collect::>(); + if let Some(m) = result.first() { + m.load_all(&mut result2, bin, &PubkeyBinCalculator24::new(bins)); + } else { + result2 = vec![]; + } + + assert_eq!(result2, expected); + } + } + ); + + define_accounts_db_test!( + test_accountsdb_scan_snapshot_stores_binning_2nd_chunk, + |accounts_db| { + // enough stores to get to 2nd chunk + // range is for only 1 bin out of 256. 
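// Illustrative note (assuming slots are grouped into scan chunks of MAX_ITEMS_PER_CHUNK
// slots): placing the lone storage at slot == MAX_ITEMS_PER_CHUNK lands it in the second
// chunk, so the scan yields one cache file for that chunk and none for the empty first
// one, as the assert on result.len() below checks.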
+ let bins = 256; + let slot = MAX_ITEMS_PER_CHUNK as Slot; + let (storages, raw_expected) = sample_storages_and_account_in_slot(slot, &accounts_db); + let storage_data = [(&storages[0], slot)]; + + let sorted_storages = + SortedStorages::new_debug(&storage_data[..], 0, MAX_ITEMS_PER_CHUNK as usize + 1); + + let mut stats = HashStats::default(); + let range = 1; + let start = 127; + let result = accounts_db.scan_snapshot_stores( + &sorted_storages, + &mut stats, + bins, + &Range { + start, + end: start + range, + }, + ); + assert_eq!(result.len(), 1); // 2 chunks, but 1 is empty so not included + let mut expected = vec![Vec::new(); range]; + expected[0].push(raw_expected[1]); + let mut result2 = (0..range).map(|_| Vec::default()).collect::<Vec<_>>(); + result[0].load_all(&mut result2, 0, &PubkeyBinCalculator24::new(range)); + assert_eq!(result2.len(), 1); + assert_eq!(result2, expected); + } + ); + + fn assert_test_scan( + accounts_db: AccountsDb, + storages: Vec<Arc<AccountStorageEntry>>, + raw_expected: Vec<CalculateHashIntermediate>, + ) { + let bins = 1; + let mut stats = HashStats::default(); + + let result = accounts_db.scan_snapshot_stores( + &get_storage_refs(&storages), + &mut stats, + bins, + &Range { + start: 0, + end: bins, + }, + ); + assert_scan(result, vec![vec![raw_expected.clone()]], bins, 0, bins); + + let bins = 2; + let accounts_db = AccountsDb::new_single_for_tests(); + let result = accounts_db.scan_snapshot_stores( + &get_storage_refs(&storages), + &mut stats, + bins, + &Range { + start: 0, + end: bins, + }, + ); + let mut expected = vec![Vec::new(); bins]; + expected[0].push(raw_expected[0]); + expected[0].push(raw_expected[1]); + expected[bins - 1].push(raw_expected[2]); + expected[bins - 1].push(raw_expected[3]); + assert_scan(result, vec![expected], bins, 0, bins); + + let bins = 4; + let accounts_db = AccountsDb::new_single_for_tests(); + let result = accounts_db.scan_snapshot_stores( + &get_storage_refs(&storages), + &mut stats, + bins, + &Range { + start: 0, + end: bins, + }, + ); + let mut expected = vec![Vec::new(); bins]; + expected[0].push(raw_expected[0]); + expected[1].push(raw_expected[1]); + expected[2].push(raw_expected[2]); + expected[bins - 1].push(raw_expected[3]); + assert_scan(result, vec![expected], bins, 0, bins); + + let bins = 256; + let accounts_db = AccountsDb::new_single_for_tests(); + let result = accounts_db.scan_snapshot_stores( + &get_storage_refs(&storages), + &mut stats, + bins, + &Range { + start: 0, + end: bins, + }, + ); + let mut expected = vec![Vec::new(); bins]; + expected[0].push(raw_expected[0]); + expected[127].push(raw_expected[1]); + expected[128].push(raw_expected[2]); + expected[bins - 1].push(*raw_expected.last().unwrap()); + assert_scan(result, vec![expected], bins, 0, bins); + } + + /// helper to compare expected binned data with scan result in cache files + /// result: return from scanning + /// expected: binned data expected + /// bins: # bins total to divide pubkeys into + /// start_bin_index: bin # that was the minimum # we were scanning for 0<=start_bin_index<bins + fn assert_scan( + result: Vec<CacheHashDataFile>, + expected: Vec<BinnedHashData>, + bins: usize, + start_bin_index: usize, + bin_range: usize, + ) { + assert_eq!(expected.len(), result.len()); + + for cache_file in &result { + let mut result2 = (0..bin_range).map(|_| Vec::default()).collect::<Vec<_>>(); + cache_file.load_all( + &mut result2, + start_bin_index, + &PubkeyBinCalculator24::new(bins), + ); + assert_eq!( + convert_to_slice(&[result2]), + expected, + "bins: {bins}, start_bin_index: {start_bin_index}" + ); + } + } + + fn empty_storages<'a>() -> SortedStorages<'a> { + 
SortedStorages::new(&[]) + } + + fn convert_to_slice( + input: &[Vec<Vec<CalculateHashIntermediate>>], + ) -> Vec<Vec<&[CalculateHashIntermediate]>> { + input + .iter() + .map(|v| v.iter().map(|v| &v[..]).collect::<Vec<_>>()) + .collect::<Vec<_>>() + } +} diff --git a/accounts-db/src/accounts_hash.rs b/accounts-db/src/accounts_hash.rs index 7c7779f44581e5..4dddbded4d219e 100644 --- a/accounts-db/src/accounts_hash.rs +++ b/accounts-db/src/accounts_hash.rs @@ -9,9 +9,10 @@ use { log::*, memmap2::MmapMut, rayon::prelude::*, + solana_lattice_hash::lt_hash::LtHash, solana_measure::{measure::Measure, measure_us}, solana_sdk::{ - hash::{Hash, Hasher}, + hash::{Hash, Hasher, HASH_BYTES}, pubkey::Pubkey, rent_collector::RentCollector, slot_history::Slot, @@ -205,6 +206,8 @@ pub struct HashStats { pub sum_ancient_scans_us: AtomicU64, pub count_ancient_scans: AtomicU64, pub pubkey_bin_search_us: AtomicU64, + pub num_zero_lamport_accounts: AtomicU64, + pub num_zero_lamport_accounts_ancient: Arc<AtomicU64>, } impl HashStats { pub fn calc_storage_size_quartiles(&mut self, storages: &[Arc<AccountStorageEntry>]) { @@ -306,6 +309,17 @@ impl HashStats { self.pubkey_bin_search_us.load(Ordering::Relaxed), i64 ), + ( + "num_zero_lamport_accounts", + self.num_zero_lamport_accounts.load(Ordering::Relaxed), + i64 + ), + ( + "num_zero_lamport_accounts_ancient", + self.num_zero_lamport_accounts_ancient + .load(Ordering::Relaxed), + i64 + ), ); } } @@ -1166,6 +1180,9 @@ impl<'a> AccountsHasher<'a> { .expect("summing lamports cannot overflow"); hashes.write(&item.hash.0); } else { + stats + .num_zero_lamport_accounts + .fetch_add(1, Ordering::Relaxed); // if lamports == 0, check if they should be included if self.zero_lamport_accounts == ZeroLamportAccounts::Included { // For incremental accounts hash, the hash of a zero lamport account is @@ -1236,6 +1253,18 @@ pub struct AccountHash(pub Hash); // This also ensures there are no padding bytes, which is required to safely implement Pod const _: () = assert!(std::mem::size_of::<AccountHash>() == std::mem::size_of::<Hash>()); +/// The AccountHash for a zero-lamport account +pub const ZERO_LAMPORT_ACCOUNT_HASH: AccountHash = + AccountHash(Hash::new_from_array([0; HASH_BYTES])); + +/// Lattice hash of an account +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct AccountLtHash(pub LtHash); + +/// The AccountLtHash for a zero-lamport account +pub const ZERO_LAMPORT_ACCOUNT_LT_HASH: AccountLtHash = + AccountLtHash(LtHash([0; LtHash::NUM_ELEMENTS])); + /// Hash of accounts #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum AccountsHashKind { diff --git a/accounts-db/src/accounts_index.rs b/accounts-db/src/accounts_index.rs index 08058934b1781f..dc10df29d67e35 100644 --- a/accounts-db/src/accounts_index.rs +++ b/accounts-db/src/accounts_index.rs @@ -35,7 +35,7 @@ use { path::PathBuf, sync::{ atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, - Arc, Mutex, OnceLock, RwLock, RwLockWriteGuard, + Arc, Mutex, OnceLock, RwLock, }, }, thiserror::Error, @@ -666,6 +668,8 @@ pub struct AccountsIndex<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> { /// when a scan's accumulated data exceeds this limit, abort the scan pub scan_results_limit_bytes: Option<usize>, + pub purge_older_root_entries_one_slot_list: AtomicUsize, + /// # roots added since last check pub roots_added: AtomicUsize, /// # roots removed since last check @@ -690,6 +692,7 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> { .and_then(|config| config.scan_results_limit_bytes); let (account_maps, bin_calculator, storage) = Self::allocate_accounts_index(config, exit); Self { + purge_older_root_entries_one_slot_list: AtomicUsize::default(), account_maps, bin_calculator, program_id_index: SecondaryIndex::<DashMapSecondaryIndexEntry>::new(
@@ -1160,10 +1163,10 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> { fn slot_list_mut<RT>( &self, pubkey: &Pubkey, - user: impl for<'a> FnOnce(&mut RwLockWriteGuard<'a, SlotList<T>>) -> RT, + user_fn: impl FnOnce(&mut SlotList<T>) -> RT, ) -> Option<RT> { let read_lock = self.get_bin(pubkey); - read_lock.slot_list_mut(pubkey, user) + read_lock.slot_list_mut(pubkey, user_fn) } /// Remove keys from the account index if the key's slot list is empty. @@ -1817,6 +1820,10 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> { reclaims: &mut SlotList<T>, max_clean_root_inclusive: Option<Slot>, ) { + if slot_list.len() <= 1 { + self.purge_older_root_entries_one_slot_list + .fetch_add(1, Ordering::Relaxed); + } let newest_root_in_slot_list; let max_clean_root_inclusive = { let roots_tracker = &self.roots_tracker.read().unwrap(); @@ -1920,7 +1927,12 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> AccountsIndex<T, U> { self.roots_added.fetch_add(1, Ordering::Relaxed); let mut w_roots_tracker = self.roots_tracker.write().unwrap(); // `AccountsDb::flush_accounts_cache()` relies on roots being added in order - assert!(slot >= w_roots_tracker.alive_roots.max_inclusive()); + assert!( + slot >= w_roots_tracker.alive_roots.max_inclusive(), + "Roots must be added in order: {} < {}", + slot, + w_roots_tracker.alive_roots.max_inclusive() + ); // 'slot' is a root, so it is both 'root' and 'original' w_roots_tracker.alive_roots.insert(slot); } diff --git a/accounts-db/src/accounts_index/in_mem_accounts_index.rs b/accounts-db/src/accounts_index/in_mem_accounts_index.rs index f3318470effba3..c19e15d87b35d5 100644 --- a/accounts-db/src/accounts_index/in_mem_accounts_index.rs +++ b/accounts-db/src/accounts_index/in_mem_accounts_index.rs @@ -18,7 +18,7 @@ use { ops::{Bound, RangeBounds, RangeInclusive}, sync::{ atomic::{AtomicBool, AtomicU64, Ordering}, - Arc, Mutex, RwLock, RwLockWriteGuard, + Arc, Mutex, RwLock, }, }, }; @@ -469,16 +469,20 @@ impl<T: IndexValue, U: DiskIndexValue + From<T> + Into<T>> InMemAccountsIndex<T, U> { pub(crate) fn slot_list_mut<RT>( &self, pubkey: &Pubkey, - user: impl for<'a> FnOnce(&mut RwLockWriteGuard<'a, SlotList<T>>) -> RT, + user_fn: impl FnOnce(&mut SlotList<T>) -> RT, ) -> Option<RT> { self.get_internal_inner(pubkey, |entry| { ( true, entry.map(|entry| { - let result = user(&mut entry.slot_list.write().unwrap()); + let result = user_fn(&mut entry.slot_list.write().unwrap()); + // note that to be safe here, we ALWAYS mark the entry as dirty entry.set_dirty(true); result }), diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs index f9486d56e35e61..998781d8c04850 100644 --- a/accounts-db/src/ancient_append_vecs.rs +++ b/accounts-db/src/ancient_append_vecs.rs @@ -168,8 +168,15 @@ impl AncientSlotInfos { // shrink enough slots to write 'percent_of_alive_shrunk_data'% of the total alive data // from slots that exceeded the shrink threshold. // The goal is to limit overall i/o in this pass while making progress. + // Simultaneously, we cannot allow the overall budget to be dominated by ancient storages that need to be shrunk. + // So, we have to limit how much of the total resulting budget can be allocated to re-packing/shrinking ancient storages.
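// Restated as a sketch (same formula as the change below, integer rounding aside):
//   threshold_bytes = min(total_alive_bytes_shrink, max_resulting_storages * ideal_storage_size)
//                     * percent_of_alive_shrunk_data / 100
// so re-packing ancient storages can never claim more than a fixed fraction of the
// ideal end-state footprint.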
let threshold_bytes = - self.total_alive_bytes_shrink.0 * tuning.percent_of_alive_shrunk_data / 100; + (self.total_alive_bytes_shrink.0 * tuning.percent_of_alive_shrunk_data / 100).min( + u64::from(tuning.max_resulting_storages) + * u64::from(tuning.ideal_storage_size) + * tuning.percent_of_alive_shrunk_data + / 100, + ); for info_index in &self.shrink_indexes { let info = &mut self.all_infos[*info_index]; if bytes_to_shrink_due_to_ratio.0 >= threshold_bytes { @@ -437,6 +444,16 @@ impl AccountsDb { return; } + // for the accounts which are one ref and can be put anywhere, we want to put the accounts from the LARGEST storages at the end. + // This causes us to keep the accounts we're re-packing from already existing ancient storages together with other normal one ref accounts. + // The alternative could cause us to mix newly ancient slots produced by flush (containing accounts touched more recently) with previously + // packed ancient storages which over time contained enough dead accounts that the storage needed to be shrunk by being re-packed. + // The end result of this sort should cause older, colder accounts (previously packed into large storages and then re-packed/shrunk) to + // be re-packed together with other older/colder accounts. + accounts_to_combine + .accounts_to_combine + .sort_unstable_by(|a, b| a.capacity.cmp(&b.capacity)); + // pack the accounts with 1 ref or refs > 1 but the slot we're packing is the highest alive slot for the pubkey. // Note the `chain` below combining the 2 types of refs. let pack = PackedAncientStorage::pack( @@ -565,6 +582,20 @@ impl AccountsDb { } } } + let mut total_dead_bytes = 0; + let should_shrink_count = infos + .all_infos + .iter() + .filter(|info| info.should_shrink) + .map(|info| total_dead_bytes += info.capacity.saturating_sub(info.alive_bytes)) + .count() + .saturating_sub(randoms as usize); + self.shrink_ancient_stats + .slots_eligible_to_shrink + .fetch_add(should_shrink_count as u64, Ordering::Relaxed); + self.shrink_ancient_stats + .total_dead_bytes + .fetch_add(total_dead_bytes, Ordering::Relaxed); if randoms > 0 { self.shrink_ancient_stats .random_shrink @@ -2073,11 +2104,11 @@ pub mod tests { assert_eq!( shrinks_in_progress .iter() - .map(|(_, shrink_in_progress)| shrink_in_progress.old_storage().append_vec_id()) + .map(|(_, shrink_in_progress)| shrink_in_progress.old_storage().id()) .collect::<Vec<_>>(), storages .iter() - .map(|storage| storage.append_vec_id()) + .map(|storage| storage.id()) .collect::<Vec<_>>() ); // assert that we wrote the 2_ref account to the newly shrunk append vec @@ -2294,7 +2325,7 @@ pub mod tests { let map = |info: &SlotInfo| { ( - info.storage.append_vec_id(), + info.storage.id(), info.slot, info.capacity, info.alive_bytes, @@ -2435,7 +2466,7 @@ } fn assert_storage_info(info: &SlotInfo, storage: &AccountStorageEntry, should_shrink: bool) { - assert_eq!(storage.append_vec_id(), info.storage.append_vec_id()); + assert_eq!(storage.id(), info.storage.id()); assert_eq!(storage.slot(), info.slot); assert_eq!(storage.capacity(), info.capacity); assert_eq!(storage.alive_bytes(), info.alive_bytes as usize); @@ -3267,8 +3298,8 @@ pub mod tests { assert_eq!(1, one.len()); assert_eq!(target_slot, one.first().unwrap().0); assert_eq!( - one.first().unwrap().1.old_storage().append_vec_id(), - storages[combine_into].append_vec_id() + one.first().unwrap().1.old_storage().id(), + storages[combine_into].id() ); // make sure the single new append vec contains all the same accounts let mut two = Vec::default(); diff --git 
a/accounts-db/src/append_vec.rs b/accounts-db/src/append_vec.rs index 286a99632207df..d0dcee18cb3ac5 100644 --- a/accounts-db/src/append_vec.rs +++ b/accounts-db/src/append_vec.rs @@ -6,9 +6,7 @@ use { crate::{ - account_storage::meta::{ - AccountMeta, StoredAccountMeta, StoredMeta, StoredMetaWriteVersion, - }, + account_storage::meta::{AccountMeta, StoredAccountMeta, StoredMeta}, accounts_file::{ AccountsFileError, InternalsForArchive, MatchAccountOwnerError, Result, StorageAccess, StoredAccountsInfo, ALIGN_BOUNDARY_OFFSET, @@ -143,10 +141,6 @@ impl<'append_vec> AppendVecStoredAccountMeta<'append_vec> { self.meta.data_len } - pub fn write_version(&self) -> StoredMetaWriteVersion { - self.meta.write_version_obsolete - } - pub fn meta(&self) -> &StoredMeta { self.meta } @@ -282,10 +276,28 @@ const fn page_align(size: u64) -> u64 { (size + (PAGE_SIZE - 1)) & !(PAGE_SIZE - 1) } +/// Buffer size to use when scanning *without* needing account data +/// +/// When scanning without needing account data, it is desirable to only read the account metadata +/// and skip over the account data. In theory, we could read a single account's metadata at a time, +/// then skip ahead to the next account, entirely bypassing the account's data. However, this comes +/// at the cost of one syscall per account scanned, which is expensive. Ideally +/// we'd like to use the fewest syscalls and also read the least amount of extraneous account data. +/// As a compromise, we use a much smaller buffer, yet still large enough to amortize syscall cost. +/// +/// On mainnet-beta, the overwhelming majority of accounts are token accounts, which use 165 bytes of data. +/// Including storage overhead and alignment, that's 304 bytes per account. +/// Per slot, *with* rent rewrites, we store 1,200 to 1,500 accounts. With a 64 KiB buffer, we'd +/// be able to hold about 215 accounts, so there would not be many syscalls needed to scan +/// the file. Since we also expect some larger accounts, this will also avoid reading/copying +/// large account data. This should be a decent starting value, and can be modified over time. +#[cfg_attr(feature = "dev-context-only-utils", qualifier_attr::qualifiers(pub))] +const SCAN_BUFFER_SIZE_WITHOUT_DATA: usize = 1 << 16; lazy_static! { pub static ref APPEND_VEC_MMAPPED_FILES_OPEN: AtomicU64 = AtomicU64::default(); pub static ref APPEND_VEC_MMAPPED_FILES_DIRTY: AtomicU64 = AtomicU64::default(); - pub static ref APPEND_VEC_REOPEN_AS_FILE_IO: AtomicU64 = AtomicU64::default(); + pub static ref APPEND_VEC_OPEN_AS_FILE_IO: AtomicU64 = AtomicU64::default(); } impl Drop for AppendVec { @@ -298,7 +310,7 @@ impl Drop for AppendVec { } } AppendVecFileBacking::File(_) => { - APPEND_VEC_REOPEN_AS_FILE_IO.fetch_sub(1, Ordering::Relaxed); + APPEND_VEC_OPEN_AS_FILE_IO.fetch_sub(1, Ordering::Relaxed); } } if self.remove_file_on_drop.load(Ordering::Acquire) { @@ -437,9 +449,14 @@ impl AppendVec { self.flush().expect("flush must succeed"); // we are re-opening the file, so don't remove the file on disk when the old mmapped one is dropped self.remove_file_on_drop.store(false, Ordering::Release); - AppendVec::new_from_file(self.path.clone(), self.len(), StorageAccess::File) - .ok() - .map(|(av, _size)| av) + + // The file should have already been sanitized, so we don't need to re-check it when we open it again.
+ AppendVec::new_from_file_unchecked( + self.path.clone(), + self.len(), + StorageAccess::File, + ) + .ok() } } } @@ -472,8 +489,6 @@ impl AppendVec { let (sanitized, num_accounts) = new.sanitize_layout_and_length(); if !sanitized { - // This info show the failing accountvec file path. It helps debugging - // the appendvec data corrupution issues related to recycling. return Err(AccountsFileError::AppendVecError( AppendVecError::IncorrectLayout(new.path.clone()), )); @@ -503,6 +518,7 @@ impl AppendVec { // we must use mmap on non-linux if storage_access == StorageAccess::File { APPEND_VEC_MMAPPED_FILES_OPEN.fetch_add(1, Ordering::Relaxed); + APPEND_VEC_OPEN_AS_FILE_IO.fetch_add(1, Ordering::Relaxed); return Ok(AppendVec { path, @@ -575,19 +591,13 @@ impl AppendVec { /// Also return the offset of the first byte after the requested data that /// falls on a 64-byte boundary. fn get_slice(slice: ValidSlice, offset: usize, size: usize) -> Option<(&[u8], usize)> { - let (next, overflow) = offset.overflowing_add(size); - if overflow || next > slice.0.len() { - return None; - } - let data = &slice.0[offset..next]; - let next = u64_align!(next); - - Some(( - //UNSAFE: This unsafe creates a slice that represents a chunk of self.map memory - //The lifetime of this slice is tied to &self, since it points to self.map memory - unsafe { std::slice::from_raw_parts(data.as_ptr(), size) }, - next, - )) + // SAFETY: Wrapping math is safe here because if `end` does wrap, the Range + // parameter to `.get()` will be invalid, and `.get()` will correctly return None. + let end = offset.wrapping_add(size); + slice + .0 + .get(offset..end) + .map(|subslice| (subslice, u64_align!(end))) } /// Copy `len` bytes from `src` to the first 64-byte boundary after position `offset` of @@ -839,8 +849,12 @@ impl AppendVec { /// data is at the end of each account and is variable sized /// the next account is then aligned on a 64 bit boundary. /// With these helpers, we can skip over reading some of the data depending on what the caller wants. + /// + /// *Safety* - The caller must ensure that the `stored_meta.data_len` won't overflow the calculation. fn next_account_offset(start_offset: usize, stored_meta: &StoredMeta) -> AccountOffsets { - let stored_size_unaligned = STORE_META_OVERHEAD + stored_meta.data_len as usize; + let stored_size_unaligned = STORE_META_OVERHEAD + .checked_add(stored_meta.data_len as usize) + .expect("stored size cannot overflow"); let stored_size_aligned = u64_align!(stored_size_unaligned); let offset_to_end_of_data = start_offset + stored_size_unaligned; let next_account_offset = start_offset + stored_size_aligned; @@ -855,9 +869,11 @@ impl AppendVec { /// Iterate over all accounts and call `callback` with `IndexInfo` for each. /// This fn can help generate an index of the data in this storage. pub(crate) fn scan_index(&self, mut callback: impl FnMut(IndexInfo)) { - let mut offset = 0; + // self.len() is an atomic load, so only do it once + let self_len = self.len(); match &self.backing { AppendVecFileBacking::Mmap(Mmap { mmap, .. 
}) => { + let mut offset = 0; let slice = self.get_valid_slice_from_mmap(mmap); loop { let Some((stored_meta, next)) = Self::get_type::<StoredMeta>(slice, offset) @@ -871,10 +887,10 @@ }; if account_meta.lamports == 0 && stored_meta.pubkey == Pubkey::default() { // we passed the last useful account - return; + break; } let next = Self::next_account_offset(offset, stored_meta); - if next.offset_to_end_of_data > self.len() { + if next.offset_to_end_of_data > self_len { // data doesn't fit, so don't include this account break; } @@ -895,30 +911,36 @@ } } AppendVecFileBacking::File(file) => { + let buffer_size = std::cmp::min(SCAN_BUFFER_SIZE, self_len); let mut reader = - BufferedReader::new(SCAN_BUFFER_SIZE, self.len(), file, STORE_META_OVERHEAD); + BufferedReader::new(buffer_size, self_len, file, STORE_META_OVERHEAD); while reader.read().ok() == Some(BufferedReaderStatus::Success) { - let (offset, bytes_subset) = reader.get_offset_and_data(); - let (meta, next): (&StoredMeta, _) = Self::get_type(bytes_subset, 0).unwrap(); - let (account_meta, next): (&AccountMeta, _) = - Self::get_type(bytes_subset, next).unwrap(); - let (_hash, next): (&AccountHash, _) = - Self::get_type(bytes_subset, next).unwrap(); - let stored_size_aligned = u64_align!(next + (meta.data_len as usize)); + let (offset, bytes) = reader.get_offset_and_data(); + let (stored_meta, next) = Self::get_type::<StoredMeta>(bytes, 0).unwrap(); + let (account_meta, _) = Self::get_type::<AccountMeta>(bytes, next).unwrap(); + if account_meta.lamports == 0 && stored_meta.pubkey == Pubkey::default() { + // we passed the last useful account + break; + } + let next = Self::next_account_offset(offset, stored_meta); + if next.offset_to_end_of_data > self_len { + // data doesn't fit, so don't include this account + break; + } callback(IndexInfo { index_info: { IndexInfoInner { - pubkey: meta.pubkey, + pubkey: stored_meta.pubkey, lamports: account_meta.lamports, offset, - data_len: meta.data_len, + data_len: stored_meta.data_len, executable: account_meta.executable, rent_epoch: account_meta.rent_epoch, } }, - stored_size_aligned, + stored_size_aligned: next.stored_size_aligned, }); - reader.advance_offset(stored_size_aligned); + reader.advance_offset(next.stored_size_aligned); } } } @@ -982,7 +1004,9 @@ impl AppendVec { /// for each offset in `sorted_offsets`, get the size of the account. No other information is needed for the account. pub(crate) fn get_account_sizes(&self, sorted_offsets: &[usize]) -> Vec<usize> { - let mut result = Vec::with_capacity(sorted_offsets.len()); + // self.len() is an atomic load, so only do it once + let self_len = self.len(); + let mut account_sizes = Vec::with_capacity(sorted_offsets.len()); match &self.backing { AppendVecFileBacking::Mmap(Mmap { mmap, .. 
}) => { let slice = self.get_valid_slice_from_mmap(mmap); @@ -991,22 +1015,35 @@ break; }; let next = Self::next_account_offset(offset, stored_meta); - if next.offset_to_end_of_data > self.len() { + if next.offset_to_end_of_data > self_len { // data doesn't fit, so don't include break; } - result.push(next.stored_size_aligned); + account_sizes.push(next.stored_size_aligned); } } - AppendVecFileBacking::File(_file) => { + AppendVecFileBacking::File(file) => { + let mut buffer = [0u8; mem::size_of::<StoredMeta>()]; for &offset in sorted_offsets { - self.get_stored_account_meta_callback(offset, |stored_meta| { - result.push(stored_meta.stored_size()); - }); + let Some(bytes_read) = + read_into_buffer(file, self_len, offset, &mut buffer).ok() + else { + break; + }; + let bytes = ValidSlice(&buffer[..bytes_read]); + let Some((stored_meta, _)) = Self::get_type::<StoredMeta>(bytes, 0) else { + break; + }; + let next = Self::next_account_offset(offset, stored_meta); + if next.offset_to_end_of_data > self_len { + // data doesn't fit, so don't include + break; + } + account_sizes.push(next.stored_size_aligned); } } } - result + account_sizes } /// iterate over all pubkeys and call `callback`. @@ -1014,10 +1051,12 @@ /// `data` is completely ignored, for example. /// Also, no references have to be maintained/returned from an iterator function. /// This fn can operate on a batch of data at once. - pub(crate) fn scan_pubkeys(&self, mut callback: impl FnMut(&Pubkey)) { - let mut offset = 0; + pub fn scan_pubkeys(&self, mut callback: impl FnMut(&Pubkey)) { + // self.len() is an atomic load, so only do it once + let self_len = self.len(); match &self.backing { AppendVecFileBacking::Mmap(Mmap { mmap, .. }) => { + let mut offset = 0; let slice = self.get_valid_slice_from_mmap(mmap); loop { let Some((stored_meta, _)) = Self::get_type::<StoredMeta>(slice, offset) else { @@ -1025,7 +1064,7 @@ break; }; let next = Self::next_account_offset(offset, stored_meta); - if next.offset_to_end_of_data > self.len() { + if next.offset_to_end_of_data > self_len { // data doesn't fit, so don't include this pubkey break; } @@ -1033,10 +1072,22 @@ offset = next.next_account_offset; } } - AppendVecFileBacking::File(_file) => { - self.scan_accounts(|stored_meta| { - callback(stored_meta.pubkey()); - }); + AppendVecFileBacking::File(file) => { + let buffer_size = std::cmp::min(SCAN_BUFFER_SIZE_WITHOUT_DATA, self_len); + let mut reader = + BufferedReader::new(buffer_size, self_len, file, STORE_META_OVERHEAD); + while reader.read().ok() == Some(BufferedReaderStatus::Success) { + let (offset, bytes) = reader.get_offset_and_data(); + let (stored_meta, _) = Self::get_type::<StoredMeta>(bytes, 0).unwrap(); + let next = Self::next_account_offset(offset, stored_meta); + if next.offset_to_end_of_data > self.len() { + // data doesn't fit, so don't include this pubkey + break; + } + callback(&stored_meta.pubkey); + // since we only needed to read the pubkey, skip ahead to the next account + reader.advance_offset(next.stored_size_aligned); + } } } } @@ -1054,7 +1105,7 @@ skip: usize, ) -> Option<StoredAccountsInfo> { let _lock = self.append_lock.lock().unwrap(); - let default_hash: Hash = Hash::default(); // [0_u8; 32]; + let default_hash = Hash::default(); let mut offset = self.len(); let len = accounts.len(); // Here we have `len - skip` number of accounts. 
The +1 extra capacity @@ -1080,16 +1131,15 @@ impl AppendVec { data_len: account.data().len() as u64, write_version_obsolete: 0, }; - let meta_ptr = &stored_meta as *const StoredMeta; - let account_meta_ptr = &account_meta as *const AccountMeta; - let data_len = stored_meta.data_len as usize; - let data_ptr = account.data().as_ptr(); + let stored_meta_ptr = ptr::from_ref(&stored_meta).cast(); + let account_meta_ptr = ptr::from_ref(&account_meta).cast(); let hash_ptr = bytemuck::bytes_of(&default_hash).as_ptr(); + let data_ptr = account.data().as_ptr(); let ptrs = [ - (meta_ptr as *const u8, mem::size_of::<StoredMeta>()), - (account_meta_ptr as *const u8, mem::size_of::<AccountMeta>()), + (stored_meta_ptr, mem::size_of::<StoredMeta>()), + (account_meta_ptr, mem::size_of::<AccountMeta>()), (hash_ptr, mem::size_of::<Hash>()), - (data_ptr, data_len), + (data_ptr, stored_meta.data_len as usize), ]; if let Some(start_offset) = self.append_ptrs_locked(&mut offset, &ptrs) { offsets.push(start_offset) @@ -1809,4 +1859,286 @@ pub mod tests { let result = av.get_stored_account_meta_callback(0, |_| true); assert!(result.is_none()); // Expect None to be returned. } + + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] + fn test_get_account_sizes(storage_access: StorageAccess) { + const NUM_ACCOUNTS: usize = 37; + let pubkeys: Vec<_> = std::iter::repeat_with(Pubkey::new_unique) + .take(NUM_ACCOUNTS) + .collect(); + + let mut rng = thread_rng(); + let mut accounts = Vec::with_capacity(pubkeys.len()); + let mut stored_sizes = Vec::with_capacity(pubkeys.len()); + for _ in &pubkeys { + let lamports = rng.gen(); + let data_len = rng.gen_range(0..MAX_PERMITTED_DATA_LENGTH) as usize; + let account = AccountSharedData::new(lamports, data_len, &Pubkey::default()); + accounts.push(account); + stored_sizes.push(aligned_stored_size(data_len)); + } + let accounts = accounts; + let stored_sizes = stored_sizes; + let total_stored_size = stored_sizes.iter().sum(); + + let temp_file = get_append_vec_path("test_get_account_sizes"); + let account_offsets = { + let append_vec = AppendVec::new(&temp_file.path, true, total_stored_size); + // wrap AppendVec in ManuallyDrop to ensure we do not remove the backing file when dropped + let append_vec = ManuallyDrop::new(append_vec); + let slot = 77; // the specific slot does not matter + let storable_accounts: Vec<_> = std::iter::zip(&pubkeys, &accounts).collect(); + let stored_accounts_info = append_vec + .append_accounts(&(slot, storable_accounts.as_slice()), 0) + .unwrap(); + append_vec.flush().unwrap(); + stored_accounts_info.offsets + }; + + // now open the append vec with the given storage access method + // then get the account sizes to ensure they are correct + let (append_vec, _) = + AppendVec::new_from_file(&temp_file.path, total_stored_size, storage_access).unwrap(); + + let account_sizes = append_vec.get_account_sizes(account_offsets.as_slice()); + assert_eq!(account_sizes, stored_sizes); + } + + /// A helper function for testing different scenarios for scan_*. + /// + /// `modify_fn` is used to (optionally) modify the append vec before checks are performed. + /// `check_fn` performs the check for the scan. 
+ fn test_scan_helper( + storage_access: StorageAccess, + modify_fn: impl Fn(&PathBuf, usize) -> usize, + check_fn: impl Fn(&AppendVec, &[Pubkey], &[usize], &[AccountSharedData]), + ) { + const NUM_ACCOUNTS: usize = 37; + let pubkeys: Vec<_> = std::iter::repeat_with(solana_sdk::pubkey::new_rand) + .take(NUM_ACCOUNTS) + .collect(); + + let mut rng = thread_rng(); + let mut accounts = Vec::with_capacity(pubkeys.len()); + let mut total_stored_size = 0; + for _ in &pubkeys { + let lamports = rng.gen(); + let data_len = rng.gen_range(0..MAX_PERMITTED_DATA_LENGTH) as usize; + let account = AccountSharedData::new(lamports, data_len, &Pubkey::default()); + accounts.push(account); + total_stored_size += aligned_stored_size(data_len); + } + let accounts = accounts; + let total_stored_size = total_stored_size; + + let temp_file = get_append_vec_path("test_scan"); + let account_offsets = { + // wrap AppendVec in ManuallyDrop to ensure we do not remove the backing file when dropped + let append_vec = + ManuallyDrop::new(AppendVec::new(&temp_file.path, true, total_stored_size)); + let slot = 42; // the specific slot does not matter + let storable_accounts: Vec<_> = std::iter::zip(&pubkeys, &accounts).collect(); + let stored_accounts_info = append_vec + .append_accounts(&(slot, storable_accounts.as_slice()), 0) + .unwrap(); + append_vec.flush().unwrap(); + stored_accounts_info.offsets + }; + + let total_stored_size = modify_fn(&temp_file.path, total_stored_size); + // now open the append vec with the given storage access method + // then perform the scan and check it is correct + let append_vec = ManuallyDrop::new( + AppendVec::new_from_file_unchecked(&temp_file.path, total_stored_size, storage_access) + .unwrap(), + ); + + check_fn(&append_vec, &pubkeys, &account_offsets, &accounts); + } + + /// A helper fn to test `scan_pubkeys`. + fn test_scan_pubkeys_helper( + storage_access: StorageAccess, + modify_fn: impl Fn(&PathBuf, usize) -> usize, + ) { + test_scan_helper( + storage_access, + modify_fn, + |append_vec, pubkeys, _account_offsets, _accounts| { + let mut i = 0; + append_vec.scan_pubkeys(|pubkey| { + assert_eq!(pubkey, pubkeys.get(i).unwrap()); + i += 1; + }); + assert_eq!(i, pubkeys.len()); + }, + ) + } + + /// Test `scan_pubkey` for a valid account storage. + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] + fn test_scan_pubkeys(storage_access: StorageAccess) { + test_scan_pubkeys_helper(storage_access, |_, size| size); + } + + /// Test `scan_pubkey` for storage with incomplete account meta data. + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] + fn test_scan_pubkeys_incomplete_data(storage_access: StorageAccess) { + test_scan_pubkeys_helper(storage_access, |path, size| { + // Append 1 byte of data at the end of the storage file to simulate + // incomplete account's meta data. 
+ let mut f = OpenOptions::new() + .read(true) + .append(true) + .open(path) + .unwrap(); + f.write_all(&[0xFF]).unwrap(); + size + 1 + }); + } + + /// Test `scan_pubkey` for storage which is missing the last account data + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] + fn test_scan_pubkeys_missing_account_data(storage_access: StorageAccess) { + test_scan_pubkeys_helper(storage_access, |path, size| { + let fake_stored_meta = StoredMeta { + write_version_obsolete: 0, + data_len: 100, + pubkey: solana_sdk::pubkey::new_rand(), + }; + let fake_account_meta = AccountMeta { + lamports: 100, + rent_epoch: 10, + owner: solana_sdk::pubkey::new_rand(), + executable: false, + }; + + let stored_meta_slice: &[u8] = unsafe { + std::slice::from_raw_parts( + (&fake_stored_meta as *const StoredMeta) as *const u8, + mem::size_of::<StoredMeta>(), + ) + }; + let account_meta_slice: &[u8] = unsafe { + std::slice::from_raw_parts( + (&fake_account_meta as *const AccountMeta) as *const u8, + mem::size_of::<AccountMeta>(), + ) + }; + + let mut f = OpenOptions::new() + .read(true) + .append(true) + .open(path) + .unwrap(); + + f.write_all(stored_meta_slice).unwrap(); + f.write_all(account_meta_slice).unwrap(); + + size + mem::size_of::<StoredMeta>() + mem::size_of::<AccountMeta>() + }); + } + + /// A helper fn to test scan_index + fn test_scan_index_helper( + storage_access: StorageAccess, + modify_fn: impl Fn(&PathBuf, usize) -> usize, + ) { + test_scan_helper( + storage_access, + modify_fn, + |append_vec, pubkeys, account_offsets, accounts| { + let mut i = 0; + append_vec.scan_index(|index_info| { + let pubkey = pubkeys.get(i).unwrap(); + let account = accounts.get(i).unwrap(); + let offset = account_offsets.get(i).unwrap(); + + assert_eq!( + index_info.stored_size_aligned, + aligned_stored_size(account.data().len()), + ); + assert_eq!(index_info.index_info.offset, *offset); + assert_eq!(index_info.index_info.pubkey, *pubkey); + assert_eq!(index_info.index_info.lamports, account.lamports()); + assert_eq!(index_info.index_info.rent_epoch, account.rent_epoch()); + assert_eq!(index_info.index_info.executable, account.executable()); + assert_eq!(index_info.index_info.data_len, account.data().len() as u64); + + i += 1; + }); + assert_eq!(i, accounts.len()); + }, + ) + } + + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] + fn test_scan_index(storage_access: StorageAccess) { + test_scan_index_helper(storage_access, |_, size| size); + } + + /// Test `scan_index` for storage with incomplete account meta data. + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] + fn test_scan_index_incomplete_data(storage_access: StorageAccess) { + test_scan_index_helper(storage_access, |path, size| { + // Append 1 byte of data at the end of the storage file to simulate + // incomplete account's meta data. 
+ let mut f = OpenOptions::new() + .read(true) + .append(true) + .open(path) + .unwrap(); + f.write_all(&[0xFF]).unwrap(); + size + 1 + }); + } + + /// Test `scan_index` for storage which is missing the last account data + #[test_case(StorageAccess::Mmap)] + #[test_case(StorageAccess::File)] + fn test_scan_index_missing_account_data(storage_access: StorageAccess) { + test_scan_index_helper(storage_access, |path, size| { + let fake_stored_meta = StoredMeta { + write_version_obsolete: 0, + data_len: 100, + pubkey: solana_sdk::pubkey::new_rand(), + }; + let fake_account_meta = AccountMeta { + lamports: 100, + rent_epoch: 10, + owner: solana_sdk::pubkey::new_rand(), + executable: false, + }; + + let stored_meta_slice: &[u8] = unsafe { + std::slice::from_raw_parts( + (&fake_stored_meta as *const StoredMeta) as *const u8, + mem::size_of::<StoredMeta>(), + ) + }; + let account_meta_slice: &[u8] = unsafe { + std::slice::from_raw_parts( + (&fake_account_meta as *const AccountMeta) as *const u8, + mem::size_of::<AccountMeta>(), + ) + }; + + let mut f = OpenOptions::new() + .read(true) + .append(true) + .open(path) + .unwrap(); + + f.write_all(stored_meta_slice).unwrap(); + f.write_all(account_meta_slice).unwrap(); + + size + mem::size_of::<StoredMeta>() + mem::size_of::<AccountMeta>() + }); + } } diff --git a/accounts-db/src/bucket_map_holder_stats.rs b/accounts-db/src/bucket_map_holder_stats.rs index 9b5cd20f0cd9b5..35a80c228d7278 100644 --- a/accounts-db/src/bucket_map_holder_stats.rs +++ b/accounts-db/src/bucket_map_holder_stats.rs @@ -468,6 +468,16 @@ impl BucketMapHolderStats { .unwrap_or_default(), i64 ), + ( + "index_exceptional_entry", + disk.map(|disk| disk + .stats + .index + .index_uses_uncommon_slot_list_len_or_refcount + .load(Ordering::Relaxed)) + .unwrap_or_default(), + i64 + ), ( "disk_index_data_file_size", disk.map(|disk| disk.stats.data.total_file_size.load(Ordering::Relaxed)) diff --git a/accounts-db/src/cache_hash_data.rs b/accounts-db/src/cache_hash_data.rs index 029b4c31f9395e..350fdec560caed 100644 --- a/accounts-db/src/cache_hash_data.rs +++ b/accounts-db/src/cache_hash_data.rs @@ -31,9 +31,11 @@ pub struct Header { // In order to safely guarantee Header is Pod, it cannot have any padding // This is obvious by inspection, but this will also catch any inadvertent // changes in the future (i.e. it is a test). +// Additionally, we compare the header size with `u64` instead of `usize` +// to ensure binary compatibility doesn't break. const _: () = assert!( - std::mem::size_of::<Header>
() == std::mem::size_of::<u64>(), - "Header cannot have any padding" + std::mem::size_of::<Header>
() == std::mem::size_of::<u64>(), + "Header cannot have any padding and must be the same size as u64", ); /// cache hash data file to be mmapped later @@ -332,11 +334,7 @@ impl CacheHashData { let _ignored = remove_file(&cache_path); let cell_size = std::mem::size_of::<EntryType>() as u64; let mut m1 = Measure::start("create save"); - let entries = data - .iter() - .map(|x: &Vec<EntryType>| x.len()) - .collect::<Vec<_>>(); - let entries = entries.iter().sum::<usize>(); + let entries = data.iter().map(Vec::len).sum::<usize>(); let capacity = cell_size * (entries as u64) + std::mem::size_of::<Header>
() as u64; let mmap = CacheHashDataFile::new_map(&cache_path, capacity)?; diff --git a/accounts-db/src/sorted_storages.rs b/accounts-db/src/sorted_storages.rs index f47dadb249a077..a8b21db11430f3 100644 --- a/accounts-db/src/sorted_storages.rs +++ b/accounts-db/src/sorted_storages.rs @@ -297,7 +297,7 @@ mod tests { assert!( (slot != 2 && slot != 4) ^ storage - .map(|storage| storage.append_vec_id() == (slot as AccountsFileId)) + .map(|storage| storage.id() == (slot as AccountsFileId)) .unwrap_or(false), "slot: {slot}, storage: {storage:?}" ); @@ -434,10 +434,7 @@ mod tests { ); assert_eq!(result.slot_count(), 1); assert_eq!(result.storages.len(), 1); - assert_eq!( - result.get(slot).unwrap().append_vec_id(), - store.append_vec_id() - ); + assert_eq!(result.get(slot).unwrap().id(), store.id()); } fn create_sample_store(id: AccountsFileId) -> Arc<AccountStorageEntry> { @@ -479,13 +476,7 @@ mod tests { assert!(result.get(5).is_none()); assert!(result.get(6).is_none()); assert!(result.get(8).is_none()); - assert_eq!( - result.get(slots[0]).unwrap().append_vec_id(), - store.append_vec_id() - ); - assert_eq!( - result.get(slots[1]).unwrap().append_vec_id(), - store2.append_vec_id() - ); + assert_eq!(result.get(slots[0]).unwrap().id(), store.id()); + assert_eq!(result.get(slots[1]).unwrap().id(), store2.id()); } } diff --git a/accounts-db/src/utils.rs b/accounts-db/src/utils.rs index 74731e01ad1456..d946ffe1e0e3ef 100644 --- a/accounts-db/src/utils.rs +++ b/accounts-db/src/utils.rs @@ -1,7 +1,7 @@ use { lazy_static, log::*, - solana_measure::measure, + solana_measure::measure_time, std::{ collections::HashSet, fs, io, @@ -121,7 +121,7 @@ pub fn move_and_async_delete_path(path: impl AsRef<Path>) { .name("solDeletePath".to_string()) .spawn(move || { trace!("background deleting {}...", path_delete.display()); - let (result, measure_delete) = measure!(fs::remove_dir_all(&path_delete)); + let (result, measure_delete) = measure_time!(fs::remove_dir_all(&path_delete)); if let Err(err) = result { panic!("Failed to async delete '{}': {err}", path_delete.display()); } diff --git a/banking-bench/Cargo.toml b/banking-bench/Cargo.toml index ed791d94499a0f..67ca53f88324c9 100644 --- a/banking-bench/Cargo.toml +++ b/banking-bench/Cargo.toml @@ -9,7 +9,7 @@ license = { workspace = true } edition = { workspace = true } [dependencies] -clap = { version = "3.1.8", features = ["derive"] } +clap = { version = "3.1.8", features = ["derive", "cargo"] } crossbeam-channel = { workspace = true } log = { workspace = true } rand = { workspace = true } diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index a1605ff9888c47..7d194d044aa85c 100644 --- a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -474,6 +474,7 @@ fn main() { Arc::new(connection_cache), bank_forks.clone(), &Arc::new(PrioritizationFeeCache::new(0u64)), + false, ); // This is so that the signal_receiver does not go out of scope after the closure. 
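Aside: the utils.rs hunk above tracks solana-measure's rename of the `measure!` macro to `measure_time!`. A minimal sketch of the call pattern, assuming the macro keeps the `(result, Measure)` return shape that the hunk itself relies on (`delete_dir_timed` is a hypothetical helper, not part of this change):

use solana_measure::measure_time;

fn delete_dir_timed(path: &std::path::Path) {
    // measure_time! evaluates the expression and pairs its value with a
    // Measure that records how long the evaluation took
    let (result, measure_delete) = measure_time!(std::fs::remove_dir_all(path));
    if let Err(err) = result {
        panic!("Failed to delete '{}': {err}", path.display());
    }
    // Measure implements Display, so the timing can be logged directly
    println!("{measure_delete}");
}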
diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs index c08a41c5d91a6b..002e77b0549061 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -28,7 +28,6 @@ use { send_transaction_service::{SendTransactionService, TransactionInfo}, tpu_info::NullTpuInfo, }, - solana_svm::transaction_results::TransactionExecutionResult, std::{ io, net::{Ipv4Addr, SocketAddr}, @@ -350,20 +349,18 @@ impl Banks for BanksServer { ) -> BanksTransactionResultWithMetadata { let bank = self.bank_forks.read().unwrap().working_bank(); match bank.process_transaction_with_metadata(transaction) { - TransactionExecutionResult::NotExecuted(error) => BanksTransactionResultWithMetadata { + Err(error) => BanksTransactionResultWithMetadata { result: Err(error), metadata: None, }, - TransactionExecutionResult::Executed { details, .. } => { - BanksTransactionResultWithMetadata { - result: details.status, - metadata: Some(TransactionMetadata { - compute_units_consumed: details.executed_units, - log_messages: details.log_messages.unwrap_or_default(), - return_data: details.return_data, - }), - } - } + Ok(details) => BanksTransactionResultWithMetadata { + result: details.status, + metadata: Some(TransactionMetadata { + compute_units_consumed: details.executed_units, + log_messages: details.log_messages.unwrap_or_default(), + return_data: details.return_data, + }), + }, } } diff --git a/bench-tps/src/bench.rs b/bench-tps/src/bench.rs index ccf601214c1743..d8c550c9312e01 100644 --- a/bench-tps/src/bench.rs +++ b/bench-tps/src/bench.rs @@ -11,8 +11,9 @@ use { log::*, rand::distributions::{Distribution, Uniform}, rayon::prelude::*, - solana_client::{nonce_utils, rpc_request::MAX_MULTIPLE_ACCOUNTS}, + solana_client::nonce_utils, solana_metrics::{self, datapoint_info}, + solana_rpc_client_api::request::MAX_MULTIPLE_ACCOUNTS, solana_sdk::{ account::Account, clock::{DEFAULT_MS_PER_SLOT, DEFAULT_S_PER_SLOT, MAX_PROCESSING_AGE}, @@ -1223,7 +1224,7 @@ pub fn fund_keypairs( mod tests { use { super::*, - solana_runtime::{bank::Bank, bank_client::BankClient}, + solana_runtime::{bank::Bank, bank_client::BankClient, bank_forks::BankForks}, solana_sdk::{ commitment_config::CommitmentConfig, feature_set::FeatureSet, @@ -1234,16 +1235,18 @@ mod tests { }, }; - fn bank_with_all_features(genesis_config: &GenesisConfig) -> Arc<Bank> { + fn bank_with_all_features( + genesis_config: &GenesisConfig, + ) -> (Arc<Bank>, Arc<RwLock<BankForks>>) { let mut bank = Bank::new_for_tests(genesis_config); bank.feature_set = Arc::new(FeatureSet::all_enabled()); - bank.wrap_with_bank_forks_for_tests().0 + bank.wrap_with_bank_forks_for_tests() } #[test] fn test_bench_tps_bank_client() { let (genesis_config, id) = create_genesis_config(sol_to_lamports(10_000.0)); - let bank = bank_with_all_features(&genesis_config); + let (bank, _bank_forks) = bank_with_all_features(&genesis_config); let client = Arc::new(BankClient::new_shared(bank)); let config = Config { @@ -1264,7 +1267,7 @@ mod tests { #[test] fn test_bench_tps_fund_keys() { let (genesis_config, id) = create_genesis_config(sol_to_lamports(10_000.0)); - let bank = bank_with_all_features(&genesis_config); + let (bank, _bank_forks) = bank_with_all_features(&genesis_config); let client = Arc::new(BankClient::new_shared(bank)); let keypair_count = 20; let lamports = 20; @@ -1289,7 +1292,7 @@ mod tests { let (mut genesis_config, id) = create_genesis_config(sol_to_lamports(10_000.0)); let fee_rate_governor = FeeRateGovernor::new(11, 0); genesis_config.fee_rate_governor = 
fee_rate_governor; - let bank = bank_with_all_features(&genesis_config); + let (bank, _bank_forks) = bank_with_all_features(&genesis_config); let client = Arc::new(BankClient::new_shared(bank)); let keypair_count = 20; let lamports = 20; @@ -1307,7 +1310,7 @@ mod tests { #[test] fn test_bench_tps_create_durable_nonce() { let (genesis_config, id) = create_genesis_config(sol_to_lamports(10_000.0)); - let bank = bank_with_all_features(&genesis_config); + let (bank, _bank_forks) = bank_with_all_features(&genesis_config); let client = Arc::new(BankClient::new_shared(bank)); let keypair_count = 10; let lamports = 10_000_000; diff --git a/bench-tps/src/log_transaction_service.rs b/bench-tps/src/log_transaction_service.rs index 15deab12057f1c..09ad3561cf55e0 100644 --- a/bench-tps/src/log_transaction_service.rs +++ b/bench-tps/src/log_transaction_service.rs @@ -7,8 +7,8 @@ use { crossbeam_channel::{select, tick, unbounded, Receiver, Sender}, log::*, serde::Serialize, - solana_client::rpc_config::RpcBlockConfig, solana_measure::measure::Measure, + solana_rpc_client_api::config::RpcBlockConfig, solana_sdk::{ clock::{DEFAULT_MS_PER_SLOT, MAX_PROCESSING_AGE}, commitment_config::{CommitmentConfig, CommitmentLevel}, diff --git a/bloom/Cargo.toml b/bloom/Cargo.toml index 11e80be72aedc4..640dfbb644a173 100644 --- a/bloom/Cargo.toml +++ b/bloom/Cargo.toml @@ -14,7 +14,6 @@ bv = { workspace = true, features = ["serde"] } fnv = { workspace = true } log = { workspace = true } rand = { workspace = true } -rayon = { workspace = true } serde = { workspace = true, features = ["rc"] } serde_derive = { workspace = true } solana-frozen-abi = { workspace = true, optional = true } @@ -22,6 +21,9 @@ solana-frozen-abi-macro = { workspace = true, optional = true } solana-sanitize = { workspace = true } solana-sdk = { workspace = true } +[dev-dependencies] +rayon = { workspace = true } + [lib] crate-type = ["lib"] name = "solana_bloom" diff --git a/bucket_map/src/bucket.rs b/bucket_map/src/bucket.rs index c72a3b3ad14b5a..ebd77178450b37 100644 --- a/bucket_map/src/bucket.rs +++ b/bucket_map/src/bucket.rs @@ -528,20 +528,21 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { // fail early if the data bucket we need doesn't exist - we don't want the index entry partially allocated return Err(BucketMapError::DataNoSpace((best_fit_bucket, 0))); } + let max_search = self.index.max_search(); let (elem, elem_ix) = Self::find_index_entry_mut(&self.index, key, self.random)?; - let elem = if let Some(elem) = elem { - elem - } else { - let is_resizing = false; - self.index.occupy(elem_ix, is_resizing).unwrap(); - let elem_allocate = IndexEntryPlaceInBucket::new(elem_ix); - // These fields will be overwritten after allocation by callers. - // Since this part of the mmapped file could have previously been used by someone else, there can be garbage here. - elem_allocate.init(&mut self.index, key); - elem_allocate - }; if !requires_data_bucket { + let elem = if let Some(elem) = elem { + elem + } else { + let is_resizing = false; + self.index.occupy(elem_ix, is_resizing).unwrap(); + let elem_allocate = IndexEntryPlaceInBucket::new(elem_ix); + // These fields will be overwritten after allocation by callers. + // Since this part of the mmapped file could have previously been used by someone else, there can be garbage here. 
+ elem_allocate.init(&mut self.index, key); + elem_allocate + }; // new data stored should be stored in IndexEntry and NOT in data file // new data len is 0 or 1 if let OccupiedEnum::MultipleSlots(multiple_slots) = @@ -557,6 +558,10 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { if let Some(single_element) = data.next() { OccupiedEnum::OneSlotInIndex(single_element) } else { + self.stats + .index + .index_uses_uncommon_slot_list_len_or_refcount + .store(true, Ordering::Relaxed); OccupiedEnum::ZeroSlots }, ); @@ -566,7 +571,10 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { // storing the slot list requires using the data file let mut old_data_entry_to_free = None; // see if old elements were in a data file - if let Some(multiple_slots) = elem.get_multiple_slots_mut(&mut self.index) { + if let Some(multiple_slots) = elem + .as_ref() + .and_then(|elem| elem.get_multiple_slots_mut(&mut self.index)) + { let bucket_ix = multiple_slots.data_bucket_ix() as usize; let current_bucket = &mut self.data[bucket_ix]; let elem_loc = multiple_slots.data_loc(current_bucket); @@ -634,10 +642,23 @@ impl<'b, T: Clone + Copy + PartialEq + std::fmt::Debug + 'static> Bucket { } // update index bucket after data bucket has been updated. - elem.set_slot_count_enum_value( + elem.unwrap_or_else(|| { + let is_resizing = false; + self.index.occupy(elem_ix, is_resizing).unwrap(); + let elem_allocate = IndexEntryPlaceInBucket::new(elem_ix); + // These fields will be overwritten after allocation by callers. + // Since this part of the mmapped file could have previously been used by someone else, there can be garbage here. + elem_allocate.init(&mut self.index, key); + elem_allocate + }) + .set_slot_count_enum_value( &mut self.index, OccupiedEnum::MultipleSlots(&multiple_slots), ); + self.stats + .index + .index_uses_uncommon_slot_list_len_or_refcount + .store(true, Ordering::Relaxed); success = true; break; } @@ -1221,9 +1242,11 @@ mod tests { // This causes it to be skipped. let entry = IndexEntryPlaceInBucket::new(ix); entry.init(&mut index, &(other.0)); + entry.set_slot_count_enum_value(&mut index, OccupiedEnum::ZeroSlots); let entry = IndexEntryPlaceInBucket::new(ix + 1); // sets pubkey value and enum value of ZeroSlots. Leaving it at zero is illegal at startup, so we'll assert when we find this duplicate. entry.init(&mut index, &(raw[0].0)); + entry.set_slot_count_enum_value(&mut index, OccupiedEnum::ZeroSlots); // since the same key is already in use with a different value, it is a duplicate. // But, it is a zero length entry. This is not supported at startup. Startup would have never generated a zero length occupied entry. 
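The bucket.rs hunk above defers occupying an index entry until a write actually commits, threading an `Option` through both the index-only and data-bucket paths. A minimal sketch of that pattern with hypothetical `Index`/`Entry` types (not the real `Bucket` internals):

struct Index {
    occupied: Vec<bool>,
}

struct Entry {
    ix: usize,
}

impl Index {
    // the real occupy() can fail; this illustrative version always succeeds
    fn occupy(&mut self, ix: usize) -> Entry {
        self.occupied[ix] = true;
        Entry { ix }
    }
}

// Allocate only on the path that actually commits a write, so an early
// error return never leaves a half-initialized, zero-length index entry.
fn get_or_occupy(index: &mut Index, existing: Option<Entry>, ix: usize) -> Entry {
    existing.unwrap_or_else(|| index.occupy(ix))
}

fn main() {
    let mut index = Index { occupied: vec![false; 4] };
    let entry = get_or_occupy(&mut index, None, 2);
    assert!(index.occupied[entry.ix]);
}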
diff --git a/bucket_map/src/bucket_stats.rs b/bucket_map/src/bucket_stats.rs index ed862e084c69ca..63138e77c1521a 100644 --- a/bucket_map/src/bucket_stats.rs +++ b/bucket_map/src/bucket_stats.rs @@ -1,5 +1,5 @@ use std::sync::{ - atomic::{AtomicU64, Ordering}, + atomic::{AtomicBool, AtomicU64, Ordering}, Arc, }; @@ -22,6 +22,7 @@ pub struct BucketStats { pub file_count: AtomicU64, pub total_file_size: AtomicU64, pub startup: StartupBucketStats, + pub index_uses_uncommon_slot_list_len_or_refcount: AtomicBool, } impl BucketStats { diff --git a/bucket_map/src/bucket_storage.rs b/bucket_map/src/bucket_storage.rs index dbddbc169db321..a55923c142d850 100644 --- a/bucket_map/src/bucket_storage.rs +++ b/bucket_map/src/bucket_storage.rs @@ -283,15 +283,15 @@ impl<O: BucketOccupied> BucketStorage<O> { /// 'is_resizing' false if caller is adding an item to the index (so increment count) pub fn occupy(&mut self, ix: u64, is_resizing: bool) -> Result<(), BucketStorageError> { debug_assert!(ix < self.capacity(), "occupy: bad index size"); - let mut e = Err(BucketStorageError::AlreadyOccupied); //debug!("ALLOC {} {}", ix, uid); if self.try_lock(ix) { - e = Ok(()); if !is_resizing { self.count.fetch_add(1, Ordering::Relaxed); } + Ok(()) + } else { + Err(BucketStorageError::AlreadyOccupied) } - e } pub fn free(&mut self, ix: u64) { @@ -396,17 +396,16 @@ impl<O: BucketOccupied> BucketStorage<O> { .read(true) .write(true) .create(create) - .open(path.clone()); - if let Err(e) = data { + .open(&path); + if let Err(err) = data { if !create { // we can't load this file, so bail without error return None; } panic!( - "Unable to create data file {:?} in current dir({:?}): {:?}", - path, + "Unable to create data file '{}' in current dir ({:?}): {err}", + path.as_ref().display(), std::env::current_dir(), - e ); } let mut data = data.unwrap(); @@ -427,15 +426,13 @@ impl<O: BucketOccupied> BucketStorage<O> { .fetch_add(measure_flush.end_as_us(), Ordering::Relaxed); } let mut measure_mmap = Measure::start("measure_mmap"); - let res = unsafe { MmapMut::map_mut(&data) }; - if let Err(e) = res { + let mmap = unsafe { MmapMut::map_mut(&data) }.unwrap_or_else(|err| { panic!( - "Unable to mmap file {:?} in current dir({:?}): {:?}", - path, + "Unable to mmap file '{}' in current dir ({:?}): {err}", + path.as_ref().display(), std::env::current_dir(), - e ); - } + }); measure_mmap.stop(); stats .new_file_us .fetch_add(measure_mmap.as_us(), Ordering::Relaxed); stats .mmap_us .fetch_add(measure_mmap.as_us(), Ordering::Relaxed); - res.ok() + Some(mmap) } /// allocate a new memory mapped file of size `bytes` on one of `drives` @@ -544,12 +541,15 @@ mod test { use { super::*, - crate::{bucket_storage::BucketOccupied, index_entry::IndexBucket}, + crate::{ + bucket_storage::BucketOccupied, + index_entry::{BucketWithHeader, IndexBucket}, + }, tempfile::tempdir, }; #[test] - fn test_bucket_storage() { + fn test_bucket_storage_index_bucket() { let tmpdir = tempdir().unwrap(); let paths: Vec<PathBuf> = vec![tmpdir.path().to_path_buf()]; assert!(!paths.is_empty()); @@ -560,6 +560,7 @@ mod test { let max_search = 1; let stats = Arc::default(); let count = Arc::default(); + // this uses `IndexBucket`. 
`IndexBucket` doesn't change state on `occupy()` let mut storage = BucketStorage::<IndexBucket<u64>>::new( drives, num_elems, elem_size, max_search, stats, count, ) .0; let ix = 0; assert!(storage.is_free(ix)); assert!(storage.occupy(ix, false).is_ok()); + } + + #[test] + fn test_bucket_storage_using_header() { + let tmpdir = tempdir().unwrap(); + let paths: Vec<PathBuf> = vec![tmpdir.path().to_path_buf()]; + assert!(!paths.is_empty()); + + let drives = Arc::new(paths); + let num_elems = 1; + let elem_size = std::mem::size_of::<crate::index_entry::IndexEntry<u64>>() as u64; + let max_search = 1; + let stats = Arc::default(); + let count = Arc::default(); + let mut storage = BucketStorage::<BucketWithHeader>::new( + drives, num_elems, elem_size, max_search, stats, count, + ) + .0; + let ix = 0; + assert!(storage.is_free(ix)); + assert!(storage.occupy(ix, false).is_ok()); assert!(storage.occupy(ix, false).is_err()); assert!(!storage.is_free(ix)); storage.free(ix); diff --git a/bucket_map/src/index_entry.rs b/bucket_map/src/index_entry.rs index a5aad5f87eee19..a56a42aa413d77 100644 --- a/bucket_map/src/index_entry.rs +++ b/bucket_map/src/index_entry.rs @@ -83,7 +83,9 @@ impl BucketOccupied for BucketWithHeader { #[derive(Debug)] pub struct IndexBucketUsingBitVecBits<T: 'static> { /// 2 bits per entry that represent a 4 state enum tag - pub enum_tag: BitVec, + pub enum_tag_first_bit: BitVec, + /// second will be empty in all healthy cases because in real use, we only use enum values 0 and 2 (and we use the high bit for first) + pub enum_tag_second_bit: BitVec, /// number of elements allocated capacity: u64, _phantom: PhantomData<&'static T>, } impl<T: 'static> IndexBucketUsingBitVecBits<T> { /// set the 2 bits (first and second) in `enum_tag` - fn set_bits(&mut self, ix: u64, first: bool, second: bool) { - self.enum_tag.set(ix * 2, first); - self.enum_tag.set(ix * 2 + 1, second); + pub(crate) fn set_bits(&mut self, ix: u64, first: bool, second: bool) { + self.enum_tag_first_bit.set(ix, first); + if self.enum_tag_second_bit.is_empty() { + if !second { + // enum_tag_second_bit can remain empty. + // The first time someone sets the second bit, we have to allocate and check it. 
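+ // For reference, the tag values from OccupiedEnumTag further down: Free = 0b00, ZeroSlots = 0b01,
+ // OneSlotInIndex = 0b10, MultipleSlots = 0b11, with `first` holding the high bit. The common states,
+ // Free (0b00) and OneSlotInIndex (0b10), both leave the low bit zero, which is why
+ // enum_tag_second_bit can stay unallocated until an uncommon tag appears.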
+ return; + } + self.enum_tag_second_bit = BitVec::new_fill(false, self.capacity); + } + self.enum_tag_second_bit.set(ix, second); } /// get the 2 bits (first and second) in `enum_tag` fn get_bits(&self, ix: u64) -> (bool, bool) { - (self.enum_tag.get(ix * 2), self.enum_tag.get(ix * 2 + 1)) + ( + self.enum_tag_first_bit.get(ix), + if self.enum_tag_second_bit.is_empty() { + false + } else { + self.enum_tag_second_bit.get(ix) + }, + ) } /// turn the tag into bits and store them fn set_enum_tag(&mut self, ix: u64, value: OccupiedEnumTag) { @@ -115,7 +132,6 @@ impl<T: 'static> IndexBucketUsingBitVecBits<T> { impl<T: 'static> BucketOccupied for IndexBucketUsingBitVecBits<T> { fn occupy(&mut self, element: &mut [u8], ix: usize) { assert!(self.is_free(element, ix)); - self.set_enum_tag(ix as u64, OccupiedEnumTag::ZeroSlots); } fn free(&mut self, element: &mut [u8], ix: usize) { assert!(!self.is_free(element, ix)); @@ -130,8 +146,10 @@ impl<T: 'static> BucketOccupied for IndexBucketUsingBitVecBits<T> } fn new(capacity: Capacity) -> Self { Self { - // note: twice as many bits allocated as `num_elements` because we store 2 bits per element - enum_tag: BitVec::new_fill(false, capacity.capacity() * 2), + // up to 2 bits per element + // 1 bit per element in the ideal case, so don't allocate the 2nd bits until necessary + enum_tag_first_bit: BitVec::new_fill(false, capacity.capacity()), + enum_tag_second_bit: BitVec::new(), capacity: capacity.capacity(), _phantom: PhantomData, } } @@ -295,6 +313,7 @@ enum OccupiedEnumTag { #[default] Free = 0, ZeroSlots = 1, + /// this should be value 2 so that we can store Free and OneSlotInIndex in only 1 bit. These are the primary states. OneSlotInIndex = 2, MultipleSlots = 3, } @@ -382,7 +401,6 @@ impl<T: 'static> IndexEntryPlaceInBucket<T> { } pub fn init(&self, index_bucket: &mut BucketStorage<IndexBucket<T>>, pubkey: &Pubkey) { - self.set_slot_count_enum_value(index_bucket, OccupiedEnum::ZeroSlots); let index_entry = index_bucket.get_mut::<IndexEntry<T>>(self.ix); index_entry.key = *pubkey; } diff --git a/builtins-default-costs/Cargo.toml b/builtins-default-costs/Cargo.toml new file mode 100644 index 00000000000000..4b2c5bad2f33e4 --- /dev/null +++ b/builtins-default-costs/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "solana-builtins-default-costs" +description = "Solana builtins default costs" +documentation = "https://docs.rs/solana-builtins-default-costs" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +ahash = { workspace = true } +lazy_static = { workspace = true } +log = { workspace = true } +solana-address-lookup-table-program = { workspace = true } +solana-bpf-loader-program = { workspace = true } +solana-compute-budget-program = { workspace = true } +solana-config-program = { workspace = true } +solana-frozen-abi = { workspace = true, optional = true } +solana-loader-v4-program = { workspace = true } +solana-sdk = { workspace = true } +solana-stake-program = { workspace = true } +solana-system-program = { workspace = true } +solana-vote-program = { workspace = true } +# Add additional builtin programs here + +[lib] +crate-type = ["lib"] +name = "solana_builtins_default_costs" + +[dev-dependencies] +rand = "0.8.5" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[build-dependencies] +rustc_version = { workspace = true } + +[features] +frozen-abi = [ + "dep:solana-frozen-abi", + "solana-vote-program/frozen-abi", ] diff --git 
a/builtins-default-costs/benches/builtin_instruction_costs.rs b/builtins-default-costs/benches/builtin_instruction_costs.rs new file mode 100644 index 00000000000000..04443655c01300 --- /dev/null +++ b/builtins-default-costs/benches/builtin_instruction_costs.rs @@ -0,0 +1,55 @@ +#![feature(test)] +extern crate test; +use { + rand::Rng, + solana_builtins_default_costs::BUILTIN_INSTRUCTION_COSTS, + solana_sdk::{ + address_lookup_table, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, + compute_budget, ed25519_program, loader_v4, pubkey::Pubkey, secp256k1_program, + }, + test::Bencher, +}; + +struct BenchSetup { + pubkeys: [Pubkey; 12], +} + +const NUM_TRANSACTIONS_PER_ITER: usize = 1024; +const DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT: u32 = 200_000; + +fn setup() -> BenchSetup { + let pubkeys: [Pubkey; 12] = [ + solana_stake_program::id(), + solana_config_program::id(), + solana_vote_program::id(), + solana_system_program::id(), + compute_budget::id(), + address_lookup_table::program::id(), + bpf_loader_upgradeable::id(), + bpf_loader_deprecated::id(), + bpf_loader::id(), + loader_v4::id(), + secp256k1_program::id(), + ed25519_program::id(), + ]; + + BenchSetup { pubkeys } +} + +#[bench] +fn bench_hash_find(bencher: &mut Bencher) { + let BenchSetup { pubkeys } = setup(); + + bencher.iter(|| { + for _t in 0..NUM_TRANSACTIONS_PER_ITER { + let idx = rand::thread_rng().gen_range(0..pubkeys.len()); + let ix_execution_cost = + if let Some(builtin_cost) = BUILTIN_INSTRUCTION_COSTS.get(&pubkeys[idx]) { + *builtin_cost + } else { + u64::from(DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT) + }; + assert!(ix_execution_cost != DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT as u64); + } + }); +} diff --git a/builtins-default-costs/build.rs b/builtins-default-costs/build.rs new file mode 120000 index 00000000000000..ae66c237c5f4fd --- /dev/null +++ b/builtins-default-costs/build.rs @@ -0,0 +1 @@ +../frozen-abi/build.rs \ No newline at end of file diff --git a/builtins-default-costs/src/lib.rs b/builtins-default-costs/src/lib.rs new file mode 100644 index 00000000000000..71659c23fd3b63 --- /dev/null +++ b/builtins-default-costs/src/lib.rs @@ -0,0 +1,33 @@ +#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] +#![allow(clippy::arithmetic_side_effects)] +use { + ahash::AHashMap, + lazy_static::lazy_static, + solana_sdk::{ + address_lookup_table, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, + compute_budget, ed25519_program, loader_v4, pubkey::Pubkey, secp256k1_program, + }, +}; + +// Number of compute units for each built-in programs +lazy_static! 
{ + /// Number of compute units for each built-in program + pub static ref BUILTIN_INSTRUCTION_COSTS: AHashMap<Pubkey, u64> = [ + (solana_stake_program::id(), solana_stake_program::stake_instruction::DEFAULT_COMPUTE_UNITS), + (solana_config_program::id(), solana_config_program::config_processor::DEFAULT_COMPUTE_UNITS), + (solana_vote_program::id(), solana_vote_program::vote_processor::DEFAULT_COMPUTE_UNITS), + (solana_system_program::id(), solana_system_program::system_processor::DEFAULT_COMPUTE_UNITS), + (compute_budget::id(), solana_compute_budget_program::DEFAULT_COMPUTE_UNITS), + (address_lookup_table::program::id(), solana_address_lookup_table_program::processor::DEFAULT_COMPUTE_UNITS), + (bpf_loader_upgradeable::id(), solana_bpf_loader_program::UPGRADEABLE_LOADER_COMPUTE_UNITS), + (bpf_loader_deprecated::id(), solana_bpf_loader_program::DEPRECATED_LOADER_COMPUTE_UNITS), + (bpf_loader::id(), solana_bpf_loader_program::DEFAULT_LOADER_COMPUTE_UNITS), + (loader_v4::id(), solana_loader_v4_program::DEFAULT_COMPUTE_UNITS), + // Note: These are precompiles, run directly in the bank during sanitizing; + (secp256k1_program::id(), 0), + (ed25519_program::id(), 0), + ] + .iter() + .cloned() + .collect(); +} diff --git a/cargo-build-bpf b/cargo-build-bpf deleted file mode 100755 index 0ef191aa1dadbf..00000000000000 --- a/cargo-build-bpf +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash - -here=$(dirname "$0") - -maybe_bpf_sdk="--bpf-sdk $here/sdk/bpf" -for a in "$@"; do - if [[ $a = --bpf-sdk ]]; then - maybe_bpf_sdk= - fi -done - -set -ex -if [[ ! -f "$here"/sdk/bpf/syscalls.txt ]]; then - cargo build --manifest-path "$here"/programs/bpf_loader/gen-syscall-list/Cargo.toml -fi -if [[ ! -f "$here"/target/debug/cargo-build-sbf ]]; then - cargo build --manifest-path "$here"/sdk/cargo-build-sbf/Cargo.toml -fi -exec cargo run --manifest-path "$here"/sdk/cargo-build-bpf/Cargo.toml -- $maybe_bpf_sdk "$@" diff --git a/cargo-test-bpf b/cargo-test-bpf deleted file mode 100755 index 78b5a3673930fd..00000000000000 --- a/cargo-test-bpf +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash - -here=$(dirname "$0") - -maybe_bpf_sdk="--bpf-sdk $here/sdk/bpf" -for a in "$@"; do - if [[ $a = --bpf-sdk ]]; then - maybe_bpf_sdk= - fi -done - -export CARGO_BUILD_BPF="$here"/cargo-build-bpf -set -x -if [[ ! -f "$here"/target/debug/cargo-build-sbf ]]; then - cargo build --manifest-path "$here"/sdk/cargo-build-sbf/Cargo.toml -fi -if [[ ! -f "$here"/target/debug/cargo-test-sbf ]]; then - cargo build --manifest-path "$here"/sdk/cargo-test-sbf/Cargo.toml -fi -exec cargo run --manifest-path "$here"/sdk/cargo-test-bpf/Cargo.toml -- $maybe_bpf_sdk "$@" diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index e46eb0ab4acee3..4fc27feeac69e1 100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -14,12 +14,48 @@ if [[ -n $CI_PULL_REQUEST ]]; then pr_number=${BASH_REMATCH[1]} echo "get affected files from PR: $pr_number" - # Fetch the number of commits in the PR - commits_no=$(gh pr view "$pr_number" --json commits --jq '.commits | length') - echo "number of commits in this PR: $commits_no" + if [[ $BUILDKITE_REPO =~ ^https:\/\/github\.com\/([^\/]+)\/([^\/\.]+) ]]; then + owner="${BASH_REMATCH[1]}" + repo="${BASH_REMATCH[2]}" + elif [[ $BUILDKITE_REPO =~ ^git@github\.com:([^\/]+)\/([^\/\.]+) ]]; then + owner="${BASH_REMATCH[1]}" + repo="${BASH_REMATCH[2]}" + else + echo "couldn't parse owner and repo. 
use defaults" + owner="anza-xyz" + repo="agave" + fi + + # ref: https://github.com/cli/cli/issues/5368#issuecomment-1087515074 + # + # Variable value contains dollar prefixed words that look like bash variable + # references. This is intentional. + # shellcheck disable=SC2016 + query=' + query($owner: String!, $repo: String!, $pr: Int!, $endCursor: String) { + repository(owner: $owner, name: $repo) { + pullRequest(number: $pr) { + files(first: 100, after: $endCursor) { + pageInfo{ hasNextPage, endCursor } + nodes { + path + } + } + } + } + }' # get affected files - readarray -t affected_files < <(git diff HEAD~"$commits_no"..HEAD --name-status | cut -f2) + readarray -t affected_files < <( + gh api graphql \ + -f query="$query" \ + -F pr="$pr_number" \ + -F owner="$owner" \ + -F repo="$repo" \ + --paginate \ + --jq '.data.repository.pullRequest.files.nodes.[].path' + ) + if [[ ${#affected_files[*]} -eq 0 ]]; then echo "Unable to determine the files affected by this PR" exit 1 @@ -148,6 +184,7 @@ all_test_steps() { command_step checks2 "ci/docker-run-default-image.sh ci/test-dev-context-only-utils.sh check-bins" 15 check command_step checks3 "ci/docker-run-default-image.sh ci/test-dev-context-only-utils.sh check-all-targets" 15 check command_step miri "ci/docker-run-default-image.sh ci/test-miri.sh" 5 check + command_step frozen-abi "ci/docker-run-default-image.sh ./test-abi.sh" 15 check wait_step # Full test suite @@ -181,8 +218,6 @@ all_test_steps() { ^fetch-perf-libs.sh \ ^programs/ \ ^sdk/ \ - cargo-build-bpf$ \ - cargo-test-bpf$ \ cargo-build-sbf$ \ cargo-test-sbf$ \ ; then @@ -199,6 +234,19 @@ EOF "Stable-SBF skipped as no relevant files were modified" fi + # Shuttle tests + if affects \ + .rs$ \ + Cargo.lock$ \ + Cargo.toml$ \ + ^ci/rust-version.sh \ + ; then + command_step shuttle "ci/docker-run-default-image.sh ci/test-shuttle.sh" 10 + else + annotate --style info \ + "test-shuttle skipped as no relevant files were modified" + fi + # Downstream backwards compatibility if affects \ .rs$ \ @@ -212,8 +260,6 @@ EOF ^fetch-perf-libs.sh \ ^programs/ \ ^sdk/ \ - cargo-build-bpf$ \ - cargo-test-bpf$ \ cargo-build-sbf$ \ cargo-test-sbf$ \ ^ci/downstream-projects \ diff --git a/ci/nits.sh b/ci/nits.sh index 39e20f57cabced..856a4d323cddf0 100755 --- a/ci/nits.sh +++ b/ci/nits.sh @@ -28,6 +28,7 @@ declare print_free_tree=( ':sdk/sbf/rust/rust-utils/**.rs' ':sdk/**.rs' ':^sdk/cargo-build-sbf/**.rs' + ':^sdk/msg/src/lib.rs' ':^sdk/program/src/program_option.rs' ':^sdk/program/src/program_stubs.rs' ':programs/**.rs' diff --git a/ci/test-checks.sh b/ci/test-checks.sh index a2022204df7ad4..873c3a9469e497 100755 --- a/ci/test-checks.sh +++ b/ci/test-checks.sh @@ -54,7 +54,8 @@ export RUSTFLAGS="-D warnings -A incomplete_features" # Only force up-to-date lock files on edge if [[ $CI_BASE_BRANCH = "$EDGE_CHANNEL" ]]; then - if _ scripts/cargo-for-all-lock-files.sh "+${rust_nightly}" check --locked --workspace --all-targets --features dummy-for-ci-check; then + if _ scripts/cargo-for-all-lock-files.sh "+${rust_nightly}" check \ + --locked --workspace --all-targets --features dummy-for-ci-check,frozen-abi; then true else check_status=$? 
diff --git a/ci/test-shuttle.sh b/ci/test-shuttle.sh
new file mode 100755
index 00000000000000..f51c2475afdab7
--- /dev/null
+++ b/ci/test-shuttle.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+set -eo pipefail
+
+source ci/_
+
+cargo nextest run --profile ci --config-file ./nextest.toml --manifest-path="svm/Cargo.toml" --features="shuttle-test" --test concurrent_tests --jobs 1
\ No newline at end of file
diff --git a/cli/Cargo.toml b/cli/Cargo.toml
index 22a0172458adf1..1e05e67eba420e 100644
--- a/cli/Cargo.toml
+++ b/cli/Cargo.toml
@@ -36,7 +36,8 @@ solana-cli-output = { workspace = true }
 solana-client = { workspace = true }
 solana-compute-budget = { workspace = true }
 solana-config-program = { workspace = true }
-solana-faucet = { workspace = true }
+solana-connection-cache = { workspace = true }
+solana-decode-error = { workspace = true }
 solana-loader-v4-program = { workspace = true }
 solana-logger = { workspace = true }
 solana-program-runtime = { workspace = true }
@@ -45,7 +46,7 @@ solana-quic-client = { workspace = true }
 solana-remote-wallet = { workspace = true, features = ["default"] }
 solana-rpc-client = { workspace = true, features = ["default"] }
 solana-rpc-client-api = { workspace = true }
-solana-rpc-client-nonce-utils = { workspace = true }
+solana-rpc-client-nonce-utils = { workspace = true, features = ["clap"] }
 solana-sdk = { workspace = true }
 solana-streamer = { workspace = true }
 solana-tps-client = { workspace = true }
@@ -61,6 +62,7 @@ tiny-bip39 = { workspace = true }
 [dev-dependencies]
 assert_matches = { workspace = true }
+solana-faucet = { workspace = true }
 solana-rpc = { workspace = true }
 solana-streamer = { workspace = true }
 solana-test-validator = { workspace = true }
diff --git a/cli/src/cli.rs b/cli/src/cli.rs
index af1b84493298fa..643782e418b161 100644
--- a/cli/src/cli.rs
+++ b/cli/src/cli.rs
@@ -13,6 +13,7 @@ use {
 display::println_name_value, CliSignature, CliValidatorsSortOrder, OutputFormat,
 },
 solana_client::connection_cache::ConnectionCache,
+ solana_decode_error::DecodeError,
 solana_remote_wallet::remote_wallet::RemoteWalletManager,
 solana_rpc_client::rpc_client::RpcClient,
 solana_rpc_client_api::{
@@ -23,7 +24,6 @@ use {
 solana_sdk::{
 clock::{Epoch, Slot},
 commitment_config::CommitmentConfig,
- decode_error::DecodeError,
 hash::Hash,
 instruction::InstructionError,
 offchain_message::OffchainMessage,
@@ -229,7 +229,6 @@ pub enum CliCommand {
 nonce_authority: SignerIndex,
 memo: Option<String>,
 fee_payer: SignerIndex,
- redelegation_stake_account: Option<SignerIndex>,
 compute_unit_price: Option<u64>,
 },
 SplitStake {
@@ -718,8 +717,10 @@ pub fn parse_command(
 ("delegate-stake", Some(matches)) => {
 parse_stake_delegate_stake(matches, default_signer, wallet_manager)
 }
- ("redelegate-stake", Some(matches)) => {
- parse_stake_delegate_stake(matches, default_signer, wallet_manager)
+ ("redelegate-stake", _) => {
+ Err(CliError::CommandNotRecognized(
+ "`redelegate-stake` no longer exists and will be completely removed in a future release".to_string(),
+ ))
 }
 ("withdraw-stake", Some(matches)) => {
 parse_stake_withdraw_stake(matches, default_signer, wallet_manager)
@@ -1242,7 +1243,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
 nonce_authority,
 memo,
 fee_payer,
- redelegation_stake_account,
 compute_unit_price,
 } => process_delegate_stake(
 &rpc_client,
@@ -1258,7 +1258,6 @@ pub fn process_command(config: &CliConfig) -> ProcessResult {
 *nonce_authority,
 memo.as_ref(),
 *fee_payer,
- *redelegation_stake_account,
 *compute_unit_price,
 ),
 CliCommand::SplitStake {
diff
--git a/cli/src/compute_budget.rs b/cli/src/compute_budget.rs index a994607d91efea..71373dca462992 100644 --- a/cli/src/compute_budget.rs +++ b/cli/src/compute_budget.rs @@ -1,8 +1,8 @@ use { solana_clap_utils::compute_budget::ComputeUnitLimit, - solana_client::rpc_config::RpcSimulateTransactionConfig, - solana_compute_budget::compute_budget_processor::MAX_COMPUTE_UNIT_LIMIT, + solana_compute_budget::compute_budget_limits::MAX_COMPUTE_UNIT_LIMIT, solana_rpc_client::rpc_client::RpcClient, + solana_rpc_client_api::config::RpcSimulateTransactionConfig, solana_sdk::{ borsh1::try_from_slice_unchecked, compute_budget::{self, ComputeBudgetInstruction}, diff --git a/cli/src/stake.rs b/cli/src/stake.rs index 3c94ffb9507903..0e32a6216935a0 100644 --- a/cli/src/stake.rs +++ b/cli/src/stake.rs @@ -11,7 +11,7 @@ use { nonce::check_nonce_account, spend_utils::{resolve_spend_tx_and_check_account_balances, SpendAmount}, }, - clap::{value_t, App, Arg, ArgGroup, ArgMatches, SubCommand}, + clap::{value_t, App, AppSettings, Arg, ArgGroup, ArgMatches, SubCommand}, solana_clap_utils::{ compute_budget::{compute_unit_price_arg, ComputeUnitLimit, COMPUTE_UNIT_PRICE_ARG}, fee_payer::{fee_payer_arg, FEE_PAYER_ARG}, @@ -328,49 +328,13 @@ impl StakeSubCommands for App<'_, '_> { ) .subcommand( SubCommand::with_name("redelegate-stake") - .about("Redelegate active stake to another vote account") + .setting(AppSettings::Hidden) .arg( - Arg::with_name("force") - .long("force") - .takes_value(false) - .hidden(hidden_unless_forced()) // Don't document this argument to discourage its use - .help("Override vote account sanity checks (use carefully!)"), - ) - .arg(pubkey!( - Arg::with_name("stake_account_pubkey") - .index(1) - .value_name("STAKE_ACCOUNT_ADDRESS") - .required(true), - "Existing delegated stake account that has been fully activated. On success \ - this stake account will be scheduled for deactivation and the rent-exempt \ - balance may be withdrawn once fully deactivated." - )) - .arg(pubkey!( - Arg::with_name("vote_account_pubkey") - .index(2) - .value_name("REDELEGATED_VOTE_ACCOUNT_ADDRESS") - .required(true), - "Vote account to which the stake will be redelegated." - )) - .arg( - Arg::with_name("redelegation_stake_account") - .index(3) - .value_name("REDELEGATION_STAKE_ACCOUNT") - .takes_value(true) - .required(true) - .validator(is_valid_signer) - .help( - "Stake account to create for the redelegation. 
On success this stake \ - account will be created and scheduled for activation with all the \ - stake in the existing stake account, exclusive of the rent-exempt \ - balance retained in the existing account", - ), - ) - .arg(stake_authority_arg()) - .offline_args() - .nonce_args(false) - .arg(fee_payer_arg()) - .arg(memo_arg()), + // Consume all positional arguments + Arg::with_name("arg") + .multiple(true) + .hidden(hidden_unless_forced()), + ), ) .subcommand( SubCommand::with_name("stake-authorize") @@ -911,8 +875,6 @@ pub fn parse_stake_delegate_stake( pubkey_of_signer(matches, "stake_account_pubkey", wallet_manager)?.unwrap(); let vote_account_pubkey = pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap(); - let (redelegation_stake_account, redelegation_stake_account_pubkey) = - signer_of(matches, "redelegation_stake_account", wallet_manager)?; let force = matches.is_present("force"); let sign_only = matches.is_present(SIGN_ONLY_ARG.name); let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); @@ -929,9 +891,6 @@ pub fn parse_stake_delegate_stake( if nonce_account.is_some() { bulk_signers.push(nonce_authority); } - if redelegation_stake_account.is_some() { - bulk_signers.push(redelegation_stake_account); - } let signer_info = default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; let compute_unit_price = value_of(matches, COMPUTE_UNIT_PRICE_ARG.name); @@ -949,8 +908,6 @@ pub fn parse_stake_delegate_stake( nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), memo, fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(), - redelegation_stake_account: redelegation_stake_account_pubkey - .and_then(|_| signer_info.index_of(redelegation_stake_account_pubkey)), compute_unit_price, }, signers: signer_info.signers, @@ -2681,30 +2638,12 @@ pub fn process_delegate_stake( nonce_authority: SignerIndex, memo: Option<&String>, fee_payer: SignerIndex, - redelegation_stake_account: Option, compute_unit_price: Option, ) -> ProcessResult { check_unique_pubkeys( (&config.signers[0].pubkey(), "cli keypair".to_string()), (stake_account_pubkey, "stake_account_pubkey".to_string()), )?; - let redelegation_stake_account = redelegation_stake_account.map(|index| config.signers[index]); - if let Some(redelegation_stake_account) = &redelegation_stake_account { - check_unique_pubkeys( - (stake_account_pubkey, "stake_account_pubkey".to_string()), - ( - &redelegation_stake_account.pubkey(), - "redelegation_stake_account".to_string(), - ), - )?; - check_unique_pubkeys( - (&config.signers[0].pubkey(), "cli keypair".to_string()), - ( - &redelegation_stake_account.pubkey(), - "redelegation_stake_account".to_string(), - ), - )?; - } let stake_authority = config.signers[stake_authority]; if !sign_only { @@ -2758,20 +2697,11 @@ pub fn process_delegate_stake( let recent_blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; - let ixs = if let Some(redelegation_stake_account) = &redelegation_stake_account { - stake_instruction::redelegate( - stake_account_pubkey, - &stake_authority.pubkey(), - vote_account_pubkey, - &redelegation_stake_account.pubkey(), - ) - } else { - vec![stake_instruction::delegate_stake( - stake_account_pubkey, - &stake_authority.pubkey(), - vote_account_pubkey, - )] - } + let ixs = vec![stake_instruction::delegate_stake( + stake_account_pubkey, + &stake_authority.pubkey(), + vote_account_pubkey, + )] .with_memo(memo) .with_compute_unit_config(&ComputeUnitConfig { compute_unit_price, @@ 
-4173,7 +4103,6 @@ mod tests { nonce_authority: 0, memo: None, fee_payer: 0, - redelegation_stake_account: None, compute_unit_price: None, }, signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], @@ -4206,7 +4135,6 @@ mod tests { nonce_authority: 0, memo: None, fee_payer: 0, - redelegation_stake_account: None, compute_unit_price: None, }, signers: vec![ @@ -4239,7 +4167,6 @@ mod tests { nonce_authority: 0, memo: None, fee_payer: 0, - redelegation_stake_account: None, compute_unit_price: None, }, signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], @@ -4275,7 +4202,6 @@ mod tests { nonce_authority: 0, memo: None, fee_payer: 0, - redelegation_stake_account: None, compute_unit_price: None, }, signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], @@ -4306,7 +4232,6 @@ mod tests { nonce_authority: 0, memo: None, fee_payer: 0, - redelegation_stake_account: None, compute_unit_price: None, }, signers: vec![Box::new(read_keypair_file(&default_keypair_file).unwrap())], @@ -4347,7 +4272,6 @@ mod tests { nonce_authority: 0, memo: None, fee_payer: 1, - redelegation_stake_account: None, compute_unit_price: None, }, signers: vec![ @@ -4397,7 +4321,6 @@ mod tests { nonce_authority: 2, memo: None, fee_payer: 1, - redelegation_stake_account: None, compute_unit_price: None, }, signers: vec![ @@ -4435,7 +4358,6 @@ mod tests { nonce_authority: 0, memo: None, fee_payer: 1, - redelegation_stake_account: None, compute_unit_price: None, }, signers: vec![ @@ -4445,49 +4367,6 @@ mod tests { } ); - // Test RedelegateStake Subcommand (minimal test due to the significant implementation - // overlap with DelegateStake) - let (redelegation_stake_account_keypair_file, mut redelegation_stake_account_tmp_file) = - make_tmp_file(); - let redelegation_stake_account_keypair = Keypair::new(); - write_keypair( - &redelegation_stake_account_keypair, - redelegation_stake_account_tmp_file.as_file_mut(), - ) - .unwrap(); - - let test_redelegate_stake = test_commands.clone().get_matches_from(vec![ - "test", - "redelegate-stake", - &stake_account_string, - &vote_account_string, - &redelegation_stake_account_keypair_file, - ]); - assert_eq!( - parse_command(&test_redelegate_stake, &default_signer, &mut None).unwrap(), - CliCommandInfo { - command: CliCommand::DelegateStake { - stake_account_pubkey, - vote_account_pubkey, - stake_authority: 0, - force: false, - sign_only: false, - dump_transaction_message: false, - blockhash_query: BlockhashQuery::default(), - nonce_account: None, - nonce_authority: 0, - memo: None, - fee_payer: 0, - redelegation_stake_account: Some(1), - compute_unit_price: None, - }, - signers: vec![ - Box::new(read_keypair_file(&default_keypair_file).unwrap()), - Box::new(read_keypair_file(&redelegation_stake_account_keypair_file).unwrap()) - ], - } - ); - // Test WithdrawStake Subcommand let test_withdraw_stake = test_commands.clone().get_matches_from(vec![ "test", diff --git a/cli/tests/program.rs b/cli/tests/program.rs index 45d243c5babcd2..6bec3bcc28b36f 100644 --- a/cli/tests/program.rs +++ b/cli/tests/program.rs @@ -11,12 +11,10 @@ use { test_utils::wait_n_slots, }, solana_cli_output::{parse_sign_only_reply_string, OutputFormat}, - solana_client::{ - rpc_client::GetConfirmedSignaturesForAddress2Config, rpc_config::RpcTransactionConfig, - }, solana_faucet::faucet::run_local_faucet, solana_rpc::rpc::JsonRpcConfig, - solana_rpc_client::rpc_client::RpcClient, + solana_rpc_client::rpc_client::{GetConfirmedSignaturesForAddress2Config, RpcClient}, + 
solana_rpc_client_api::config::RpcTransactionConfig, solana_rpc_client_nonce_utils::blockhash_query::BlockhashQuery, solana_sdk::{ account::ReadableAccount, diff --git a/cli/tests/stake.rs b/cli/tests/stake.rs index 3aeeb47d9c0b8a..bcc5f6bb7b39ef 100644 --- a/cli/tests/stake.rs +++ b/cli/tests/stake.rs @@ -11,9 +11,7 @@ use { solana_cli_output::{parse_sign_only_reply_string, OutputFormat}, solana_faucet::faucet::run_local_faucet, solana_rpc_client::rpc_client::RpcClient, - solana_rpc_client_api::{ - request::DELINQUENT_VALIDATOR_SLOT_DISTANCE, response::StakeActivationState, - }, + solana_rpc_client_api::request::DELINQUENT_VALIDATOR_SLOT_DISTANCE, solana_rpc_client_nonce_utils::blockhash_query::{self, BlockhashQuery}, solana_sdk::{ account_utils::StateMut, @@ -28,276 +26,13 @@ use { stake::{ self, instruction::LockupArgs, - state::{Delegation, Lockup, StakeActivationStatus, StakeAuthorize, StakeStateV2}, + state::{Lockup, StakeAuthorize, StakeStateV2}, }, - sysvar::stake_history::{self, StakeHistory}, }, solana_streamer::socket::SocketAddrSpace, solana_test_validator::{TestValidator, TestValidatorGenesis}, }; -#[test] -fn test_stake_redelegation() { - let mint_keypair = Keypair::new(); - let mint_pubkey = mint_keypair.pubkey(); - let authorized_withdrawer = Keypair::new().pubkey(); - let faucet_addr = run_local_faucet(mint_keypair, None); - - let slots_per_epoch = 32; - let test_validator = TestValidatorGenesis::default() - .fee_rate_governor(FeeRateGovernor::new(0, 0)) - .rent(Rent { - lamports_per_byte_year: 1, - exemption_threshold: 1.0, - ..Rent::default() - }) - .epoch_schedule(EpochSchedule::custom( - slots_per_epoch, - slots_per_epoch, - /* enable_warmup_epochs = */ false, - )) - .faucet_addr(Some(faucet_addr)) - .start_with_mint_address(mint_pubkey, SocketAddrSpace::Unspecified) - .expect("validator start failed"); - - let rpc_client = - RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); - let default_signer = Keypair::new(); - - let mut config = CliConfig::recent_for_tests(); - config.json_rpc_url = test_validator.rpc_url(); - config.signers = vec![&default_signer]; - - request_and_confirm_airdrop( - &rpc_client, - &config, - &config.signers[0].pubkey(), - 100_000_000_000, - ) - .unwrap(); - - // Create vote account - let vote_keypair = Keypair::new(); - config.signers = vec![&default_signer, &vote_keypair]; - config.command = CliCommand::CreateVoteAccount { - vote_account: 1, - seed: None, - identity_account: 0, - authorized_voter: None, - authorized_withdrawer, - commission: 0, - sign_only: false, - dump_transaction_message: false, - blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), - nonce_account: None, - nonce_authority: 0, - memo: None, - fee_payer: 0, - compute_unit_price: None, - }; - process_command(&config).unwrap(); - - // Create second vote account - let vote2_keypair = Keypair::new(); - config.signers = vec![&default_signer, &vote2_keypair]; - config.command = CliCommand::CreateVoteAccount { - vote_account: 1, - seed: None, - identity_account: 0, - authorized_voter: None, - authorized_withdrawer, - commission: 0, - sign_only: false, - dump_transaction_message: false, - blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), - nonce_account: None, - nonce_authority: 0, - memo: None, - fee_payer: 0, - compute_unit_price: None, - }; - process_command(&config).unwrap(); - - // Create stake account - let stake_keypair = Keypair::new(); - config.signers = vec![&default_signer, &stake_keypair]; - 
config.command = CliCommand::CreateStakeAccount { - stake_account: 1, - seed: None, - staker: None, - withdrawer: None, - withdrawer_signer: None, - lockup: Lockup::default(), - amount: SpendAmount::Some(50_000_000_000), - sign_only: false, - dump_transaction_message: false, - blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), - nonce_account: None, - nonce_authority: 0, - memo: None, - fee_payer: 0, - from: 0, - compute_unit_price: None, - }; - process_command(&config).unwrap(); - - // Delegate stake to `vote_keypair` - config.signers = vec![&default_signer]; - config.command = CliCommand::DelegateStake { - stake_account_pubkey: stake_keypair.pubkey(), - vote_account_pubkey: vote_keypair.pubkey(), - stake_authority: 0, - force: true, - sign_only: false, - dump_transaction_message: false, - blockhash_query: BlockhashQuery::default(), - nonce_account: None, - nonce_authority: 0, - memo: None, - fee_payer: 0, - redelegation_stake_account: None, - compute_unit_price: None, - }; - process_command(&config).unwrap(); - - // wait for new epoch plus one additional slot for rewards payout - wait_for_next_epoch_plus_n_slots(&rpc_client, 1); - - let check_activation_status = |delegation: &Delegation, - expected_state: StakeActivationState, - expected_active_stake: u64| { - let stake_history_account = rpc_client.get_account(&stake_history::id()).unwrap(); - let stake_history: StakeHistory = - solana_sdk::account::from_account(&stake_history_account).unwrap(); - let current_epoch = rpc_client.get_epoch_info().unwrap().epoch; - let StakeActivationStatus { - effective, - activating, - deactivating, - } = delegation.stake_activating_and_deactivating(current_epoch, &stake_history, None); - let stake_activation_state = if deactivating > 0 { - StakeActivationState::Deactivating - } else if activating > 0 { - StakeActivationState::Activating - } else if effective > 0 { - StakeActivationState::Active - } else { - StakeActivationState::Inactive - }; - assert_eq!(stake_activation_state, expected_state); - assert_eq!(effective, expected_active_stake); - }; - - // `stake_keypair` should now be delegated to `vote_keypair` and fully activated - let stake_account = rpc_client.get_account(&stake_keypair.pubkey()).unwrap(); - let stake_state: StakeStateV2 = stake_account.state().unwrap(); - - let rent_exempt_reserve = match stake_state { - StakeStateV2::Stake(meta, stake, _) => { - assert_eq!(stake.delegation.voter_pubkey, vote_keypair.pubkey()); - check_activation_status( - &stake.delegation, - StakeActivationState::Active, - 50_000_000_000 - meta.rent_exempt_reserve, - ); - meta.rent_exempt_reserve - } - _ => panic!("Unexpected stake state!"), - }; - - check_balance!(50_000_000_000, &rpc_client, &stake_keypair.pubkey()); - - let stake2_keypair = Keypair::new(); - - // Add an extra `rent_exempt_reserve` amount into `stake2_keypair` before redelegation to - // account for the `rent_exempt_reserve` balance that'll be pealed off the stake during the - // redelegation process - request_and_confirm_airdrop( - &rpc_client, - &config, - &stake2_keypair.pubkey(), - rent_exempt_reserve, - ) - .unwrap(); - - // wait for a new epoch to ensure the `Redelegate` happens as soon as possible (i.e. till the - // last reward distribution block in the new epoch) to reduce the risk of a race condition - // when checking the stake account correctly enters the deactivating state for the - // remainder of the current epoch. 
- wait_for_next_epoch_plus_n_slots(&rpc_client, 1); - - // Redelegate to `vote2_keypair` via `stake2_keypair - config.signers = vec![&default_signer, &stake2_keypair]; - config.command = CliCommand::DelegateStake { - stake_account_pubkey: stake_keypair.pubkey(), - vote_account_pubkey: vote2_keypair.pubkey(), - stake_authority: 0, - force: true, - sign_only: false, - dump_transaction_message: false, - blockhash_query: BlockhashQuery::default(), - nonce_account: None, - nonce_authority: 0, - memo: None, - fee_payer: 0, - redelegation_stake_account: Some(1), - compute_unit_price: None, - }; - process_command(&config).unwrap(); - - // `stake_keypair` should now be deactivating - let stake_account = rpc_client.get_account(&stake_keypair.pubkey()).unwrap(); - let stake_state: StakeStateV2 = stake_account.state().unwrap(); - let StakeStateV2::Stake(_, stake, _) = stake_state else { - panic!() - }; - check_activation_status( - &stake.delegation, - StakeActivationState::Deactivating, - 50_000_000_000 - rent_exempt_reserve, - ); - - // `stake_keypair2` should now be activating - let stake_account = rpc_client.get_account(&stake2_keypair.pubkey()).unwrap(); - let stake_state: StakeStateV2 = stake_account.state().unwrap(); - let StakeStateV2::Stake(_, stake, _) = stake_state else { - panic!() - }; - check_activation_status(&stake.delegation, StakeActivationState::Activating, 0); - - // check that all the stake, save `rent_exempt_reserve`, have been moved from `stake_keypair` - // to `stake2_keypair` - check_balance!(rent_exempt_reserve, &rpc_client, &stake_keypair.pubkey()); - check_balance!(50_000_000_000, &rpc_client, &stake2_keypair.pubkey()); - - // wait for new epoch plus reward blocks - wait_for_next_epoch_plus_n_slots(&rpc_client, 1); - - // `stake_keypair` should now be deactivated - let stake_account = rpc_client.get_account(&stake_keypair.pubkey()).unwrap(); - let stake_state: StakeStateV2 = stake_account.state().unwrap(); - let StakeStateV2::Stake(_, stake, _) = stake_state else { - panic!() - }; - check_activation_status(&stake.delegation, StakeActivationState::Inactive, 0); - - // `stake2_keypair` should now be delegated to `vote2_keypair` and fully activated - let stake2_account = rpc_client.get_account(&stake2_keypair.pubkey()).unwrap(); - let stake2_state: StakeStateV2 = stake2_account.state().unwrap(); - - match stake2_state { - StakeStateV2::Stake(meta, stake, _) => { - assert_eq!(stake.delegation.voter_pubkey, vote2_keypair.pubkey()); - check_activation_status( - &stake.delegation, - StakeActivationState::Active, - 50_000_000_000 - meta.rent_exempt_reserve, - ); - } - _ => panic!("Unexpected stake2 state!"), - }; -} - #[test] fn test_stake_delegation_force() { let mint_keypair = Keypair::new(); @@ -396,7 +131,6 @@ fn test_stake_delegation_force() { nonce_authority: 0, memo: None, fee_payer: 0, - redelegation_stake_account: None, compute_unit_price: None, }; process_command(&config).unwrap(); @@ -440,7 +174,6 @@ fn test_stake_delegation_force() { nonce_authority: 0, memo: None, fee_payer: 0, - redelegation_stake_account: None, compute_unit_price: None, }; process_command(&config).unwrap_err(); @@ -458,7 +191,6 @@ fn test_stake_delegation_force() { nonce_authority: 0, memo: None, fee_payer: 0, - redelegation_stake_account: None, compute_unit_price: None, }; process_command(&config).unwrap(); @@ -537,7 +269,6 @@ fn test_seed_stake_delegation_and_deactivation() { nonce_authority: 0, memo: None, fee_payer: 0, - redelegation_stake_account: None, compute_unit_price: None, }; 
process_command(&config_validator).unwrap(); @@ -629,7 +360,6 @@ fn test_stake_delegation_and_deactivation() { nonce_authority: 0, memo: None, fee_payer: 0, - redelegation_stake_account: None, compute_unit_price: None, }; process_command(&config_validator).unwrap(); @@ -745,7 +475,6 @@ fn test_offline_stake_delegation_and_deactivation() { nonce_authority: 0, memo: None, fee_payer: 0, - redelegation_stake_account: None, compute_unit_price: None, }; config_offline.output_format = OutputFormat::JsonCompact; @@ -768,7 +497,6 @@ fn test_offline_stake_delegation_and_deactivation() { nonce_authority: 0, memo: None, fee_payer: 0, - redelegation_stake_account: None, compute_unit_price: None, }; process_command(&config_payer).unwrap(); @@ -906,7 +634,6 @@ fn test_nonced_stake_delegation_and_deactivation() { nonce_authority: 0, memo: None, fee_payer: 0, - redelegation_stake_account: None, compute_unit_price: None, }; process_command(&config).unwrap(); diff --git a/client/src/connection_cache.rs b/client/src/connection_cache.rs index 1e8c88e8bdbf8c..4d3e49ff86ab41 100644 --- a/client/src/connection_cache.rs +++ b/client/src/connection_cache.rs @@ -16,7 +16,6 @@ use { solana_streamer::streamer::StakedNodes, solana_udp_client::{UdpConfig, UdpConnectionManager, UdpPool}, std::{ - error::Error, net::{IpAddr, Ipv4Addr, SocketAddr}, sync::{Arc, RwLock}, }, @@ -110,29 +109,6 @@ impl ConnectionCache { } } - #[deprecated( - since = "1.15.0", - note = "This method does not do anything. Please use `new_with_client_options` instead to set the client certificate." - )] - pub fn update_client_certificate( - &mut self, - _keypair: &Keypair, - _ipaddr: IpAddr, - ) -> Result<(), Box> { - Ok(()) - } - - #[deprecated( - since = "1.15.0", - note = "This method does not do anything. Please use `new_with_client_options` instead to set staked nodes information." - )] - pub fn set_staked_nodes( - &mut self, - _staked_nodes: &Arc>, - _client_pubkey: &Pubkey, - ) { - } - pub fn with_udp(name: &'static str, connection_pool_size: usize) -> Self { // The minimum pool size is 1. let connection_pool_size = 1.max(connection_pool_size); diff --git a/client/src/lib.rs b/client/src/lib.rs index 89b66cf3b34796..f5e045ff531604 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -2,13 +2,10 @@ pub mod connection_cache; pub mod nonblocking; -pub mod quic_client; pub mod send_and_confirm_transactions_in_parallel; pub mod thin_client; pub mod tpu_client; -pub mod tpu_connection; pub mod transaction_executor; -pub mod udp_client; extern crate solana_metrics; diff --git a/client/src/nonblocking/mod.rs b/client/src/nonblocking/mod.rs index ab11ae5c6782b2..b62618c024b5ca 100644 --- a/client/src/nonblocking/mod.rs +++ b/client/src/nonblocking/mod.rs @@ -1,7 +1,4 @@ -pub mod quic_client; pub mod tpu_client; -pub mod tpu_connection; -pub mod udp_client; pub mod blockhash_query { pub use solana_rpc_client_nonce_utils::nonblocking::blockhash_query::*; diff --git a/client/src/nonblocking/quic_client.rs b/client/src/nonblocking/quic_client.rs deleted file mode 100644 index 28b9649289e2b4..00000000000000 --- a/client/src/nonblocking/quic_client.rs +++ /dev/null @@ -1,8 +0,0 @@ -#[deprecated( - since = "1.15.0", - note = "Please use `solana_quic_client::nonblocking::quic_client::QuicClientConnection` instead." 
-)] -pub use solana_quic_client::nonblocking::quic_client::QuicClientConnection as QuicTpuConnection; -pub use solana_quic_client::nonblocking::quic_client::{ - QuicClient, QuicClientCertificate, QuicLazyInitializedEndpoint, -}; diff --git a/client/src/nonblocking/tpu_connection.rs b/client/src/nonblocking/tpu_connection.rs deleted file mode 100644 index b91a88853310b4..00000000000000 --- a/client/src/nonblocking/tpu_connection.rs +++ /dev/null @@ -1,5 +0,0 @@ -#[deprecated( - since = "1.15.0", - note = "Please use `solana_connection_cache::nonblocking::client_connection::ClientConnection` instead." -)] -pub use solana_connection_cache::nonblocking::client_connection::ClientConnection as TpuConnection; diff --git a/client/src/nonblocking/udp_client.rs b/client/src/nonblocking/udp_client.rs deleted file mode 100644 index e880b1fb107cf8..00000000000000 --- a/client/src/nonblocking/udp_client.rs +++ /dev/null @@ -1,5 +0,0 @@ -#[deprecated( - since = "1.15.0", - note = "Please use `solana_udp_client::nonblocking::udp_client::UdpClientConnection` instead." -)] -pub use solana_udp_client::nonblocking::udp_client::UdpClientConnection as UdpTpuConnection; diff --git a/client/src/quic_client.rs b/client/src/quic_client.rs deleted file mode 100644 index a32aa381cb10ef..00000000000000 --- a/client/src/quic_client.rs +++ /dev/null @@ -1,5 +0,0 @@ -#[deprecated( - since = "1.15.0", - note = "Please use `solana_quic_client::quic_client::QuicClientConnection` instead." -)] -pub use solana_quic_client::quic_client::QuicClientConnection as QuicTpuConnection; diff --git a/client/src/tpu_connection.rs b/client/src/tpu_connection.rs deleted file mode 100644 index 9e000612a51e03..00000000000000 --- a/client/src/tpu_connection.rs +++ /dev/null @@ -1,6 +0,0 @@ -#[deprecated( - since = "1.15.0", - note = "Please use `solana_connection_cache::client_connection::ClientConnection` instead." -)] -pub use solana_connection_cache::client_connection::ClientConnection as TpuConnection; -pub use solana_connection_cache::client_connection::ClientStats; diff --git a/client/src/udp_client.rs b/client/src/udp_client.rs deleted file mode 100644 index c05b74b3640749..00000000000000 --- a/client/src/udp_client.rs +++ /dev/null @@ -1,5 +0,0 @@ -#[deprecated( - since = "1.15.0", - note = "Please use `solana_udp_client::udp_client::UdpClientConnection` instead." 
-)] -pub use solana_udp_client::udp_client::UdpClientConnection as UdpTpuConnection; diff --git a/compute-budget/src/compute_budget.rs b/compute-budget/src/compute_budget.rs index 24eeb46815372d..da04296a7e3080 100644 --- a/compute-budget/src/compute_budget.rs +++ b/compute-budget/src/compute_budget.rs @@ -1,4 +1,4 @@ -use crate::compute_budget_processor::{self, ComputeBudgetLimits, DEFAULT_HEAP_COST}; +use crate::compute_budget_limits::{self, ComputeBudgetLimits, DEFAULT_HEAP_COST}; #[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] impl ::solana_frozen_abi::abi_example::AbiExample for ComputeBudget { @@ -127,7 +127,7 @@ pub struct ComputeBudget { impl Default for ComputeBudget { fn default() -> Self { - Self::new(compute_budget_processor::MAX_COMPUTE_UNIT_LIMIT as u64) + Self::new(compute_budget_limits::MAX_COMPUTE_UNIT_LIMIT as u64) } } diff --git a/compute-budget/src/compute_budget_limits.rs b/compute-budget/src/compute_budget_limits.rs new file mode 100644 index 00000000000000..20731a71430332 --- /dev/null +++ b/compute-budget/src/compute_budget_limits.rs @@ -0,0 +1,54 @@ +use { + crate::prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, + solana_sdk::{entrypoint::HEAP_LENGTH, fee::FeeBudgetLimits}, + std::num::NonZeroU32, +}; + +/// Roughly 0.5us/page, where page is 32K; given roughly 15CU/us, the +/// default heap page cost = 0.5 * 15 ~= 8CU/page +pub const DEFAULT_HEAP_COST: u64 = 8; +pub const DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT: u32 = 200_000; +pub const MAX_COMPUTE_UNIT_LIMIT: u32 = 1_400_000; +pub const MAX_HEAP_FRAME_BYTES: u32 = 256 * 1024; +pub const MIN_HEAP_FRAME_BYTES: u32 = HEAP_LENGTH as u32; + +/// The total accounts data a transaction can load is limited to 64MiB to not break +/// anyone in Mainnet-beta today. 
It can be set by the set_loaded_accounts_data_size_limit instruction.
+pub const MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES: NonZeroU32 =
+ unsafe { NonZeroU32::new_unchecked(64 * 1024 * 1024) };
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub struct ComputeBudgetLimits {
+ pub updated_heap_bytes: u32,
+ pub compute_unit_limit: u32,
+ pub compute_unit_price: u64,
+ pub loaded_accounts_bytes: NonZeroU32,
+}
+
+impl Default for ComputeBudgetLimits {
+ fn default() -> Self {
+ ComputeBudgetLimits {
+ updated_heap_bytes: MIN_HEAP_FRAME_BYTES,
+ compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT,
+ compute_unit_price: 0,
+ loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES,
+ }
+ }
+}
+
+impl From<ComputeBudgetLimits> for FeeBudgetLimits {
+ fn from(val: ComputeBudgetLimits) -> Self {
+ let prioritization_fee_details = PrioritizationFeeDetails::new(
+ PrioritizationFeeType::ComputeUnitPrice(val.compute_unit_price),
+ u64::from(val.compute_unit_limit),
+ );
+ let prioritization_fee = prioritization_fee_details.get_fee();
+
+ FeeBudgetLimits {
+ loaded_accounts_data_size_limit: val.loaded_accounts_bytes,
+ heap_cost: DEFAULT_HEAP_COST,
+ compute_unit_limit: u64::from(val.compute_unit_limit),
+ prioritization_fee,
+ }
+ }
+}
diff --git a/compute-budget/src/lib.rs b/compute-budget/src/lib.rs
index dd4b3d45f9b219..f6ff865be67185 100644
--- a/compute-budget/src/lib.rs
+++ b/compute-budget/src/lib.rs
@@ -2,5 +2,5 @@
 #![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))]
 pub mod compute_budget;
-pub mod compute_budget_processor;
+pub mod compute_budget_limits;
 pub mod prioritization_fee;
diff --git a/core/Cargo.toml b/core/Cargo.toml
index b3d24130d77179..e3dd1bdf1f629f 100644
--- a/core/Cargo.toml
+++ b/core/Cargo.toml
@@ -15,6 +15,7 @@ codecov = { repository = "solana-labs/solana", branch = "master", service = "git
 [dependencies]
 ahash = { workspace = true }
+anyhow = { workspace = true }
 arrayvec = { workspace = true }
 base64 = { workspace = true }
 bincode = { workspace = true }
@@ -45,10 +46,13 @@ serde_bytes = { workspace = true }
 serde_derive = { workspace = true }
 solana-accounts-db = { workspace = true }
 solana-bloom = { workspace = true }
+solana-builtins-default-costs = { workspace = true }
 solana-client = { workspace = true }
 solana-compute-budget = { workspace = true }
+solana-connection-cache = { workspace = true }
 solana-cost-model = { workspace = true }
 solana-entry = { workspace = true }
+solana-fee = { workspace = true }
 solana-frozen-abi = { workspace = true, optional = true }
 solana-frozen-abi-macro = { workspace = true, optional = true }
 solana-geyser-plugin-manager = { workspace = true }
@@ -59,17 +63,19 @@ solana-metrics = { workspace = true }
 solana-net-utils = { workspace = true }
 solana-perf = { workspace = true }
 solana-poh = { workspace = true }
-solana-program-runtime = { workspace = true }
 solana-quic-client = { workspace = true }
 solana-rayon-threadlimit = { workspace = true }
 solana-rpc = { workspace = true }
 solana-rpc-client-api = { workspace = true }
 solana-runtime = { workspace = true }
+solana-runtime-transaction = { workspace = true }
 solana-sanitize = { workspace = true }
 solana-sdk = { workspace = true }
 solana-send-transaction-service = { workspace = true }
+solana-short-vec = { workspace = true }
 solana-streamer = { workspace = true }
 solana-svm = { workspace = true }
+solana-timings = { workspace = true }
 solana-tpu-client = { workspace = true }
 solana-transaction-status = { workspace = true }
 solana-turbine = { workspace = true }
@@ -93,6 +99,7 @@ serde_json = { workspace = 
true }
 serial_test = { workspace = true }
 # See order-crates-for-publishing.py for using this unusual `path = "."`
 solana-core = { path = ".", features = ["dev-context-only-utils"] }
+solana-ledger = { workspace = true, features = ["dev-context-only-utils"] }
 solana-logger = { workspace = true }
 solana-poh = { workspace = true, features = ["dev-context-only-utils"] }
 solana-program-runtime = { workspace = true }
@@ -127,6 +134,7 @@ frozen-abi = [
 "solana-program-runtime/frozen-abi",
 "solana-runtime/frozen-abi",
 "solana-sdk/frozen-abi",
+ "solana-short-vec/frozen-abi",
 "solana-svm/frozen-abi",
 "solana-vote/frozen-abi",
 "solana-vote-program/frozen-abi",
diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs
index 7a69eaf674d520..d0efbfafddfc0b 100644
--- a/core/benches/banking_stage.rs
+++ b/core/benches/banking_stage.rs
@@ -303,6 +303,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) {
 Arc::new(ConnectionCache::new("connection_cache_test")),
 bank_forks,
 &Arc::new(PrioritizationFeeCache::new(0u64)),
+ false,
 );
 let chunk_len = verified.len() / CHUNKS;
@@ -398,7 +399,7 @@ fn simulate_process_entries(
 let bank_fork = BankForks::new_rw_arc(bank);
 let bank = bank_fork.read().unwrap().get_with_scheduler(slot).unwrap();
 bank.clone_without_scheduler()
- .set_fork_graph_in_program_cache(bank_fork.clone());
+ .set_fork_graph_in_program_cache(Arc::downgrade(&bank_fork));
 for i in 0..(num_accounts / 2) {
 bank.transfer(initial_lamports, mint_keypair, &keypairs[i * 2].pubkey())
diff --git a/core/benches/consumer.rs b/core/benches/consumer.rs
index 6dd9eb5b8bf0fa..d736b93ef96ffd 100644
--- a/core/benches/consumer.rs
+++ b/core/benches/consumer.rs
@@ -19,7 +19,7 @@ use {
 poh_recorder::{create_test_recorder, PohRecorder},
 poh_service::PohService,
 },
- solana_runtime::bank::Bank,
+ solana_runtime::{bank::Bank, bank_forks::BankForks},
 solana_sdk::{
 account::{Account, ReadableAccount},
 signature::Keypair,
@@ -89,6 +89,7 @@ fn create_consumer(poh_recorder: &RwLock<PohRecorder>) -> Consumer {
 struct BenchFrame {
 bank: Arc<Bank>,
+ _bank_forks: Arc<RwLock<BankForks>>,
 ledger_path: TempDir,
 exit: Arc<AtomicBool>,
 poh_recorder: Arc<RwLock<PohRecorder>>,
@@ -115,7 +116,7 @@ fn setup() -> BenchFrame {
 bank.write_cost_tracker()
 .unwrap()
 .set_limits(u64::MAX, u64::MAX, u64::MAX);
- let bank = bank.wrap_with_bank_forks_for_tests().0;
+ let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests();
 let ledger_path = TempDir::new().unwrap();
 let blockstore = Arc::new(
@@ -126,6 +127,7 @@ fn setup() -> BenchFrame {
 BenchFrame {
 bank,
+ _bank_forks: bank_forks,
 ledger_path,
 exit,
 poh_recorder,
@@ -147,6 +149,7 @@ fn bench_process_and_record_transactions(bencher: &mut Bencher, batch_size: usiz
 let BenchFrame {
 bank,
+ _bank_forks,
 ledger_path: _ledger_path,
 exit,
 poh_recorder,
diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs
index 0e732e62e66b2b..5c9768aa0bc215 100644
--- a/core/src/banking_stage.rs
+++ b/core/src/banking_stage.rs
@@ -33,7 +33,7 @@ use {
 solana_client::connection_cache::ConnectionCache,
 solana_gossip::cluster_info::ClusterInfo,
 solana_ledger::blockstore_processor::TransactionStatusSender,
- solana_measure::{measure, measure_us},
+ solana_measure::measure_us,
 solana_perf::{data_budget::DataBudget, packet::PACKETS_PER_BATCH},
 solana_poh::poh_recorder::{PohRecorder, TransactionRecorder},
 solana_runtime::{
@@ -339,6 +339,7 @@ impl BankingStage {
 connection_cache: Arc<ConnectionCache>,
 bank_forks: Arc<RwLock<BankForks>>,
 prioritization_fee_cache: &Arc<PrioritizationFeeCache>,
+ enable_forwarding: bool,
 ) -> Self {
 Self::new_num_threads(
 block_production_method,
@@ -354,6 +355,7 @@ impl BankingStage {
 connection_cache,
 bank_forks,
 prioritization_fee_cache,
+ enable_forwarding,
 )
 }
@@ -372,6 +374,7 @@ impl BankingStage {
 connection_cache: Arc<ConnectionCache>,
 bank_forks: Arc<RwLock<BankForks>>,
 prioritization_fee_cache: &Arc<PrioritizationFeeCache>,
+ enable_forwarding: bool,
 ) -> Self {
 match block_production_method {
 BlockProductionMethod::ThreadLocalMultiIterator => {
@@ -403,6 +406,7 @@ impl BankingStage {
 connection_cache,
 bank_forks,
 prioritization_fee_cache,
+ enable_forwarding,
 ),
 }
 }
@@ -505,6 +509,7 @@ impl BankingStage {
 connection_cache: Arc<ConnectionCache>,
 bank_forks: Arc<RwLock<BankForks>>,
 prioritization_fee_cache: &Arc<PrioritizationFeeCache>,
+ enable_forwarding: bool,
 ) -> Self {
 assert!(num_threads >= MIN_TOTAL_THREADS);
 // Single thread to generate entries from many banks.
@@ -586,13 +591,15 @@ impl BankingStage {
 )
 }
- let forwarder = Forwarder::new(
- poh_recorder.clone(),
- bank_forks.clone(),
- cluster_info.clone(),
- connection_cache.clone(),
- data_budget.clone(),
- );
+ let forwarder = enable_forwarding.then(|| {
+ Forwarder::new(
+ poh_recorder.clone(),
+ bank_forks.clone(),
+ cluster_info.clone(),
+ connection_cache.clone(),
+ data_budget.clone(),
+ )
+ });
 // Spawn the central scheduler thread
 bank_thread_hdls.push({
@@ -669,13 +676,13 @@ impl BankingStage {
 if unprocessed_transaction_storage.should_not_process() {
 return;
 }
- let (decision, make_decision_time) =
- measure!(decision_maker.make_consume_or_forward_decision());
+ let (decision, make_decision_us) =
+ measure_us!(decision_maker.make_consume_or_forward_decision());
 let metrics_action = slot_metrics_tracker.check_leader_slot_boundary(
 decision.bank_start(),
 Some(unprocessed_transaction_storage),
 );
- slot_metrics_tracker.increment_make_decision_us(make_decision_time.as_us());
+ slot_metrics_tracker.increment_make_decision_us(make_decision_us);
 match decision {
 BufferedPacketsDecision::Consume(bank_start) => {
@@ -684,17 +691,15 @@ impl BankingStage {
 // packet processing metrics from the next slot towards the metrics
 // of the previous slot
 slot_metrics_tracker.apply_action(metrics_action);
- let (_, consume_buffered_packets_time) = measure!(
- consumer.consume_buffered_packets(
+ let (_, consume_buffered_packets_us) = measure_us!(consumer
+ .consume_buffered_packets(
 &bank_start,
 unprocessed_transaction_storage,
 banking_stage_stats,
 slot_metrics_tracker,
- ),
- "consume_buffered_packets",
- );
+ ));
 slot_metrics_tracker
- .increment_consume_buffered_packets_us(consume_buffered_packets_time.as_us());
+ .increment_consume_buffered_packets_us(consume_buffered_packets_us);
 }
 BufferedPacketsDecision::Forward => {
 let ((), forward_us) = measure_us!(forwarder.handle_forwarding(
@@ -743,20 +748,17 @@ impl BankingStage {
 if !unprocessed_transaction_storage.is_empty()
 || last_metrics_update.elapsed() >= SLOT_BOUNDARY_CHECK_PERIOD
 {
- let (_, process_buffered_packets_time) = measure!(
- Self::process_buffered_packets(
- decision_maker,
- forwarder,
- consumer,
- &mut unprocessed_transaction_storage,
- &banking_stage_stats,
- &mut slot_metrics_tracker,
- &mut tracer_packet_stats,
- ),
- "process_buffered_packets",
- );
+ let (_, process_buffered_packets_us) = measure_us!(Self::process_buffered_packets(
+ decision_maker,
+ forwarder,
+ consumer,
+ &mut unprocessed_transaction_storage,
+ &banking_stage_stats,
+ &mut slot_metrics_tracker,
+ &mut tracer_packet_stats,
+ ));
 slot_metrics_tracker
- .increment_process_buffered_packets_us(process_buffered_packets_time.as_us());
+ .increment_process_buffered_packets_us(process_buffered_packets_us);
 last_metrics_update = Instant::now();
 }
@@ -883,6 +885,7 @@ 
mod tests { Arc::new(ConnectionCache::new("connection_cache_test")), bank_forks, &Arc::new(PrioritizationFeeCache::new(0u64)), + false, ); drop(non_vote_sender); drop(tpu_vote_sender); @@ -938,6 +941,7 @@ mod tests { Arc::new(ConnectionCache::new("connection_cache_test")), bank_forks, &Arc::new(PrioritizationFeeCache::new(0u64)), + false, ); trace!("sending bank"); drop(non_vote_sender); @@ -1015,8 +1019,9 @@ mod tests { replay_vote_sender, None, Arc::new(ConnectionCache::new("connection_cache_test")), - bank_forks, + bank_forks.clone(), // keep a local-copy of bank-forks so worker threads do not lose weak access to bank-forks &Arc::new(PrioritizationFeeCache::new(0u64)), + false, ); // fund another account so we can send 2 good transactions in a single batch. @@ -1064,7 +1069,7 @@ mod tests { drop(poh_recorder); let mut blockhash = start_hash; - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); bank.process_transaction(&fund_tx).unwrap(); //receive entries + ticks loop { @@ -1208,7 +1213,7 @@ mod tests { .map(|(_bank, (entry, _tick_height))| entry) .collect(); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); for entry in entries { bank.process_entry_transactions(entry.transactions) .iter() @@ -1232,7 +1237,7 @@ mod tests { mint_keypair, .. } = create_genesis_config(10_000); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let ledger_path = get_tmp_ledger_path_auto_delete!(); { let blockstore = Blockstore::open(ledger_path.path()) @@ -1378,6 +1383,7 @@ mod tests { Arc::new(ConnectionCache::new("connection_cache_test")), bank_forks, &Arc::new(PrioritizationFeeCache::new(0u64)), + false, ); let keypairs = (0..100).map(|_| Keypair::new()).collect_vec(); diff --git a/core/src/banking_stage/committer.rs b/core/src/banking_stage/committer.rs index 0ca1304a4560f5..6e5f411dc0f4ae 100644 --- a/core/src/banking_stage/committer.rs +++ b/core/src/banking_stage/committer.rs @@ -14,8 +14,8 @@ use { }, solana_sdk::{hash::Hash, pubkey::Pubkey, saturating_add_assign}, solana_svm::{ - account_loader::TransactionLoadResult, - transaction_results::{TransactionExecutionResult, TransactionResults}, + transaction_commit_result::{TransactionCommitResult, TransactionCommitResultExtensions}, + transaction_execution_result::TransactionExecutionResult, }, solana_transaction_status::{ token_balances::TransactionTokenBalancesSet, TransactionTokenBalance, @@ -27,7 +27,7 @@ use { pub enum CommitTransactionDetails { Committed { compute_units: u64, - loaded_accounts_data_size: usize, + loaded_accounts_data_size: u32, }, NotCommitted, } @@ -67,7 +67,6 @@ impl Committer { pub(super) fn commit_transactions( &self, batch: &TransactionBatch, - loaded_transactions: &mut [TransactionLoadResult], execution_results: Vec, last_blockhash: Hash, lamports_per_signature: u64, @@ -86,9 +85,8 @@ impl Committer { .filter_map(|(execution_result, tx)| execution_result.was_executed().then_some(tx)) .collect_vec(); - let (tx_results, commit_time_us) = measure_us!(bank.commit_transactions( + let (commit_results, commit_time_us) = measure_us!(bank.commit_transactions( batch.sanitized_transactions(), - loaded_transactions, execution_results, last_blockhash, lamports_per_signature, @@ -104,34 +102,30 @@ impl 
Committer { )); execute_and_commit_timings.commit_us = commit_time_us; - let commit_transaction_statuses = tx_results - .execution_results + let commit_transaction_statuses = commit_results .iter() - .zip(tx_results.loaded_accounts_stats.iter()) - .map( - |(execution_result, loaded_accounts_stats)| match execution_result.details() { - // reports actual execution CUs, and actual loaded accounts size for - // transaction committed to block. qos_service uses these information to adjust - // reserved block space. - Some(details) => CommitTransactionDetails::Committed { - compute_units: details.executed_units, - loaded_accounts_data_size: loaded_accounts_stats - .as_ref() - .map_or(0, |stats| stats.loaded_accounts_data_size), - }, - None => CommitTransactionDetails::NotCommitted, + .map(|commit_result| match commit_result { + // reports actual execution CUs, and actual loaded accounts size for + // transaction committed to block. qos_service uses these information to adjust + // reserved block space. + Ok(committed_tx) => CommitTransactionDetails::Committed { + compute_units: committed_tx.execution_details.executed_units, + loaded_accounts_data_size: committed_tx + .loaded_account_stats + .loaded_accounts_data_size, }, - ) + Err(_) => CommitTransactionDetails::NotCommitted, + }) .collect(); let ((), find_and_send_votes_us) = measure_us!({ bank_utils::find_and_send_votes( batch.sanitized_transactions(), - &tx_results, + &commit_results, Some(&self.replay_vote_sender), ); self.collect_balances_and_send_status_batch( - tx_results, + commit_results, bank, batch, pre_balance_info, @@ -146,7 +140,7 @@ impl Committer { fn collect_balances_and_send_status_batch( &self, - tx_results: TransactionResults, + commit_results: Vec, bank: &Arc, batch: &TransactionBatch, pre_balance_info: &mut PreBalanceInfo, @@ -158,11 +152,10 @@ impl Committer { let post_token_balances = collect_token_balances(bank, batch, &mut pre_balance_info.mint_decimals); let mut transaction_index = starting_transaction_index.unwrap_or_default(); - let batch_transaction_indexes: Vec<_> = tx_results - .execution_results + let batch_transaction_indexes: Vec<_> = commit_results .iter() - .map(|result| { - if result.was_executed() { + .map(|commit_result| { + if commit_result.was_executed() { let this_transaction_index = transaction_index; saturating_add_assign!(transaction_index, 1); this_transaction_index @@ -174,7 +167,7 @@ impl Committer { transaction_status_sender.send_transaction_status_batch( bank.clone(), txs, - tx_results.execution_results, + commit_results, TransactionBalancesSet::new( std::mem::take(&mut pre_balance_info.native), post_balances, @@ -183,7 +176,6 @@ impl Committer { std::mem::take(&mut pre_balance_info.token), post_token_balances, ), - tx_results.rent_debits, batch_transaction_indexes, ); } diff --git a/core/src/banking_stage/consume_worker.rs b/core/src/banking_stage/consume_worker.rs index f83ca6724d415e..57a4778d3204b3 100644 --- a/core/src/banking_stage/consume_worker.rs +++ b/core/src/banking_stage/consume_worker.rs @@ -710,7 +710,8 @@ mod tests { }, solana_poh::poh_recorder::{PohRecorder, WorkingBankEntry}, solana_runtime::{ - prioritization_fee_cache::PrioritizationFeeCache, vote_sender_types::ReplayVoteReceiver, + bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache, + vote_sender_types::ReplayVoteReceiver, }, solana_sdk::{ genesis_config::GenesisConfig, poh_config::PohConfig, pubkey::Pubkey, @@ -729,6 +730,7 @@ mod tests { mint_keypair: Keypair, genesis_config: GenesisConfig, bank: Arc, + 
_bank_forks: Arc>, _ledger_path: TempDir, _entry_receiver: Receiver, poh_recorder: Arc>, @@ -745,7 +747,7 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(10_000); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()) @@ -788,6 +790,7 @@ mod tests { mint_keypair, genesis_config, bank, + _bank_forks: bank_forks, _ledger_path: ledger_path, _entry_receiver: entry_receiver, poh_recorder, diff --git a/core/src/banking_stage/consumer.rs b/core/src/banking_stage/consumer.rs index 6ae0881da45d8a..f4e15edd0888f9 100644 --- a/core/src/banking_stage/consumer.rs +++ b/core/src/banking_stage/consumer.rs @@ -9,22 +9,21 @@ use { BankingStageStats, }, itertools::Itertools, - solana_compute_budget::compute_budget_processor::process_compute_budget_instructions, solana_ledger::token_balances::collect_token_balances, solana_measure::{measure::Measure, measure_us}, solana_poh::poh_recorder::{ BankStart, PohRecorderError, RecordTransactionsSummary, RecordTransactionsTimings, TransactionRecorder, }, - solana_program_runtime::timings::ExecuteTimings, solana_runtime::{ bank::{Bank, LoadAndExecuteTransactionsOutput}, - compute_budget_details::GetComputeBudgetDetails, transaction_batch::TransactionBatch, }, + solana_runtime_transaction::instructions_processor::process_compute_budget_instructions, solana_sdk::{ clock::{Slot, FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, MAX_PROCESSING_AGE}, feature_set, + fee::FeeBudgetLimits, message::SanitizedMessage, saturating_add_assign, timing::timestamp, @@ -35,6 +34,7 @@ use { transaction_error_metrics::TransactionErrorMetrics, transaction_processor::{ExecutionRecordingConfig, TransactionProcessingConfig}, }, + solana_timings::ExecuteTimings, std::{ sync::{atomic::Ordering, Arc}, time::Instant, @@ -577,20 +577,59 @@ impl Consumer { .sanitized_transactions() .iter() .filter_map(|transaction| { - let round_compute_unit_price_enabled = false; // TODO get from working_bank.feature_set - transaction - .get_compute_budget_details(round_compute_unit_price_enabled) - .map(|details| details.compute_unit_price) + process_compute_budget_instructions( + transaction.message().program_instructions_iter(), + ) + .ok() + .map(|limits| limits.compute_unit_price) }) .minmax(); let (min_prioritization_fees, max_prioritization_fees) = min_max.into_option().unwrap_or_default(); + let mut error_counters = TransactionErrorMetrics::default(); + let mut retryable_transaction_indexes: Vec<_> = batch + .lock_results() + .iter() + .enumerate() + .filter_map(|(index, res)| match res { + // following are retryable errors + Err(TransactionError::AccountInUse) => { + error_counters.account_in_use += 1; + Some(index) + } + Err(TransactionError::WouldExceedMaxBlockCostLimit) => { + error_counters.would_exceed_max_block_cost_limit += 1; + Some(index) + } + Err(TransactionError::WouldExceedMaxVoteCostLimit) => { + error_counters.would_exceed_max_vote_cost_limit += 1; + Some(index) + } + Err(TransactionError::WouldExceedMaxAccountCostLimit) => { + error_counters.would_exceed_max_account_cost_limit += 1; + Some(index) + } + Err(TransactionError::WouldExceedAccountDataBlockLimit) => { + error_counters.would_exceed_account_data_block_limit += 1; + Some(index) + } + // following are non-retryable errors + Err(TransactionError::TooManyAccountLocks) => { + 
error_counters.too_many_account_locks += 1; + None + } + Err(_) => None, + Ok(_) => None, + }) + .collect(); + let (load_and_execute_transactions_output, load_execute_us) = measure_us!(bank .load_and_execute_transactions( batch, MAX_PROCESSING_AGE, &mut execute_and_commit_timings.execute_timings, + &mut error_counters, TransactionProcessingConfig { account_overrides: None, check_program_modification_slot: bank.check_program_modification_slot(), @@ -606,15 +645,11 @@ impl Consumer { execute_and_commit_timings.load_execute_us = load_execute_us; let LoadAndExecuteTransactionsOutput { - mut loaded_transactions, execution_results, - mut retryable_transaction_indexes, executed_transactions_count, executed_non_vote_transactions_count, executed_with_successful_result_count, signature_count, - error_counters, - .. } = load_and_execute_transactions_output; let transactions_attempted_execution_count = execution_results.len(); @@ -681,7 +716,6 @@ impl Consumer { let (commit_time_us, commit_transaction_statuses) = if executed_transactions_count != 0 { self.committer.commit_transactions( batch, - &mut loaded_transactions, execution_results, last_blockhash, lamports_per_signature, @@ -741,15 +775,14 @@ impl Consumer { error_counters: &mut TransactionErrorMetrics, ) -> Result<(), TransactionError> { let fee_payer = message.fee_payer(); - let budget_limits = - process_compute_budget_instructions(message.program_instructions_iter())?.into(); - let fee = bank.fee_structure().calculate_fee( + let fee_budget_limits = FeeBudgetLimits::from(process_compute_budget_instructions( + message.program_instructions_iter(), + )?); + let fee = solana_fee::calculate_fee( message, - bank.get_lamports_per_signature(), - &budget_limits, - bank.feature_set.is_active( - &feature_set::include_loaded_accounts_data_size_in_fee_calculation::id(), - ), + bank.get_lamports_per_signature() == 0, + bank.fee_structure().lamports_per_signature, + fee_budget_limits.prioritization_fee, bank.feature_set .is_active(&feature_set::remove_rounding_in_fee_calculation::id()), ); @@ -849,9 +882,8 @@ mod tests { }, solana_perf::packet::Packet, solana_poh::poh_recorder::{PohRecorder, Record, WorkingBankEntry}, - solana_program_runtime::timings::ProgramTiming, solana_rpc::transaction_status_service::TransactionStatusService, - solana_runtime::prioritization_fee_cache::PrioritizationFeeCache, + solana_runtime::{bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache}, solana_sdk::{ account::AccountSharedData, account_utils::StateMut, @@ -878,6 +910,7 @@ mod tests { transaction::{MessageHash, Transaction, VersionedTransaction}, }, solana_svm::account_loader::CheckedTransactionDetails, + solana_timings::ProgramTiming, solana_transaction_status::{TransactionStatusMeta, VersionedTransactionWithStatusMeta}, std::{ borrow::Cow, @@ -989,6 +1022,7 @@ mod tests { ) -> ( Vec, Arc, + Arc>, Arc>, Receiver, GenesisConfigInfo, @@ -1003,7 +1037,7 @@ mod tests { } = &genesis_config_info; let blockstore = Blockstore::open(ledger_path).expect("Expected to be able to open database ledger"); - let bank = Bank::new_no_wallclock_throttle_for_tests(genesis_config).0; + let (bank, bank_forks) = Bank::new_no_wallclock_throttle_for_tests(genesis_config); let exit = Arc::new(AtomicBool::default()); let (poh_recorder, entry_receiver, record_receiver) = PohRecorder::new( bank.tick_height(), @@ -1032,6 +1066,7 @@ mod tests { ( transactions, bank, + bank_forks, poh_recorder, entry_receiver, genesis_config_info, @@ -1059,7 +1094,7 @@ mod tests { mint_keypair, .. 
} = create_slow_genesis_config(10_000); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let pubkey = solana_sdk::pubkey::new_rand(); let transactions = sanitize_transactions(vec![system_transaction::transfer( @@ -1187,7 +1222,7 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(10_000); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let pubkey = Pubkey::new_unique(); // setup nonce account with a durable nonce different from the current @@ -1343,7 +1378,7 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(10_000); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let pubkey = solana_sdk::pubkey::new_rand(); let transactions = { @@ -1427,7 +1462,7 @@ mod tests { } = create_slow_genesis_config(10_000); let mut bank = Bank::new_for_tests(&genesis_config); bank.ns_per_slot = u128::MAX; - let bank = bank.wrap_with_bank_forks_for_tests().0; + let (bank, _bank_forks) = bank.wrap_with_bank_forks_for_tests(); let pubkey = solana_sdk::pubkey::new_rand(); let ledger_path = get_tmp_ledger_path_auto_delete!(); @@ -1583,7 +1618,7 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(10_000); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let pubkey = solana_sdk::pubkey::new_rand(); let pubkey1 = solana_sdk::pubkey::new_rand(); @@ -1660,7 +1695,7 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(lamports); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); // set cost tracker limits to MAX so it will not filter out TXs bank.write_cost_tracker() .unwrap() @@ -1721,7 +1756,7 @@ mod tests { mint_keypair, .. } = create_slow_genesis_config(10_000); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); // set cost tracker limits to MAX so it will not filter out TXs bank.write_cost_tracker() .unwrap() @@ -1780,7 +1815,7 @@ mod tests { mint_keypair, .. 
} = create_slow_genesis_config(10_000); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let pubkey = solana_sdk::pubkey::new_rand(); @@ -1861,7 +1896,7 @@ mod tests { } = create_slow_genesis_config(solana_sdk::native_token::sol_to_lamports(1000.0)); genesis_config.rent.lamports_per_byte_year = 50; genesis_config.rent.exemption_threshold = 2.0; - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let pubkey = solana_sdk::pubkey::new_rand(); let pubkey1 = solana_sdk::pubkey::new_rand(); let keypair1 = Keypair::new(); @@ -2130,7 +2165,7 @@ mod tests { fn test_consume_buffered_packets() { let ledger_path = get_tmp_ledger_path_auto_delete!(); { - let (transactions, bank, poh_recorder, _entry_receiver, _, poh_simulator) = + let (transactions, bank, _bank_forks, poh_recorder, _entry_receiver, _, poh_simulator) = setup_conflicting_transactions(ledger_path.path()); let recorder: TransactionRecorder = poh_recorder.read().unwrap().new_recorder(); let num_conflicting_transactions = transactions.len(); @@ -2203,8 +2238,15 @@ mod tests { fn test_consume_buffered_packets_sanitization_error() { let ledger_path = get_tmp_ledger_path_auto_delete!(); { - let (mut transactions, bank, poh_recorder, _entry_receiver, _, poh_simulator) = - setup_conflicting_transactions(ledger_path.path()); + let ( + mut transactions, + bank, + _bank_forks, + poh_recorder, + _entry_receiver, + _, + poh_simulator, + ) = setup_conflicting_transactions(ledger_path.path()); let duplicate_account_key = transactions[0].message.account_keys[0]; transactions[0] .message @@ -2259,7 +2301,7 @@ mod tests { fn test_consume_buffered_packets_retryable() { let ledger_path = get_tmp_ledger_path_auto_delete!(); { - let (transactions, bank, poh_recorder, _entry_receiver, _, poh_simulator) = + let (transactions, bank, _bank_forks, poh_recorder, _entry_receiver, _, poh_simulator) = setup_conflicting_transactions(ledger_path.path()); let recorder = poh_recorder.read().unwrap().new_recorder(); let num_conflicting_transactions = transactions.len(); @@ -2355,8 +2397,15 @@ mod tests { fn test_consume_buffered_packets_batch_priority_guard() { let ledger_path = get_tmp_ledger_path_auto_delete!(); { - let (_, bank, poh_recorder, _entry_receiver, genesis_config_info, poh_simulator) = - setup_conflicting_transactions(ledger_path.path()); + let ( + _, + bank, + _bank_forks, + poh_recorder, + _entry_receiver, + genesis_config_info, + poh_simulator, + ) = setup_conflicting_transactions(ledger_path.path()); let recorder = poh_recorder.read().unwrap().new_recorder(); // Setup transactions: diff --git a/core/src/banking_stage/decision_maker.rs b/core/src/banking_stage/decision_maker.rs index 6ad2c3042b254f..1bd0b224fdf034 100644 --- a/core/src/banking_stage/decision_maker.rs +++ b/core/src/banking_stage/decision_maker.rs @@ -148,7 +148,7 @@ mod tests { #[test] fn test_make_consume_or_forward_decision() { let genesis_config = create_genesis_config(2).genesis_config; - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let ledger_path = temp_dir(); let blockstore = Arc::new(Blockstore::open(ledger_path.as_path()).unwrap()); let (exit, poh_recorder, poh_service, _entry_receiver) = diff --git a/core/src/banking_stage/forwarder.rs 
b/core/src/banking_stage/forwarder.rs index 492ba94504558b..acb34b8b4dc1e9 100644 --- a/core/src/banking_stage/forwarder.rs +++ b/core/src/banking_stage/forwarder.rs @@ -10,7 +10,8 @@ use { next_leader::{next_leader, next_leader_tpu_vote}, tracer_packet_stats::TracerPacketStats, }, - solana_client::{connection_cache::ConnectionCache, tpu_connection::TpuConnection}, + solana_client::connection_cache::ConnectionCache, + solana_connection_cache::client_connection::ClientConnection as TpuConnection, solana_gossip::cluster_info::ClusterInfo, solana_measure::measure_us, solana_perf::{data_budget::DataBudget, packet::Packet}, diff --git a/core/src/banking_stage/immutable_deserialized_packet.rs b/core/src/banking_stage/immutable_deserialized_packet.rs index a9835672632aed..f4a3a7e6623376 100644 --- a/core/src/banking_stage/immutable_deserialized_packet.rs +++ b/core/src/banking_stage/immutable_deserialized_packet.rs @@ -1,19 +1,20 @@ use { super::packet_filter::PacketFilterFailure, + solana_compute_budget::compute_budget_limits::ComputeBudgetLimits, solana_perf::packet::Packet, - solana_runtime::compute_budget_details::{ComputeBudgetDetails, GetComputeBudgetDetails}, + solana_runtime_transaction::instructions_processor::process_compute_budget_instructions, solana_sanitize::SanitizeError, solana_sdk::{ hash::Hash, message::Message, pubkey::Pubkey, - short_vec::decode_shortu16_len, signature::Signature, transaction::{ AddressLoader, SanitizedTransaction, SanitizedVersionedTransaction, VersionedTransaction, }, }, + solana_short_vec::decode_shortu16_len, std::{cmp::Ordering, collections::HashSet, mem::size_of}, thiserror::Error, }; @@ -43,7 +44,8 @@ pub struct ImmutableDeserializedPacket { transaction: SanitizedVersionedTransaction, message_hash: Hash, is_simple_vote: bool, - compute_budget_details: ComputeBudgetDetails, + compute_unit_price: u64, + compute_unit_limit: u32, } impl ImmutableDeserializedPacket { @@ -55,13 +57,20 @@ impl ImmutableDeserializedPacket { let is_simple_vote = packet.meta().is_simple_vote_tx(); // drop transaction if prioritization fails. - let mut compute_budget_details = sanitized_transaction - .get_compute_budget_details(packet.meta().round_compute_unit_price()) - .ok_or(DeserializedPacketError::PrioritizationFailure)?; + let ComputeBudgetLimits { + mut compute_unit_price, + compute_unit_limit, + .. 
+ } = process_compute_budget_instructions( + sanitized_transaction + .get_message() + .program_instructions_iter(), + ) + .map_err(|_| DeserializedPacketError::PrioritizationFailure)?; // set compute unit price to zero for vote transactions if is_simple_vote { - compute_budget_details.compute_unit_price = 0; + compute_unit_price = 0; }; Ok(Self { @@ -69,7 +78,8 @@ impl ImmutableDeserializedPacket { transaction: sanitized_transaction, message_hash, is_simple_vote, - compute_budget_details, + compute_unit_price, + compute_unit_limit, }) } @@ -90,15 +100,11 @@ impl ImmutableDeserializedPacket { } pub fn compute_unit_price(&self) -> u64 { - self.compute_budget_details.compute_unit_price + self.compute_unit_price } pub fn compute_unit_limit(&self) -> u64 { - self.compute_budget_details.compute_unit_limit - } - - pub fn compute_budget_details(&self) -> ComputeBudgetDetails { - self.compute_budget_details.clone() + u64::from(self.compute_unit_limit) } // This function deserializes packets into transactions, computes the blake3 hash of transaction diff --git a/core/src/banking_stage/latest_unprocessed_votes.rs b/core/src/banking_stage/latest_unprocessed_votes.rs index 084e4125b842ae..069c6e4bbb3d07 100644 --- a/core/src/banking_stage/latest_unprocessed_votes.rs +++ b/core/src/banking_stage/latest_unprocessed_votes.rs @@ -121,7 +121,7 @@ impl LatestValidatorVotePacket { pub(crate) fn weighted_random_order_by_stake<'a>( bank: &Bank, pubkeys: impl Iterator, -) -> impl Iterator { +) -> impl Iterator + 'static { // Efraimidis and Spirakis algo for weighted random sample without replacement let staked_nodes = bank.staked_nodes(); let mut pubkey_with_weight: Vec<(f64, Pubkey)> = pubkeys @@ -274,49 +274,51 @@ impl LatestUnprocessedVotes { bank: Arc, forward_packet_batches_by_accounts: &mut ForwardPacketBatchesByAccounts, ) -> usize { - let mut continue_forwarding = true; - let pubkeys_by_stake = weighted_random_order_by_stake( - &bank, - self.latest_votes_per_pubkey.read().unwrap().keys(), - ) - .collect_vec(); - pubkeys_by_stake - .into_iter() - .filter(|&pubkey| { - if !continue_forwarding { - return false; - } - if let Some(lock) = self.get_entry(pubkey) { - let mut vote = lock.write().unwrap(); - if !vote.is_vote_taken() && !vote.is_forwarded() { - let deserialized_vote_packet = vote.vote.as_ref().unwrap().clone(); - if let Some(sanitized_vote_transaction) = deserialized_vote_packet - .build_sanitized_transaction( - bank.vote_only_bank(), - bank.as_ref(), - bank.get_reserved_account_keys(), - ) - { - if forward_packet_batches_by_accounts.try_add_packet( - &sanitized_vote_transaction, - deserialized_vote_packet, - &bank.feature_set, - ) { - vote.forwarded = true; - } else { - // To match behavior of regular transactions we stop - // forwarding votes as soon as one fails - continue_forwarding = false; - } - return true; - } else { - return false; - } - } - } - false - }) - .count() + let pubkeys_by_stake = { + let binding = self.latest_votes_per_pubkey.read().unwrap(); + weighted_random_order_by_stake(&bank, binding.keys()) + }; + + let mut forwarded_count: usize = 0; + for pubkey in pubkeys_by_stake { + let Some(vote) = self.get_entry(pubkey) else { + continue; + }; + + let mut vote = vote.write().unwrap(); + if vote.is_vote_taken() || vote.is_forwarded() { + continue; + } + + let deserialized_vote_packet = vote.vote.as_ref().unwrap().clone(); + let Some(sanitized_vote_transaction) = deserialized_vote_packet + .build_sanitized_transaction( + bank.vote_only_bank(), + bank.as_ref(), + 
bank.get_reserved_account_keys(), + ) + else { + continue; + }; + + let forwarding_successful = forward_packet_batches_by_accounts.try_add_packet( + &sanitized_vote_transaction, + deserialized_vote_packet, + &bank.feature_set, + ); + + if !forwarding_successful { + // To match behavior of regular transactions we stop forwarding votes as soon as one + // fails. We are assuming that failure (try_add_packet) means no more space + // available. + break; + } + + vote.forwarded = true; + forwarded_count += 1; + } + + forwarded_count } /// Drains all votes yet to be processed sorted by a weighted random ordering by stake diff --git a/core/src/banking_stage/leader_slot_timing_metrics.rs b/core/src/banking_stage/leader_slot_timing_metrics.rs index 00435163ab1d6d..6dbd697a956010 100644 --- a/core/src/banking_stage/leader_slot_timing_metrics.rs +++ b/core/src/banking_stage/leader_slot_timing_metrics.rs @@ -1,7 +1,7 @@ use { solana_poh::poh_recorder::RecordTransactionsTimings, - solana_program_runtime::timings::ExecuteTimings, solana_sdk::{clock::Slot, saturating_add_assign}, + solana_timings::ExecuteTimings, std::time::Instant, }; diff --git a/core/src/banking_stage/packet_filter.rs b/core/src/banking_stage/packet_filter.rs index 5e90902e01816f..4c38d70762e35e 100644 --- a/core/src/banking_stage/packet_filter.rs +++ b/core/src/banking_stage/packet_filter.rs @@ -1,6 +1,6 @@ use { super::immutable_deserialized_packet::ImmutableDeserializedPacket, - solana_cost_model::block_cost_limits::BUILT_IN_INSTRUCTION_COSTS, + solana_builtins_default_costs::BUILTIN_INSTRUCTION_COSTS, solana_sdk::{ed25519_program, saturating_add_assign, secp256k1_program}, thiserror::Error, }; @@ -22,7 +22,7 @@ impl ImmutableDeserializedPacket { pub fn check_insufficent_compute_unit_limit(&self) -> Result<(), PacketFilterFailure> { let mut static_builtin_cost_sum: u64 = 0; for (program_id, _) in self.transaction().get_message().program_instructions_iter() { - if let Some(ix_cost) = BUILT_IN_INSTRUCTION_COSTS.get(program_id) { + if let Some(ix_cost) = BUILTIN_INSTRUCTION_COSTS.get(program_id) { saturating_add_assign!(static_builtin_cost_sum, *ix_cost); } } diff --git a/core/src/banking_stage/qos_service.rs b/core/src/banking_stage/qos_service.rs index bf8b7df963e392..eafc3052aa26dd 100644 --- a/core/src/banking_stage/qos_service.rs +++ b/core/src/banking_stage/qos_service.rs @@ -707,10 +707,10 @@ mod tests { // calculate their costs, apply to cost_tracker let transaction_count = 5; let keypair = Keypair::new(); - let loaded_accounts_data_size: usize = 1_000_000; + let loaded_accounts_data_size: u32 = 1_000_000; let transaction = solana_sdk::transaction::Transaction::new_unsigned(solana_sdk::message::Message::new( &[ - solana_sdk::compute_budget::ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(loaded_accounts_data_size as u32), + solana_sdk::compute_budget::ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(loaded_accounts_data_size), solana_sdk::system_instruction::transfer(&keypair.pubkey(), &solana_sdk::pubkey::Pubkey::new_unique(), 1), ], Some(&keypair.pubkey()), @@ -720,7 +720,7 @@ mod tests { .map(|_| transfer_tx.clone()) .collect(); let execute_units_adjustment: u64 = 10; - let loaded_accounts_data_size_adjustment: usize = 32000; + let loaded_accounts_data_size_adjustment: u32 = 32000; let loaded_accounts_data_size_cost_adjustment = CostModel::calculate_loaded_accounts_data_size_cost( loaded_accounts_data_size_adjustment, @@ -827,10 +827,10 @@ mod tests { // calculate their costs, apply to cost_tracker 
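+        // Editor's note: `set_loaded_accounts_data_size_limit` takes a `u32`, so the
+        // test below now types the limit as `u32` end to end instead of casting a
+        // `usize` at the call site.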
let transaction_count = 5;
let keypair = Keypair::new();
- let loaded_accounts_data_size: usize = 1_000_000;
+ let loaded_accounts_data_size: u32 = 1_000_000;
let transaction = solana_sdk::transaction::Transaction::new_unsigned(solana_sdk::message::Message::new(
&[
- solana_sdk::compute_budget::ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(loaded_accounts_data_size as u32),
+ solana_sdk::compute_budget::ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(loaded_accounts_data_size),
solana_sdk::system_instruction::transfer(&keypair.pubkey(), &solana_sdk::pubkey::Pubkey::new_unique(), 1),
],
Some(&keypair.pubkey()),
@@ -840,7 +840,7 @@
.map(|_| transfer_tx.clone())
.collect();
let execute_units_adjustment: u64 = 10;
- let loaded_accounts_data_size_adjustment: usize = 32000;
+ let loaded_accounts_data_size_adjustment: u32 = 32000;
let loaded_accounts_data_size_cost_adjustment =
CostModel::calculate_loaded_accounts_data_size_cost(
loaded_accounts_data_size_adjustment,
diff --git a/core/src/banking_stage/read_write_account_set.rs b/core/src/banking_stage/read_write_account_set.rs
index 4b1efc015e2bbf..9ed0f24500d9db 100644
--- a/core/src/banking_stage/read_write_account_set.rs
+++ b/core/src/banking_stage/read_write_account_set.rs
@@ -84,7 +84,7 @@ mod tests {
use {
super::ReadWriteAccountSet,
solana_ledger::genesis_utils::GenesisConfigInfo,
- solana_runtime::{bank::Bank, genesis_utils::create_genesis_config},
+ solana_runtime::{bank::Bank, bank_forks::BankForks, genesis_utils::create_genesis_config},
solana_sdk::{
account::AccountSharedData,
address_lookup_table::{
@@ -101,7 +101,10 @@ mod tests {
signer::Signer,
transaction::{MessageHash, SanitizedTransaction, VersionedTransaction},
},
- std::{borrow::Cow, sync::Arc},
+ std::{
+ borrow::Cow,
+ sync::{Arc, RwLock},
+ },
};
fn create_test_versioned_message(
@@ -171,9 +174,9 @@
)
}
- fn create_test_bank() -> Arc<Bank> {
+ fn create_test_bank() -> (Arc<Bank>, Arc<RwLock<BankForks>>) {
let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000);
- Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0
+ Bank::new_no_wallclock_throttle_for_tests(&genesis_config)
}
// Helper function (could potentially use test_case in future).
@@ -182,7 +185,7 @@
// conflict_index = 2 means write lock conflict with address table key
// conflict_index = 3 means read lock conflict with address table key
fn test_check_and_take_locks(conflict_index: usize, add_write: bool, expectation: bool) {
- let bank = create_test_bank();
+ let (bank, _bank_forks) = create_test_bank();
let (bank, table_address) = create_test_address_lookup_table(bank, 2);
let tx = create_test_sanitized_transaction(
&Keypair::new(),
diff --git a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs
index 045d2cca1d8dba..59ce92173ed26e 100644
--- a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs
+++ b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs
@@ -556,10 +556,20 @@ fn try_schedule_transaction(
}
// Schedule the transaction if it can be.
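+ // Reviewer sketch: the change below partitions the account keys by
+ // writability instead of materializing `TransactionAccountLocks`; e.g. for
+ // keys [payer, dest] where only `payer` is writable:
+ //   write_account_locks -> [&payer]
+ //   read_account_locks  -> [&dest]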
- let transaction_locks = transaction.get_account_locks_unchecked();
+ let message = transaction.message();
+ let account_keys = message.account_keys();
+ let write_account_locks = account_keys
+ .iter()
+ .enumerate()
+ .filter_map(|(index, key)| message.is_writable(index).then_some(key));
+ let read_account_locks = account_keys
+ .iter()
+ .enumerate()
+ .filter_map(|(index, key)| (!message.is_writable(index)).then_some(key));
+
let Some(thread_id) = account_locks.try_lock_accounts(
- transaction_locks.writable.into_iter(),
- transaction_locks.readonly.into_iter(),
+ write_account_locks,
+ read_account_locks,
ThreadSet::any(num_threads),
thread_selector,
) else {
diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs
index b84af5a902c5d0..cb945cf37ab189 100644
--- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs
+++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs
@@ -23,10 +23,10 @@ use {
},
arrayvec::ArrayVec,
crossbeam_channel::RecvTimeoutError,
- solana_compute_budget::compute_budget_processor::process_compute_budget_instructions,
solana_cost_model::cost_model::CostModel,
solana_measure::measure_us,
solana_runtime::{bank::Bank, bank_forks::BankForks},
+ solana_runtime_transaction::instructions_processor::process_compute_budget_instructions,
solana_sdk::{
self,
clock::{FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, MAX_PROCESSING_AGE},
@@ -65,8 +65,8 @@ pub(crate) struct SchedulerController {
timing_metrics: SchedulerTimingMetrics,
/// Metric report handles for the worker threads.
worker_metrics: Vec<Arc<ConsumeWorkerMetrics>>,
- /// State for forwarding packets to the leader.
- forwarder: Forwarder,
+ /// State for forwarding packets to the leader, if enabled.
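+ /// (`None` disables forwarding; `Forward` and `ForwardAndHold` decisions
+ /// then clear or clean the container instead, as wired up below.)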
+ forwarder: Option<Forwarder>,
}
impl SchedulerController {
@@ -76,7 +76,7 @@ impl SchedulerController {
bank_forks: Arc<RwLock<BankForks>>,
scheduler: PrioGraphScheduler,
worker_metrics: Vec<Arc<ConsumeWorkerMetrics>>,
- forwarder: Forwarder,
+ forwarder: Option<Forwarder>,
) -> Self {
Self {
decision_maker,
@@ -147,6 +147,7 @@ impl SchedulerController {
&mut self,
decision: &BufferedPacketsDecision,
) -> Result<(), SchedulerError> {
+ let forwarding_enabled = self.forwarder.is_some();
match decision {
BufferedPacketsDecision::Consume(bank_start) => {
let (scheduling_summary, schedule_time_us) = measure_us!(self.scheduler.schedule(
@@ -186,16 +187,30 @@
});
}
BufferedPacketsDecision::Forward => {
- let (_, forward_time_us) = measure_us!(self.forward_packets(false));
- self.timing_metrics.update(|timing_metrics| {
- saturating_add_assign!(timing_metrics.forward_time_us, forward_time_us);
- });
+ if forwarding_enabled {
+ let (_, forward_time_us) = measure_us!(self.forward_packets(false));
+ self.timing_metrics.update(|timing_metrics| {
+ saturating_add_assign!(timing_metrics.forward_time_us, forward_time_us);
+ });
+ } else {
+ let (_, clear_time_us) = measure_us!(self.clear_container());
+ self.timing_metrics.update(|timing_metrics| {
+ saturating_add_assign!(timing_metrics.clear_time_us, clear_time_us);
+ });
+ }
}
BufferedPacketsDecision::ForwardAndHold => {
- let (_, forward_time_us) = measure_us!(self.forward_packets(true));
- self.timing_metrics.update(|timing_metrics| {
- saturating_add_assign!(timing_metrics.forward_time_us, forward_time_us);
- });
+ if forwarding_enabled {
+ let (_, forward_time_us) = measure_us!(self.forward_packets(true));
+ self.timing_metrics.update(|timing_metrics| {
+ saturating_add_assign!(timing_metrics.forward_time_us, forward_time_us);
+ });
+ } else {
+ let (_, clean_time_us) = measure_us!(self.clean_queue());
+ self.timing_metrics.update(|timing_metrics| {
+ saturating_add_assign!(timing_metrics.clean_time_us, clean_time_us);
+ });
+ }
}
BufferedPacketsDecision::Hold => {}
}
@@ -234,6 +249,7 @@
let start = Instant::now();
let bank = self.bank_forks.read().unwrap().working_bank();
let feature_set = &bank.feature_set;
+ let forwarder = self.forwarder.as_mut().expect("forwarder must exist");
// Pop from the container in chunks, filter using bank checks, then attempt to forward.
// This doubles as a way to clean the queue as well as forwarding transactions.
@@ -282,7 +298,7 @@
// If not already forwarded and can be forwarded, add to forwardable packets.
if state.should_forward()
- && self.forwarder.try_add_packet(
+ && forwarder.try_add_packet(
sanitized_transaction,
immutable_packet,
feature_set,
@@ -300,9 +316,8 @@
}
// Forward each batch of transactions
- self.forwarder
- .forward_batched_packets(&ForwardOption::ForwardTransaction);
- self.forwarder.clear_batches();
+ forwarder.forward_batched_packets(&ForwardOption::ForwardTransaction);
+ forwarder.clear_batches();
// If we hit the time limit. Drop everything that was not checked/processed.
// If we cannot run these simple checks in time, then we cannot run them during
@@ -330,7 +345,6 @@
/// Clears the transaction state container.
/// This only clears pending transactions, and does **not** clear in-flight transactions.
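+ /// (Editor's note: reachable now that schedulers built without a forwarder
+ /// clear the container on `Forward` decisions, hence the `#[allow(dead_code)]`
+ /// removed below.)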
- #[allow(dead_code)] fn clear_container(&mut self) { let mut num_dropped_on_clear: usize = 0; while let Some(id) = self.container.pop() { @@ -346,7 +360,6 @@ impl SchedulerController { /// Clean unprocessable transactions from the queue. These will be transactions that are /// expired, already processed, or are no longer sanitizable. /// This only clears pending transactions, and does **not** clear in-flight transactions. - #[allow(dead_code)] fn clean_queue(&mut self) { // Clean up any transactions that have already been processed, are too old, or do not have // valid nonce accounts. @@ -424,17 +437,19 @@ impl SchedulerController { let remaining_queue_capacity = self.container.remaining_queue_capacity(); const MAX_PACKET_RECEIVE_TIME: Duration = Duration::from_millis(100); - let recv_timeout = match decision { - BufferedPacketsDecision::Consume(_) => { + let (recv_timeout, should_buffer) = match decision { + BufferedPacketsDecision::Consume(_) => ( if self.container.is_empty() { MAX_PACKET_RECEIVE_TIME } else { Duration::ZERO - } + }, + true, + ), + BufferedPacketsDecision::Forward => (MAX_PACKET_RECEIVE_TIME, self.forwarder.is_some()), + BufferedPacketsDecision::ForwardAndHold | BufferedPacketsDecision::Hold => { + (MAX_PACKET_RECEIVE_TIME, true) } - BufferedPacketsDecision::Forward - | BufferedPacketsDecision::ForwardAndHold - | BufferedPacketsDecision::Hold => MAX_PACKET_RECEIVE_TIME, }; let (received_packet_results, receive_time_us) = measure_us!(self @@ -456,11 +471,21 @@ impl SchedulerController { saturating_add_assign!(count_metrics.num_received, num_received_packets); }); - let (_, buffer_time_us) = - measure_us!(self.buffer_packets(receive_packet_results.deserialized_packets)); - self.timing_metrics.update(|timing_metrics| { - saturating_add_assign!(timing_metrics.buffer_time_us, buffer_time_us); - }); + if should_buffer { + let (_, buffer_time_us) = measure_us!( + self.buffer_packets(receive_packet_results.deserialized_packets) + ); + self.timing_metrics.update(|timing_metrics| { + saturating_add_assign!(timing_metrics.buffer_time_us, buffer_time_us); + }); + } else { + self.count_metrics.update(|count_metrics| { + saturating_add_assign!( + count_metrics.num_dropped_on_receive, + num_received_packets + ); + }); + } } Err(RecvTimeoutError::Timeout) => {} Err(RecvTimeoutError::Disconnected) => return false, @@ -636,14 +661,13 @@ mod tests { banking_stage::{ consumer::TARGET_NUM_TRANSACTIONS_PER_BATCH, scheduler_messages::{ConsumeWork, FinishedConsumeWork, TransactionBatchId}, - tests::{create_slow_genesis_config, new_test_cluster_info}, + tests::create_slow_genesis_config, }, banking_trace::BankingPacketBatch, sigverify::SigverifyTracerPacketStats, }, crossbeam_channel::{unbounded, Receiver, Sender}, itertools::Itertools, - solana_client::connection_cache::ConnectionCache, solana_ledger::{ blockstore::Blockstore, genesis_utils::GenesisConfigInfo, get_tmp_ledger_path_auto_delete, leader_schedule_cache::LeaderScheduleCache, @@ -712,17 +736,6 @@ mod tests { let (consume_work_senders, consume_work_receivers) = create_channels(num_threads); let (finished_consume_work_sender, finished_consume_work_receiver) = unbounded(); - let validator_keypair = Arc::new(Keypair::new()); - let (_local_node, cluster_info) = new_test_cluster_info(Some(validator_keypair)); - let cluster_info = Arc::new(cluster_info); - let forwarder = Forwarder::new( - poh_recorder.clone(), - bank_forks.clone(), - cluster_info, - Arc::new(ConnectionCache::new("connection_cache_test")), - Arc::default(), - ); - let 
test_frame = TestFrame { bank, mint_keypair, @@ -741,7 +754,7 @@ mod tests { bank_forks, PrioGraphScheduler::new(consume_work_senders, finished_consume_work_receiver), vec![], // no actual workers with metrics to report, this can be empty - forwarder, + None, ); (test_frame, scheduler_controller) diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs b/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs index 7f05210b48e11c..bb8cbbe617396a 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_metrics.rs @@ -271,6 +271,10 @@ pub struct SchedulerTimingMetricsInner { pub schedule_filter_time_us: u64, /// Time spent scheduling transactions. pub schedule_time_us: u64, + /// Time spent clearing transactions from the container. + pub clear_time_us: u64, + /// Time spent cleaning expired or processed transactions from the container. + pub clean_time_us: u64, /// Time spent forwarding transactions. pub forward_time_us: u64, /// Time spent receiving completed transactions. @@ -312,6 +316,8 @@ impl SchedulerTimingMetricsInner { ("buffer_time_us", self.buffer_time_us, i64), ("schedule_filter_time_us", self.schedule_filter_time_us, i64), ("schedule_time_us", self.schedule_time_us, i64), + ("clear_time_us", self.clear_time_us, i64), + ("clean_time_us", self.clean_time_us, i64), ("forward_time_us", self.forward_time_us, i64), ( "receive_completed_time_us", @@ -331,6 +337,8 @@ impl SchedulerTimingMetricsInner { self.buffer_time_us = 0; self.schedule_filter_time_us = 0; self.schedule_time_us = 0; + self.clear_time_us = 0; + self.clean_time_us = 0; self.forward_time_us = 0; self.receive_completed_time_us = 0; } diff --git a/core/src/banking_stage/unprocessed_transaction_storage.rs b/core/src/banking_stage/unprocessed_transaction_storage.rs index bc2dec2a82c7c1..ce6638016433c6 100644 --- a/core/src/banking_stage/unprocessed_transaction_storage.rs +++ b/core/src/banking_stage/unprocessed_transaction_storage.rs @@ -17,7 +17,7 @@ use { }, itertools::Itertools, min_max_heap::MinMaxHeap, - solana_measure::{measure, measure_us}, + solana_measure::measure_us, solana_runtime::bank::Bank, solana_sdk::{ clock::FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET, feature_set::FeatureSet, hash::Hash, @@ -636,35 +636,24 @@ impl ThreadLocalUnprocessedPackets { if accepting_packets { let ( (sanitized_transactions, transaction_to_packet_indexes), - packet_conversion_time, - ): ( - (Vec, Vec), - _, - ) = measure!( - self.sanitize_unforwarded_packets( - &packets_to_forward, - &bank, - &mut total_dropped_packets - ), - "sanitize_packet", - ); + packet_conversion_us, + ) = measure_us!(self.sanitize_unforwarded_packets( + &packets_to_forward, + &bank, + &mut total_dropped_packets + )); saturating_add_assign!( total_packet_conversion_us, - packet_conversion_time.as_us() + packet_conversion_us ); - let (forwardable_transaction_indexes, filter_packets_time) = measure!( - Self::filter_invalid_transactions( + let (forwardable_transaction_indexes, filter_packets_us) = + measure_us!(Self::filter_invalid_transactions( &sanitized_transactions, &bank, &mut total_dropped_packets - ), - "filter_packets", - ); - saturating_add_assign!( - total_filter_packets_us, - filter_packets_time.as_us() - ); + )); + saturating_add_assign!(total_filter_packets_us, filter_packets_us); for forwardable_transaction_index in &forwardable_transaction_indexes { saturating_add_assign!(total_forwardable_packets, 1); @@ -1069,7 +1058,7 @@ mod tests 
{
mint_keypair,
..
} = create_genesis_config(10);
- let current_bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
+ let (current_bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
let simple_transactions: Vec<Transaction> = (0..256)
.map(|_id| {
diff --git a/core/src/commitment_service.rs b/core/src/commitment_service.rs
index e7e349d05c57a4..cae40c587cb572 100644
--- a/core/src/commitment_service.rs
+++ b/core/src/commitment_service.rs
@@ -8,7 +8,7 @@ use {
bank::Bank,
commitment::{BlockCommitment, BlockCommitmentCache, CommitmentSlots, VOTE_THRESHOLD_SIZE},
},
- solana_sdk::clock::Slot,
+ solana_sdk::{clock::Slot, pubkey::Pubkey},
solana_vote_program::vote_state::VoteState,
std::{
cmp::max,
@@ -26,14 +26,23 @@ pub struct CommitmentAggregationData {
bank: Arc<Bank>,
root: Slot,
total_stake: Stake,
+ // The latest local vote state of the node running this service.
+ // Used for commitment aggregation if the node's vote account is staked.
+ node_vote_state: (Pubkey, VoteState),
}
impl CommitmentAggregationData {
- pub fn new(bank: Arc<Bank>, root: Slot, total_stake: Stake) -> Self {
+ pub fn new(
+ bank: Arc<Bank>,
+ root: Slot,
+ total_stake: Stake,
+ node_vote_state: (Pubkey, VoteState),
+ ) -> Self {
Self {
bank,
root,
total_stake,
+ node_vote_state,
}
}
}
@@ -139,8 +148,11 @@ impl AggregateCommitmentService {
aggregation_data: CommitmentAggregationData,
ancestors: Vec<Slot>,
) -> CommitmentSlots {
- let (block_commitment, rooted_stake) =
- Self::aggregate_commitment(&ancestors, &aggregation_data.bank);
+ let (block_commitment, rooted_stake) = Self::aggregate_commitment(
+ &ancestors,
+ &aggregation_data.bank,
+ &aggregation_data.node_vote_state,
+ );
let highest_super_majority_root =
get_highest_super_majority_root(rooted_stake, aggregation_data.total_stake);
@@ -173,6 +185,7 @@ impl AggregateCommitmentService {
pub fn aggregate_commitment(
ancestors: &[Slot],
bank: &Bank,
+ (node_vote_pubkey, node_vote_state): &(Pubkey, VoteState),
) -> (HashMap<Slot, BlockCommitment>, Vec<(Slot, u64)>) {
assert!(!ancestors.is_empty());
@@ -183,11 +196,17 @@
let mut commitment = HashMap::new();
let mut rooted_stake: Vec<(Slot, u64)> = Vec::new();
- for (lamports, account) in bank.vote_accounts().values() {
+ for (pubkey, (lamports, account)) in bank.vote_accounts().iter() {
if *lamports == 0 {
continue;
}
- if let Ok(vote_state) = account.vote_state().as_ref() {
+ let vote_state = if pubkey == node_vote_pubkey {
+ // Override old vote_state in bank with latest one for my own vote pubkey
+ Ok(node_vote_state)
+ } else {
+ account.vote_state()
+ };
+ if let Ok(vote_state) = vote_state {
Self::aggregate_commitment_for_vote_account(
&mut commitment,
&mut rooted_stake,
@@ -382,8 +401,7 @@ mod tests {
assert_eq!(rooted_stake[0], (root, lamports));
}
- #[test]
- fn test_aggregate_commitment_validity() {
+ fn do_test_aggregate_commitment_validity(with_node_vote_state: bool) {
let ancestors = vec![3, 4, 5, 7, 9, 10, 11];
let GenesisConfigInfo {
mut genesis_config,
..
@@ -447,9 +465,11 @@ mod tests { let mut vote_state1 = vote_state::from(&vote_account1).unwrap(); process_slot_vote_unchecked(&mut vote_state1, 3); process_slot_vote_unchecked(&mut vote_state1, 5); - let versioned = VoteStateVersions::new_current(vote_state1); - vote_state::to(&versioned, &mut vote_account1).unwrap(); - bank.store_account(&pk1, &vote_account1); + if !with_node_vote_state { + let versioned = VoteStateVersions::new_current(vote_state1.clone()); + vote_state::to(&versioned, &mut vote_account1).unwrap(); + bank.store_account(&pk1, &vote_account1); + } let mut vote_state2 = vote_state::from(&vote_account2).unwrap(); process_slot_vote_unchecked(&mut vote_state2, 9); @@ -470,8 +490,18 @@ mod tests { vote_state::to(&versioned, &mut vote_account4).unwrap(); bank.store_account(&pk4, &vote_account4); - let (commitment, rooted_stake) = - AggregateCommitmentService::aggregate_commitment(&ancestors, &bank); + let node_vote_pubkey = if with_node_vote_state { + pk1 + } else { + // Use some random pubkey as dummy to suppress the override. + solana_sdk::pubkey::new_rand() + }; + + let (commitment, rooted_stake) = AggregateCommitmentService::aggregate_commitment( + &ancestors, + &bank, + &(node_vote_pubkey, vote_state1), + ); for a in ancestors { if a <= 3 { @@ -499,17 +529,21 @@ mod tests { assert_eq!(get_highest_super_majority_root(rooted_stake, 100), 1) } + #[test] + fn test_aggregate_commitment_validity_with_node_vote_state() { + do_test_aggregate_commitment_validity(true) + } + + #[test] + fn test_aggregate_commitment_validity_without_node_vote_state() { + do_test_aggregate_commitment_validity(false); + } + #[test] fn test_highest_super_majority_root_advance() { - fn get_vote_account_root_slot(vote_pubkey: Pubkey, bank: &Bank) -> Slot { + fn get_vote_state(vote_pubkey: Pubkey, bank: &Bank) -> VoteState { let vote_account = bank.get_vote_account(&vote_pubkey).unwrap(); - let slot = vote_account - .vote_state() - .as_ref() - .unwrap() - .root_slot - .unwrap(); - slot + vote_account.vote_state().cloned().unwrap() } let block_commitment_cache = RwLock::new(BlockCommitmentCache::new_for_tests()); @@ -547,10 +581,10 @@ mod tests { } let working_bank = bank_forks.read().unwrap().working_bank(); - let root = get_vote_account_root_slot( - validator_vote_keypairs.vote_keypair.pubkey(), - &working_bank, - ); + let vote_pubkey = validator_vote_keypairs.vote_keypair.pubkey(); + let root = get_vote_state(vote_pubkey, &working_bank) + .root_slot + .unwrap(); for x in 0..root { bank_forks .write() @@ -579,10 +613,8 @@ mod tests { bank34.process_transaction(&vote33).unwrap(); let working_bank = bank_forks.read().unwrap().working_bank(); - let root = get_vote_account_root_slot( - validator_vote_keypairs.vote_keypair.pubkey(), - &working_bank, - ); + let vote_state = get_vote_state(vote_pubkey, &working_bank); + let root = vote_state.root_slot.unwrap(); let ancestors = working_bank.status_cache_ancestors(); let _ = AggregateCommitmentService::update_commitment_cache( &block_commitment_cache, @@ -590,6 +622,7 @@ mod tests { bank: working_bank, root: 0, total_stake: 100, + node_vote_state: (vote_pubkey, vote_state.clone()), }, ancestors, ); @@ -628,6 +661,7 @@ mod tests { bank: working_bank, root: 1, total_stake: 100, + node_vote_state: (vote_pubkey, vote_state), }, ancestors, ); @@ -662,10 +696,9 @@ mod tests { } let working_bank = bank_forks.read().unwrap().working_bank(); - let root = get_vote_account_root_slot( - validator_vote_keypairs.vote_keypair.pubkey(), - &working_bank, - ); + let vote_state = + 
get_vote_state(validator_vote_keypairs.vote_keypair.pubkey(), &working_bank); + let root = vote_state.root_slot.unwrap(); let ancestors = working_bank.status_cache_ancestors(); let _ = AggregateCommitmentService::update_commitment_cache( &block_commitment_cache, @@ -673,6 +706,7 @@ mod tests { bank: working_bank, root: 0, total_stake: 100, + node_vote_state: (vote_pubkey, vote_state), }, ancestors, ); diff --git a/core/src/consensus.rs b/core/src/consensus.rs index cd1201429fec11..93c80c554c6ab1 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -237,7 +237,7 @@ pub(crate) enum BlockhashStatus { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "679XkZ4upGc389SwqAsjs5tr2qB4wisqjbwtei7fGhxC") + frozen_abi(digest = "H6T5A66kgJYANFXVrUprxV76WD5ce7Gf62q9SiBC2uYk") )] #[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] pub struct Tower { @@ -508,7 +508,8 @@ impl Tower { } } - pub(crate) fn is_slot_confirmed( + #[cfg(test)] + fn is_slot_confirmed( &self, slot: Slot, voted_stakes: &VotedStakes, diff --git a/core/src/consensus/progress_map.rs b/core/src/consensus/progress_map.rs index 0925182e721a5e..a06f51b2001534 100644 --- a/core/src/consensus/progress_map.rs +++ b/core/src/consensus/progress_map.rs @@ -186,7 +186,7 @@ pub struct ForkStats { pub vote_threshold: Vec, pub is_locked_out: bool, pub voted_stakes: VotedStakes, - pub is_supermajority_confirmed: bool, + pub duplicate_confirmed_hash: Option, pub computed: bool, pub lockout_intervals: LockoutIntervals, pub bank_hash: Option, @@ -368,15 +368,15 @@ impl ProgressMap { .and_then(|s| s.fork_stats.my_latest_landed_vote) } - pub fn set_supermajority_confirmed_slot(&mut self, slot: Slot) { + pub fn set_duplicate_confirmed_hash(&mut self, slot: Slot, hash: Hash) { let slot_progress = self.get_mut(&slot).unwrap(); - slot_progress.fork_stats.is_supermajority_confirmed = true; + slot_progress.fork_stats.duplicate_confirmed_hash = Some(hash); } - pub fn is_supermajority_confirmed(&self, slot: Slot) -> Option { + pub fn is_duplicate_confirmed(&self, slot: Slot) -> Option { self.progress_map .get(&slot) - .map(|s| s.fork_stats.is_supermajority_confirmed) + .map(|s| s.fork_stats.duplicate_confirmed_hash.is_some()) } pub fn get_bank_prev_leader_slot(&self, bank: &Bank) -> Option { diff --git a/core/src/consensus/tower1_14_11.rs b/core/src/consensus/tower1_14_11.rs index ee96540e1cc4be..8068d000deff22 100644 --- a/core/src/consensus/tower1_14_11.rs +++ b/core/src/consensus/tower1_14_11.rs @@ -9,7 +9,7 @@ use { #[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "4LayQwoKrE2jPhbNtg3TSpKrtEtjcPiwsVPJN7aCavri") + frozen_abi(digest = "EqYa8kwY9Z1Zbjxgs2aBbqKyCK4f7WAG8gJ7pVSQyKzk") )] #[derive(Clone, Serialize, Deserialize, Debug, PartialEq)] pub struct Tower1_14_11 { diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 920aaca9046b8e..fb0e7890cd1b38 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -50,7 +50,6 @@ use { }, solana_measure::measure::Measure, solana_poh::poh_recorder::{PohLeaderStatus, PohRecorder, GRACE_TICKS_FACTOR, MAX_GRACE_SLOTS}, - solana_program_runtime::timings::ExecuteTimings, solana_rpc::{ optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSenderConfig}, rpc_subscriptions::RpcSubscriptions, @@ -76,7 +75,8 @@ use { timing::timestamp, transaction::Transaction, }, - solana_vote_program::vote_state::VoteTransaction, + solana_timings::ExecuteTimings, + solana_vote_program::vote_state::{VoteState, 
VoteTransaction}, std::{ collections::{HashMap, HashSet}, num::NonZeroUsize, @@ -126,12 +126,6 @@ enum ForkReplayMode { Parallel(ThreadPool), } -#[derive(PartialEq, Eq, Debug)] -enum ConfirmationType { - SupermajorityVoted, - DuplicateConfirmed, -} - enum GenerateVoteTxResult { // non voting validator, not eligible for refresh NonVoting, @@ -146,31 +140,6 @@ impl GenerateVoteTxResult { } } -#[derive(PartialEq, Eq, Debug)] -struct ConfirmedSlot { - slot: Slot, - frozen_hash: Hash, - confirmation_type: ConfirmationType, -} - -impl ConfirmedSlot { - fn new_supermajority_voted(slot: Slot, frozen_hash: Hash) -> Self { - Self { - slot, - frozen_hash, - confirmation_type: ConfirmationType::SupermajorityVoted, - } - } - - fn new_duplicate_confirmed_slot(slot: Slot, frozen_hash: Hash) -> Self { - Self { - slot, - frozen_hash, - confirmation_type: ConfirmationType::DuplicateConfirmed, - } - } -} - // Implement a destructor for the ReplayStage thread to signal it exited // even on panics struct Finalizer { @@ -864,7 +833,7 @@ impl ReplayStage { let mut compute_slot_stats_time = Measure::start("compute_slot_stats_time"); for slot in newly_computed_slot_stats { let fork_stats = progress.get_fork_stats(slot).unwrap(); - let confirmed_slots = Self::confirm_forks( + let duplicate_confirmed_forks = Self::tower_duplicate_confirmed_forks( &tower, &fork_stats.voted_stakes, fork_stats.total_stake, @@ -872,8 +841,8 @@ impl ReplayStage { &bank_forks, ); - Self::mark_slots_confirmed( - &confirmed_slots, + Self::mark_slots_duplicate_confirmed( + &duplicate_confirmed_forks, &blockstore, &bank_forks, &mut progress, @@ -1390,15 +1359,21 @@ impl ReplayStage { ) -> (ProgressMap, HeaviestSubtreeForkChoice) { let (root_bank, frozen_banks, duplicate_slot_hashes) = { let bank_forks = bank_forks.read().unwrap(); + let root_bank = bank_forks.root_bank(); let duplicate_slots = blockstore - .duplicate_slots_iterator(bank_forks.root_bank().slot()) + // It is important that the root bank is not marked as duplicate on initialization. + // Although this bank could contain a duplicate proof, the fact that it was rooted + // either during a previous run or artificially means that we should ignore any + // duplicate proofs for the root slot, thus we start consuming duplicate proofs + // from the root slot + 1 + .duplicate_slots_iterator(root_bank.slot().saturating_add(1)) .unwrap(); let duplicate_slot_hashes = duplicate_slots.filter_map(|slot| { let bank = bank_forks.get(slot)?; Some((slot, bank.hash())) }); ( - bank_forks.root_bank(), + root_bank, bank_forks.frozen_banks().values().cloned().collect(), duplicate_slot_hashes.collect::>(), ) @@ -2406,10 +2381,28 @@ impl ReplayStage { } let mut update_commitment_cache_time = Measure::start("update_commitment_cache"); + // Send (voted) bank along with the updated vote account state for this node, the vote + // state is always newer than the one in the bank by definition, because banks can't + // contain vote transactions which are voting on its own slot. + // + // It should be acceptable to aggressively use the vote for our own _local view_ of + // commitment aggregation, although it's not guaranteed that the new vote transaction is + // observed by other nodes at this point. + // + // The justification stems from the assumption of the sensible voting behavior from the + // consensus subsystem. That's because it means there would be a slashing possibility + // otherwise. 
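+        // (Editor's note: concretely, the bank for slot N can only contain votes
+        // for slots <= N - 1, so `tower.vote_state` is strictly newer with
+        // respect to this node's own vote on slot N.)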
+ // + // This behavior isn't significant normally for mainnet-beta, because staked nodes aren't + // servicing RPC requests. However, this eliminates artificial 1-slot delay of the + // `finalized` confirmation if a node is materially staked and servicing RPC requests at + // the same time for development purposes. + let node_vote_state = (*vote_account_pubkey, tower.vote_state.clone()); Self::update_commitment_cache( bank.clone(), bank_forks.read().unwrap().root(), progress.get_fork_stats(bank.slot()).unwrap().total_stake, + node_vote_state, lockouts_sender, ); update_commitment_cache_time.stop(); @@ -2699,11 +2692,15 @@ impl ReplayStage { bank: Arc, root: Slot, total_stake: Stake, + node_vote_state: (Pubkey, VoteState), lockouts_sender: &Sender, ) { - if let Err(e) = - lockouts_sender.send(CommitmentAggregationData::new(bank, root, total_stake)) - { + if let Err(e) = lockouts_sender.send(CommitmentAggregationData::new( + bank, + root, + total_stake, + node_vote_state, + )) { trace!("lockouts_sender failed: {:?}", e); } } @@ -3048,37 +3045,22 @@ impl ReplayStage { } } - if bank.collector_id() != my_pubkey { - // If the block does not have at least DATA_SHREDS_PER_FEC_BLOCK shreds in the last FEC set, - // mark it dead. No reason to perform this check on our leader block. - if !blockstore - .is_last_fec_set_full(bank.slot()) - .inspect_err(|e| { - warn!( - "Unable to determine if last fec set is full for slot {} {}, - marking as dead: {e:?}", - bank.slot(), - bank.hash() - ) - }) - .unwrap_or(false) - { - // Update metric regardless of feature flag - datapoint_warn!( - "incomplete_final_fec_set", - ("slot", bank_slot, i64), - ("hash", bank.hash().to_string(), String) - ); - if bank - .feature_set - .is_active(&solana_sdk::feature_set::vote_only_full_fec_sets::id()) - { + let _block_id = if bank.collector_id() != my_pubkey { + // If the block does not have at least DATA_SHREDS_PER_FEC_BLOCK correctly retransmitted + // shreds in the last FEC set, mark it dead. No reason to perform this check on our leader block. 
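+            // Editor's sketch of the assumed contract: Ok(Some(block_id)) when
+            // the last FEC set is complete and a block id can be derived,
+            // Ok(None) when the check does not apply (e.g. feature-gated off),
+            // and Err(..) when the slot must be marked dead below.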
+ match blockstore.check_last_fec_set_and_get_block_id( + bank.slot(), + bank.hash(), + &bank.feature_set, + ) { + Ok(block_id) => block_id, + Err(result_err) => { let root = bank_forks.read().unwrap().root(); Self::mark_dead_slot( blockstore, bank, root, - &BlockstoreProcessorError::IncompleteFinalFecSet, + &result_err, rpc_subscriptions, duplicate_slots_tracker, duplicate_confirmed_slots, @@ -3092,7 +3074,9 @@ impl ReplayStage { continue; } } - } + } else { + None + }; let r_replay_stats = replay_stats.read().unwrap(); let replay_progress = bank_progress.replay_progress.clone(); @@ -3208,7 +3192,7 @@ impl ReplayStage { &parent_blockhash.to_string(), bank.slot(), &bank.last_blockhash().to_string(), - &bank.rewards, + &bank.get_rewards_and_num_partitions(), Some(bank.clock().unix_timestamp), Some(bank.block_height()), bank.executed_transaction_count(), @@ -4102,8 +4086,8 @@ impl ReplayStage { } #[allow(clippy::too_many_arguments)] - fn mark_slots_confirmed( - confirmed_slots: &[ConfirmedSlot], + fn mark_slots_duplicate_confirmed( + confirmed_slots: &[(Slot, Hash)], blockstore: &Blockstore, bank_forks: &RwLock, progress: &mut ProgressMap, @@ -4116,36 +4100,14 @@ impl ReplayStage { duplicate_confirmed_slots: &mut DuplicateConfirmedSlots, ) { let root_slot = bank_forks.read().unwrap().root(); - for ConfirmedSlot { - slot, - frozen_hash, - confirmation_type, - } in confirmed_slots.iter() - { - if *confirmation_type == ConfirmationType::SupermajorityVoted { - // This case should be guaranteed as false by confirm_forks() - if let Some(false) = progress.is_supermajority_confirmed(*slot) { - // Because supermajority confirmation will iterate through and update the - // subtree in fork choice, only incur this cost if the slot wasn't already - // confirmed - progress.set_supermajority_confirmed_slot(*slot); - // If the slot was confirmed, then it must be frozen. Otherwise, we couldn't - // have replayed any of its descendants and figured out it was confirmed. 
- assert!(*frozen_hash != Hash::default()); - } - } + for (slot, frozen_hash) in confirmed_slots.iter() { + assert!(*frozen_hash != Hash::default()); if *slot <= root_slot { continue; } - match confirmation_type { - ConfirmationType::SupermajorityVoted => (), - ConfirmationType::DuplicateConfirmed => (), - #[allow(unreachable_patterns)] - _ => panic!("programmer error"), - } - + progress.set_duplicate_confirmed_hash(*slot, *frozen_hash); if let Some(prev_hash) = duplicate_confirmed_slots.insert(*slot, *frozen_hash) { assert_eq!(prev_hash, *frozen_hash); // Already processed this signal @@ -4172,60 +4134,53 @@ impl ReplayStage { } } - fn confirm_forks( + fn tower_duplicate_confirmed_forks( tower: &Tower, voted_stakes: &VotedStakes, total_stake: Stake, progress: &ProgressMap, bank_forks: &RwLock, - ) -> Vec { - let mut confirmed_forks = vec![]; + ) -> Vec<(Slot, Hash)> { + let mut duplicate_confirmed_forks = vec![]; for (slot, prog) in progress.iter() { - if !prog.fork_stats.is_supermajority_confirmed { - let bank = bank_forks - .read() - .unwrap() - .get(*slot) - .expect("bank in progress must exist in BankForks") - .clone(); - let duration = prog - .replay_stats - .read() - .unwrap() - .started - .elapsed() - .as_millis(); - if bank.is_frozen() && tower.is_slot_confirmed(*slot, voted_stakes, total_stake) { - info!("validator fork confirmed {} {}ms", *slot, duration); - datapoint_info!("validator-confirmation", ("duration_ms", duration, i64)); - confirmed_forks - .push(ConfirmedSlot::new_supermajority_voted(*slot, bank.hash())); - } else if bank.is_frozen() - && tower.is_slot_duplicate_confirmed(*slot, voted_stakes, total_stake) - { - info!( - "validator fork duplicate confirmed {} {}ms", - *slot, duration - ); - datapoint_info!( - "validator-duplicate-confirmation", - ("duration_ms", duration, i64) - ); - confirmed_forks.push(ConfirmedSlot::new_duplicate_confirmed_slot( - *slot, - bank.hash(), - )); - } else { - debug!( - "validator fork not confirmed {} {}ms {:?}", - *slot, - duration, - voted_stakes.get(slot) - ); - } + if prog.fork_stats.duplicate_confirmed_hash.is_some() { + continue; + } + let bank = bank_forks + .read() + .unwrap() + .get(*slot) + .expect("bank in progress must exist in BankForks"); + let duration = prog + .replay_stats + .read() + .unwrap() + .started + .elapsed() + .as_millis(); + if !bank.is_frozen() { + continue; + } + if tower.is_slot_duplicate_confirmed(*slot, voted_stakes, total_stake) { + info!( + "validator fork duplicate confirmed {} {}ms", + *slot, duration + ); + datapoint_info!( + "validator-duplicate-confirmation", + ("duration_ms", duration, i64) + ); + duplicate_confirmed_forks.push((*slot, bank.hash())); + } else { + debug!( + "validator fork not confirmed {} {}ms {:?}", + *slot, + duration, + voted_stakes.get(slot) + ); } } - confirmed_forks + duplicate_confirmed_forks } #[allow(clippy::too_many_arguments)] @@ -4499,6 +4454,9 @@ pub(crate) mod tests { replay_stage::ReplayStage, vote_simulator::{self, VoteSimulator}, }, + blockstore_processor::{ + confirm_full_slot, fill_blockstore_slot_with_ticks, process_bank_0, ProcessOptions, + }, crossbeam_channel::unbounded, itertools::Itertools, solana_entry::entry::{self, Entry}, @@ -5281,13 +5239,14 @@ pub(crate) mod tests { #[test] fn test_replay_commitment_cache() { - fn leader_vote(vote_slot: Slot, bank: &Bank, pubkey: &Pubkey) { + fn leader_vote(vote_slot: Slot, bank: &Bank, pubkey: &Pubkey) -> (Pubkey, VoteState) { let mut leader_vote_account = bank.get_account(pubkey).unwrap(); let mut vote_state = 
vote_state::from(&leader_vote_account).unwrap(); vote_state::process_slot_vote_unchecked(&mut vote_state, vote_slot); - let versioned = VoteStateVersions::new_current(vote_state); + let versioned = VoteStateVersions::new_current(vote_state.clone()); vote_state::to(&versioned, &mut leader_vote_account).unwrap(); bank.store_account(pubkey, &leader_vote_account); + (*pubkey, vote_state) } let leader_pubkey = solana_sdk::pubkey::new_rand(); @@ -5353,11 +5312,12 @@ pub(crate) mod tests { } let arc_bank = bank_forks.read().unwrap().get(i).unwrap(); - leader_vote(i - 1, &arc_bank, &leader_voting_pubkey); + let node_vote_state = leader_vote(i - 1, &arc_bank, &leader_voting_pubkey); ReplayStage::update_commitment_cache( arc_bank.clone(), 0, leader_lamports, + node_vote_state, &lockouts_sender, ); arc_bank.freeze(); @@ -5532,7 +5492,7 @@ pub(crate) mod tests { // bank 1, so no slot should be confirmed. { let fork_progress = progress.get(&0).unwrap(); - let confirmed_forks = ReplayStage::confirm_forks( + let confirmed_forks = ReplayStage::tower_duplicate_confirmed_forks( &tower, &fork_progress.fork_stats.voted_stakes, fork_progress.fork_stats.total_stake, @@ -5582,7 +5542,7 @@ pub(crate) mod tests { assert_eq!(newly_computed, vec![1]); { let fork_progress = progress.get(&1).unwrap(); - let confirmed_forks = ReplayStage::confirm_forks( + let confirmed_forks = ReplayStage::tower_duplicate_confirmed_forks( &tower, &fork_progress.fork_stats.voted_stakes, fork_progress.fork_stats.total_stake, @@ -5590,10 +5550,7 @@ pub(crate) mod tests { &bank_forks, ); // No new stats should have been computed - assert_eq!( - confirmed_forks, - vec![ConfirmedSlot::new_supermajority_voted(0, bank0.hash())] - ); + assert_eq!(confirmed_forks, vec![(0, bank0.hash())]); } let ancestors = bank_forks.read().unwrap().ancestors(); @@ -9008,4 +8965,136 @@ pub(crate) mod tests { assert_eq!(tower.vote_state, expected_tower.vote_state); assert_eq!(tower.node_pubkey, expected_tower.node_pubkey); } + + #[test] + fn test_initialize_progress_and_fork_choice_with_duplicates() { + solana_logger::setup(); + let GenesisConfigInfo { + mut genesis_config, .. 
+ } = create_genesis_config(123);
+
+ let ticks_per_slot = 1;
+ genesis_config.ticks_per_slot = ticks_per_slot;
+ let (ledger_path, blockhash) =
+ solana_ledger::create_new_tmp_ledger_auto_delete!(&genesis_config);
+ let blockstore = Blockstore::open(ledger_path.path()).unwrap();
+
+ /*
+ Bank forks with:
+ slot 0
+ |
+ slot 1 -> Duplicate before restart, the restart slot
+ |
+ slot 2
+ |
+ slot 3 -> Duplicate before restart, artificially rooted
+ |
+ slot 4 -> Duplicate before restart, artificially rooted
+ |
+ slot 5 -> Duplicate before restart
+ |
+ slot 6
+ */
+
+ let mut last_hash = blockhash;
+ for i in 0..6 {
+ last_hash =
+ fill_blockstore_slot_with_ticks(&blockstore, ticks_per_slot, i + 1, i, last_hash);
+ }
+ // Artificially root 3 and 4
+ blockstore.set_roots([3, 4].iter()).unwrap();
+
+ // Set up bank0
+ let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config));
+ let bank0 = bank_forks.read().unwrap().get_with_scheduler(0).unwrap();
+ let recyclers = VerifyRecyclers::default();
+ let replay_tx_thread_pool = rayon::ThreadPoolBuilder::new()
+ .num_threads(1)
+ .thread_name(|i| format!("solReplayTx{i:02}"))
+ .build()
+ .expect("new rayon threadpool");
+
+ process_bank_0(
+ &bank0,
+ &blockstore,
+ &replay_tx_thread_pool,
+ &ProcessOptions::default(),
+ &recyclers,
+ None,
+ None,
+ );
+
+ // Mark block 1, 3, 4, 5 as duplicate
+ blockstore.store_duplicate_slot(1, vec![], vec![]).unwrap();
+ blockstore.store_duplicate_slot(3, vec![], vec![]).unwrap();
+ blockstore.store_duplicate_slot(4, vec![], vec![]).unwrap();
+ blockstore.store_duplicate_slot(5, vec![], vec![]).unwrap();
+
+ let bank1 = bank_forks.write().unwrap().insert(Bank::new_from_parent(
+ bank0.clone_without_scheduler(),
+ &Pubkey::default(),
+ 1,
+ ));
+ confirm_full_slot(
+ &blockstore,
+ &bank1,
+ &replay_tx_thread_pool,
+ &ProcessOptions::default(),
+ &recyclers,
+ &mut ConfirmationProgress::new(bank0.last_blockhash()),
+ None,
+ None,
+ None,
+ &mut ExecuteTimings::default(),
+ )
+ .unwrap();
+
+ bank_forks
+ .write()
+ .unwrap()
+ .set_root(
+ 1,
+ &solana_runtime::accounts_background_service::AbsRequestSender::default(),
+ None,
+ )
+ .unwrap();
+
+ let leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank1);
+
+ // process_blockstore_from_root() from slot 1 onwards
+ blockstore_processor::process_blockstore_from_root(
+ &blockstore,
+ &bank_forks,
+ &leader_schedule_cache,
+ &ProcessOptions::default(),
+ None,
+ None,
+ None,
+ &AbsRequestSender::default(),
+ )
+ .unwrap();
+
+ assert_eq!(bank_forks.read().unwrap().root(), 4);
+
+ // Verify that fork choice can be initialized and that the root is not marked duplicate
+ let (_progress, fork_choice) =
+ ReplayStage::initialize_progress_and_fork_choice_with_locked_bank_forks(
+ &bank_forks,
+ &Pubkey::new_unique(),
+ &Pubkey::new_unique(),
+ &blockstore,
+ );
+
+ let bank_forks = bank_forks.read().unwrap();
+ // 4 (the artificial root) is the tree root and no longer duplicate
+ assert_eq!(fork_choice.tree_root().0, 4);
+ assert!(fork_choice
+ .is_candidate(&(4, bank_forks.bank_hash(4).unwrap()))
+ .unwrap());
+
+ // 5 is still considered duplicate, so it is not a valid fork choice candidate
+ assert!(!fork_choice
+ .is_candidate(&(5, bank_forks.bank_hash(5).unwrap()))
+ .unwrap());
+ }
}
diff --git a/core/src/snapshot_packager_service.rs b/core/src/snapshot_packager_service.rs
index 8f8fa6e66a39f8..ebfa0a9bbe869a 100644
--- a/core/src/snapshot_packager_service.rs
+++ b/core/src/snapshot_packager_service.rs
@@ -73,7 +73,7 @@ impl
SnapshotPackagerService { // Archiving the snapshot package is not allowed to fail. // AccountsBackgroundService calls `clean_accounts()` with a value for - // last_full_snapshot_slot that requires this archive call to succeed. + // latest_full_snapshot_slot that requires this archive call to succeed. let (archive_result, archive_time_us) = measure_us!(snapshot_utils::serialize_and_archive_snapshot_package( snapshot_package, &snapshot_config, diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 05c3932c4909dd..c76d2dd4d0094e 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -116,6 +116,7 @@ impl Tpu { tpu_max_connections_per_ipaddr_per_minute: u64, prioritization_fee_cache: &Arc, block_production_method: BlockProductionMethod, + enable_block_production_forwarding: bool, _generator_config: Option, /* vestigial code for replay invalidator */ ) -> (Self, Vec>) { let TpuSockets { @@ -246,6 +247,7 @@ impl Tpu { connection_cache.clone(), bank_forks.clone(), prioritization_fee_cache, + enable_block_production_forwarding, ); let (entry_receiver, tpu_entry_notifier) = diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 4dcd7bbfa3e589..083ff02bbb4abc 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -66,7 +66,7 @@ pub struct Tvu { retransmit_stage: RetransmitStage, window_service: WindowService, cluster_slots_service: ClusterSlotsService, - replay_stage: ReplayStage, + replay_stage: Option, blockstore_cleanup_service: Option, cost_update_service: CostUpdateService, voting_service: VotingService, @@ -160,6 +160,8 @@ impl Tvu { cluster_slots: Arc, wen_restart_repair_slots: Option>>>, ) -> Result { + let in_wen_restart = wen_restart_repair_slots.is_some(); + let TvuSockets { repair: repair_socket, fetch: fetch_sockets, @@ -312,33 +314,37 @@ impl Tvu { let drop_bank_service = DropBankService::new(drop_bank_receiver); - let replay_stage = ReplayStage::new( - replay_stage_config, - blockstore.clone(), - bank_forks.clone(), - cluster_info.clone(), - ledger_signal_receiver, - duplicate_slots_receiver, - poh_recorder.clone(), - tower, - vote_tracker, - cluster_slots, - retransmit_slots_sender, - ancestor_duplicate_slots_receiver, - replay_vote_sender, - duplicate_confirmed_slots_receiver, - gossip_verified_vote_hash_receiver, - cluster_slots_update_sender, - cost_update_sender, - voting_sender, - drop_bank_sender, - block_metadata_notifier, - log_messages_bytes_limit, - prioritization_fee_cache.clone(), - dumped_slots_sender, - banking_tracer, - popular_pruned_forks_receiver, - )?; + let replay_stage = if in_wen_restart { + None + } else { + Some(ReplayStage::new( + replay_stage_config, + blockstore.clone(), + bank_forks.clone(), + cluster_info.clone(), + ledger_signal_receiver, + duplicate_slots_receiver, + poh_recorder.clone(), + tower, + vote_tracker, + cluster_slots, + retransmit_slots_sender, + ancestor_duplicate_slots_receiver, + replay_vote_sender, + duplicate_confirmed_slots_receiver, + gossip_verified_vote_hash_receiver, + cluster_slots_update_sender, + cost_update_sender, + voting_sender, + drop_bank_sender, + block_metadata_notifier, + log_messages_bytes_limit, + prioritization_fee_cache.clone(), + dumped_slots_sender, + banking_tracer, + popular_pruned_forks_receiver, + )?) 
+ }; let blockstore_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| { BlockstoreCleanupService::new(blockstore.clone(), max_ledger_shreds, exit.clone()) @@ -381,7 +387,9 @@ impl Tvu { if self.blockstore_cleanup_service.is_some() { self.blockstore_cleanup_service.unwrap().join()?; } - self.replay_stage.join()?; + if self.replay_stage.is_some() { + self.replay_stage.unwrap().join()?; + } self.cost_update_service.join()?; self.voting_service.join()?; if let Some(warmup_service) = self.warm_quic_cache_service { @@ -414,10 +422,7 @@ pub mod tests { std::sync::atomic::{AtomicU64, Ordering}, }; - #[ignore] - #[test] - #[serial] - fn test_tvu_exit() { + fn test_tvu_exit(enable_wen_restart: bool) { solana_logger::setup(); let leader = Node::new_localhost(); let target1_keypair = Keypair::new(); @@ -428,15 +433,17 @@ pub mod tests { let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config)); - let keypair = Arc::new(Keypair::new()); let (turbine_quic_endpoint_sender, _turbine_quic_endpoint_receiver) = tokio::sync::mpsc::channel(/*capacity:*/ 128); let (_turbine_quic_endpoint_sender, turbine_quic_endpoint_receiver) = unbounded(); let (repair_quic_endpoint_sender, _repair_quic_endpoint_receiver) = tokio::sync::mpsc::channel(/*buffer:*/ 128); //start cluster_info1 - let cluster_info1 = - ClusterInfo::new(target1.info.clone(), keypair, SocketAddrSpace::Unspecified); + let cluster_info1 = ClusterInfo::new( + target1.info.clone(), + target1_keypair.into(), + SocketAddrSpace::Unspecified, + ); cluster_info1.insert_info(leader.info); let cref1 = Arc::new(cluster_info1); @@ -464,6 +471,11 @@ pub mod tests { let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); let outstanding_repair_requests = Arc::>::default(); let cluster_slots = Arc::new(ClusterSlots::default()); + let wen_restart_repair_slots = if enable_wen_restart { + Some(Arc::new(RwLock::new(vec![]))) + } else { + None + }; let tvu = Tvu::new( &vote_keypair.pubkey(), Arc::new(RwLock::new(vec![Arc::new(vote_keypair)])), @@ -520,11 +532,28 @@ pub mod tests { repair_quic_endpoint_sender, outstanding_repair_requests, cluster_slots, - None, + wen_restart_repair_slots, ) .expect("assume success"); + if enable_wen_restart { + assert!(tvu.replay_stage.is_none()) + } else { + assert!(tvu.replay_stage.is_some()) + } exit.store(true, Ordering::Relaxed); tvu.join().unwrap(); poh_service.join().unwrap(); } + + #[test] + #[serial] + fn test_tvu_exit_no_wen_restart() { + test_tvu_exit(false); + } + + #[test] + #[serial] + fn test_tvu_exit_with_wen_restart() { + test_tvu_exit(true); + } } diff --git a/core/src/validator.rs b/core/src/validator.rs index 889a3a6c467d63..97acfb70533f8f 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -5,7 +5,7 @@ use { crate::{ accounts_hash_verifier::AccountsHashVerifier, admin_rpc_post_init::AdminRpcRequestMetadataPostInit, - banking_trace::{self, BankingTracer}, + banking_trace::{self, BankingTracer, TraceError}, cache_block_meta_service::{CacheBlockMetaSender, CacheBlockMetaService}, cluster_info_vote_listener::VoteTracker, completed_data_sets_service::CompletedDataSetsService, @@ -27,6 +27,7 @@ use { tpu::{Tpu, TpuSockets, DEFAULT_TPU_COALESCE}, tvu::{Tvu, TvuConfig, TvuSockets}, }, + anyhow::{anyhow, Context, Result}, crossbeam_channel::{bounded, unbounded, Receiver}, lazy_static::lazy_static, quinn::Endpoint, @@ -136,6 +137,7 @@ use { }, strum::VariantNames, strum_macros::{Display, EnumString, EnumVariantNames, IntoStaticStr}, + 
thiserror::Error, tokio::runtime::Runtime as TokioRuntime, }; @@ -266,6 +268,7 @@ pub struct ValidatorConfig { pub banking_trace_dir_byte_limit: banking_trace::DirByteLimit, pub block_verification_method: BlockVerificationMethod, pub block_production_method: BlockProductionMethod, + pub enable_block_production_forwarding: bool, pub generator_config: Option, pub use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup, pub wen_restart_proto_path: Option, @@ -337,6 +340,7 @@ impl Default for ValidatorConfig { banking_trace_dir_byte_limit: 0, block_verification_method: BlockVerificationMethod::default(), block_production_method: BlockProductionMethod::default(), + enable_block_production_forwarding: false, generator_config: None, use_snapshot_archives_at_startup: UseSnapshotArchivesAtStartup::default(), wen_restart_proto_path: None, @@ -355,6 +359,7 @@ impl ValidatorConfig { enforce_ulimit_nofile: false, rpc_config: JsonRpcConfig::default_for_test(), block_production_method: BlockProductionMethod::default(), + enable_block_production_forwarding: true, // enable forwarding by default for tests replay_forks_threads: NonZeroUsize::new(1).expect("1 is non-zero"), replay_transactions_threads: NonZeroUsize::new(get_max_thread_count()) .expect("thread count is non-zero"), @@ -513,7 +518,7 @@ impl Validator { tpu_enable_udp: bool, tpu_max_connections_per_ipaddr_per_minute: u64, admin_rpc_service_post_init: Arc>>, - ) -> Result { + ) -> Result { let start_time = Instant::now(); let id = identity_keypair.pubkey(); @@ -523,8 +528,9 @@ impl Validator { info!("vote account pubkey: {vote_account}"); if !config.no_os_network_stats_reporting { - verify_net_stats_access() - .map_err(|err| format!("Failed to access network stats: {err:?}"))?; + verify_net_stats_access().map_err(|e| { + ValidatorError::Other(format!("Failed to access network stats: {e:?}")) + })?; } let mut bank_notification_senders = Vec::new(); @@ -543,7 +549,9 @@ impl Validator { geyser_plugin_config_files, rpc_to_plugin_manager_receiver_and_exit, ) - .map_err(|err| format!("Failed to load the Geyser plugin: {err:?}"))?, + .map_err(|err| { + ValidatorError::Other(format!("Failed to load the Geyser plugin: {err:?}")) + })?, ) } else { None @@ -579,13 +587,13 @@ impl Validator { info!("Initializing sigverify done."); if !ledger_path.is_dir() { - return Err(format!( + return Err(anyhow!( "ledger directory does not exist or is not accessible: {ledger_path:?}" )); } let genesis_config = open_genesis_config(ledger_path, config.max_genesis_archive_unpacked_size) - .map_err(|err| format!("Failed to open genesis config: {err}"))?; + .context("Failed to open genesis config")?; metrics_config_sanity_check(genesis_config.cluster_type)?; @@ -598,12 +606,10 @@ impl Validator { wait_for_supermajority_slot + 1, expected_shred_version, ) - .map_err(|err| { - format!( - "Failed to backup and clear shreds with incorrect \ - shred version from blockstore: {err:?}" - ) - })?; + .context( + "Failed to backup and clear shreds with incorrect \ + shred version from blockstore", + )?; } } @@ -625,7 +631,7 @@ impl Validator { &config.snapshot_config.bank_snapshots_dir, &config.account_snapshot_paths, ) - .map_err(|err| format!("failed to clean orphaned account snapshot directories: {err}"))?; + .context("failed to clean orphaned account snapshot directories")?; timer.stop(); info!("Cleaning orphaned account snapshot directories done. 
{timer}"); @@ -727,7 +733,8 @@ impl Validator { transaction_notifier, entry_notifier, Some(poh_timing_point_sender.clone()), - )?; + ) + .map_err(ValidatorError::Other)?; let hard_forks = bank_forks.read().unwrap().root_bank().hard_forks(); if !hard_forks.is_empty() { info!("Hard forks: {:?}", hard_forks); @@ -743,11 +750,12 @@ impl Validator { if let Some(expected_shred_version) = config.expected_shred_version { if expected_shred_version != node.info.shred_version() { - return Err(format!( + return Err(ValidatorError::Other(format!( "shred version mismatch: expected {} found: {}", expected_shred_version, node.info.shred_version(), - )); + )) + .into()); } } @@ -809,7 +817,6 @@ impl Validator { let pruned_banks_request_handler = PrunedBanksRequestHandler { pruned_banks_receiver, }; - let last_full_snapshot_slot = starting_snapshot_hashes.map(|x| x.full.0 .0); let accounts_background_service = AccountsBackgroundService::new( bank_forks.clone(), exit.clone(), @@ -818,7 +825,6 @@ impl Validator { pruned_banks_request_handler, }, config.accounts_db_test_hash_calculation, - last_full_snapshot_slot, ); info!( "Using: block-verification-method: {}, block-production-method: {}", @@ -884,10 +890,13 @@ impl Validator { &bank_forks, &leader_schedule_cache, &accounts_background_request_sender, - )?; + ) + .map_err(ValidatorError::Other)?; if config.process_ledger_before_services { - process_blockstore.process()?; + process_blockstore + .process() + .map_err(ValidatorError::Other)?; } *start_progress.write().unwrap() = ValidatorStartProgress::StartingServices; @@ -961,7 +970,9 @@ impl Validator { &identity_keypair, node.info .tpu(Protocol::UDP) - .map_err(|err| format!("Invalid TPU address: {err:?}"))? + .map_err(|err| { + ValidatorError::Other(format!("Invalid TPU address: {err:?}")) + })? 
.ip(), )), Some((&staked_nodes, &identity_keypair.pubkey())), @@ -1025,7 +1036,8 @@ impl Validator { max_complete_transaction_status_slot, max_complete_rewards_slot, prioritization_fee_cache.clone(), - )?; + ) + .map_err(ValidatorError::Other)?; let pubsub_service = if !config.rpc_config.full_api { None @@ -1166,8 +1178,7 @@ impl Validator { &cluster_info, rpc_override_health_check, &start_progress, - ) - .map_err(|err| format!("wait_for_supermajority failed: {err:?}"))?; + )?; let blockstore_metric_report_service = BlockstoreMetricReportService::new(blockstore.clone(), exit.clone()); @@ -1202,8 +1213,7 @@ impl Validator { &blockstore.banking_trace_path(), exit.clone(), config.banking_trace_dir_byte_limit, - ))) - .map_err(|err| format!("{} [{:?}]", &err, &err))?; + )))?; if banking_tracer.is_enabled() { info!( "Enabled banking trace (dir_byte_limit: {})", @@ -1366,11 +1376,12 @@ impl Validator { outstanding_repair_requests.clone(), cluster_slots.clone(), wen_restart_repair_slots.clone(), - )?; + ) + .map_err(ValidatorError::Other)?; if in_wen_restart { info!("Waiting for wen_restart phase one to finish"); - match wait_for_wen_restart(WenRestartConfig { + wait_for_wen_restart(WenRestartConfig { wen_restart_path: config.wen_restart_proto_path.clone().unwrap(), last_vote, blockstore: blockstore.clone(), @@ -1383,12 +1394,8 @@ impl Validator { accounts_background_request_sender: accounts_background_request_sender.clone(), genesis_config_hash: genesis_config.hash(), exit: exit.clone(), - }) { - Ok(()) => { - return Err("wen_restart phase one completedy".to_string()); - } - Err(e) => return Err(format!("wait_for_wen_restart failed: {e:?}")), - }; + })?; + return Err(ValidatorError::WenRestartFinished.into()); } let (tpu, mut key_notifies) = Tpu::new( @@ -1432,6 +1439,7 @@ impl Validator { tpu_max_connections_per_ipaddr_per_minute, &prioritization_fee_cache, config.block_production_method.clone(), + config.enable_block_production_forwarding, config.generator_config.clone(), ); @@ -2326,11 +2334,22 @@ fn initialize_rpc_transaction_history_services( } } -#[derive(Debug, PartialEq, Eq)] -enum ValidatorError { +#[derive(Error, Debug)] +pub enum ValidatorError { + #[error("Bad expected bank hash")] BadExpectedBankHash, + + #[error("Ledger does not have enough data to wait for supermajority")] NotEnoughLedgerData, - Error(String), + + #[error("{0}")] + Other(String), + + #[error(transparent)] + TraceError(#[from] TraceError), + + #[error("Wen Restart finished, please continue with --wait-for-supermajority")] + WenRestartFinished, } // Return if the validator waited on other nodes to start. 
In this case @@ -2353,7 +2372,7 @@ fn wait_for_supermajority( if let Some(process_blockstore) = process_blockstore { process_blockstore .process() - .map_err(ValidatorError::Error)?; + .map_err(ValidatorError::Other)?; } let bank = bank_forks.read().unwrap().working_bank(); @@ -2453,7 +2472,7 @@ fn get_stake_percent_in_gossip(bank: &Bank, cluster_info: &ClusterInfo, log: boo if activated_stake == 0 { continue; } - let vote_state_node_pubkey = vote_account.node_pubkey().unwrap_or_default(); + let vote_state_node_pubkey = vote_account.node_pubkey().copied().unwrap_or_default(); if let Some(peer) = peers.get(&vote_state_node_pubkey) { if peer.shred_version() == my_shred_version { @@ -2757,7 +2776,7 @@ mod tests { // bank=0, wait=1, should fail config.wait_for_supermajority = Some(1); - assert_eq!( + assert!(matches!( wait_for_supermajority( &config, None, @@ -2766,7 +2785,7 @@ rpc_override_health_check.clone(), &start_progress, ), - Err(ValidatorError::NotEnoughLedgerData) - ); + Err(ValidatorError::NotEnoughLedgerData), + )); // bank=1, wait=0, should pass, bank is past the wait slot @@ -2789,7 +2808,7 @@ // bank=1, wait=1, equal, but bad hash provided config.wait_for_supermajority = Some(1); config.expected_bank_hash = Some(hash(&[1])); - assert_eq!( + assert!(matches!( wait_for_supermajority( &config, None, @@ -2798,7 +2817,7 @@ rpc_override_health_check, &start_progress, ), - Err(ValidatorError::BadExpectedBankHash) - ); + Err(ValidatorError::BadExpectedBankHash), + )); } diff --git a/core/src/verified_vote_packets.rs b/core/src/verified_vote_packets.rs index 1ab56997d13128..0840c57b809d22 100644 --- a/core/src/verified_vote_packets.rs +++ b/core/src/verified_vote_packets.rs @@ -213,78 +213,81 @@ impl VerifiedVotePackets { let vote_packets = vote_packets_receiver.recv_timeout(RECV_TIMEOUT)?; let vote_packets = std::iter::once(vote_packets).chain(vote_packets_receiver.try_iter()); + // No need to process any votes if we will not be the leader soon. But, + // return early only after draining the channel to avoid accumulating + // votes that will be stale by the time we do become leader + if !would_be_leader { + return Ok(()); + } + for gossip_votes in vote_packets { - if would_be_leader { - for verfied_vote_metadata in gossip_votes { - let VerifiedVoteMetadata { - vote_account_key, - vote, - packet_batch, - signature, - } = verfied_vote_metadata; - if vote.is_empty() { - error!("Empty votes should have been filtered out earlier in the pipeline"); - continue; - } - let slot = vote.last_voted_slot().unwrap(); - let hash = vote.hash(); - let timestamp = vote.timestamp(); - - match vote { - VoteTransaction::VoteStateUpdate(_) | VoteTransaction::TowerSync(_) => { - let (latest_gossip_slot, latest_timestamp) = - self.0.get(&vote_account_key).map_or((0, None), |vote| { - (vote.get_latest_gossip_slot(), vote.get_latest_timestamp()) - }); - // Since votes are not incremental, we keep only the latest vote - // If the vote is for the same slot we will only allow it if - // it has a later timestamp (refreshed vote) - // - // Timestamp can be None if something was wrong with the senders clock. - // We directly compare as Options to ensure that votes with proper - // timestamps have precedence (Some is > None).
- if slot > latest_gossip_slot - || ((slot == latest_gossip_slot) && (timestamp > latest_timestamp)) - { - self.0.insert( - vote_account_key, - FullTowerVote(GossipVote { - slot, - hash, - packet_batch, - signature, - timestamp, - }), - ); - } + for verfied_vote_metadata in gossip_votes { + let VerifiedVoteMetadata { + vote_account_key, + vote, + packet_batch, + signature, + } = verfied_vote_metadata; + if vote.is_empty() { + error!("Empty votes should have been filtered out earlier in the pipeline"); + continue; + } + let slot = vote.last_voted_slot().unwrap(); + let hash = vote.hash(); + let timestamp = vote.timestamp(); + + match vote { + VoteTransaction::VoteStateUpdate(_) | VoteTransaction::TowerSync(_) => { + let (latest_gossip_slot, latest_timestamp) = + self.0.get(&vote_account_key).map_or((0, None), |vote| { + (vote.get_latest_gossip_slot(), vote.get_latest_timestamp()) + }); + // Since votes are not incremental, we keep only the latest vote + // If the vote is for the same slot we will only allow it if + // it has a later timestamp (refreshed vote) + // + // Timestamp can be None if something was wrong with the senders clock. + // We directly compare as Options to ensure that votes with proper + // timestamps have precedence (Some is > None). + if slot > latest_gossip_slot + || ((slot == latest_gossip_slot) && (timestamp > latest_timestamp)) + { + self.0.insert( + vote_account_key, + FullTowerVote(GossipVote { + slot, + hash, + packet_batch, + signature, + timestamp, + }), + ); } - _ => { - if let Some(FullTowerVote(gossip_vote)) = - self.0.get_mut(&vote_account_key) - { - if slot > gossip_vote.slot { - warn!( + } + _ => { + if let Some(FullTowerVote(gossip_vote)) = self.0.get_mut(&vote_account_key) + { + if slot > gossip_vote.slot { + warn!( "Originally {} submitted full tower votes, but now has reverted to incremental votes. Converting back to old format.", vote_account_key ); - let mut votes = BTreeMap::new(); - let GossipVote { - slot, - hash, - packet_batch, - signature, - .. - } = std::mem::take(gossip_vote); - votes.insert((slot, hash), (packet_batch, signature)); - self.0.insert(vote_account_key, IncrementalVotes(votes)); - } else { - continue; - } - }; - let validator_votes: &mut BTreeMap< - (Slot, Hash), - (PacketBatch, Signature), - > = match self + let mut votes = BTreeMap::new(); + let GossipVote { + slot, + hash, + packet_batch, + signature, + .. 
+ } = std::mem::take(gossip_vote); + votes.insert((slot, hash), (packet_batch, signature)); + self.0.insert(vote_account_key, IncrementalVotes(votes)); + } else { + continue; + } + }; + let validator_votes: &mut BTreeMap<(Slot, Hash), (PacketBatch, Signature)> = + match self .0 .entry(vote_account_key) .or_insert(IncrementalVotes(BTreeMap::new())) @@ -292,11 +295,10 @@ impl VerifiedVotePackets { IncrementalVotes(votes) => votes, FullTowerVote(_) => continue, // Should never happen }; - validator_votes.insert((slot, hash), (packet_batch, signature)); - if validator_votes.len() > MAX_VOTES_PER_VALIDATOR { - let smallest_key = validator_votes.keys().next().cloned().unwrap(); - validator_votes.remove(&smallest_key).unwrap(); - } + validator_votes.insert((slot, hash), (packet_batch, signature)); + if validator_votes.len() > MAX_VOTES_PER_VALIDATOR { + let smallest_key = validator_votes.keys().next().cloned().unwrap(); + validator_votes.remove(&smallest_key).unwrap(); } } } diff --git a/core/src/warm_quic_cache_service.rs b/core/src/warm_quic_cache_service.rs index b7f196661ba150..e4a67cbe993169 100644 --- a/core/src/warm_quic_cache_service.rs +++ b/core/src/warm_quic_cache_service.rs @@ -3,10 +3,8 @@ use { rand::{thread_rng, Rng}, - solana_client::{ - connection_cache::{ConnectionCache, Protocol}, - tpu_connection::TpuConnection, - }, + solana_client::connection_cache::{ConnectionCache, Protocol}, + solana_connection_cache::client_connection::ClientConnection as TpuConnection, solana_gossip::cluster_info::ClusterInfo, solana_poh::poh_recorder::PohRecorder, std::{ diff --git a/core/tests/epoch_accounts_hash.rs b/core/tests/epoch_accounts_hash.rs index 7ec41c673fd7b3..938031fa05ffbe 100755 --- a/core/tests/epoch_accounts_hash.rs +++ b/core/tests/epoch_accounts_hash.rs @@ -220,7 +220,6 @@ impl BackgroundServices { pruned_banks_request_handler, }, false, - None, ); info!("Starting background services... 
DONE"); diff --git a/core/tests/snapshots.rs b/core/tests/snapshots.rs index 4268e48260d479..de56e29e0d3b47 100644 --- a/core/tests/snapshots.rs +++ b/core/tests/snapshots.rs @@ -235,12 +235,7 @@ fn run_bank_forks_snapshot_n( .unwrap() .set_root(bank.slot(), &request_sender, None) .unwrap(); - snapshot_request_handler.handle_snapshot_requests( - false, - 0, - &mut None, - &AtomicBool::new(false), - ); + snapshot_request_handler.handle_snapshot_requests(false, 0, &AtomicBool::new(false)); } } @@ -465,7 +460,7 @@ fn test_bank_forks_incremental_snapshot( accounts_package_sender, }; - let mut last_full_snapshot_slot = None; + let mut latest_full_snapshot_slot = None; for slot in 1..=LAST_SLOT { // Make a new bank and perform some transactions let bank = { @@ -500,18 +495,14 @@ fn test_bank_forks_incremental_snapshot( .unwrap() .set_root(bank.slot(), &request_sender, None) .unwrap(); - snapshot_request_handler.handle_snapshot_requests( - false, - 0, - &mut last_full_snapshot_slot, - &AtomicBool::new(false), - ); + snapshot_request_handler.handle_snapshot_requests(false, 0, &AtomicBool::new(false)); } // Since AccountsBackgroundService isn't running, manually make a full snapshot archive // at the right interval if snapshot_utils::should_take_full_snapshot(slot, FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS) { make_full_snapshot_archive(&bank, &snapshot_test_config.snapshot_config).unwrap(); + latest_full_snapshot_slot = Some(slot); } // Similarly, make an incremental snapshot archive at the right interval, but only if // there's been at least one full snapshot first, and a full snapshot wasn't already @@ -521,12 +512,12 @@ fn test_bank_forks_incremental_snapshot( else if snapshot_utils::should_take_incremental_snapshot( slot, INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS, - last_full_snapshot_slot, - ) && slot != last_full_snapshot_slot.unwrap() + latest_full_snapshot_slot, + ) && slot != latest_full_snapshot_slot.unwrap() { make_incremental_snapshot_archive( &bank, - last_full_snapshot_slot.unwrap(), + latest_full_snapshot_slot.unwrap(), &snapshot_test_config.snapshot_config, ) .unwrap(); @@ -731,11 +722,10 @@ fn test_snapshots_with_background_services( exit.clone(), abs_request_handler, false, - None, ); - let mut last_full_snapshot_slot = None; - let mut last_incremental_snapshot_slot = None; + let mut latest_full_snapshot_slot = None; + let mut latest_incremental_snapshot_slot = None; let mint_keypair = &snapshot_test_config.genesis_config_info.mint_keypair; for slot in 1..=LAST_SLOT { // Make a new bank and process some transactions @@ -788,16 +778,16 @@ fn test_snapshots_with_background_services( ); std::thread::sleep(Duration::from_secs(1)); } - last_full_snapshot_slot = Some(slot); + latest_full_snapshot_slot = Some(slot); } else if slot % INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS == 0 - && last_full_snapshot_slot.is_some() + && latest_full_snapshot_slot.is_some() { let timer = Instant::now(); while snapshot_utils::get_highest_incremental_snapshot_archive_slot( &snapshot_test_config .snapshot_config .incremental_snapshot_archives_dir, - last_full_snapshot_slot.unwrap(), + latest_full_snapshot_slot.unwrap(), ) != Some(slot) { assert!( @@ -806,7 +796,7 @@ fn test_snapshots_with_background_services( ); std::thread::sleep(Duration::from_secs(1)); } - last_incremental_snapshot_slot = Some(slot); + latest_incremental_snapshot_slot = Some(slot); } } @@ -841,7 +831,7 @@ fn test_snapshots_with_background_services( assert_eq!( deserialized_bank.slot(), - last_incremental_snapshot_slot.unwrap() + 
latest_incremental_snapshot_slot.unwrap() ); assert_eq!( &deserialized_bank, diff --git a/core/tests/unified_scheduler.rs b/core/tests/unified_scheduler.rs index 22f9fab0c05122..a6e40296510609 100644 --- a/core/tests/unified_scheduler.rs +++ b/core/tests/unified_scheduler.rs @@ -15,7 +15,6 @@ use { unfrozen_gossip_verified_vote_hashes::UnfrozenGossipVerifiedVoteHashes, }, solana_ledger::genesis_utils::create_genesis_config, - solana_program_runtime::timings::ExecuteTimings, solana_runtime::{ accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks, genesis_utils::GenesisConfigInfo, prioritization_fee_cache::PrioritizationFeeCache, @@ -26,6 +25,7 @@ use { system_transaction, transaction::{Result, SanitizedTransaction}, }, + solana_timings::ExecuteTimings, solana_unified_scheduler_pool::{ DefaultTaskHandler, HandlerContext, PooledScheduler, SchedulerPool, TaskHandler, }, @@ -83,7 +83,7 @@ fn test_scheduler_waited_by_drop_bank_service() { bank_forks.write().unwrap().install_scheduler_pool(pool); let genesis = 0; let genesis_bank = &bank_forks.read().unwrap().get(genesis).unwrap(); - genesis_bank.set_fork_graph_in_program_cache(bank_forks.clone()); + genesis_bank.set_fork_graph_in_program_cache(Arc::downgrade(&bank_forks)); // Create bank, which is pruned later let pruned = 2; diff --git a/cost-model/Cargo.toml b/cost-model/Cargo.toml index 52e10447387eeb..366a27cebe7575 100644 --- a/cost-model/Cargo.toml +++ b/cost-model/Cargo.toml @@ -13,18 +13,13 @@ edition = { workspace = true } ahash = { workspace = true } lazy_static = { workspace = true } log = { workspace = true } -solana-address-lookup-table-program = { workspace = true } -solana-bpf-loader-program = { workspace = true } +solana-builtins-default-costs = { workspace = true } solana-compute-budget = { workspace = true } -solana-compute-budget-program = { workspace = true } -solana-config-program = { workspace = true } solana-frozen-abi = { workspace = true, optional = true } solana-frozen-abi-macro = { workspace = true, optional = true } -solana-loader-v4-program = { workspace = true } solana-metrics = { workspace = true } +solana-runtime-transaction = { workspace = true } solana-sdk = { workspace = true } -solana-stake-program = { workspace = true } -solana-system-program = { workspace = true } solana-vote-program = { workspace = true } [lib] @@ -33,8 +28,10 @@ name = "solana_cost_model" [dev-dependencies] itertools = { workspace = true } +rand = "0.8.5" solana-logger = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } +solana-system-program = { workspace = true } static_assertions = { workspace = true } test-case = { workspace = true } diff --git a/cost-model/benches/cost_model.rs b/cost-model/benches/cost_model.rs new file mode 100644 index 00000000000000..c92ddd7f9b0b1a --- /dev/null +++ b/cost-model/benches/cost_model.rs @@ -0,0 +1,77 @@ +#![feature(test)] +extern crate test; +use { + solana_cost_model::cost_model::CostModel, + solana_sdk::{ + feature_set::FeatureSet, + hash::Hash, + message::Message, + pubkey::Pubkey, + signature::Keypair, + signer::Signer, + system_instruction, + transaction::{SanitizedTransaction, Transaction}, + }, + test::Bencher, +}; + +struct BenchSetup { + transactions: Vec, + feature_set: FeatureSet, +} + +const NUM_TRANSACTIONS_PER_ITER: usize = 1024; + +fn setup(num_transactions: usize) -> BenchSetup { + let transactions = (0..num_transactions) + .map(|_| { + // As many transfer instructions as is possible in a regular packet. 
+ let from_keypair = Keypair::new(); + let to_lamports = + Vec::from_iter(std::iter::repeat_with(|| (Pubkey::new_unique(), 1)).take(24)); + let ixs = system_instruction::transfer_many(&from_keypair.pubkey(), &to_lamports); + let message = Message::new(&ixs, Some(&from_keypair.pubkey())); + let transaction = Transaction::new(&[from_keypair], message, Hash::default()); + SanitizedTransaction::from_transaction_for_tests(transaction) + }) + .collect(); + + let feature_set = FeatureSet::default(); + + BenchSetup { + transactions, + feature_set, + } +} + +#[bench] +fn bench_cost_model(bencher: &mut Bencher) { + let BenchSetup { + transactions, + feature_set, + } = setup(NUM_TRANSACTIONS_PER_ITER); + + bencher.iter(|| { + for transaction in &transactions { + let _ = CostModel::calculate_cost(test::black_box(transaction), &feature_set); + } + }); +} + +#[bench] +fn bench_cost_model_requested_write_locks(bencher: &mut Bencher) { + let BenchSetup { + transactions, + mut feature_set, + } = setup(NUM_TRANSACTIONS_PER_ITER); + feature_set.activate( + &solana_sdk::feature_set::cost_model_requested_write_lock_cost::id(), + 0, + ); + + bencher.iter(|| { + for transaction in &transactions { + let _ = CostModel::calculate_cost(test::black_box(transaction), &feature_set); + } + }); +} diff --git a/cost-model/src/block_cost_limits.rs b/cost-model/src/block_cost_limits.rs index b04f289e0553af..7fc4b2fe670b39 100644 --- a/cost-model/src/block_cost_limits.rs +++ b/cost-model/src/block_cost_limits.rs @@ -1,13 +1,5 @@ //! defines block cost related limits //! -use { - lazy_static::lazy_static, - solana_sdk::{ - address_lookup_table, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, - compute_budget, ed25519_program, loader_v4, pubkey::Pubkey, secp256k1_program, - }, - std::collections::HashMap, -}; /// Static configurations: /// @@ -28,32 +20,12 @@ pub const SIGNATURE_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 24; pub const SECP256K1_VERIFY_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 223; /// Number of compute units for one ed25519 signature verification. pub const ED25519_VERIFY_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 76; +/// Number of compute units for one ed25519 strict signature verification. +pub const ED25519_VERIFY_STRICT_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 80; /// Number of compute units for one write lock pub const WRITE_LOCK_UNITS: u64 = COMPUTE_UNIT_TO_US_RATIO * 10; /// Number of data bytes per compute units pub const INSTRUCTION_DATA_BYTES_COST: u64 = 140 /*bytes per us*/ / COMPUTE_UNIT_TO_US_RATIO; -// Number of compute units for each built-in programs -lazy_static! 
{ - /// Number of compute units for each built-in programs - pub static ref BUILT_IN_INSTRUCTION_COSTS: HashMap = [ - (solana_stake_program::id(), solana_stake_program::stake_instruction::DEFAULT_COMPUTE_UNITS), - (solana_config_program::id(), solana_config_program::config_processor::DEFAULT_COMPUTE_UNITS), - (solana_vote_program::id(), solana_vote_program::vote_processor::DEFAULT_COMPUTE_UNITS), - (solana_system_program::id(), solana_system_program::system_processor::DEFAULT_COMPUTE_UNITS), - (compute_budget::id(), solana_compute_budget_program::DEFAULT_COMPUTE_UNITS), - (address_lookup_table::program::id(), solana_address_lookup_table_program::processor::DEFAULT_COMPUTE_UNITS), - (bpf_loader_upgradeable::id(), solana_bpf_loader_program::UPGRADEABLE_LOADER_COMPUTE_UNITS), - (bpf_loader_deprecated::id(), solana_bpf_loader_program::DEPRECATED_LOADER_COMPUTE_UNITS), - (bpf_loader::id(), solana_bpf_loader_program::DEFAULT_LOADER_COMPUTE_UNITS), - (loader_v4::id(), solana_loader_v4_program::DEFAULT_COMPUTE_UNITS), - // Note: These are precompile, run directly in bank during sanitizing; - (secp256k1_program::id(), 0), - (ed25519_program::id(), 0), - ] - .iter() - .cloned() - .collect(); -} /// Statically computed data: /// diff --git a/cost-model/src/cost_model.rs b/cost-model/src/cost_model.rs index c444ad0885566f..c57a929d8b9bd1 100644 --- a/cost-model/src/cost_model.rs +++ b/cost-model/src/cost_model.rs @@ -8,10 +8,11 @@ use { crate::{block_cost_limits::*, transaction_cost::*}, log::*, - solana_compute_budget::compute_budget_processor::{ - process_compute_budget_instructions, DEFAULT_HEAP_COST, - DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, MAX_COMPUTE_UNIT_LIMIT, + solana_builtins_default_costs::BUILTIN_INSTRUCTION_COSTS, + solana_compute_budget::compute_budget_limits::{ + DEFAULT_HEAP_COST, DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, MAX_COMPUTE_UNIT_LIMIT, }, + solana_runtime_transaction::instructions_processor::process_compute_budget_instructions, solana_sdk::{ borsh1::try_from_slice_unchecked, compute_budget::{self, ComputeBudgetInstruction}, @@ -40,7 +41,7 @@ impl CostModel { } else { let mut tx_cost = UsageCostDetails::new_with_default_capacity(); - Self::get_signature_cost(&mut tx_cost, transaction); + Self::get_signature_cost(&mut tx_cost, transaction, feature_set); Self::get_write_lock_cost(&mut tx_cost, transaction, feature_set); Self::get_transaction_cost(&mut tx_cost, transaction, feature_set); tx_cost.allocated_accounts_data_size = @@ -56,7 +57,7 @@ impl CostModel { pub fn calculate_cost_for_executed_transaction( transaction: &SanitizedTransaction, actual_programs_execution_cost: u64, - actual_loaded_accounts_data_size_bytes: usize, + actual_loaded_accounts_data_size_bytes: u32, feature_set: &FeatureSet, ) -> TransactionCost { if transaction.is_simple_vote_transaction() { @@ -66,7 +67,7 @@ impl CostModel { } else { let mut tx_cost = UsageCostDetails::new_with_default_capacity(); - Self::get_signature_cost(&mut tx_cost, transaction); + Self::get_signature_cost(&mut tx_cost, transaction, feature_set); Self::get_write_lock_cost(&mut tx_cost, transaction, feature_set); Self::get_instructions_data_cost(&mut tx_cost, transaction); tx_cost.allocated_accounts_data_size = @@ -82,13 +83,25 @@ impl CostModel { } } - fn get_signature_cost(tx_cost: &mut UsageCostDetails, transaction: &SanitizedTransaction) { + fn get_signature_cost( + tx_cost: &mut UsageCostDetails, + transaction: &SanitizedTransaction, + feature_set: &FeatureSet, + ) { let signatures_count_detail = 
transaction.message().get_signature_details(); tx_cost.num_transaction_signatures = signatures_count_detail.num_transaction_signatures(); tx_cost.num_secp256k1_instruction_signatures = signatures_count_detail.num_secp256k1_instruction_signatures(); tx_cost.num_ed25519_instruction_signatures = signatures_count_detail.num_ed25519_instruction_signatures(); + + let ed25519_verify_cost = + if feature_set.is_active(&feature_set::ed25519_precompile_verify_strict::id()) { + ED25519_VERIFY_STRICT_COST + } else { + ED25519_VERIFY_COST + }; + tx_cost.signature_cost = signatures_count_detail .num_transaction_signatures() .saturating_mul(SIGNATURE_COST) @@ -100,7 +113,7 @@ impl CostModel { .saturating_add( signatures_count_detail .num_ed25519_instruction_signatures() - .saturating_mul(ED25519_VERIFY_COST), + .saturating_mul(ed25519_verify_cost), ); } @@ -148,7 +161,7 @@ impl CostModel { for (program_id, instruction) in transaction.message().program_instructions_iter() { let ix_execution_cost = - if let Some(builtin_cost) = BUILT_IN_INSTRUCTION_COSTS.get(program_id) { + if let Some(builtin_cost) = BUILTIN_INSTRUCTION_COSTS.get(program_id) { *builtin_cost } else { has_user_space_instructions = true; @@ -186,7 +199,7 @@ impl CostModel { } loaded_accounts_data_size_cost = Self::calculate_loaded_accounts_data_size_cost( - usize::try_from(compute_budget_limits.loaded_accounts_bytes).unwrap(), + compute_budget_limits.loaded_accounts_bytes.get(), feature_set, ); } @@ -215,7 +228,7 @@ impl CostModel { } pub fn calculate_loaded_accounts_data_size_cost( - loaded_accounts_data_size: usize, + loaded_accounts_data_size: u32, _feature_set: &FeatureSet, ) -> u64 { FeeStructure::calculate_memory_usage_cost(loaded_accounts_data_size, DEFAULT_HEAP_COST) @@ -358,7 +371,7 @@ mod tests { ); // expected cost for one system transfer instructions - let expected_execution_cost = BUILT_IN_INSTRUCTION_COSTS + let expected_execution_cost = BUILTIN_INSTRUCTION_COSTS .get(&system_program::id()) .unwrap(); @@ -526,7 +539,7 @@ mod tests { debug!("many transfer transaction {:?}", tx); // expected cost for two system transfer instructions - let program_cost = BUILT_IN_INSTRUCTION_COSTS + let program_cost = BUILTIN_INSTRUCTION_COSTS .get(&system_program::id()) .unwrap(); let expected_cost = program_cost * 2; @@ -610,12 +623,12 @@ mod tests { )); let expected_account_cost = WRITE_LOCK_UNITS * 2; - let expected_execution_cost = BUILT_IN_INSTRUCTION_COSTS + let expected_execution_cost = BUILTIN_INSTRUCTION_COSTS .get(&system_program::id()) .unwrap(); const DEFAULT_PAGE_COST: u64 = 8; let expected_loaded_accounts_data_size_cost = - solana_compute_budget::compute_budget_processor::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES + solana_compute_budget::compute_budget_limits::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES.get() as u64 / ACCOUNT_DATA_COST_PAGE_SIZE * DEFAULT_PAGE_COST; @@ -648,10 +661,10 @@ mod tests { let feature_set = FeatureSet::all_enabled(); let expected_account_cost = WRITE_LOCK_UNITS * 2; - let expected_execution_cost = BUILT_IN_INSTRUCTION_COSTS + let expected_execution_cost = BUILTIN_INSTRUCTION_COSTS .get(&system_program::id()) .unwrap() - + BUILT_IN_INSTRUCTION_COSTS + + BUILTIN_INSTRUCTION_COSTS .get(&compute_budget::id()) .unwrap(); let expected_loaded_accounts_data_size_cost = (data_limit as u64) / (32 * 1024) * 8; @@ -681,7 +694,7 @@ mod tests { start_hash, )); // transaction has one builtin instruction, and one bpf instruction, no ComputeBudget::compute_unit_limit - let expected_builtin_cost = *BUILT_IN_INSTRUCTION_COSTS + let 
expected_builtin_cost = *BUILTIN_INSTRUCTION_COSTS .get(&solana_system_program::id()) .unwrap(); let expected_bpf_cost = DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT; @@ -710,10 +723,10 @@ mod tests { start_hash, )); // transaction has one builtin instruction, and one ComputeBudget::compute_unit_limit - let expected_cost = *BUILT_IN_INSTRUCTION_COSTS + let expected_cost = *BUILTIN_INSTRUCTION_COSTS .get(&solana_system_program::id()) .unwrap() - + BUILT_IN_INSTRUCTION_COSTS + + BUILTIN_INSTRUCTION_COSTS .get(&compute_budget::id()) .unwrap(); diff --git a/curves/bn254/Cargo.toml b/curves/bn254/Cargo.toml new file mode 100644 index 00000000000000..7182ff53f95b45 --- /dev/null +++ b/curves/bn254/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "solana-bn254" +description = "Solana BN254" +documentation = "https://docs.rs/solana-bn254" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +bytemuck = { workspace = true, features = ["derive"] } +solana-program = { workspace = true } +thiserror = { workspace = true } + +[target.'cfg(not(target_os = "solana"))'.dependencies] +ark-bn254 = { workspace = true } +ark-ec = { workspace = true } +ark-ff = { workspace = true } +ark-serialize = { workspace = true } + +[dev-dependencies] +array-bytes = { workspace = true } +serde = { workspace = true } +serde_derive = { workspace = true } +serde_json = { workspace = true } diff --git a/sdk/program/src/alt_bn128/compression.rs b/curves/bn254/src/compression.rs similarity index 98% rename from sdk/program/src/alt_bn128/compression.rs rename to curves/bn254/src/compression.rs index 5bcbe06434172e..b284772fca01b6 100644 --- a/sdk/program/src/alt_bn128/compression.rs +++ b/curves/bn254/src/compression.rs @@ -1,5 +1,5 @@ pub mod prelude { - pub use crate::alt_bn128::compression::{ + pub use crate::compression::{ alt_bn128_compression_size::*, consts::*, target_arch::*, AltBn128CompressionError, }; } @@ -70,7 +70,7 @@ mod target_arch { use { super::*, - crate::alt_bn128::compression::alt_bn128_compression_size, + crate::compression::alt_bn128_compression_size, ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, Validate}, }; @@ -194,6 +194,7 @@ mod target_arch { super::*, alt_bn128_compression_size::{G1, G1_COMPRESSED, G2, G2_COMPRESSED}, prelude::*, + solana_program::syscalls, }; pub fn alt_bn128_g1_compress( @@ -201,7 +202,7 @@ mod target_arch { ) -> Result<[u8; G1_COMPRESSED], AltBn128CompressionError> { let mut result_buffer = [0; G1_COMPRESSED]; let result = unsafe { - crate::syscalls::sol_alt_bn128_compression( + syscalls::sol_alt_bn128_compression( ALT_BN128_G1_COMPRESS, input as *const _ as *const u8, input.len() as u64, @@ -218,7 +219,7 @@ mod target_arch { pub fn alt_bn128_g1_decompress(input: &[u8]) -> Result<[u8; G1], AltBn128CompressionError> { let mut result_buffer = [0; G1]; let result = unsafe { - crate::syscalls::sol_alt_bn128_compression( + syscalls::sol_alt_bn128_compression( ALT_BN128_G1_DECOMPRESS, input as *const _ as *const u8, input.len() as u64, @@ -237,7 +238,7 @@ mod target_arch { ) -> Result<[u8; G2_COMPRESSED], AltBn128CompressionError> { let mut result_buffer = [0; G2_COMPRESSED]; let result = unsafe { - crate::syscalls::sol_alt_bn128_compression( + syscalls::sol_alt_bn128_compression( ALT_BN128_G2_COMPRESS, input as *const _ as *const u8, input.len() as u64, @@ -256,7 +257,7 @@ mod target_arch { ) -> Result<[u8; G2], 
AltBn128CompressionError> { let mut result_buffer = [0; G2]; let result = unsafe { - crate::syscalls::sol_alt_bn128_compression( + syscalls::sol_alt_bn128_compression( ALT_BN128_G2_DECOMPRESS, input as *const _ as *const u8, input.len() as u64, @@ -275,7 +276,7 @@ mod target_arch { mod tests { use { super::*, - crate::alt_bn128::compression::target_arch::convert_endianness, + crate::compression::target_arch::convert_endianness, ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress, Validate}, std::ops::Neg, target_arch::{ diff --git a/sdk/program/src/alt_bn128/mod.rs b/curves/bn254/src/lib.rs similarity index 99% rename from sdk/program/src/alt_bn128/mod.rs rename to curves/bn254/src/lib.rs index c7e1d8e5d28250..e60417cde6b53d 100644 --- a/sdk/program/src/alt_bn128/mod.rs +++ b/curves/bn254/src/lib.rs @@ -1,10 +1,10 @@ pub mod compression; pub mod prelude { - pub use crate::alt_bn128::{consts::*, target_arch::*, AltBn128Error}; + pub use crate::{consts::*, target_arch::*, AltBn128Error}; } use { - bytemuck_derive::{Pod, Zeroable}, + bytemuck::{Pod, Zeroable}, consts::*, thiserror::Error, }; @@ -305,14 +305,15 @@ mod target_arch { #[cfg(target_os = "solana")] mod target_arch { - use super::*; + use {super::*, solana_program::syscalls}; + pub fn alt_bn128_addition(input: &[u8]) -> Result, AltBn128Error> { if input.len() > ALT_BN128_ADDITION_INPUT_LEN { return Err(AltBn128Error::InvalidInputData); } let mut result_buffer = [0; ALT_BN128_ADDITION_OUTPUT_LEN]; let result = unsafe { - crate::syscalls::sol_alt_bn128_group_op( + syscalls::sol_alt_bn128_group_op( ALT_BN128_ADD, input as *const _ as *const u8, input.len() as u64, @@ -332,7 +333,7 @@ mod target_arch { } let mut result_buffer = [0u8; ALT_BN128_POINT_SIZE]; let result = unsafe { - crate::syscalls::sol_alt_bn128_group_op( + syscalls::sol_alt_bn128_group_op( ALT_BN128_MUL, input as *const _ as *const u8, input.len() as u64, @@ -356,7 +357,7 @@ mod target_arch { } let mut result_buffer = [0u8; 32]; let result = unsafe { - crate::syscalls::sol_alt_bn128_group_op( + syscalls::sol_alt_bn128_group_op( ALT_BN128_PAIRING, input as *const _ as *const u8, input.len() as u64, @@ -374,7 +375,7 @@ mod target_arch { #[cfg(test)] mod tests { use { - crate::alt_bn128::{prelude::*, PodG1}, + crate::{prelude::*, PodG1}, ark_bn254::g1::G1Affine, ark_ec::AffineRepr, ark_serialize::{CanonicalSerialize, Compress}, diff --git a/curves/curve25519/Cargo.toml b/curves/curve25519/Cargo.toml index fb04c29b60171b..ce77b5ddca04db 100644 --- a/curves/curve25519/Cargo.toml +++ b/curves/curve25519/Cargo.toml @@ -12,8 +12,10 @@ edition = { workspace = true } [dependencies] bytemuck = { workspace = true } bytemuck_derive = { workspace = true } -solana-program = { workspace = true } thiserror = { workspace = true } +[target.'cfg(target_os = "solana")'.dependencies] +solana-program = { workspace = true } + [target.'cfg(not(target_os = "solana"))'.dependencies] curve25519-dalek = { workspace = true, features = ["serde"] } diff --git a/curves/secp256k1-recover/Cargo.toml b/curves/secp256k1-recover/Cargo.toml new file mode 100644 index 00000000000000..545fb073e9e6ae --- /dev/null +++ b/curves/secp256k1-recover/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "solana-secp256k1-recover" +description = "Solana SECP256K1 Recover" +documentation = "https://docs.rs/solana-secp256k1-recover" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { 
workspace = true } + +[dependencies] +borsh = { workspace = true, optional = true } +solana-frozen-abi = { workspace = true, optional = true } +solana-frozen-abi-macro = { workspace = true, optional = true } +thiserror = { workspace = true } + +[target.'cfg(target_os = "solana")'.dependencies] +solana-define-syscall = { workspace = true } + +[target.'cfg(not(target_os = "solana"))'.dependencies] +libsecp256k1 = { workspace = true } + +[dev-dependencies] +anyhow = { workspace = true } +borsh = { workspace = true } + +[target.'cfg(not(target_os = "solana"))'.dev-dependencies] +libsecp256k1 = { workspace = true, features = ["hmac"] } + +[build-dependencies] +rustc_version = { workspace = true } + +[features] +borsh = ["dep:borsh"] +frozen-abi = ["dep:solana-frozen-abi", "dep:solana-frozen-abi-macro"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/curves/secp256k1-recover/build.rs b/curves/secp256k1-recover/build.rs new file mode 120000 index 00000000000000..84539eddaa6ded --- /dev/null +++ b/curves/secp256k1-recover/build.rs @@ -0,0 +1 @@ +../../frozen-abi/build.rs \ No newline at end of file diff --git a/sdk/program/src/secp256k1_recover.rs b/curves/secp256k1-recover/src/lib.rs similarity index 95% rename from sdk/program/src/secp256k1_recover.rs rename to curves/secp256k1-recover/src/lib.rs index fca67cf6b33ae8..837f2b8cf3aed3 100644 --- a/sdk/program/src/secp256k1_recover.rs +++ b/curves/secp256k1-recover/src/lib.rs @@ -1,3 +1,4 @@ +#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] //! Public key recovery from [secp256k1] ECDSA signatures. //! //! [secp256k1]: https://en.bitcoin.it/wiki/Secp256k1 @@ -21,7 +22,7 @@ //! also provides the [secp256k1 program][sp], which is more flexible, has lower CPU //! cost, and can validate many signatures at once. //! -//! [sp]: crate::secp256k1_program +//! [sp]: https://docs.rs/solana-program/latest/solana_program/secp256k1_program/ //! [`ecrecover`]: https://docs.soliditylang.org/en/v0.8.14/units-and-global-variables.html?highlight=ecrecover#mathematical-and-cryptographic-functions #[cfg(feature = "borsh")] @@ -63,7 +64,7 @@ pub const SECP256K1_SIGNATURE_LENGTH: usize = 64; pub const SECP256K1_PUBLIC_KEY_LENGTH: usize = 64; #[repr(transparent)] -#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] +#[cfg_attr(feature = "frozen-abi", derive(solana_frozen_abi_macro::AbiExample))] #[cfg_attr( feature = "borsh", derive(BorshSerialize, BorshDeserialize, BorshSchema), @@ -85,6 +86,9 @@ impl Secp256k1Pubkey { } } +#[cfg(target_os = "solana")] +solana_define_syscall::define_syscall!(fn sol_secp256k1_recover(hash: *const u8, recovery_id: u64, signature: *const u8, result: *mut u8) -> u64); + /// Recover the public key from a [secp256k1] ECDSA signature and /// cryptographically-hashed message. /// @@ -110,7 +114,7 @@ impl Secp256k1Pubkey { /// "overflowing" signature, and this function returns an error when parsing /// overflowing signatures. /// -/// [`keccak`]: crate::keccak +/// [`keccak`]: https://docs.rs/solana-program/latest/solana_program/keccak/ /// [`wrapping_sub`]: https://doc.rust-lang.org/std/primitive.u8.html#method.wrapping_sub /// /// On success this function returns a [`Secp256k1Pubkey`], a wrapper around a @@ -123,7 +127,7 @@ impl Secp256k1Pubkey { /// the [secp256k1 program][sp], which is more flexible, has lower CPU cost, and /// can validate many signatures at once. 
/// -/// [sp]: crate::secp256k1_program +/// [sp]: https://docs.rs/solana-program/latest/solana_program/secp256k1_program/ /// /// The `secp256k1_recover` syscall is implemented with the [`libsecp256k1`] /// crate, which clients may also want to use. @@ -161,7 +165,7 @@ impl Secp256k1Pubkey { /// signatures with high-order `S` values. The following code will accomplish /// this: /// -/// ```rust +/// ```rust,ignore /// # use solana_program::program_error::ProgramError; /// # let signature_bytes = [ /// # 0x83, 0x55, 0x81, 0xDF, 0xB1, 0x02, 0xA7, 0xD2, @@ -257,13 +261,13 @@ impl Secp256k1Pubkey { /// The Solana program. Note that it uses `libsecp256k1` version 0.7.0 to parse /// the secp256k1 signature to prevent malleability. /// -/// ```no_run +/// ```rust,ignore /// use solana_program::{ /// entrypoint::ProgramResult, /// keccak, msg, /// program_error::ProgramError, -/// secp256k1_recover::secp256k1_recover, /// }; +/// use solana_secp256k1_recover::secp256k1_recover; /// /// /// The key we expect to sign secp256k1 messages, /// /// as serialized by `libsecp256k1::PublicKey::serialize`. @@ -327,7 +331,7 @@ impl Secp256k1Pubkey { /// /// The RPC client program: /// -/// ```no_run +/// ```rust,ignore /// # use solana_program::example_mocks::solana_rpc_client; /// # use solana_program::example_mocks::solana_sdk; /// use anyhow::Result; @@ -399,7 +403,7 @@ pub fn secp256k1_recover( { let mut pubkey_buffer = [0u8; SECP256K1_PUBLIC_KEY_LENGTH]; let result = unsafe { - crate::syscalls::sol_secp256k1_recover( + sol_secp256k1_recover( hash.as_ptr(), recovery_id as u64, signature.as_ptr(), diff --git a/docs/build.sh b/docs/build.sh index 6269eabdbb78b0..4a122678a19b09 100755 --- a/docs/build.sh +++ b/docs/build.sh @@ -12,27 +12,6 @@ source ../ci/rust-version.sh ../ci/docker-run-default-image.sh docs/convert-ascii-to-svg.sh ./set-solana-release-tag.sh -# Get current channel -eval "$(../ci/channel-info.sh)" - -# Synchronize translations with Crowdin only on stable channel -if [ "$CHANNEL" = stable ]; then - echo "Downloading & updating translations..." - npm run crowdin:download - npm run crowdin:upload -fi - # Build from /src into /build npm run build echo $? - -# Publish only from merge commits and beta release tags -if [[ -n $CI ]]; then - if [[ -z $CI_PULL_REQUEST ]]; then - if [[ -n $CI_TAG ]] && [[ $CI_TAG != $BETA_CHANNEL* ]]; then - echo "not a beta tag" - exit 0 - fi - ./publish-docs.sh - fi -fi diff --git a/docs/src/architecture.md b/docs/src/architecture.md index b51bc0fff76702..798860f9adf15a 100644 --- a/docs/src/architecture.md +++ b/docs/src/architecture.md @@ -1,8 +1,8 @@ --- -title: Solana Validator Architecture +title: Agave Validator Architecture sidebar_position: 0 sidebar_label: Overview -pagination_label: Solana Validator Architecture +pagination_label: Agave Validator Architecture --- -In this section, we will describe the architecture of the Solana Validator. +In this section, we will describe the architecture of the Agave Validator. diff --git a/docs/src/backwards-compatibility.md b/docs/src/backwards-compatibility.md index 0fdc388ea2dbae..843bc1d4ebab5c 100644 --- a/docs/src/backwards-compatibility.md +++ b/docs/src/backwards-compatibility.md @@ -3,15 +3,18 @@ title: Backward Compatibility Policy --- As the Solana developer ecosystem grows, so does the need for clear expectations around -breaking API and behavior changes affecting applications and tooling built for Solana. +breaking API and behavior changes affecting applications and tooling built for Solana by Anza. 
In a perfect world, Solana development could continue at a very fast pace without ever causing issues for existing developers. However, some compromises will need to be made -and so this document attempts to clarify and codify the process for new releases. +and so this document attempts to clarify and codify the process for new releases. Furthermore, +there will be a growing number of validator clients maintained separately by distinct teams. +Coordinating across these teams to ensure the reliability of the network will require ongoing +communication. ### Expectations -- Solana software releases include APIs, SDKs, and CLI tooling (with a few [exceptions](#exceptions)). -- Solana software releases follow semantic versioning, more details below. +- Agave software releases include APIs, SDKs, and CLI tooling (with a few [exceptions](#exceptions)). +- Agave software releases follow semantic versioning, more details below. - Software for a `MINOR` version release will be compatible across all software on the same `MAJOR` version. @@ -112,7 +115,7 @@ Major releases: ### Runtime Features -New Solana runtime features are feature-switched and manually activated. Runtime features +New Agave runtime features are feature-switched and manually activated. Runtime features include: the introduction of new native programs, sysvars, and syscalls; and changes to their behavior. Feature activation is cluster agnostic, allowing confidence to be built on Testnet before activation on Mainnet-beta. @@ -126,13 +129,6 @@ The release process is as follows: ### Infrastructure Changes -#### Public API Nodes - -Solana provides publicly available RPC API nodes for all developers to use. The Solana team -will make their best effort to communicate any changes to the host, port, rate-limiting behavior, -availability, etc. However, we recommend that developers rely on their own validator nodes to -discourage dependence upon Solana operated nodes. - #### Local cluster scripts and Docker images Breaking changes will be limited to `MAJOR` version updates. `MINOR` and `PATCH` updates should always diff --git a/docs/src/cli/index.md b/docs/src/cli/index.md index 77574419618354..27deffd485dfc9 100644 --- a/docs/src/cli/index.md +++ b/docs/src/cli/index.md @@ -11,7 +11,7 @@ cluster by delegating stake. To interact with a Solana cluster, we will use its command-line interface, also known as the CLI. We use the command-line because it is the first place the -Solana core team deploys new functionality. The command-line interface is not +Anza core team deploys new functionality. The command-line interface is not necessarily the easiest to use, but it provides the most direct, flexible, and secure access to your Solana accounts.
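For example, a minimal first session with the CLI might look like the following sketch; the devnet URL is only an example endpoint, and any cluster RPC URL works:

```bash
# Confirm which CLI version is installed
solana --version

# Point the CLI at a cluster -- devnet here, purely as an example
solana config set --url https://api.devnet.solana.com

# Show the active configuration (RPC URL, keypair path, commitment)
solana config get
```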
diff --git a/docs/src/cli/install.md b/docs/src/cli/install.md index fbcd894660ccd8..d2d0cad8f84566 100644 --- a/docs/src/cli/install.md +++ b/docs/src/cli/install.md @@ -8,37 +8,37 @@ sidebar_position: 1 There are multiple ways to install the Solana tools on your computer depending on your preferred workflow: -- [Use Solana's Install Tool (Simplest option)](#use-solanas-install-tool) +- [Use the Solana Install Tool (Simplest option)](#use-the-solana-install-tool) - [Download Prebuilt Binaries](#download-prebuilt-binaries) - [Build from Source](#build-from-source) - [Use Homebrew](#use-homebrew) -## Use Solana's Install Tool +## Use the Solana Install Tool ### MacOS & Linux - Open your favorite Terminal application -- Install the Solana release - [LATEST_SOLANA_RELEASE_VERSION](https://github.com/anza-xyz/agave/releases/tag/LATEST_SOLANA_RELEASE_VERSION) +- Install the Agave release + [LATEST_AGAVE_RELEASE_VERSION](https://github.com/anza-xyz/agave/releases/tag/LATEST_AGAVE_RELEASE_VERSION) on your machine by running: ```bash -sh -c "$(curl -sSfL https://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/install)" +sh -c "$(curl -sSfL https://release.anza.xyz/LATEST_AGAVE_RELEASE_VERSION/install)" ``` -- You can replace `LATEST_SOLANA_RELEASE_VERSION` with the release tag matching +- You can replace `LATEST_AGAVE_RELEASE_VERSION` with the release tag matching the software version of your desired release, or use one of the three symbolic channel names: `stable`, `beta`, or `edge`. - The following output indicates a successful update: ```text -downloading LATEST_SOLANA_RELEASE_VERSION installer +downloading LATEST_AGAVE_RELEASE_VERSION installer Configuration: /home/solana/.config/solana/install/config.yml Active release directory: /home/solana/.local/share/solana/install/active_release -* Release version: LATEST_SOLANA_RELEASE_VERSION -* Release URL: https://github.com/anza-xyz/agave/releases/download/LATEST_SOLANA_RELEASE_VERSION/solana-release-x86_64-unknown-linux-gnu.tar.bz2 +* Release version: LATEST_AGAVE_RELEASE_VERSION +* Release URL: https://github.com/anza-xyz/agave/releases/download/LATEST_AGAVE_RELEASE_VERSION/solana-release-x86_64-unknown-linux-gnu.tar.bz2 Update successful ``` @@ -74,7 +74,7 @@ solana --version installer into a temporary directory: ```bash -cmd /c "curl https://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/agave-install-init-x86_64-pc-windows-msvc.exe --output C:\agave-install-tmp\agave-install-init.exe --create-dirs" +cmd /c "curl https://release.anza.xyz/LATEST_AGAVE_RELEASE_VERSION/agave-install-init-x86_64-pc-windows-msvc.exe --output C:\agave-install-tmp\agave-install-init.exe --create-dirs" ``` - Copy and paste the following command, then press Enter to install the latest @@ -82,7 +82,7 @@ cmd /c "curl https://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/agave-inst to allow the program to run. ```bash -C:\agave-install-tmp\agave-install-init.exe LATEST_SOLANA_RELEASE_VERSION +C:\agave-install-tmp\agave-install-init.exe LATEST_AGAVE_RELEASE_VERSION ``` - When the installer is finished, press Enter. diff --git a/docs/src/faq.md b/docs/src/faq.md index 35a17467f8c668..be6365dba0afca 100644 --- a/docs/src/faq.md +++ b/docs/src/faq.md @@ -3,6 +3,10 @@ title: Validator Frequently Asked Questions sidebar_label: Frequently Asked Questions --- +### What is Agave? How is it different from other possible Solana validators? + +Solana is an open source, decentralized, proof-of-stake blockchain.
It is therefore possible for multiple distinct teams to fork and maintain their own validator software. The original Solana validator was maintained by Solana Labs. A new organization, Anza, was formed in 2024 consisting of former Solana Labs core engineering members. Anza forked the Solana validator and renamed it to Agave (this project). Agave is the version of the original Solana validator maintained by the team at Anza. As of the writing of this FAQ, Agave is the most popular validator client for the Solana network, but it is likely that in the future several validator clients will run in parallel to help support the network. We recommend checking the community and doing research before making a selection. + ### What is a validator? A validator is a computer that runs a software program to verify transactions that are added to the Solana blockchain. A validator can be a voting validator or a non voting validator. To learn more, see [what is a validator](./what-is-a-validator.md). @@ -47,4 +51,4 @@ See [Solana validator prerequisites](./operations/prerequisites.md). ### What are the economics of running a validator? -See [economics of running a validator](./operations/validator-or-rpc-node.md#economics-of-running-a-consensus-validator). \ No newline at end of file +See [economics of running a validator](./operations/validator-or-rpc-node.md#economics-of-running-a-consensus-validator). diff --git a/docs/src/index.mdx b/docs/src/index.mdx index b7a098ea747132..6314ed65581456 100644 --- a/docs/src/index.mdx +++ b/docs/src/index.mdx @@ -3,18 +3,19 @@ slug: / id: home title: Home sidebar_label: Home -pagination_label: Solana Validator Documentation Home -description: "Solana is a high performance network that is utilized for a range - of use cases, \ - including finance, NFTs, payments, and gaming." +pagination_label: Agave Validator Documentation Home +description: "Agave is a validator for the Solana blockchain maintained by the Anza core engineering team; \ + it is a fork of the original Solana validator software." --- -# Solana Validator Documentation +# Agave Validator Documentation Solana is a blockchain built for mass adoption. It's a high performance network that is utilized for a range of use cases, including finance, NFTs, payments, and gaming. Solana operates as a single global state machine, and is open, -interoperable and decentralized. +interoperable and decentralized. Agave is a fork of the original Solana validator +previously maintained by the Solana Labs team. Agave, one of several Solana validator clients, +is now under active development by the core engineering team at Anza. ## Command Line Interface and Tool Suite @@ -43,7 +44,7 @@ works: ## Running a Validator -Explore what it takes to operate a Solana validator and help secure the network. +Explore what it takes to operate an Agave validator and help secure the network. - [Validator vs RPC node](./operations/validator-or-rpc-node.md) - Understand the important differences between voting and non-voting validators on the diff --git a/docs/src/operations/best-practices/general.md b/docs/src/operations/best-practices/general.md index 3e531b0160c571..92d73578ee38ee 100644 --- a/docs/src/operations/best-practices/general.md +++ b/docs/src/operations/best-practices/general.md @@ -1,5 +1,5 @@ --- -title: Solana Validator Operations Best Practices +title: Agave Validator Operations Best Practices sidebar_label: General Operations pagination_label: "Best Practices: Validator Operations" --- @@ -72,7 +72,7 @@ process.
### Building From Source -It is a best practice to always build your Solana binaries from source. If you +It is a best practice to always build your Agave binaries from source. If you build from source, you are certain that the code you are building has not been tampered with before the binary was created. You may also be able to optimize your `agave-validator` binary to your specific hardware. @@ -88,7 +88,7 @@ If you are not comfortable building from source, or you need to quickly install a new version to test something out, you could instead try using the `agave-install` command. -Assuming you want to install Solana version `1.14.17`, you would execute the +Assuming you want to install Agave version `1.14.17`, you would execute the following: ``` diff --git a/docs/src/operations/best-practices/monitoring.md b/docs/src/operations/best-practices/monitoring.md index a0f2ef9df9fa22..babef520b01abc 100644 --- a/docs/src/operations/best-practices/monitoring.md +++ b/docs/src/operations/best-practices/monitoring.md @@ -1,5 +1,5 @@ --- -title: Solana Validator Monitoring Best Practices +title: Agave Validator Monitoring Best Practices sidebar_label: Monitoring pagination_label: "Best Practices: Validator Monitoring" --- @@ -83,4 +83,4 @@ export TELEGRAM_CHAT_ID= Once your environment variables are set, restart `agave-watchtower`. You should see output about your validator. -To test that your Telegram configuration is working properly, you could stop your validator briefly until it is labeled as delinquent. Up to a minute after the validator is delinquent, you should receive a message in the Telegram group from your bot. Start the validator again and verify that you get another message in your Telegram group from the bot. The message should say `all clear`. \ No newline at end of file +To test that your Telegram configuration is working properly, you could stop your validator briefly until it is labeled as delinquent. Up to a minute after the validator is delinquent, you should receive a message in the Telegram group from your bot. Start the validator again and verify that you get another message in your Telegram group from the bot. The message should say `all clear`. diff --git a/docs/src/operations/best-practices/security.md b/docs/src/operations/best-practices/security.md index fab46b665ad7fa..861f0ecdfe9214 100644 --- a/docs/src/operations/best-practices/security.md +++ b/docs/src/operations/best-practices/security.md @@ -1,5 +1,5 @@ --- -title: Solana Validator Security Best Practices +title: Agave Validator Security Best Practices sidebar_label: Security pagination_label: "Best Practices: Validator Security" --- @@ -45,4 +45,4 @@ sudo apt install fail2ban ## DO NOT Use Password Authentication for SSH -In addition to installing `fail2ban`, it is recommended to disable password based authentication for SSH access. SSH key based authentication is preferred. \ No newline at end of file +In addition to installing `fail2ban`, it is recommended to disable password based authentication for SSH access. SSH key based authentication is preferred. diff --git a/docs/src/operations/guides/restart-cluster.md b/docs/src/operations/guides/restart-cluster.md index cda3f30a5a016d..0d3178b36a39e4 100644 --- a/docs/src/operations/guides/restart-cluster.md +++ b/docs/src/operations/guides/restart-cluster.md @@ -8,13 +8,13 @@ pagination_label: "Validator Guides: Restart a Cluster" ### Step 1. 
Identify the latest optimistically confirmed slot for the cluster
 
-In Solana 1.14 or greater, run the following command to output the latest
+In Agave 1.14 or greater, run the following command to output the latest
 optimistically confirmed slot your validator observed:
 ```bash
 agave-ledger-tool -l ledger latest-optimistic-slots
 ```
 
-In Solana 1.13 or less, the latest optimistically confirmed can be found by looking for the more recent occurrence of
+In Agave 1.13 or less, the latest optimistically confirmed slot can be found by looking for the most recent occurrence of
 [this](https://github.com/solana-labs/solana/blob/0264147d42d506fb888f5c4c021a998e231a3e74/core/src/optimistic_confirmation_verifier.rs#L71)
 metrics datapoint.
diff --git a/docs/src/operations/guides/validator-start.md b/docs/src/operations/guides/validator-start.md
index d86c714be4e6a6..5907064105c794 100644
--- a/docs/src/operations/guides/validator-start.md
+++ b/docs/src/operations/guides/validator-start.md
@@ -260,17 +260,15 @@ Read more about [creating and managing a vote account](./vote-accounts.md).
 
 ## Known validators
 
-If you know and respect other validator operators, you can specify this on the command line with the `--known-validator <PUBKEY>`
-argument to `agave-validator`. You can specify multiple ones by repeating the argument `--known-validator <PUBKEY1> --known-validator <PUBKEY2>`.
-This has two effects, one is when the validator is booting with `--only-known-rpc`, it will only ask that set of
-known nodes for downloading genesis and snapshot data. Another is that in combination with the `--halt-on-known-validators-accounts-hash-mismatch` option,
-it will monitor the merkle root hash of the entire accounts state of other known nodes on gossip and if the hashes produce any mismatch,
-the validator will halt the node to prevent the validator from voting or processing potentially incorrect state values. At the moment, the slot that
-the validator publishes the hash on is tied to the snapshot interval. For the feature to be effective, all validators in the known
-set should be set to the same snapshot interval value or multiples of the same.
-
-It is highly recommended you use these options to prevent malicious snapshot state download or
-account state divergence.
+If you know and respect other validator operators, you can specify this on the
+command line with the `--known-validator <PUBKEY>` argument to
+`agave-validator`. You can specify multiple ones by repeating the argument
+`--known-validator <PUBKEY1> --known-validator <PUBKEY2>`. This has the effect
+that when the validator is booting with `--only-known-rpc`, it will only ask
+that set of known nodes when downloading genesis and snapshot data.
+
+It is highly recommended that you use this option to protect against
+downloading malicious snapshot state.
 
 ## Connect Your Validator
 
diff --git a/docs/src/operations/index.md b/docs/src/operations/index.md
index 51e29ac4078cab..54312ae38d852f 100644
--- a/docs/src/operations/index.md
+++ b/docs/src/operations/index.md
@@ -3,6 +3,6 @@ title: Operating a Validator
 sidebar_position: 0
 ---
 
-This section describes how to run a Solana validator node.
+This section describes how to run an Agave validator node.
 
 There are several clusters available to connect to; see
 [Choosing a Cluster](../cli/examples/choose-a-cluster.md) for an overview of each.
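For readers following along, the two flags documented in the rewritten passage above compose like this (an illustrative sketch only, with placeholder pubkeys; not part of the patch):

```bash
# Trust two operators; at boot, fetch genesis and snapshot data only from them.
# <PUBKEY1> and <PUBKEY2> are placeholders for the operators' identity pubkeys.
agave-validator \
    --known-validator <PUBKEY1> \
    --known-validator <PUBKEY2> \
    --only-known-rpc \
    --ledger /path/to/ledger    # remaining flags elided
```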
diff --git a/docs/src/operations/prerequisites.md b/docs/src/operations/prerequisites.md index fb37d9ec4de3ff..d2bdf1e248d958 100644 --- a/docs/src/operations/prerequisites.md +++ b/docs/src/operations/prerequisites.md @@ -1,11 +1,11 @@ --- -title: Solana Validator Prerequisites +title: Agave Validator Prerequisites sidebar_position: 2 sidebar_label: Prerequisites pagination_label: Prerequisites to run a Validator --- -Operating a Solana validator is an interesting and rewarding task. Generally speaking, it requires someone with a technical background but also involves community engagement and marketing. +Operating an Agave validator is an interesting and rewarding task. Generally speaking, it requires someone with a technical background but also involves community engagement and marketing. ## How to be a good Validator Operator @@ -34,4 +34,4 @@ Whether you decide to run a [validator](../what-is-a-validator.md) or an [RPC no While anyone can join the network, you should make sure that your home computer and network meets the specifications in the [hardware requirements](./requirements.md) doc. Most home internet service providers do not provide consistent service that would allow your validator to perform well. If your home network or personal hardware is not performant enough to keep up with the Solana cluster, your validator will not be able to participate in consensus. -In addition to performance considerations, you will want to make sure that your home computer is resistant to outages caused by loss of power, flooding, fire, theft, etc. If you are just getting started and learning about being an operator, a home setup may be sufficient, but you will want to consider all of these factors when you start operating your validator on the mainnet-beta cluster. \ No newline at end of file +In addition to performance considerations, you will want to make sure that your home computer is resistant to outages caused by loss of power, flooding, fire, theft, etc. If you are just getting started and learning about being an operator, a home setup may be sufficient, but you will want to consider all of these factors when you start operating your validator on the mainnet-beta cluster. diff --git a/docs/src/operations/requirements.md b/docs/src/operations/requirements.md index 90140ed7a57061..03f3e0a4e4b245 100644 --- a/docs/src/operations/requirements.md +++ b/docs/src/operations/requirements.md @@ -1,5 +1,5 @@ --- -title: Solana Validator Requirements +title: Agave Validator Requirements sidebar_position: 3 sidebar_label: Requirements pagination_label: Requirements to Operate a Validator @@ -7,7 +7,7 @@ pagination_label: Requirements to Operate a Validator ## Minimum SOL requirements -There is no strict minimum amount of SOL required to run a validator on Solana. +There is no strict minimum amount of SOL required to run an Agave validator on Solana. However in order to participate in consensus, a vote account is required which has a rent-exempt reserve of 0.02685864 SOL. Voting also requires sending a vote @@ -18,61 +18,26 @@ transaction for each block the validator agrees with, which can cost up to The hardware recommendations below are provided as a guide. Operators are encouraged to do their own performance testing. 
-- CPU
-  - 12 cores / 24 threads, or more
-  - 2.8GHz base clock speed, or faster
-  - SHA extensions instruction support
-    - AMD Gen 3 or newer
-    - Intel Ice Lake or newer
-  - Higher clock speed is preferable over more cores
-  - AVX2 instruction support (to use official release binaries, self-compile
-    otherwise)
-  - Support for AVX512f is helpful
-- RAM
-  - 256GB or more
-  - Error Correction Code (ECC) memory is suggested
-  - Motherboard with 512GB capacity suggested
-- Disk
-  - PCIe Gen3 x4 NVME SSD, or better
-  - Accounts: 500GB, or larger. High TBW (Total Bytes Written)
-  - Ledger: 1TB or larger. High TBW suggested
-  - Snapshots: 250GB or larger. High TBW suggested
-  - OS: (Optional) 500GB, or larger. SATA OK
-  - The OS may be installed on the ledger disk, though testing has shown better
-    performance with the ledger on its own disk
-  - Accounts and ledger _can_ be stored on the same disk, however due to high
-    IOPS, this is not recommended
-  - The Samsung 970 and 980 Pro series SSDs are popular with the validator community
-- GPUs
-  - Not necessary at this time
-  - Operators in the validator community do not use GPUs currently
-
-### RPC Node Recommendations
-
-The [hardware recommendations](#hardware-recommendations) above should be considered
-bare minimums if the validator is intended to be employed as an RPC node. To provide
-full functionality and improved reliability, the following adjustments should be
-made.
-
-- CPU
-  - 16 cores / 32 threads, or more
-- RAM
-  - **1.17** 512 GB or more if an `account-index` is used, 1TB+ for all three [account indexes](https://docs.solanalabs.com/operations/setup-an-rpc-node#account-indexing)
-  - **1.18 or newer** 512 GB or more for all three indexes
-- Disk
-  - Consider a larger ledger disk if longer transaction history is required
-  - Accounts and ledger should not be stored on the same disk
+| Component | Validator Requirements | Additional RPC Node Requirements |
+|-----------|------------------------|----------------------------------|
+| **CPU** | - 2.8GHz base clock speed, or faster<br>- SHA extensions instruction support<br>- AMD Gen 3 or newer<br>- Intel Ice Lake or newer<br>- Higher clock speed is preferable over more cores<br>- AVX2 instruction support (to use official release binaries, self-compile otherwise)<br>- Support for AVX512f is helpful<br>||
+| | 12 cores / 24 threads, or more | 16 cores / 32 threads, or more |
+| **RAM** | Error Correction Code (ECC) memory is suggested<br>Motherboard with 512GB capacity suggested ||
+| | 256GB or more | 512 GB or more for **all [account indexes](https://docs.solanalabs.com/operations/setup-an-rpc-node#account-indexing)** |
+| **Disk** | PCIe Gen3 x4 NVME SSD, or better, on each of:<br>- **Accounts**: 500GB, or larger. High TBW (Total Bytes Written)<br>- **Ledger**: 1TB or larger. High TBW suggested<br>- **Snapshots**: 250GB or larger. High TBW suggested<br>- **OS**: (Optional) 500GB, or larger. SATA OK<br><br>The OS may be installed on the ledger disk, though testing has shown better performance with the ledger on its own disk<br><br>Accounts and ledger *can* be stored on the same disk, however due to high IOPS, this is not recommended<br><br>The Samsung 970 and 980 Pro series SSDs are popular with the validator community | Consider a larger ledger disk if longer transaction history is required<br><br>Accounts and ledger **should not** be stored on the same disk |
+| **GPUs** | Not necessary at this time<br>Operators in the validator community do not use GPUs currently | |
+
 ## Virtual machines on Cloud Platforms
 
-Running a solana node in the cloud requires significantly greater
+Running an Agave node in the cloud requires significantly greater
 operational expertise to achieve stability and performance. Do not
 expect to find sympathetic voices should you chose this route and find
 yourself in need of support.
 
 ## Docker
 
-Running validator for live clusters (including mainnet-beta) inside Docker is
+Running an Agave validator for live clusters (including mainnet-beta) inside Docker is
 not recommended and generally not supported. This is due to concerns of general
 Docker's containerization overhead and resultant performance degradation unless
 specially configured.
 
@@ -83,7 +48,7 @@ releases at [solanalabs/solana](https://hub.docker.com/r/solanalabs/solana).
 ## Software
 
 - We build and run on Ubuntu 20.04.
-- See [Installing Solana CLI](../cli/install.md) for the current Solana software release.
+- See [Installing Solana CLI](../cli/install.md) for the current Solana CLI software release.
 
 Prebuilt binaries are available for Linux x86_64 on CPUs supporting AVX2
 \(Ubuntu 20.04 recommended\). MacOS or WSL users may build from source.
 
@@ -106,4 +71,4 @@ be limited to any free 13 port range with `--dynamic-port-range`
 For security purposes, it is not suggested that the following ports be open to
 the internet on staked, mainnet-beta validators.
 
 - 8899 TCP - JSONRPC over HTTP. Change with `--rpc-port RPC_PORT``
-- 8900 TCP - JSONRPC over Websockets. Derived. Uses `RPC_PORT + 1`
\ No newline at end of file
+- 8900 TCP - JSONRPC over Websockets. Derived. Uses `RPC_PORT + 1`
diff --git a/docs/src/operations/setup-a-validator.md b/docs/src/operations/setup-a-validator.md
index c8d602f0bcf744..2a54b069ec20d7 100644
--- a/docs/src/operations/setup-a-validator.md
+++ b/docs/src/operations/setup-a-validator.md
@@ -1,6 +1,6 @@
 ---
-title: Setup a Solana Validator
-sidebar_label: Setup a Validator
+title: Setup an Agave Validator
+sidebar_label: Setup an Agave Validator
 sidebar_position: 5
 ---
 
@@ -383,7 +383,7 @@ su - sol
 
 ## Install The Solana CLI on Remote Machine
 
-Your remote machine will need the Solana cli installed to run the validator
+Your remote machine will need the Solana CLI installed to run the Agave validator
 software. For simplicity, install the cli with user `sol`. Refer again to
 [Solana's Install Tool](../cli/install.md#use-solanas-install-tool) or
 [build from source](../cli/install.md#build-from-source). It is best for
diff --git a/docs/src/operations/setup-an-rpc-node.md b/docs/src/operations/setup-an-rpc-node.md
index e436ac21b11c4f..1fbd8202fcdcc5 100644
--- a/docs/src/operations/setup-an-rpc-node.md
+++ b/docs/src/operations/setup-an-rpc-node.md
@@ -1,6 +1,6 @@
 ---
-title: Setup a Solana RPC Node
-sidebar_label: Setup an RPC Node
+title: Setup an Agave RPC Node
+sidebar_label: Setup an Agave RPC Node
 sidebar_position: 6
 ---
 
diff --git a/docs/src/runtime/sysvars.md b/docs/src/runtime/sysvars.md
index 36c00747bfaa03..7bf3f04015fd59 100644
--- a/docs/src/runtime/sysvars.md
+++ b/docs/src/runtime/sysvars.md
@@ -154,18 +154,24 @@ and de-activations per epoch. It is updated at the start of every epoch.
 
 ## EpochRewards
 
-The EpochRewards sysvar tracks the progress of epoch rewards distribution. The
-sysvar is created in the first block of the epoch, and lasts for several blocks
-while paying out the rewards. When all rewards have been distributed, the sysvar
-is deleted. Unlike other sysvars, which almost always exist on-chain,
-EpochRewards sysvar only exists during the reward period. Therefore, calling
-`EpochRewards::get()` on blocks that are outside of the reward period will
-return an error, i.e. `UnsupportedSysvar`. This can serve as a method for
-determining whether epoch rewards distribution has finished.
+The EpochRewards sysvar tracks whether the rewards period (including calculation
+and distribution) is in progress, as well as the details needed to resume
+distribution when starting from a snapshot during the rewards period. The sysvar
+is repopulated at the start of the first block of each epoch.
+
 
 - Address: `SysvarEpochRewards1111111111111111111111111`
 - Layout: [EpochRewards](https://docs.rs/solana-program/VERSION_FOR_DOCS_RS/solana_program/epoch_rewards/struct.EpochRewards.html)
+- Fields:
+
+  - `distribution_starting_block_height` - starting block height for distribution for the current epoch
+  - `num_partitions` - the number of partitions in the distribution
+  - `parent_blockhash` - the blockhash seed used to generate the partition hasher, i.e. the blockhash of the parent of the first block in the epoch
+  - `total_points` - the total rewards points calculated for the epoch
+  - `total_rewards` - total rewards for the epoch, in lamports
+  - `distributed_rewards` - rewards for the epoch distributed so far, in lamports
+  - `active` - whether the rewards period is currently active
 
 ## LastRestartSlot
 
diff --git a/docs/src/what-is-a-validator.md b/docs/src/what-is-a-validator.md
index acaef0d27c1586..a6e673430d7a6e 100644
--- a/docs/src/what-is-a-validator.md
+++ b/docs/src/what-is-a-validator.md
@@ -2,7 +2,7 @@
 title: What is a Validator?
 ---
 
-A validator is a computer that helps to run the Solana network. Each validator executes a program that keeps track of all accounts on the Solana cluster and validates transactions being added to the network. Without validators, Solana would not be able to function.
+A validator is a computer that helps to run the Solana network. Each validator executes a program that keeps track of all accounts on the Solana cluster and validates transactions being added to the network. Without validators, Solana would not be able to function. Agave is one of several validator clients that operators can use to help run the Solana network.
 
 The more independent entities that run validators, the less vulnerable the cluster is to an attack or catastrophe that affects the cluster.
 
@@ -14,7 +14,7 @@ By becoming a validator, you are helping to grow the network. You are also learn
 
 Before we discuss validators in more detail, it's useful to make some distinctions. Using the same validator software, you have the option of running a voting/consensus node or choosing to instead run an RPC node. An RPC node helps Solana devs and others interact with the blockchain but for performance reasons should not vote. We go into more detail on RPC nodes in the next section, [what is an rpc node](./what-is-an-rpc-node.md).
 
-For this document, when a validator is mentioned, we are talking about a voting/consensus node. Now, to better understand what your validator is doing, it would help to understand how the Solana network functions in more depth.
+For this document, when a validator is mentioned, we are talking about a voting/consensus node. Now, to better understand what your validator is doing, it would help to understand how the Solana network functions in more depth. This documentation specifically focuses on the Agave client.
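An aside on the `EpochRewards` hunk above: because the sysvar now always exists and exposes an `active` flag, on-chain code checks the flag instead of relying on the old `UnsupportedSysvar` error. A minimal sketch assuming the `solana-program` sysvar API (not part of the patch):

```rust
use solana_program::{
    program_error::ProgramError,
    sysvar::{epoch_rewards::EpochRewards, Sysvar},
};

// Returns Ok(()) once the rewards period has finished distributing.
fn assert_rewards_distribution_done() -> Result<(), ProgramError> {
    let epoch_rewards = EpochRewards::get()?;
    if epoch_rewards.active {
        // Rewards are still being calculated/distributed this epoch.
        return Err(ProgramError::InvalidAccountData);
    }
    Ok(())
}
```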
## Proof Of Stake diff --git a/dos/Cargo.toml b/dos/Cargo.toml index a59dce7b337239..535653e3386e6c 100644 --- a/dos/Cargo.toml +++ b/dos/Cargo.toml @@ -19,6 +19,7 @@ rand = { workspace = true } serde = { workspace = true } solana-bench-tps = { workspace = true } solana-client = { workspace = true } +solana-connection-cache = { workspace = true } solana-core = { workspace = true } solana-faucet = { workspace = true } solana-gossip = { workspace = true } diff --git a/dos/src/main.rs b/dos/src/main.rs index 033de4bf4b49da..0b299718467134 100644 --- a/dos/src/main.rs +++ b/dos/src/main.rs @@ -46,10 +46,8 @@ use { log::*, rand::{thread_rng, Rng}, solana_bench_tps::bench::generate_and_fund_keypairs, - solana_client::{ - connection_cache::ConnectionCache, tpu_client::TpuClientWrapper, - tpu_connection::TpuConnection, - }, + solana_client::{connection_cache::ConnectionCache, tpu_client::TpuClientWrapper}, + solana_connection_cache::client_connection::ClientConnection as TpuConnection, solana_core::repair::serve_repair::{RepairProtocol, RepairRequestHeader, ServeRepair}, solana_dos::cli::*, solana_gossip::{ diff --git a/fee/Cargo.toml b/fee/Cargo.toml new file mode 100644 index 00000000000000..0b8902bf2566bd --- /dev/null +++ b/fee/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "solana-fee" +description = "Solana fee calculation" +documentation = "https://docs.rs/solana-fee" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +solana-sdk = { workspace = true } +solana-svm-transaction = { workspace = true } diff --git a/fee/src/lib.rs b/fee/src/lib.rs new file mode 100644 index 00000000000000..869d4dc7f4711c --- /dev/null +++ b/fee/src/lib.rs @@ -0,0 +1,40 @@ +use {solana_sdk::fee::FeeDetails, solana_svm_transaction::svm_message::SVMMessage}; + +/// Calculate fee for `SanitizedMessage` +pub fn calculate_fee( + message: &impl SVMMessage, + zero_fees_for_test: bool, + lamports_per_signature: u64, + prioritization_fee: u64, + remove_rounding_in_fee_calculation: bool, +) -> u64 { + calculate_fee_details( + message, + zero_fees_for_test, + lamports_per_signature, + prioritization_fee, + remove_rounding_in_fee_calculation, + ) + .total_fee() +} + +pub fn calculate_fee_details( + message: &impl SVMMessage, + zero_fees_for_test: bool, + lamports_per_signature: u64, + prioritization_fee: u64, + remove_rounding_in_fee_calculation: bool, +) -> FeeDetails { + if zero_fees_for_test { + return FeeDetails::default(); + } + let signature_fee = message + .num_total_signatures() + .saturating_mul(lamports_per_signature); + + FeeDetails::new( + signature_fee, + prioritization_fee, + remove_rounding_in_fee_calculation, + ) +} diff --git a/frozen-abi/src/abi_digester.rs b/frozen-abi/src/abi_digester.rs index 39e6ce46f34150..9d2ee5f296d470 100644 --- a/frozen-abi/src/abi_digester.rs +++ b/frozen-abi/src/abi_digester.rs @@ -127,7 +127,7 @@ impl AbiDigester { value.serialize(self.create_new()) } else { // Don't call value.visit_for_abi(...) to prefer autoref specialization - // resolution for IgnoreAsHelper + // resolution for TransparentAsHelper <&T>::visit_for_abi(&value, &mut self.create_new()) } } @@ -657,7 +657,7 @@ mod tests { type TestBitVec = bv::BitVec; mod bitflags_abi { - use crate::abi_example::{AbiExample, EvenAsOpaque, IgnoreAsHelper}; + use crate::abi_example::{AbiExample, EvenAsOpaque, TransparentAsHelper}; bitflags::bitflags! 
{ #[frozen_abi(digest = "HhKNkaeAd7AohTb8S8sPKjAWwzxWY2DPz5FvkWmx5bSH")] @@ -673,7 +673,7 @@ mod tests { } } - impl IgnoreAsHelper for TestFlags {} + impl TransparentAsHelper for TestFlags {} // This (EvenAsOpaque) marker trait is needed for bitflags-generated types because we can't // impl AbiExample for its private type: // thread '...TestFlags_frozen_abi...' panicked at ...: diff --git a/frozen-abi/src/abi_example.rs b/frozen-abi/src/abi_example.rs index 7931b05b6a81b4..d0de2845776619 100644 --- a/frozen-abi/src/abi_example.rs +++ b/frozen-abi/src/abi_example.rs @@ -235,7 +235,7 @@ impl AbiExample for BitVec { } } -impl IgnoreAsHelper for BitVec {} +impl TransparentAsHelper for BitVec {} // This (EvenAsOpaque) marker trait is needed for BitVec because we can't impl AbiExample for its // private type: // thread '...TestBitVec_frozen_abi...' panicked at ...: @@ -515,12 +515,12 @@ impl AbiExample for IpAddr { // User-defined enums usually just need to impl this with namesake derive macro (AbiEnumVisitor). // // Note that sometimes this indirection doesn't work for various reasons. For that end, there are -// hacks with marker traits (IgnoreAsHelper/EvenAsOpaque). +// hacks with marker traits (TransparentAsHelper/EvenAsOpaque). pub trait AbiEnumVisitor: Serialize { fn visit_for_abi(&self, digester: &mut AbiDigester) -> DigestResult; } -pub trait IgnoreAsHelper {} +pub trait TransparentAsHelper {} pub trait EvenAsOpaque { const TYPE_NAME_MATCHER: &'static str; } @@ -538,7 +538,7 @@ impl AbiEnumVisitor for T { default fn visit_for_abi(&self, digester: &mut AbiDigester) -> DigestResult { info!("AbiEnumVisitor for T: {}", type_name::()); // not calling self.serialize(...) is intentional here as the most generic impl - // consider IgnoreAsHelper and EvenAsOpaque if you're stuck on this.... + // consider TransparentAsHelper and EvenAsOpaque if you're stuck on this.... 
T::example() .serialize(digester.create_new()) .map_err(DigestError::wrap_by_type::) @@ -558,9 +558,12 @@ impl AbiEnumVisitor for &T { // force to call self.serialize instead of T::visit_for_abi() for serialization // helper structs like ad-hoc iterator `struct`s -impl AbiEnumVisitor for &T { +impl AbiEnumVisitor for &T { default fn visit_for_abi(&self, digester: &mut AbiDigester) -> DigestResult { - info!("AbiEnumVisitor for (IgnoreAsHelper): {}", type_name::()); + info!( + "AbiEnumVisitor for (TransparentAsHelper): {}", + type_name::() + ); self.serialize(digester.create_new()) .map_err(DigestError::wrap_by_type::) } @@ -568,7 +571,7 @@ impl AbiEnumVisitor for &T { // force to call self.serialize instead of T::visit_for_abi() to work around the // inability of implementing AbiExample for private structs from other crates -impl AbiEnumVisitor for &T { +impl AbiEnumVisitor for &T { default fn visit_for_abi(&self, digester: &mut AbiDigester) -> DigestResult { let type_name = type_name::(); let matcher = T::TYPE_NAME_MATCHER; diff --git a/genesis/src/main.rs b/genesis/src/main.rs index 6b7efd5e664339..9edf3a49cc51e6 100644 --- a/genesis/src/main.rs +++ b/genesis/src/main.rs @@ -11,7 +11,7 @@ use { cluster_type_of, pubkey_of, pubkeys_of, unix_timestamp_from_rfc3339_datetime, }, input_validators::{ - is_pubkey_or_keypair, is_rfc3339_datetime, is_slot, is_valid_percentage, + is_pubkey, is_pubkey_or_keypair, is_rfc3339_datetime, is_slot, is_valid_percentage, }, }, solana_entry::poh::compute_hashes_per_tick, @@ -363,6 +363,15 @@ fn main() -> Result<(), Box> { "Selects the features that will be enabled for the cluster" ), ) + .arg( + Arg::with_name("deactivate_feature") + .long("deactivate-feature") + .takes_value(true) + .value_name("FEATURE_PUBKEY") + .validator(is_pubkey) + .multiple(true) + .help("Deactivate this feature in genesis. 
Compatible with --cluster-type development"),
+            )
             .arg(
                 Arg::with_name("max_genesis_archive_unpacked_size")
                     .long("max-genesis-archive-unpacked-size")
@@ -471,6 +480,14 @@ fn main() -> Result<(), Box<dyn error::Error>> {
 
     let cluster_type = cluster_type_of(&matches, "cluster_type").unwrap();
 
+    // Get the features to deactivate if provided
+    let features_to_deactivate = pubkeys_of(&matches, "deactivate_feature").unwrap_or_default();
+
+    if cluster_type != ClusterType::Development && !features_to_deactivate.is_empty() {
+        eprintln!("Error: The --deactivate-feature argument cannot be used with --cluster-type={cluster_type:?}");
+        std::process::exit(1);
+    }
+
     match matches.value_of("hashes_per_tick").unwrap() {
         "auto" => match cluster_type {
             ClusterType::Development => {
@@ -580,6 +597,12 @@ fn main() -> Result<(), Box<dyn error::Error>> {
     solana_stake_program::add_genesis_accounts(&mut genesis_config);
     if genesis_config.cluster_type == ClusterType::Development {
         solana_runtime::genesis_utils::activate_all_features(&mut genesis_config);
+        if !features_to_deactivate.is_empty() {
+            solana_runtime::genesis_utils::deactivate_features(
+                &mut genesis_config,
+                &features_to_deactivate,
+            );
+        }
     }
 
     if let Some(files) = matches.values_of("primordial_accounts_file") {
diff --git a/geyser-plugin-interface/src/geyser_plugin_interface.rs b/geyser-plugin-interface/src/geyser_plugin_interface.rs
index d9a3b00f8dc4c8..1dec992bd6f10b 100644
--- a/geyser-plugin-interface/src/geyser_plugin_interface.rs
+++ b/geyser-plugin-interface/src/geyser_plugin_interface.rs
@@ -8,7 +8,7 @@ use {
         signature::Signature, transaction::SanitizedTransaction,
     },
-    solana_transaction_status::{Reward, TransactionStatusMeta},
+    solana_transaction_status::{Reward, RewardsAndNumPartitions, TransactionStatusMeta},
     std::{any::Any, error, io},
     thiserror::Error,
 };
@@ -251,11 +251,27 @@ pub struct ReplicaBlockInfoV3<'a> {
     pub entry_count: u64,
 }
 
+/// Extending ReplicaBlockInfo by sending RewardsAndNumPartitions.
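// (Illustrative sketch, not part of this patch: a plugin consuming the new
// variant added below might match on it like so, where `Result` is this
// crate's plugin result type:
//
//     fn notify_block_metadata(&self, blockinfo: ReplicaBlockInfoVersions) -> Result<()> {
//         if let ReplicaBlockInfoVersions::V0_0_4(block_info) = blockinfo {
//             // block_info.rewards now carries num_partitions alongside the rewards.
//         }
//         Ok(())
//     }
// )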
+#[derive(Clone, Debug)]
+#[repr(C)]
+pub struct ReplicaBlockInfoV4<'a> {
+    pub parent_slot: Slot,
+    pub parent_blockhash: &'a str,
+    pub slot: Slot,
+    pub blockhash: &'a str,
+    pub rewards: &'a RewardsAndNumPartitions,
+    pub block_time: Option<UnixTimestamp>,
+    pub block_height: Option<u64>,
+    pub executed_transaction_count: u64,
+    pub entry_count: u64,
+}
+
 #[repr(u32)]
 pub enum ReplicaBlockInfoVersions<'a> {
     V0_0_1(&'a ReplicaBlockInfo<'a>),
     V0_0_2(&'a ReplicaBlockInfoV2<'a>),
     V0_0_3(&'a ReplicaBlockInfoV3<'a>),
+    V0_0_4(&'a ReplicaBlockInfoV4<'a>),
 }
 
 /// Errors returned by plugin calls
diff --git a/geyser-plugin-manager/src/block_metadata_notifier.rs b/geyser-plugin-manager/src/block_metadata_notifier.rs
index 87f15f41fc0ae0..6818881922ec33 100644
--- a/geyser-plugin-manager/src/block_metadata_notifier.rs
+++ b/geyser-plugin-manager/src/block_metadata_notifier.rs
@@ -4,13 +4,14 @@ use {
         geyser_plugin_manager::GeyserPluginManager,
     },
     agave_geyser_plugin_interface::geyser_plugin_interface::{
-        ReplicaBlockInfoV3, ReplicaBlockInfoVersions,
+        ReplicaBlockInfoV4, ReplicaBlockInfoVersions,
     },
     log::*,
     solana_measure::measure::Measure,
     solana_metrics::*,
-    solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey, reward_info::RewardInfo},
-    solana_transaction_status::{Reward, Rewards},
+    solana_runtime::bank::KeyedRewardsAndNumPartitions,
+    solana_sdk::clock::UnixTimestamp,
+    solana_transaction_status::{Reward, RewardsAndNumPartitions},
     std::sync::{Arc, RwLock},
 };
 
@@ -26,7 +27,7 @@ impl BlockMetadataNotifier for BlockMetadataNotifierImpl {
         parent_blockhash: &str,
         slot: u64,
         blockhash: &str,
-        rewards: &RwLock<Vec<(Pubkey, RewardInfo)>>,
+        rewards: &KeyedRewardsAndNumPartitions,
         block_time: Option<UnixTimestamp>,
         block_height: Option<u64>,
         executed_transaction_count: u64,
@@ -36,22 +37,23 @@ impl BlockMetadataNotifier for BlockMetadataNotifierImpl {
         if plugin_manager.plugins.is_empty() {
             return;
         }
+        let rewards = Self::build_rewards(rewards);
+        let block_info = Self::build_replica_block_info(
+            parent_slot,
+            parent_blockhash,
+            slot,
+            blockhash,
+            &rewards,
+            block_time,
+            block_height,
+            executed_transaction_count,
+            entry_count,
+        );
         for plugin in plugin_manager.plugins.iter() {
             let mut measure = Measure::start("geyser-plugin-update-slot");
-            let block_info = Self::build_replica_block_info(
-                parent_slot,
-                parent_blockhash,
-                slot,
-                blockhash,
-                &rewards,
-                block_time,
-                block_height,
-                executed_transaction_count,
-                entry_count,
-            );
-            let block_info = ReplicaBlockInfoVersions::V0_0_3(&block_info);
+            let block_info = ReplicaBlockInfoVersions::V0_0_4(&block_info);
             match plugin.notify_block_metadata(block_info) {
                 Err(err) => {
                     error!(
@@ -81,18 +83,21 @@ impl BlockMetadataNotifier for BlockMetadataNotifierImpl {
 }
 
 impl BlockMetadataNotifierImpl {
-    fn build_rewards(rewards: &RwLock<Vec<(Pubkey, RewardInfo)>>) -> Rewards {
-        let rewards = rewards.read().unwrap();
-        rewards
-            .iter()
-            .map(|(pubkey, reward)| Reward {
-                pubkey: pubkey.to_string(),
-                lamports: reward.lamports,
-                post_balance: reward.post_balance,
-                reward_type: Some(reward.reward_type),
-                commission: reward.commission,
-            })
-            .collect()
+    fn build_rewards(rewards: &KeyedRewardsAndNumPartitions) -> RewardsAndNumPartitions {
+        RewardsAndNumPartitions {
+            rewards: rewards
+                .keyed_rewards
+                .iter()
+                .map(|(pubkey, reward)| Reward {
+                    pubkey: pubkey.to_string(),
+                    lamports: reward.lamports,
+                    post_balance: reward.post_balance,
+                    reward_type: Some(reward.reward_type),
+                    commission: reward.commission,
+                })
+                .collect(),
+            num_partitions: rewards.num_partitions,
+        }
     }
 
     fn build_replica_block_info<'a>(
@@ -100,13 +105,13 @@ impl BlockMetadataNotifierImpl {
         parent_blockhash: &'a str,
         slot: u64,
         blockhash: &'a str,
-        rewards: &'a [Reward],
+        rewards: &'a RewardsAndNumPartitions,
         block_time: Option<UnixTimestamp>,
         block_height: Option<u64>,
         executed_transaction_count: u64,
         entry_count: u64,
-    ) -> ReplicaBlockInfoV3<'a> {
-        ReplicaBlockInfoV3 {
+    ) -> ReplicaBlockInfoV4<'a> {
+        ReplicaBlockInfoV4 {
             parent_slot,
             parent_blockhash,
             slot,
diff --git a/geyser-plugin-manager/src/block_metadata_notifier_interface.rs b/geyser-plugin-manager/src/block_metadata_notifier_interface.rs
index bb0ffe4c7f7513..b1f7b1ef50c7e3 100644
--- a/geyser-plugin-manager/src/block_metadata_notifier_interface.rs
+++ b/geyser-plugin-manager/src/block_metadata_notifier_interface.rs
@@ -1,6 +1,6 @@
 use {
-    solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey, reward_info::RewardInfo},
-    std::sync::{Arc, RwLock},
+    solana_runtime::bank::KeyedRewardsAndNumPartitions, solana_sdk::clock::UnixTimestamp,
+    std::sync::Arc,
 };
 
 /// Interface for notifying block metadata changes
@@ -13,7 +13,7 @@ pub trait BlockMetadataNotifier {
         parent_blockhash: &str,
         slot: u64,
         blockhash: &str,
-        rewards: &RwLock<Vec<(Pubkey, RewardInfo)>>,
+        rewards: &KeyedRewardsAndNumPartitions,
         block_time: Option<UnixTimestamp>,
         block_height: Option<u64>,
         executed_transaction_count: u64,
diff --git a/geyser-plugin-manager/src/geyser_plugin_manager.rs b/geyser-plugin-manager/src/geyser_plugin_manager.rs
index d5521c9ad41e19..beaa799109b5d7 100644
--- a/geyser-plugin-manager/src/geyser_plugin_manager.rs
+++ b/geyser-plugin-manager/src/geyser_plugin_manager.rs
@@ -14,13 +14,24 @@ use {
 pub struct LoadedGeyserPlugin {
     name: String,
     plugin: Box<dyn GeyserPlugin>,
+    // NOTE: While we do not access the library, the plugin we have loaded most
+    // certainly does. To ensure we don't SIGSEGV we must declare the library
+    // after the plugin so the plugin is dropped first.
+    //
+    // Furthermore, a well behaved Geyser plugin must ensure it ceases to run
+    // any code before returning from Drop. This means if the Geyser plugins
+    // spawn threads that access the Library, those threads must be `join`ed
+    // before the Geyser plugin returns from on_unload / Drop.
+    #[allow(dead_code)]
+    library: Library,
 }
 
 impl LoadedGeyserPlugin {
-    pub fn new(plugin: Box<dyn GeyserPlugin>, name: Option<String>) -> Self {
+    pub fn new(library: Library, plugin: Box<dyn GeyserPlugin>, name: Option<String>) -> Self {
         Self {
             name: name.unwrap_or_else(|| plugin.name().to_owned()),
             plugin,
+            library,
         }
     }
 
@@ -46,14 +57,12 @@ impl DerefMut for LoadedGeyserPlugin {
 #[derive(Default, Debug)]
 pub struct GeyserPluginManager {
     pub plugins: Vec<LoadedGeyserPlugin>,
-    libs: Vec<Library>,
 }
 
 impl GeyserPluginManager {
     pub fn new() -> Self {
         GeyserPluginManager {
             plugins: Vec::default(),
-            libs: Vec::default(),
         }
     }
 
@@ -64,10 +73,6 @@ impl GeyserPluginManager {
             info!("Unloading plugin for {:?}", plugin.name());
             plugin.on_unload();
         }
-
-        for lib in self.libs.drain(..)
{ - drop(lib); - } } /// Check if there is any plugin interested in account data @@ -118,7 +123,7 @@ impl GeyserPluginManager { geyser_plugin_config_file: impl AsRef, ) -> JsonRpcResult { // First load plugin - let (mut new_plugin, new_lib, new_config_file) = + let (mut new_plugin, new_config_file) = load_plugin_from_config(geyser_plugin_config_file.as_ref()).map_err(|e| { jsonrpc_core::Error { code: ErrorCode::InvalidRequest, @@ -158,7 +163,6 @@ impl GeyserPluginManager { })?; let name = new_plugin.name().to_string(); self.plugins.push(new_plugin); - self.libs.push(new_lib); Ok(name) } @@ -208,7 +212,7 @@ impl GeyserPluginManager { // Try to load plugin, library // SAFETY: It is up to the validator to ensure this is a valid plugin library. - let (mut new_plugin, new_lib, new_parsed_config_file) = + let (mut new_plugin, new_parsed_config_file) = load_plugin_from_config(config_file.as_ref()).map_err(|err| jsonrpc_core::Error { code: ErrorCode::InvalidRequest, message: err.to_string(), @@ -238,7 +242,6 @@ impl GeyserPluginManager { // On success, push plugin and library Ok(()) => { self.plugins.push(new_plugin); - self.libs.push(new_lib); } // On failure, return error @@ -257,13 +260,9 @@ impl GeyserPluginManager { } fn _drop_plugin(&mut self, idx: usize) { - let current_lib = self.libs.remove(idx); let mut current_plugin = self.plugins.remove(idx); let name = current_plugin.name().to_string(); current_plugin.on_unload(); - // The plugin must be dropped before the library to avoid a crash. - drop(current_plugin); - drop(current_lib); info!("Unloaded plugin {name} at idx {idx}"); } } @@ -339,7 +338,7 @@ pub enum GeyserPluginManagerError { #[cfg(not(test))] pub(crate) fn load_plugin_from_config( geyser_plugin_config_file: &Path, -) -> Result<(LoadedGeyserPlugin, Library, &str), GeyserPluginManagerError> { +) -> Result<(LoadedGeyserPlugin, &str), GeyserPluginManagerError> { use std::{fs::File, io::Read, path::PathBuf}; type PluginConstructor = unsafe fn() -> *mut dyn GeyserPlugin; use libloading::Symbol; @@ -399,8 +398,7 @@ pub(crate) fn load_plugin_from_config( (Box::from_raw(plugin_raw), lib) }; Ok(( - LoadedGeyserPlugin::new(plugin, plugin_name), - lib, + LoadedGeyserPlugin::new(lib, plugin, plugin_name), config_file, )) } @@ -418,7 +416,7 @@ const TESTPLUGIN2_CONFIG: &str = "TESTPLUGIN2_CONFIG"; #[cfg(test)] pub(crate) fn load_plugin_from_config( geyser_plugin_config_file: &Path, -) -> Result<(LoadedGeyserPlugin, Library, &str), GeyserPluginManagerError> { +) -> Result<(LoadedGeyserPlugin, &str), GeyserPluginManagerError> { if geyser_plugin_config_file.ends_with(TESTPLUGIN_CONFIG) { Ok(tests::dummy_plugin_and_library( tests::TestPlugin, @@ -450,14 +448,13 @@ mod tests { pub(super) fn dummy_plugin_and_library( plugin: P, config_path: &'static str, - ) -> (LoadedGeyserPlugin, Library, &'static str) { + ) -> (LoadedGeyserPlugin, &'static str) { #[cfg(unix)] let library = libloading::os::unix::Library::this(); #[cfg(windows)] let library = libloading::os::windows::Library::this().unwrap(); ( - LoadedGeyserPlugin::new(Box::new(plugin), None), - Library::from(library), + LoadedGeyserPlugin::new(Library::from(library), Box::new(plugin), None), config_path, ) } @@ -498,11 +495,9 @@ mod tests { ); // Mock having loaded plugin (TestPlugin) - let (mut plugin, lib, config) = dummy_plugin_and_library(TestPlugin, DUMMY_CONFIG); + let (mut plugin, config) = dummy_plugin_and_library(TestPlugin, DUMMY_CONFIG); plugin.on_load(config, false).unwrap(); plugin_manager_lock.plugins.push(plugin); - 
plugin_manager_lock.libs.push(lib); - // plugin_manager_lock.libs.push(lib); assert_eq!(plugin_manager_lock.plugins[0].name(), DUMMY_NAME); plugin_manager_lock.plugins[0].name(); @@ -533,15 +528,13 @@ mod tests { // Load two plugins // First - let (mut plugin, lib, config) = dummy_plugin_and_library(TestPlugin, TESTPLUGIN_CONFIG); + let (mut plugin, config) = dummy_plugin_and_library(TestPlugin, TESTPLUGIN_CONFIG); plugin.on_load(config, false).unwrap(); plugin_manager_lock.plugins.push(plugin); - plugin_manager_lock.libs.push(lib); // Second - let (mut plugin, lib, config) = dummy_plugin_and_library(TestPlugin2, TESTPLUGIN2_CONFIG); + let (mut plugin, config) = dummy_plugin_and_library(TestPlugin2, TESTPLUGIN2_CONFIG); plugin.on_load(config, false).unwrap(); plugin_manager_lock.plugins.push(plugin); - plugin_manager_lock.libs.push(lib); // Check that both plugins are returned in the list let plugins = plugin_manager_lock.list_plugins().unwrap(); diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index b9ad46a9fda429..7720073236bad1 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -41,9 +41,11 @@ solana-metrics = { workspace = true } solana-net-utils = { workspace = true } solana-perf = { workspace = true } solana-rayon-threadlimit = { workspace = true } +solana-rpc-client = { workspace = true } solana-runtime = { workspace = true } solana-sanitize = { workspace = true } solana-sdk = { workspace = true } +solana-short-vec = { workspace = true } solana-streamer = { workspace = true } solana-tpu-client = { workspace = true } solana-version = { workspace = true } @@ -70,6 +72,7 @@ frozen-abi = [ "solana-perf/frozen-abi", "solana-runtime/frozen-abi", "solana-sdk/frozen-abi", + "solana-short-vec/frozen-abi", "solana-version/frozen-abi", "solana-vote/frozen-abi", "solana-vote-program/frozen-abi", diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index 57a1e95babc5a5..0f11489333644d 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -13,12 +13,6 @@ //! //! 
Bank needs to provide an interface for us to query the stake weight -#[deprecated( - since = "1.10.6", - note = "Please use `solana_net_utils::{MINIMUM_VALIDATOR_PORT_RANGE_WIDTH, VALIDATOR_PORT_RANGE}` instead" -)] -#[allow(deprecated)] -pub use solana_net_utils::{MINIMUM_VALIDATOR_PORT_RANGE_WIDTH, VALIDATOR_PORT_RANGE}; use { crate::{ cluster_info_metrics::{ @@ -57,6 +51,7 @@ use { bind_common, bind_common_in_range, bind_in_range, bind_in_range_with_config, bind_more_with_config, bind_two_in_range_with_offset_and_config, find_available_port_in_range, multi_bind_in_range, PortRange, SocketConfig, + VALIDATOR_PORT_RANGE, }, solana_perf::{ data_budget::DataBudget, @@ -77,6 +72,7 @@ use { }, solana_streamer::{ packet, + quic::DEFAULT_QUIC_ENDPOINTS, socket::SocketAddrSpace, streamer::{PacketBatchReceiver, PacketBatchSender}, }, @@ -315,7 +311,7 @@ pub(crate) type Ping = ping_pong::Ping<[u8; GOSSIP_PING_TOKEN_SIZE]>; #[cfg_attr( feature = "frozen-abi", derive(AbiExample, AbiEnumVisitor), - frozen_abi(digest = "ogEqvffeEkPpojAaSiUbCv2HdJcdXDQ1ykgYyvKvLo2") + frozen_abi(digest = "6YaMJand6tKtNLUrqvusC5QVDmVLCWYRg5LtxYNi6XN4") )] #[derive(Serialize, Deserialize, Debug)] #[allow(clippy::large_enum_variant)] @@ -479,8 +475,7 @@ impl ClusterInfo { contact_save_interval: 0, // disabled socket_addr_space, }; - me.insert_self(); - me.push_self(); + me.refresh_my_gossip_contact_info(); me } @@ -492,29 +487,6 @@ impl ClusterInfo { &self.socket_addr_space } - fn push_self(&self) { - let now = timestamp(); - let node = { - let mut node = self.my_contact_info.write().unwrap(); - node.set_wallclock(now); - node.clone() - }; - let entries: Vec<_> = [ - LegacyContactInfo::try_from(&node) - .map(CrdsData::LegacyContactInfo) - .expect("Operator must spin up node with valid contact-info"), - CrdsData::ContactInfo(node), - CrdsData::NodeInstance(self.instance.read().unwrap().with_wallclock(now)), - ] - .into_iter() - .map(|v| CrdsValue::new_signed(v, &self.keypair())) - .collect(); - self.local_message_pending_push_queue - .lock() - .unwrap() - .extend(entries); - } - fn refresh_push_active_set( &self, recycler: &PacketBatchRecycler, @@ -709,18 +681,16 @@ impl ClusterInfo { *self.keypair.write().unwrap() = new_keypair; self.my_contact_info.write().unwrap().set_pubkey(id); - self.insert_self(); + self.refresh_my_gossip_contact_info(); self.push_message(CrdsValue::new_signed( CrdsData::Version(Version::new(self.id())), &self.keypair(), )); - self.push_self(); } pub fn set_tpu(&self, tpu_addr: SocketAddr) -> Result<(), ContactInfoError> { self.my_contact_info.write().unwrap().set_tpu(tpu_addr)?; - self.insert_self(); - self.push_self(); + self.refresh_my_gossip_contact_info(); Ok(()) } @@ -729,8 +699,7 @@ impl ClusterInfo { .write() .unwrap() .set_tpu_forwards(tpu_forwards_addr)?; - self.insert_self(); - self.push_self(); + self.refresh_my_gossip_contact_info(); Ok(()) } @@ -1474,16 +1443,23 @@ impl ClusterInfo { .collect() } - fn insert_self(&self) { - let node = self.my_contact_info(); + fn refresh_my_gossip_contact_info(&self) { + let keypair: Arc = self.keypair().clone(); + let instance = self.instance.read().unwrap().with_wallclock(timestamp()); + let node = { + let mut node = self.my_contact_info.write().unwrap(); + node.set_wallclock(timestamp()); + node.clone() + }; let entries: Vec<_> = [ LegacyContactInfo::try_from(&node) .map(CrdsData::LegacyContactInfo) .expect("Operator must spin up node with valid contact-info"), CrdsData::ContactInfo(node), + CrdsData::NodeInstance(instance), ] .into_iter() - 
.map(|entry| CrdsValue::new_signed(entry, &self.keypair())) + .map(|entry| CrdsValue::new_signed(entry, &keypair)) .collect(); let mut gossip_crds = self.gossip.crds.write().unwrap(); for entry in entries { @@ -1544,7 +1520,7 @@ impl ClusterInfo { /// max_chunk_size. /// Note: some messages cannot be contained within that size so in the worst case this returns /// N nested Vecs with 1 item each. - fn split_gossip_messages( + pub fn split_gossip_messages( max_chunk_size: usize, data_feed: I, ) -> impl Iterator> @@ -1936,7 +1912,7 @@ impl ClusterInfo { //TODO: possibly tune this parameter //we saw a deadlock passing an self.read().unwrap().timeout into sleep if start - last_push > CRDS_GOSSIP_PULL_CRDS_TIMEOUT_MS / 2 { - self.push_self(); + self.refresh_my_gossip_contact_info(); self.refresh_push_active_set( &recycler, &stakes, @@ -2273,9 +2249,6 @@ impl ClusterInfo { } self.stats.process_pull_response_count.add_relaxed(1); self.stats.process_pull_response_len.add_relaxed(len as u64); - self.stats - .process_pull_response_timeout - .add_relaxed(pull_stats.timeout_count as u64); self.stats .process_pull_response_fail_insert .add_relaxed(pull_stats.failed_insert as u64); @@ -2288,7 +2261,7 @@ impl ClusterInfo { ( pull_stats.failed_insert + pull_stats.failed_timeout, - pull_stats.timeout_count, + pull_stats.failed_timeout, pull_stats.success, ) } @@ -2905,11 +2878,10 @@ pub struct NodeConfig { pub public_tpu_forwards_addr: Option, /// The number of TVU sockets to create pub num_tvu_sockets: NonZeroUsize, + /// The number of QUIC tpu endpoints + pub num_quic_endpoints: NonZeroUsize, } -// This will be adjusted and parameterized in follow-on PRs. -const QUIC_ENDPOINTS: usize = 1; - #[derive(Debug)] pub struct Node { pub info: ContactInfo, @@ -2921,7 +2893,15 @@ impl Node { let pubkey = solana_sdk::pubkey::new_rand(); Self::new_localhost_with_pubkey(&pubkey) } + pub fn new_localhost_with_pubkey(pubkey: &Pubkey) -> Self { + Self::new_localhost_with_pubkey_and_quic_endpoints(pubkey, DEFAULT_QUIC_ENDPOINTS) + } + + pub fn new_localhost_with_pubkey_and_quic_endpoints( + pubkey: &Pubkey, + num_quic_endpoints: usize, + ) -> Self { let localhost_ip_addr = IpAddr::V4(Ipv4Addr::LOCALHOST); let localhost_bind_addr = format!("{localhost_ip_addr:?}:0"); let unspecified_bind_addr = format!("{:?}:0", IpAddr::V4(Ipv4Addr::UNSPECIFIED)); @@ -2939,7 +2919,7 @@ impl Node { ) .unwrap(); let tpu_quic = - bind_more_with_config(tpu_quic, QUIC_ENDPOINTS, quic_config.clone()).unwrap(); + bind_more_with_config(tpu_quic, num_quic_endpoints, quic_config.clone()).unwrap(); let (gossip_port, (gossip, ip_echo)) = bind_common_in_range(localhost_ip_addr, port_range).unwrap(); let gossip_addr = SocketAddr::new(localhost_ip_addr, gossip_port); @@ -2955,7 +2935,7 @@ impl Node { ) .unwrap(); let tpu_forwards_quic = - bind_more_with_config(tpu_forwards_quic, QUIC_ENDPOINTS, quic_config).unwrap(); + bind_more_with_config(tpu_forwards_quic, num_quic_endpoints, quic_config).unwrap(); let tpu_vote = UdpSocket::bind(&localhost_bind_addr).unwrap(); let repair = UdpSocket::bind(&localhost_bind_addr).unwrap(); let rpc_port = find_available_port_in_range(localhost_ip_addr, port_range).unwrap(); @@ -3076,7 +3056,7 @@ impl Node { ) .unwrap(); let tpu_quic = - bind_more_with_config(tpu_quic, QUIC_ENDPOINTS, quic_config.clone()).unwrap(); + bind_more_with_config(tpu_quic, DEFAULT_QUIC_ENDPOINTS, quic_config.clone()).unwrap(); let ((tpu_forwards_port, tpu_forwards), (_tpu_forwards_quic_port, tpu_forwards_quic)) = 
bind_two_in_range_with_offset_and_config( bind_ip_addr, @@ -3087,7 +3067,7 @@ impl Node { ) .unwrap(); let tpu_forwards_quic = - bind_more_with_config(tpu_forwards_quic, QUIC_ENDPOINTS, quic_config).unwrap(); + bind_more_with_config(tpu_forwards_quic, DEFAULT_QUIC_ENDPOINTS, quic_config).unwrap(); let (tpu_vote_port, tpu_vote) = Self::bind(bind_ip_addr, port_range); let (_, retransmit_socket) = Self::bind(bind_ip_addr, port_range); let (_, repair) = Self::bind(bind_ip_addr, port_range); @@ -3159,6 +3139,7 @@ impl Node { public_tpu_addr, public_tpu_forwards_addr, num_tvu_sockets, + num_quic_endpoints, } = config; let (gossip_port, (gossip, ip_echo)) = @@ -3178,7 +3159,7 @@ impl Node { quic_config.clone(), ); let tpu_quic = - bind_more_with_config(tpu_quic, QUIC_ENDPOINTS, quic_config.clone()).unwrap(); + bind_more_with_config(tpu_quic, num_quic_endpoints.get(), quic_config.clone()).unwrap(); let (tpu_forwards_port, tpu_forwards_sockets) = multi_bind_in_range(bind_ip_addr, port_range, 8).expect("tpu_forwards multi_bind"); @@ -3191,8 +3172,12 @@ impl Node { ), quic_config.clone(), ); - let tpu_forwards_quic = - bind_more_with_config(tpu_forwards_quic, QUIC_ENDPOINTS, quic_config.clone()).unwrap(); + let tpu_forwards_quic = bind_more_with_config( + tpu_forwards_quic, + num_quic_endpoints.get(), + quic_config.clone(), + ) + .unwrap(); let (tpu_vote_port, tpu_vote_sockets) = multi_bind_in_range(bind_ip_addr, port_range, 1).expect("tpu_vote multi_bind"); @@ -3407,6 +3392,8 @@ mod tests { sync::Arc, }, }; + const DEFAULT_NUM_QUIC_ENDPOINTS: NonZeroUsize = + unsafe { NonZeroUsize::new_unchecked(DEFAULT_QUIC_ENDPOINTS) }; #[test] fn test_gossip_node() { @@ -3821,6 +3808,7 @@ mod tests { public_tpu_addr: None, public_tpu_forwards_addr: None, num_tvu_sockets: MINIMUM_NUM_TVU_SOCKETS, + num_quic_endpoints: DEFAULT_NUM_QUIC_ENDPOINTS, }; let node = Node::new_with_external_ip(&solana_sdk::pubkey::new_rand(), config); @@ -3843,6 +3831,7 @@ mod tests { public_tpu_addr: None, public_tpu_forwards_addr: None, num_tvu_sockets: MINIMUM_NUM_TVU_SOCKETS, + num_quic_endpoints: DEFAULT_NUM_QUIC_ENDPOINTS, }; let node = Node::new_with_external_ip(&solana_sdk::pubkey::new_rand(), config); @@ -4229,7 +4218,7 @@ mod tests { let mut node = cluster_info.my_contact_info.write().unwrap(); node.set_shred_version(42); } - cluster_info.push_self(); + cluster_info.refresh_my_gossip_contact_info(); cluster_info.flush_push_queue(); // Should now include both epoch slots. let slots = cluster_info.get_epoch_slots(&mut Cursor::default()); @@ -4978,7 +4967,7 @@ mod tests { let mut node = cluster_info.my_contact_info.write().unwrap(); node.set_shred_version(42); } - cluster_info.push_self(); + cluster_info.refresh_my_gossip_contact_info(); cluster_info.flush_push_queue(); // Should now include the previous heaviest_fork from the other node. 
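One note on the `num_quic_endpoints: NonZeroUsize` field threaded through `NodeConfig` above: outside const contexts (where the test module above falls back to `new_unchecked`), the value can be built safely from the plain `usize` constant that `solana_streamer` exports. A sketch, assuming only the items visible in this diff:

```rust
use {solana_streamer::quic::DEFAULT_QUIC_ENDPOINTS, std::num::NonZeroUsize};

fn default_num_quic_endpoints() -> NonZeroUsize {
    // expect() documents the invariant instead of relying on unsafe
    // new_unchecked; it can only fire if the constant were ever zero.
    NonZeroUsize::new(DEFAULT_QUIC_ENDPOINTS).expect("DEFAULT_QUIC_ENDPOINTS must be non-zero")
}
```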
diff --git a/gossip/src/cluster_info_metrics.rs b/gossip/src/cluster_info_metrics.rs index 56a6b0ab2c663c..a902130ac2b56e 100644 --- a/gossip/src/cluster_info_metrics.rs +++ b/gossip/src/cluster_info_metrics.rs @@ -153,7 +153,6 @@ pub struct GossipStats { pub(crate) process_pull_response_fail_timeout: Counter, pub(crate) process_pull_response_len: Counter, pub(crate) process_pull_response_success: Counter, - pub(crate) process_pull_response_timeout: Counter, pub(crate) process_push_message: Counter, pub(crate) prune_message_count: Counter, pub(crate) prune_message_len: Counter, @@ -311,11 +310,6 @@ pub(crate) fn submit_gossip_stats( stats.process_pull_response_success.clear(), i64 ), - ( - "process_pull_resp_timeout", - stats.process_pull_response_timeout.clear(), - i64 - ), ( "num_redundant_pull_responses", crds_stats.num_redundant_pull_responses, diff --git a/gossip/src/contact_info.rs b/gossip/src/contact_info.rs index 57e11bcd843931..9a5c1ce495813b 100644 --- a/gossip/src/contact_info.rs +++ b/gossip/src/contact_info.rs @@ -8,8 +8,9 @@ use { pubkey::Pubkey, quic::QUIC_PORT_OFFSET, rpc_port::{DEFAULT_RPC_PORT, DEFAULT_RPC_PUBSUB_PORT}, - serde_varint, short_vec, + serde_varint, }, + solana_short_vec as short_vec, solana_streamer::socket::SocketAddrSpace, static_assertions::const_assert_eq, std::{ diff --git a/gossip/src/crds_gossip_pull.rs b/gossip/src/crds_gossip_pull.rs index 3b35129ac0d103..c708f595343968 100644 --- a/gossip/src/crds_gossip_pull.rs +++ b/gossip/src/crds_gossip_pull.rs @@ -200,7 +200,6 @@ pub struct ProcessPullStats { pub success: usize, pub failed_insert: usize, pub failed_timeout: usize, - pub timeout_count: usize, } pub struct CrdsGossipPull { @@ -354,7 +353,6 @@ impl CrdsGossipPull { expired_values.push(response); None } else { - stats.timeout_count += 1; stats.failed_timeout += 1; Some(response) } @@ -564,7 +562,7 @@ impl CrdsGossipPull { ); ( stats.failed_timeout + stats.failed_insert, - stats.timeout_count, + stats.failed_timeout, stats.success, ) } diff --git a/gossip/src/gossip_service.rs b/gossip/src/gossip_service.rs index b35cf1c58470bf..9e23e3eeee1c45 100644 --- a/gossip/src/gossip_service.rs +++ b/gossip/src/gossip_service.rs @@ -4,11 +4,10 @@ use { crate::{cluster_info::ClusterInfo, contact_info::ContactInfo}, crossbeam_channel::{unbounded, Sender}, rand::{thread_rng, Rng}, - solana_client::{ - connection_cache::ConnectionCache, rpc_client::RpcClient, tpu_client::TpuClientWrapper, - }, + solana_client::{connection_cache::ConnectionCache, tpu_client::TpuClientWrapper}, solana_net_utils::DEFAULT_IP_ECHO_SERVER_THREADS, solana_perf::recycler::Recycler, + solana_rpc_client::rpc_client::RpcClient, solana_runtime::bank_forks::BankForks, solana_sdk::{ pubkey::Pubkey, diff --git a/inline-spl/Cargo.toml b/inline-spl/Cargo.toml index e3ca8d06357981..82aa5907ce0aa2 100644 --- a/inline-spl/Cargo.toml +++ b/inline-spl/Cargo.toml @@ -11,7 +11,7 @@ edition = { workspace = true } [dependencies] bytemuck = { workspace = true } -solana-sdk = { workspace = true } +solana-program = { workspace = true, default-features = false } [lib] crate-type = ["lib"] @@ -21,6 +21,3 @@ name = "solana_inline_spl" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] - -[build-dependencies] -rustc_version = { workspace = true } diff --git a/inline-spl/src/associated_token_account.rs b/inline-spl/src/associated_token_account.rs index 9057bee3840ee2..2048c5b743d222 100644 --- a/inline-spl/src/associated_token_account.rs +++ b/inline-spl/src/associated_token_account.rs @@ -1,6 
+1,6 @@
 // Partial SPL Associated Token Account declarations inlined to avoid an external dependency on the spl-associated-token-account crate
-solana_sdk::declare_id!("ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL");
+solana_program::declare_id!("ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL");

 pub mod program_v1_1_0 {
-    solana_sdk::declare_id!("NatA1Zyo48dJ7yuwR7cGURwhskKA8ywUyxb9GvG7mTC");
+    solana_program::declare_id!("NatA1Zyo48dJ7yuwR7cGURwhskKA8ywUyxb9GvG7mTC");
 }
diff --git a/inline-spl/src/token.rs b/inline-spl/src/token.rs
index a15822932c8f7c..1a495d8ca3a241 100644
--- a/inline-spl/src/token.rs
+++ b/inline-spl/src/token.rs
@@ -1,10 +1,10 @@
 /// Partial SPL Token declarations inlined to avoid an external dependency on the spl-token crate
-use solana_sdk::pubkey::{Pubkey, PUBKEY_BYTES};
+use solana_program::pubkey::{Pubkey, PUBKEY_BYTES};

-solana_sdk::declare_id!("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA");
+solana_program::declare_id!("TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA");

 pub mod program_v3_4_0 {
-    solana_sdk::declare_id!("NToK4t5AQzxPNpUA84DkxgfXaVDbDQQjpHKCqsbY46B");
+    solana_program::declare_id!("NToK4t5AQzxPNpUA84DkxgfXaVDbDQQjpHKCqsbY46B");
 }

 /*
@@ -72,7 +72,7 @@ impl GenericTokenAccount for Account {
 }

 pub mod native_mint {
-    solana_sdk::declare_id!("So11111111111111111111111111111111111111112");
+    solana_program::declare_id!("So11111111111111111111111111111111111111112");

     /*
     Mint {
diff --git a/inline-spl/src/token_2022.rs b/inline-spl/src/token_2022.rs
index 1787fa817645f9..4b0e0d1b3c05db 100644
--- a/inline-spl/src/token_2022.rs
+++ b/inline-spl/src/token_2022.rs
@@ -1,7 +1,7 @@
 /// Partial SPL Token declarations inlined to avoid an external dependency on the spl-token-2022 crate
 use crate::token::{self, GenericTokenAccount};

-solana_sdk::declare_id!("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb");
+solana_program::declare_id!("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb");

 // `spl_token_program_2022::extension::AccountType::Account` ordinal value
 pub const ACCOUNTTYPE_ACCOUNT: u8 = 2;
diff --git a/lattice-hash/Cargo.toml b/lattice-hash/Cargo.toml
new file mode 100644
index 00000000000000..44d257091f642f
--- /dev/null
+++ b/lattice-hash/Cargo.toml
@@ -0,0 +1,22 @@
+[package]
+name = "solana-lattice-hash"
+version = { workspace = true }
+authors = { workspace = true }
+repository = { workspace = true }
+homepage = { workspace = true }
+license = { workspace = true }
+edition = { workspace = true }
+
+[dependencies]
+base64 = { workspace = true }
+blake3 = { workspace = true }
+bytemuck = { workspace = true, features = ["must_cast"] }
+
+[dev-dependencies]
+criterion = { workspace = true }
+rand = { workspace = true }
+rand_chacha = { workspace = true }
+
+[[bench]]
+name = "bench_lt_hash"
+harness = false
diff --git a/lattice-hash/benches/bench_lt_hash.rs b/lattice-hash/benches/bench_lt_hash.rs
new file mode 100644
index 00000000000000..33106b3fc89d8e
--- /dev/null
+++ b/lattice-hash/benches/bench_lt_hash.rs
@@ -0,0 +1,60 @@
+use {
+    criterion::{criterion_group, criterion_main, Criterion},
+    rand::prelude::*,
+    rand_chacha::ChaChaRng,
+    solana_lattice_hash::lt_hash::LtHash,
+};
+
+fn new_random_lt_hash(rng: &mut impl Rng) -> LtHash {
+    let mut hasher = blake3::Hasher::new();
+    hasher.update(&rng.gen::<u64>().to_le_bytes());
+    LtHash::with(&hasher)
+}
+
+fn bench_mix_in(c: &mut Criterion) {
+    let mut rng = ChaChaRng::seed_from_u64(11);
+    let mut lt_hash1 = new_random_lt_hash(&mut rng);
+    let lt_hash2 = new_random_lt_hash(&mut rng);
+
+    c.bench_function("mix_in", |b| {
+        b.iter(|| lt_hash1.mix_in(&lt_hash2));
+    });
+}
+
+fn bench_mix_out(c: &mut Criterion) {
+    let mut rng = ChaChaRng::seed_from_u64(22);
+    let mut lt_hash1 = new_random_lt_hash(&mut rng);
+    let lt_hash2 = new_random_lt_hash(&mut rng);
+
+    c.bench_function("mix_out", |b| {
+        b.iter(|| lt_hash1.mix_out(&lt_hash2));
+    });
+}
+
+fn bench_checksum(c: &mut Criterion) {
+    let mut rng = ChaChaRng::seed_from_u64(33);
+    let lt_hash = new_random_lt_hash(&mut rng);
+
+    c.bench_function("checksum", |b| {
+        b.iter(|| lt_hash.checksum());
+    });
+}
+
+fn bench_with(c: &mut Criterion) {
+    let mut rng = ChaChaRng::seed_from_u64(44);
+    let mut hasher = blake3::Hasher::new();
+    hasher.update(&rng.gen::<u64>().to_le_bytes());
+
+    c.bench_function("with", |b| {
+        b.iter(|| LtHash::with(&hasher));
+    });
+}
+
+criterion_group!(
+    benches,
+    bench_mix_in,
+    bench_mix_out,
+    bench_checksum,
+    bench_with
+);
+criterion_main!(benches);
diff --git a/lattice-hash/src/lib.rs b/lattice-hash/src/lib.rs
new file mode 100644
index 00000000000000..f40989f67d604b
--- /dev/null
+++ b/lattice-hash/src/lib.rs
@@ -0,0 +1 @@
+pub mod lt_hash;
diff --git a/lattice-hash/src/lt_hash.rs b/lattice-hash/src/lt_hash.rs
new file mode 100644
index 00000000000000..aad54c728147eb
--- /dev/null
+++ b/lattice-hash/src/lt_hash.rs
@@ -0,0 +1,360 @@
+use {
+    base64::{display::Base64Display, prelude::BASE64_STANDARD},
+    std::fmt,
+};
+
+/// A 16-bit, 1024 element lattice-based incremental hash based on blake3
+//
+// Developer notes:
+// - Do not derive Copy because this type is large and copying will not be fast/free.
+// - Do not derive Default because hashes do not have a meaningful "default".
+#[derive(Debug, Eq, PartialEq, Clone)]
+pub struct LtHash(pub [u16; LtHash::NUM_ELEMENTS]);
+
+impl LtHash {
+    pub const NUM_ELEMENTS: usize = 1024;
+
+    /// Creates a new LtHash from `hasher`
+    ///
+    /// The caller should hash in all inputs of interest prior to calling.
+    #[must_use]
+    pub fn with(hasher: &blake3::Hasher) -> Self {
+        let mut reader = hasher.finalize_xof();
+        let mut inner = [0; Self::NUM_ELEMENTS];
+        reader.fill(bytemuck::must_cast_slice_mut(inner.as_mut_slice()));
+        Self(inner)
+    }
+
+    /// Mixes `other` into `self`
+    ///
+    /// This can be thought of as akin to 'insert'
+    pub fn mix_in(&mut self, other: &Self) {
+        for i in 0..self.0.len() {
+            self.0[i] = self.0[i].wrapping_add(other.0[i]);
+        }
+    }
+
+    /// Mixes `other` out of `self`
+    ///
+    /// This can be thought of as akin to 'remove'
+    pub fn mix_out(&mut self, other: &Self) {
+        for i in 0..self.0.len() {
+            self.0[i] = self.0[i].wrapping_sub(other.0[i]);
+        }
+    }
+
+    /// Computes a checksum of the LtHash
+    pub fn checksum(&self) -> Checksum {
+        let hash = blake3::hash(bytemuck::must_cast_slice(&self.0));
+        Checksum(hash.into())
+    }
+}
+
+impl fmt::Display for LtHash {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        let base64 = Base64Display::new(bytemuck::must_cast_slice(&self.0), &BASE64_STANDARD);
+        write!(f, "{base64}")
+    }
+}
+
+/// A smaller "checksum" of the LtHash, useful when 2 KiB is too large
+//
+// Developer notes:
+// - Do not derive Copy because copying may not be fast/free.
+// - Do not derive Default because there is not a meaningful "default".
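+//
+// Illustrative sketch (an editor-added assumption, not part of the original
+// file): a full `LtHash` is 2 KiB, so callers that only need equality checks
+// can carry the 32-byte `Checksum` instead, e.g.
+//
+//     let checksum = lt_hash.checksum(); // `lt_hash: LtHash` is hypothetical
+//     assert_eq!(checksum.0.len(), Checksum::NUM_ELEMENTS);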
+#[derive(Debug, Eq, PartialEq, Clone)] +pub struct Checksum(pub [u8; Checksum::NUM_ELEMENTS]); + +impl Checksum { + pub const NUM_ELEMENTS: usize = 32; +} + +impl fmt::Display for Checksum { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let base64 = Base64Display::new(&self.0, &BASE64_STANDARD); + write!(f, "{base64}") + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + rand::prelude::*, + std::ops::{Add, Sub}, + }; + + impl LtHash { + const fn new_zeroed() -> Self { + Self([0; Self::NUM_ELEMENTS]) + } + + fn new_random() -> Self { + let mut new = Self::new_zeroed(); + thread_rng().fill(&mut new.0); + new + } + } + + impl Add for LtHash { + type Output = Self; + fn add(mut self, rhs: Self) -> Self { + self.mix_in(&rhs); + self + } + } + + impl Sub for LtHash { + type Output = Self; + fn sub(mut self, rhs: Self) -> Self { + self.mix_out(&rhs); + self + } + } + + // Ensure that if you mix-in then mix-out a hash, you get the original value + #[test] + fn test_inverse() { + let a = LtHash::new_random(); + let b = LtHash::new_random(); + assert_eq!(a.clone(), a.clone() + b.clone() - b.clone()); + } + + // Ensure that mixing is commutative + #[test] + fn test_commutative() { + let a = LtHash::new_random(); + let b = LtHash::new_random(); + assert_eq!(a.clone() + b.clone(), b.clone() + a.clone()); + } + + // Ensure that mixing is associative + #[test] + fn test_associative() { + let a = LtHash::new_random(); + let b = LtHash::new_random(); + let c = LtHash::new_random(); + assert_eq!( + (a.clone() + b.clone()) + c.clone(), + a.clone() + (b.clone() + c.clone()), + ); + } + + // Ensure the correct lattice hash and checksum values are produced + #[test] + fn test_hello_world() { + let expected_hello_lt_hash = LtHash([ + 0x8fea, 0x3d16, 0x86b3, 0x9282, 0x445e, 0xc591, 0x8de5, 0xb34b, 0x6e50, 0xc1f8, 0xb74e, + 0x868a, 0x08e9, 0x62c5, 0x674a, 0x0f20, 0x92e9, 0x5f40, 0x780d, 0x595b, 0x2e9a, 0x8733, + 0xd3f6, 0x014d, 0xccfa, 0xb2fe, 0xb62f, 0xef97, 0xd53f, 0x4135, 0x1a24, 0x8c33, 0x88c6, + 0x5676, 0xb58a, 0xe5c6, 0xab24, 0xfebc, 0x1e88, 0x4e5b, 0xc91a, 0x6f33, 0x933f, 0x412d, + 0x4822, 0x82c9, 0x3695, 0x9f69, 0xa107, 0xceb1, 0xff35, 0xe0df, 0x5dbe, 0xc000, 0xa883, + 0xd2df, 0x9a9c, 0x0343, 0x37d1, 0xd74c, 0x6a0e, 0xecbc, 0x6b6e, 0x6c79, 0xac92, 0x0905, + 0xc1cf, 0xaa9d, 0x6969, 0x736e, 0xcf4c, 0x0029, 0xcf70, 0x8f05, 0xde0f, 0x3fc9, 0x1db6, + 0x6d09, 0x2e08, 0xf4aa, 0x7208, 0x2cc1, 0x8cfb, 0x276e, 0xd62e, 0x2211, 0xf254, 0x8518, + 0x4d07, 0x1594, 0xf13f, 0xab12, 0xcc65, 0x4d4a, 0xceba, 0xfe93, 0x589f, 0x9f4e, 0xe7ea, + 0x63a8, 0xe612, 0x4ced, 0x58a5, 0x43b3, 0x39f6, 0x457c, 0x474f, 0x9aff, 0x5124, 0x63f6, + 0x450d, 0x3fc2, 0x9ccf, 0xf0c6, 0xc69f, 0x2bd3, 0x7a5d, 0x9574, 0x2f2c, 0xf934, 0xcc03, + 0x9342, 0x9998, 0x0da9, 0x6dd1, 0x460d, 0x3e00, 0xcdde, 0xf14d, 0x06ec, 0x6b74, 0x9551, + 0x68c4, 0x0f94, 0x4ac6, 0xed49, 0xd886, 0x24cb, 0x2a29, 0xf4a4, 0x3a83, 0x1f81, 0xe97a, + 0xfa1e, 0xb1c5, 0xfcd5, 0xb24c, 0xdb92, 0x2b62, 0xa4f1, 0x498e, 0xf00d, 0x63be, 0x7f6e, + 0x2c33, 0xdc3e, 0xb0fb, 0xe854, 0x8ee3, 0x5d95, 0xc613, 0x670b, 0xf4aa, 0x5570, 0x04bc, + 0xf606, 0x664f, 0xe5ec, 0xd65b, 0x0ea1, 0xf37c, 0x7745, 0x809b, 0x031e, 0xed80, 0x7254, + 0x211b, 0x0cce, 0x94e1, 0x6bf6, 0x95b1, 0x49ba, 0x64c0, 0x8ec9, 0x3b27, 0x5f21, 0xafc8, + 0x3b86, 0x2ea5, 0x8c30, 0x168e, 0xc147, 0x1fd5, 0x1637, 0x88f5, 0x9321, 0x63aa, 0xaae5, + 0x33bb, 0xd983, 0xb09a, 0xf24e, 0xa1e5, 0x2b39, 0xd434, 0x7135, 0x61ed, 0x57ad, 0x5940, + 0xe53f, 0x727d, 0x4882, 0x8c44, 0xa61b, 0x1b9f, 0xcee4, 0xf462, 0xc875, 0xc019, 
0x9310, + 0x7dc2, 0xf55c, 0xcb36, 0x9505, 0xebb5, 0x8a2b, 0x2b07, 0x0a36, 0x3890, 0x54c8, 0x5a76, + 0xece7, 0x96f1, 0xe3f7, 0x6d99, 0x83e4, 0xff35, 0x1d04, 0x8783, 0xbf2e, 0xb846, 0x79a9, + 0x69ba, 0xb980, 0x28f6, 0x2325, 0x7d13, 0xc44c, 0xacba, 0x134e, 0xa877, 0x6b67, 0x8027, + 0xba94, 0xf564, 0x2174, 0xf985, 0x91c8, 0xd568, 0x319f, 0x6d4e, 0xa59b, 0xd344, 0x4a67, + 0x801d, 0x7aeb, 0x20c0, 0xba23, 0x9744, 0xdd93, 0x4cc5, 0x1148, 0xdf86, 0xad19, 0x06b7, + 0xa824, 0x8e56, 0x2cab, 0x9ad1, 0x5ec0, 0xd57c, 0x0f2b, 0x8d85, 0x65e2, 0xd9c0, 0xc824, + 0x3cae, 0xed26, 0x5c7c, 0x41f9, 0x4767, 0xf730, 0xe210, 0x2926, 0xb68f, 0xcf36, 0x22b9, + 0x5f1b, 0x4ae4, 0xcdcd, 0xe69a, 0x9f4c, 0x1036, 0x8e7c, 0x48de, 0xee0f, 0xbcbd, 0x6bc7, + 0x067a, 0x35e6, 0x98fa, 0x2dcb, 0xa442, 0xbcd0, 0xa02c, 0xc746, 0x60b9, 0x479e, 0x6f56, + 0xff1a, 0xe6f0, 0xef75, 0x5dad, 0x2096, 0xbd07, 0x96e2, 0x2bc6, 0xee33, 0xd122, 0x05f7, + 0x2177, 0x2dbc, 0x729b, 0xfdf0, 0x2c18, 0x800c, 0xdb7d, 0xfb19, 0x0002, 0x3895, 0x5b72, + 0xfbe7, 0x16ce, 0x671f, 0x2175, 0x7c84, 0xc8dc, 0x9690, 0xf594, 0x31b4, 0x47f3, 0xe3f2, + 0x8911, 0x747d, 0x25c2, 0x480a, 0x16ff, 0xba50, 0x8bcb, 0xe9d7, 0xec54, 0x7df4, 0x4b9a, + 0xf4bb, 0x3100, 0x86cc, 0x62c2, 0x9b73, 0x06d7, 0x157b, 0x0922, 0xab9e, 0x83a6, 0x2f28, + 0x30ce, 0x3eff, 0x5134, 0xc9d5, 0x74ae, 0x295c, 0x9af8, 0x482a, 0x61dc, 0xe555, 0x9c7c, + 0x57de, 0xfe56, 0xd898, 0x19c6, 0x444f, 0x9636, 0x9297, 0xea84, 0xeaba, 0xce24, 0x6dc0, + 0x14c3, 0x6e7d, 0x2a65, 0x3bb5, 0x679d, 0x22a1, 0x8ea1, 0xc564, 0xca61, 0x0b2a, 0x38ea, + 0xe029, 0xcf07, 0x4280, 0xff2a, 0x8697, 0x8d30, 0x185b, 0x919a, 0x8f7c, 0x046c, 0x9390, + 0x50ab, 0xcb51, 0x2334, 0x616f, 0x998f, 0x1d2d, 0xd294, 0x74f1, 0x822c, 0xe50d, 0xdcc6, + 0xbafc, 0x7d92, 0xe202, 0xe28e, 0x2e19, 0xecaa, 0x7cf5, 0x25aa, 0x7a1a, 0x389a, 0xc189, + 0x6af0, 0x6fa3, 0x16c3, 0xa318, 0x8cb5, 0x348e, 0x627b, 0xd144, 0x7d8d, 0xc43c, 0xca5b, + 0xf4bd, 0xb174, 0x4734, 0x3520, 0xbeb9, 0x4f79, 0xa628, 0xe4bd, 0x1bc7, 0xa9f4, 0x3ad2, + 0x959b, 0xe178, 0x1ba2, 0x48bb, 0x5e79, 0xd594, 0xf41e, 0x78ce, 0x685c, 0x79d4, 0xedae, + 0xe11d, 0x2172, 0xb9ab, 0x5ca2, 0xf9ff, 0x2812, 0x66b7, 0xed6d, 0x7eff, 0x960f, 0x4844, + 0x9484, 0x504a, 0x5b29, 0xca8b, 0xdafd, 0xa6b7, 0xef3a, 0xe2e0, 0xa137, 0x1b05, 0x16c2, + 0xefbd, 0x06ac, 0xf3f1, 0xa94f, 0xcade, 0x7087, 0x2ec9, 0x6543, 0x49a1, 0xf4c3, 0x3157, + 0xed65, 0xfc85, 0xefd4, 0x30b8, 0xa5e8, 0x093f, 0xcbe2, 0x8e2b, 0x2fd4, 0xae39, 0x3e37, + 0x37c5, 0xf02f, 0xf643, 0xc03e, 0xe4d0, 0xe305, 0xfd1a, 0x698d, 0x1285, 0x19de, 0x1582, + 0x251f, 0xe136, 0x3eec, 0x862b, 0xbf4d, 0xab67, 0x0c90, 0x3eb5, 0x58d0, 0xc300, 0x7f93, + 0x03e1, 0xf2f9, 0x78fd, 0x93b6, 0x5add, 0x865a, 0x8b20, 0x89e4, 0x7585, 0x6e40, 0x5a8a, + 0x8623, 0x7335, 0xa9e1, 0xfecf, 0x83cb, 0xe9de, 0xf07c, 0x36ca, 0x5a7b, 0x9fff, 0xe419, + 0x8e48, 0xa704, 0xbcab, 0x44ae, 0x6dfa, 0x810c, 0x94f4, 0x62fb, 0xa34e, 0xa9a5, 0x1d13, + 0x98a9, 0x88ba, 0x7bc2, 0x7a59, 0x188a, 0x1855, 0xd27d, 0x6781, 0xcf08, 0xde49, 0x5588, + 0x5c8b, 0x1f4a, 0xd22b, 0x3959, 0xe754, 0xf071, 0xdfc2, 0xf352, 0x255c, 0x2d36, 0x59d0, + 0x4621, 0x1ed0, 0xa0b5, 0x457d, 0xd3d7, 0xd137, 0x10ca, 0xeeb1, 0xec30, 0x96af, 0x9be5, + 0x2181, 0xe570, 0x8a33, 0x137e, 0x861e, 0xd155, 0x950d, 0xc6e4, 0x5c1f, 0xe4dc, 0x4466, + 0x7078, 0x75a5, 0x7a51, 0x1339, 0xa1a8, 0xcb89, 0xf383, 0xabf0, 0x0170, 0xbb1d, 0xea76, + 0xe491, 0xf911, 0xdc42, 0xec04, 0x82b8, 0xeadd, 0xc890, 0x505c, 0xafa7, 0x42cb, 0xfd99, + 0x127e, 0x0724, 0xd4f9, 0x94ef, 0xf060, 0x67fe, 0x038d, 0x2876, 0xb812, 0xbf05, 0xe904, + 0x003e, 0x2ee4, 0xe8f5, 0x0a66, 
0xd790, 0x3ccc, 0x28be, 0xdbc2, 0x073c, 0xd4a5, 0x904c, + 0x60ad, 0x4f67, 0x77ac, 0xae49, 0x2d6c, 0x9220, 0xde9c, 0x2a2b, 0xf99c, 0xb54f, 0x8290, + 0x2e7d, 0x0ca1, 0xf79b, 0xc6ff, 0x3e6e, 0x8eb4, 0x66b1, 0xc6e6, 0x600f, 0xda08, 0xa933, + 0x2cad, 0x308a, 0x93f2, 0x4f70, 0x72d3, 0x56e0, 0x4ddd, 0x682c, 0x589f, 0xd461, 0x06ad, + 0x4e9a, 0x1af7, 0x901c, 0xa1d4, 0xb990, 0xbbcc, 0xdcbb, 0xe46f, 0xe585, 0x9800, 0x86e6, + 0xa735, 0xac0f, 0xb666, 0xaeac, 0x6e00, 0x8b36, 0xc4ce, 0x7261, 0xf078, 0xb42a, 0x86fb, + 0xd4d8, 0x1402, 0xd7ac, 0x69c6, 0x8b29, 0x66ce, 0x512d, 0x93f8, 0x811b, 0x7b2c, 0x1a3b, + 0x88fb, 0x8ca2, 0x197e, 0xbd7b, 0x5c5c, 0xf2c3, 0x803b, 0xe9f2, 0x6fd2, 0x8c05, 0x6966, + 0x2249, 0xceab, 0xe42b, 0x8195, 0x9ddc, 0x79ee, 0x1e35, 0x3fd4, 0x6fc4, 0x9b26, 0x85b0, + 0x45a4, 0x5a6b, 0xf43b, 0x0f07, 0x3104, 0x463d, 0x710a, 0x288e, 0x0dcd, 0x8f1a, 0xa307, + 0x6790, 0x1f2e, 0x991a, 0x7fcc, 0x241a, 0x80d9, 0x9f22, 0xac19, 0x0015, 0x5690, 0x45ba, + 0x4a3f, 0x84f1, 0x01c5, 0xc2b8, 0xa512, 0xffc0, 0xebbd, 0x3c5f, 0x66dc, 0x9fdd, 0xe066, + 0x5b39, 0x2fa1, 0x9432, 0xad65, 0xf397, 0x528a, 0x0c94, 0xe646, 0xbeb5, 0xe91c, 0x7d24, + 0x305c, 0x2c7b, 0x3f93, 0x860e, 0x6e39, 0x953a, 0xb010, 0xbb1b, 0x15a2, 0x369b, 0xf840, + 0xa258, 0xb39a, 0x522b, 0xedbb, 0x7fb9, 0xb94c, 0x45d0, 0x34c0, 0xd516, 0xb52d, 0xdce1, + 0x35e4, 0x3801, 0x3e5c, 0x6826, 0x3b4e, 0xc688, 0xe612, 0x64a8, 0x7898, 0xd07f, 0xa93e, + 0x0f42, 0x9392, 0xa877, 0xd68f, 0xd947, 0x7615, 0xac5e, 0x6f1c, 0x3a42, 0x04c8, 0x993e, + 0x53e5, 0x272e, 0x3021, 0xa3d2, 0xfc24, 0xbd1e, 0xf109, 0x3b8f, 0x6566, 0x48f9, 0x4ef5, + 0x777d, 0xcbaa, 0x029e, 0x8867, 0xda07, 0xa941, 0xeb45, 0x8ad2, 0x9c78, 0xa7c9, 0xdf67, + 0x2ec0, 0x8c0b, 0x6827, 0x18ca, 0x78c2, 0xc9df, 0x8a0e, 0x2aae, 0x4e31, 0xa7ec, 0xd0e5, + 0x748c, 0x1556, 0x44ad, 0xec45, 0x9e48, 0x13d1, 0x74ae, 0x1382, 0x6fdd, 0x6d15, 0x39b9, + 0x4a8a, 0xe31d, 0x4732, 0xb215, 0x5b5e, 0x5b7a, 0x5981, 0x4e94, 0x2ccd, 0x12b6, 0x5072, + 0x4e2b, 0x078f, 0x6896, 0xec47, 0x1165, 0x2625, 0x7fd3, 0xe652, 0xb05f, 0x6fc8, 0xfcb0, + 0xf199, 0xef36, 0x89db, 0xb274, 0x3e7c, 0x9985, 0xbc7a, 0xbd5e, 0x9f19, 0x6068, 0x47f2, + 0xc8db, 0x8025, 0x3e28, 0xf0b2, 0xbad1, 0x1237, 0x3b1d, 0xe2fc, 0x24b7, 0xb8b8, 0x4d82, + 0x5adc, 0x16b4, 0x1bb7, 0xedec, 0x9f94, 0x3557, 0x4ce4, 0x9995, 0xec62, 0xce8e, 0x597e, + 0x0161, 0x12f7, 0xa4d3, 0x98c7, 0xaede, 0x7e2d, 0xaa32, 0x98e4, 0xbfd7, 0x7e5a, 0x9507, + 0x8900, 0x1f5a, 0x46f5, 0x64cf, 0x6885, 0x6977, 0x26c4, 0xd94a, 0xe454, 0xcd75, 0xeda1, + 0x476b, 0x697c, 0xe522, 0x4ab9, 0x9e88, 0xde52, 0x67e4, 0xb170, 0x3270, 0x6291, 0x2422, + 0x95bb, 0xcf27, 0x90da, 0x12b2, 0x1305, 0x029b, 0x8427, 0x52e5, 0x3e64, 0x7a88, 0xd34d, + 0x68ee, 0x6099, 0xae6d, 0x622f, 0x1237, 0x33bd, 0x0143, 0x1e1c, 0xd463, 0xda74, 0x7272, + 0xa794, 0x1714, 0x8ec6, 0xf919, 0xdb4c, 0x60d7, 0xa3ae, 0xe336, 0x12bf, 0xc469, 0xfc67, + 0x9037, 0xcb6a, 0x5ebd, 0x85b5, 0x6c11, 0xa54e, 0x7e7f, 0xec0d, 0x46e5, 0x43ec, 0x6bf5, + 0x086f, 0x9421, 0xf5f7, 0xdbdf, 0x9994, 0x072c, 0xe5d9, 0x19a5, 0x8458, 0xec68, 0xba3f, + 0x9924, + ]); + let expected_hello_checksum = Checksum([ + 79, 156, 26, 184, 156, 205, 94, 208, 182, 235, 33, 147, 111, 153, 229, 152, 207, 133, + 75, 109, 182, 198, 119, 61, 11, 81, 41, 70, 24, 87, 100, 85, + ]); + + let expected_world_lt_hash = LtHash([ + 0x56dc, 0x1d98, 0x5420, 0x810d, 0x936f, 0x1011, 0xa2ff, 0x6681, 0x637e, 0x9f2c, 0x0024, + 0xebd4, 0xe5f2, 0x3382, 0xd48b, 0x209e, 0xb031, 0xe7a5, 0x026f, 0x55f1, 0xc0cf, 0xe566, + 0x9eb0, 0x0a41, 0x3eb1, 0x3d36, 0x1b7c, 0x83ca, 0x9aa6, 0x2264, 0x8794, 0xfb85, 0x71e0, 
+ 0x64c9, 0x227c, 0xed27, 0x09e0, 0xe5d5, 0xc8da, 0x88a5, 0x8b49, 0xf5a5, 0x3137, 0xbeed, + 0xca0e, 0x7690, 0x0570, 0xa5de, 0x4e0b, 0x4827, 0x4ae4, 0x2dad, 0x0ce4, 0xd56f, 0x9819, + 0x5d4e, 0xe93a, 0x0024, 0xb7b2, 0xc7ba, 0xa00c, 0x6709, 0x1d26, 0x53d3, 0x17b1, 0xebdf, + 0xb18f, 0xb30a, 0x3d6b, 0x1d75, 0x26a0, 0x260e, 0x6585, 0x2ba6, 0xc88d, 0x70ef, 0xf6f4, + 0x8b7f, 0xc03b, 0x285b, 0x997b, 0x933e, 0xf139, 0xe097, 0x3eff, 0xd9f7, 0x605a, 0xaeec, + 0xee8d, 0x1527, 0x3bff, 0x7081, 0xda28, 0x4c0f, 0x44b0, 0xb7d0, 0x8f9b, 0xa657, 0x8e47, + 0xa405, 0x5507, 0xe5f9, 0x52ed, 0xc4e1, 0x300c, 0x0db3, 0xbf93, 0xfddd, 0x8f21, 0x10c5, + 0x4bfd, 0x5f13, 0xe136, 0xd72f, 0x1822, 0xb424, 0x996f, 0x8fdd, 0x0703, 0xa57f, 0x7923, + 0x0755, 0x7aee, 0x168d, 0x1525, 0xf912, 0xb48d, 0xfb9e, 0xd606, 0xb2ce, 0x98ef, 0x20fb, + 0xd21a, 0x8261, 0xd6db, 0x61bf, 0xdbc6, 0x02b1, 0x45e9, 0x1ffa, 0x071f, 0xa2c0, 0x74a8, + 0xae54, 0x59e1, 0xe2dc, 0x0ec9, 0x35ac, 0xbbb0, 0x5938, 0x2210, 0xcf9e, 0x2d9f, 0x7e01, + 0x2ab7, 0xd7d8, 0x8e36, 0x6b09, 0x262c, 0xb017, 0x9b6e, 0x1455, 0x7401, 0x8a8a, 0x6491, + 0x9de9, 0x7856, 0x8fb3, 0x8fcb, 0x3c05, 0x3e74, 0x40a4, 0x682a, 0x1a67, 0x9888, 0xb949, + 0xbb75, 0x6ef9, 0xc457, 0xa83a, 0x7965, 0x159e, 0xa415, 0x1c6b, 0x1b94, 0xaa10, 0x137d, + 0xbc3a, 0xc6bd, 0xf303, 0x7758, 0xc8da, 0xf5a3, 0x5826, 0x2b48, 0x9852, 0x3033, 0xfa85, + 0x3f85, 0x9b38, 0xd409, 0x4813, 0x36b2, 0x43d7, 0xdc0a, 0xfb54, 0x22b2, 0xf1e1, 0xfe5a, + 0x44ff, 0x217c, 0x158d, 0x2041, 0x7d2a, 0x4a78, 0xfc39, 0xb7db, 0x4786, 0xf8ee, 0xc353, + 0x96c2, 0x7be2, 0xd18d, 0x0407, 0x7b0e, 0x04f5, 0x3c63, 0x415e, 0xb1d1, 0x31cc, 0x25ac, + 0x9d8a, 0x4845, 0xd2b4, 0x0cdd, 0xf9a4, 0xae8f, 0x7fe5, 0x2285, 0xa749, 0x43cb, 0x16ae, + 0x09a9, 0xbd32, 0x923c, 0x2825, 0xbe21, 0xfa66, 0x2638, 0x3435, 0x6d79, 0xdf4b, 0xaab4, + 0xf2b1, 0x08f4, 0x64fd, 0x7364, 0x14e4, 0x1457, 0xbce3, 0xe114, 0xeccb, 0x2490, 0xae79, + 0x7448, 0x6310, 0xeff6, 0x2bb1, 0x79e7, 0xf5ae, 0xab40, 0xff6d, 0x889b, 0xe5f5, 0x69ee, + 0x3298, 0x512a, 0x2573, 0xf85c, 0xc69a, 0xb142, 0x3ed0, 0x7b9d, 0xc7a5, 0xea5d, 0xd085, + 0x4e99, 0xaf95, 0x404b, 0x8aca, 0x870f, 0x098a, 0x7c9c, 0x30cf, 0x3e16, 0x9010, 0xa94b, + 0x3cca, 0x00bc, 0xddb8, 0xbf1b, 0xc61a, 0x7121, 0xd668, 0xf4ba, 0xb339, 0xa66c, 0xd5b9, + 0x557c, 0x70a0, 0x34e4, 0x43a5, 0x9c32, 0x2e94, 0xa47f, 0x0b21, 0xb594, 0xb483, 0xf823, + 0x8c56, 0x9ee9, 0x71aa, 0xf97c, 0x1c62, 0xe003, 0xcbbe, 0xca8f, 0x58e5, 0xcbee, 0x758e, + 0x5511, 0x38da, 0x7816, 0xd6a1, 0x4550, 0x09e9, 0x682f, 0xf2ca, 0x5ea1, 0x58c2, 0x78ed, + 0xb630, 0xee80, 0xa2df, 0xa890, 0x8b42, 0x83d0, 0x7ec6, 0xa87e, 0x896c, 0xf649, 0x173d, + 0x4950, 0x5d0a, 0xd1a8, 0x7376, 0x4a4a, 0xe53f, 0x447d, 0x6efd, 0xd202, 0x1da3, 0x4825, + 0xd44b, 0x4343, 0xa1a9, 0x8aac, 0x5b50, 0xc8e6, 0x8086, 0xd64f, 0xd077, 0x76f0, 0x9443, + 0xcd70, 0x950d, 0x0369, 0xf1be, 0xb771, 0x5222, 0x4b40, 0x4846, 0x3fab, 0x1d5d, 0xc69d, + 0xa200, 0xe217, 0xb8bd, 0x2ef7, 0xed6b, 0xa78c, 0xe978, 0x0e16, 0x72bf, 0x05a3, 0xdcb4, + 0x4024, 0xfca2, 0x0219, 0x0d3e, 0xa83f, 0x6127, 0x33ab, 0x3ae5, 0xe7a1, 0x2e76, 0xf6f5, + 0xbee1, 0xa712, 0xab89, 0xf058, 0x71ed, 0xd39e, 0xa383, 0x5f64, 0xe2b6, 0xbe86, 0xee47, + 0x5bd8, 0x1536, 0xc6ed, 0x1c40, 0x836d, 0xcc40, 0x18ff, 0xe30a, 0xae2c, 0xc709, 0x7b40, + 0xddf8, 0x7b72, 0x97da, 0x3f71, 0x6dba, 0x578b, 0x980a, 0x2e0e, 0xd0c0, 0x871f, 0xde9b, + 0xa821, 0x1a41, 0xbff0, 0x04cb, 0x40d6, 0x9942, 0xf717, 0x2c1a, 0x65f9, 0xae3d, 0x9e4e, + 0x3ca6, 0x2d53, 0x3f6e, 0xc886, 0x5bbc, 0x9936, 0x09de, 0xb4ab, 0xc044, 0xa7a0, 0x8c37, + 0x383a, 0x3ab9, 0xcd16, 0x33c2, 0x908e, 
0x75c3, 0x51da, 0xcb86, 0x4640, 0xe2b7, 0xbc2f, + 0x1bbb, 0xc1c0, 0xc4ce, 0x821d, 0x0a46, 0x178c, 0x1291, 0xfe6e, 0xd15f, 0x8d3e, 0x9d01, + 0x79b2, 0xfe4c, 0x75eb, 0x176c, 0x6be7, 0x6efa, 0xdcc6, 0x2127, 0xef2b, 0xb83a, 0xe10b, + 0x3206, 0xc2fe, 0x1a3d, 0x62c8, 0xf55e, 0xc594, 0x81ba, 0x0188, 0x962a, 0x0f1c, 0x2489, + 0xb3ca, 0x0d9a, 0xca06, 0xfe37, 0x2cb0, 0x87a1, 0xd33b, 0x31b0, 0x1efe, 0x08f2, 0xc55a, + 0xcb8a, 0x1633, 0x9df2, 0xc468, 0xd5e3, 0x3117, 0x3333, 0x488f, 0x4a9d, 0xc68f, 0x73f9, + 0xa82d, 0xe1af, 0xeb4e, 0xe41b, 0x33f5, 0x051f, 0x7592, 0x0528, 0x7aee, 0xc3eb, 0x7010, + 0x03f4, 0xaba4, 0x3e8f, 0x4abd, 0x2b41, 0x5390, 0x21a1, 0x6dc6, 0xd828, 0xa9b4, 0xc63a, + 0x3ab3, 0x14aa, 0xdc3a, 0x513f, 0x9886, 0x0000, 0x1169, 0xbba0, 0xb2fe, 0x4b09, 0x0198, + 0xcfff, 0xb898, 0x8cfe, 0x3def, 0x0b4b, 0xc154, 0x2491, 0x28d7, 0x757f, 0x06c5, 0x98c5, + 0x2dfa, 0xc068, 0xc74d, 0x521e, 0x70d5, 0xde35, 0x7718, 0xddf8, 0xa387, 0x807d, 0x0056, + 0x697b, 0x3043, 0x4ec8, 0xc2be, 0xa867, 0x0555, 0x2d3f, 0xc9f1, 0xfe7c, 0xe851, 0x5b85, + 0x2175, 0x741d, 0x1e5b, 0xafd3, 0xf757, 0x1bd9, 0x96df, 0x03df, 0x28d6, 0xbb77, 0xd5b5, + 0x03d3, 0xc078, 0x255b, 0xee39, 0x9705, 0x7fcc, 0xf16e, 0x16ca, 0x71d1, 0x9107, 0x00a5, + 0x103d, 0x0b12, 0xea24, 0xdf09, 0x7745, 0x7c1b, 0xcdba, 0x3093, 0x742e, 0x1e4c, 0x087b, + 0x9661, 0x0f3a, 0x6c51, 0xdc63, 0xb9d8, 0xf518, 0x09e1, 0x1426, 0xb6dc, 0xc246, 0xa273, + 0x5562, 0x8fde, 0x8f0e, 0xd034, 0x6651, 0x95ec, 0x6452, 0x95d4, 0xdf84, 0x118c, 0x44ab, + 0x328b, 0xf3d1, 0xb048, 0x2081, 0x748a, 0x05ee, 0x0f9b, 0x8110, 0x46e8, 0x6476, 0x8863, + 0x9850, 0xcb94, 0x2d2e, 0xcbac, 0xce53, 0x91bb, 0xa605, 0xfe50, 0x06f5, 0xef2d, 0xbd7c, + 0x736b, 0xf371, 0x6055, 0x6ab9, 0x135f, 0xb572, 0x5eb1, 0x7a36, 0xe4d5, 0xb998, 0xa7ea, + 0x1d06, 0x1275, 0x7f89, 0x3c92, 0xe906, 0x40c1, 0x8207, 0x058e, 0xa660, 0x72cd, 0xce25, + 0xd92a, 0x7731, 0x7633, 0xc6da, 0xb213, 0x0a93, 0x30c0, 0x58d3, 0x5ac0, 0x3ce7, 0x1028, + 0x4bcd, 0x86b9, 0x7f60, 0x22a6, 0x0ce9, 0xb569, 0x8c83, 0xb5bf, 0x2dd9, 0x7bdd, 0xc4bc, + 0xce57, 0x0b0b, 0x0a9c, 0xd74a, 0x6936, 0x0e40, 0xa874, 0x02b2, 0xfe8d, 0x0c16, 0xa0e0, + 0x5b01, 0x6f18, 0x6264, 0x4e77, 0x01a0, 0x3484, 0xe5b4, 0xf0cc, 0xd30d, 0x7904, 0x8216, + 0x46dd, 0x6fc0, 0xfa77, 0x8c3e, 0x5c10, 0xf776, 0x3043, 0x23dc, 0xfffc, 0x35c0, 0x8007, + 0x7993, 0xf198, 0x94eb, 0xe9bf, 0x7cc0, 0x170d, 0xea0d, 0xa7d0, 0x3d77, 0x7d6e, 0xc8f7, + 0x9a86, 0x6462, 0xc8d2, 0x357a, 0x8fa0, 0xf201, 0x55e5, 0x5235, 0x7da1, 0x52e6, 0xcc31, + 0xbecd, 0x3343, 0x343a, 0x2b1f, 0xd19e, 0x4cc6, 0x83a2, 0x6d16, 0x9c97, 0xa61b, 0xde54, + 0x6da1, 0xa57e, 0x44a7, 0x1e84, 0x98e7, 0x0e44, 0x5494, 0xe013, 0x0ed2, 0x0b3a, 0xa2db, + 0xc93a, 0xe6a0, 0xdccd, 0x84ac, 0xc898, 0xb974, 0x3d62, 0xe4cf, 0xcbc3, 0xa7bd, 0xde59, + 0x9391, 0x5635, 0xdac1, 0xd9b6, 0x1700, 0x7b35, 0x9555, 0x648e, 0xdacd, 0xffdf, 0xdd6a, + 0x9616, 0xea2e, 0xb1a4, 0x80c1, 0xdb21, 0x1076, 0x9543, 0xc165, 0x66d8, 0x26b8, 0x7095, + 0xdf4f, 0xcf4b, 0x1cec, 0xb231, 0x4037, 0x9fa5, 0x3637, 0xf96e, 0x215a, 0x65c9, 0x4696, + 0x734a, 0x556e, 0xb47f, 0x5160, 0xbf85, 0x850b, 0x06e0, 0x8181, 0x45f7, 0x202b, 0x86d1, + 0x5de7, 0x8ecd, 0xf77c, 0x031f, 0xa330, 0x79b4, 0xf38b, 0x59a8, 0x68cf, 0xf885, 0xfc87, + 0x4054, 0xe627, 0x845e, 0xa77f, 0x8450, 0x2302, 0x86e6, 0x2d94, 0xbbf7, 0x9e54, 0x2d79, + 0x1aa6, 0x6c50, 0xaef5, 0xbd9d, 0x85f3, 0x7b05, 0x5ec3, 0x6d70, 0x3ff3, 0x62a6, 0x252a, + 0x72c4, 0x2f56, 0xf9c1, 0xadf9, 0x00ff, 0xedfc, 0xddf3, 0x439c, 0x2777, 0xb742, 0xddfd, + 0x14fc, 0xa147, 0xd950, 0x37bd, 0x6296, 0xf816, 0x29af, 0x297c, 0xbf24, 0x6f05, 
0xe8a4, + 0x17f4, 0xc8ab, 0xc0d1, 0x87b2, 0xeca2, 0x1b31, 0xa20b, 0xaad8, 0xd46c, 0x636f, 0x3975, + 0x363e, 0xdc79, 0xc450, 0x507e, 0xd8d5, 0x74c9, 0x56de, 0x92bc, 0x05eb, 0x749a, 0x3d98, + 0xf26a, 0x23fe, 0x4f29, 0x7856, 0x968c, 0x8794, 0x2835, 0x8dc3, 0xa440, 0x3b7b, 0xcc28, + 0x98e6, 0x36f1, 0xf305, 0x7641, 0xe895, 0x88d7, 0xedb3, 0x934a, 0x88c2, 0x0d19, 0xd558, + 0xe4bd, 0xe365, 0x5b52, 0xd26d, 0x77be, 0xe2cc, 0xd759, 0xb890, 0x5924, 0xf681, 0xfd5f, + 0xccf7, 0xc9b7, 0x544a, 0x1fe8, 0xacd1, 0x349e, 0xf889, 0x3e38, 0x980a, 0xfcf6, 0x4aaf, + 0xc970, 0x2699, 0xce48, 0x3229, 0x148e, 0x2c20, 0x28c1, 0x7fc3, 0x1cf6, 0x080c, 0x2f85, + 0x6ed0, 0xa884, 0xd958, 0xd555, 0x480d, 0x8874, 0xe8d4, 0x7c66, 0x226f, 0xbf4f, 0xbcea, + 0x3eeb, 0xac04, 0xc774, 0xbc95, 0xa97f, 0x8382, 0x165b, 0xc178, 0x708e, 0x8be5, 0x7eb4, + 0x84ad, 0x15d5, 0x5193, 0x4114, 0xd320, 0x9add, 0x85a3, 0x8b70, 0x1be3, 0xa39d, 0xbf82, + 0x6e04, 0x3bd2, 0xdf31, 0x0741, 0xaab8, 0xd398, 0x01f4, 0xdd3a, 0x2f9d, 0x2b55, 0x6811, + 0x171f, + ]); + let expected_world_checksum = Checksum([ + 171, 53, 185, 10, 179, 49, 48, 151, 87, 43, 141, 13, 43, 152, 121, 1, 144, 7, 120, 188, + 115, 248, 214, 220, 229, 210, 175, 134, 215, 231, 18, 245, + ]); + + for (input, expected_lt_hash, expected_checksum) in [ + ("hello", expected_hello_lt_hash, expected_hello_checksum), + ("world!", expected_world_lt_hash, expected_world_checksum), + ] { + let mut hasher = blake3::Hasher::new(); + hasher.update(input.as_bytes()); + let actual_lt_hash = LtHash::with(&hasher); + assert_eq!(actual_lt_hash, expected_lt_hash); + let actual_checksum = actual_lt_hash.checksum(); + assert_eq!(actual_checksum, expected_checksum); + } + } +} diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index d9e55d9771b36c..663201514932e9 100644 --- a/ledger-tool/Cargo.toml +++ b/ledger-tool/Cargo.toml @@ -38,6 +38,7 @@ solana-entry = { workspace = true } solana-geyser-plugin-manager = { workspace = true } solana-gossip = { workspace = true } solana-ledger = { workspace = true, features = ["dev-context-only-utils"] } +solana-log-collector = { workspace = true } solana-logger = { workspace = true } solana-measure = { workspace = true } solana-program-runtime = { workspace = true } diff --git a/ledger-tool/src/args.rs b/ledger-tool/src/args.rs index 94d0baa4f7b4e5..131bc91e9a7914 100644 --- a/ledger-tool/src/args.rs +++ b/ledger-tool/src/args.rs @@ -2,7 +2,8 @@ use { crate::LEDGER_TOOL_DIRECTORY, clap::{value_t, value_t_or_exit, values_t, values_t_or_exit, Arg, ArgMatches}, solana_accounts_db::{ - accounts_db::{AccountsDb, AccountsDbConfig}, + accounts_db::{AccountsDb, AccountsDbConfig, CreateAncientStorage}, + accounts_file::StorageAccess, accounts_index::{AccountsIndexConfig, IndexLimitMb}, partitioned_rewards::TestPartitionedEpochRewards, utils::create_and_canonicalize_directories, @@ -108,6 +109,20 @@ pub fn accounts_db_args<'a, 'b>() -> Box<[Arg<'a, 'b>]> { together.", ) .hidden(hidden_unless_forced()), + Arg::with_name("accounts_db_squash_storages_method") + .long("accounts-db-squash-storages-method") + .value_name("METHOD") + .takes_value(true) + .possible_values(&["pack", "append"]) + .help("Squash multiple account storage files together using this method") + .hidden(hidden_unless_forced()), + Arg::with_name("accounts_db_access_storages_method") + .long("accounts-db-access-storages-method") + .value_name("METHOD") + .takes_value(true) + .possible_values(&["mmap", "file"]) + .help("Access account storage using this method") + .hidden(hidden_unless_forced()), ] 
    .into_boxed_slice()
}

@@ -271,6 +286,29 @@ pub fn get_accounts_db_config(
         .pop()
         .unwrap();

+    let create_ancient_storage = arg_matches
+        .value_of("accounts_db_squash_storages_method")
+        .map(|method| match method {
+            "pack" => CreateAncientStorage::Pack,
+            "append" => CreateAncientStorage::Append,
+            _ => {
+                // clap will enforce one of the above values is given
+                unreachable!("invalid value given to accounts-db-squash-storages-method")
+            }
+        })
+        .unwrap_or_default();
+    let storage_access = arg_matches
+        .value_of("accounts_db_access_storages_method")
+        .map(|method| match method {
+            "mmap" => StorageAccess::Mmap,
+            "file" => StorageAccess::File,
+            _ => {
+                // clap will enforce one of the above values is given
+                unreachable!("invalid value given to accounts-db-access-storages-method")
+            }
+        })
+        .unwrap_or_default();
+
     AccountsDbConfig {
         index: Some(accounts_index_config),
         base_working_path: Some(ledger_tool_ledger_path),
@@ -282,6 +320,8 @@ pub fn get_accounts_db_config(
         test_partitioned_epoch_rewards,
         test_skip_rewrites_but_include_in_bank_hash: arg_matches
             .is_present("accounts_db_test_skip_rewrites"),
+        create_ancient_storage,
+        storage_access,
         ..AccountsDbConfig::default()
     }
 }
diff --git a/ledger-tool/src/bigtable.rs b/ledger-tool/src/bigtable.rs
index a79645e4282e08..ed7c9b8814f9ce 100644
--- a/ledger-tool/src/bigtable.rs
+++ b/ledger-tool/src/bigtable.rs
@@ -19,7 +19,7 @@ use {
     serde_json::json,
     solana_clap_utils::{
         input_parsers::pubkey_of,
-        input_validators::{is_slot, is_valid_pubkey},
+        input_validators::{is_parsable, is_slot, is_valid_pubkey},
     },
     solana_cli_output::{
         display::println_transaction, CliBlock, CliTransaction, CliTransactionConfirmation,
@@ -173,11 +173,80 @@ async fn entries(
     Ok(())
 }

-struct ShredConfig {
-    shred_version: u16,
+enum ShredPohGenerationMode {
+    AllowMockPoh(MockPohConfig),
+    RequireEntryData,
+}
+
+struct MockPohConfig {
     num_hashes_per_tick: u64,
     num_ticks_per_slot: u64,
+}
+
+struct ShredConfig {
+    shred_version: u16,
+    poh_generation_mode: ShredPohGenerationMode,
+}
+
+fn get_shred_config_from_ledger(
+    arg_matches: &ArgMatches,
+    ledger_path: &Path,
+    blockstore: Arc<Blockstore>,
     allow_mock_poh: bool,
+    starting_slot: Slot,
+    ending_slot: Slot,
+) -> ShredConfig {
+    let process_options = parse_process_options(ledger_path, arg_matches);
+    let genesis_config = open_genesis_config_by(ledger_path, arg_matches);
+    let LoadAndProcessLedgerOutput { bank_forks, .. } = load_and_process_ledger_or_exit(
+        arg_matches,
+        &genesis_config,
+        blockstore.clone(),
+        process_options,
+        None,
+    );
+
+    let bank = bank_forks.read().unwrap().working_bank();
+    let shred_version = compute_shred_version(&genesis_config.hash(), Some(&bank.hard_forks()));
+    // If mock PoH is allowed, ensure that the requested slots are in
+    // the same epoch as the working bank. This will ensure the values
+    // extracted from the Bank are accurate for the slot range
+    if allow_mock_poh {
+        let working_bank_epoch = bank.epoch();
+        let epoch_schedule = bank.epoch_schedule();
+        let starting_epoch = epoch_schedule.get_epoch(starting_slot);
+        let ending_epoch = epoch_schedule.get_epoch(ending_slot);
+        if starting_epoch != ending_epoch {
+            eprintln!(
+                "The specified --starting-slot and --ending-slot must be in the \
+                same epoch. --starting-slot {starting_slot} is in epoch {starting_epoch}, \
+                but --ending-slot {ending_slot} is in epoch {ending_epoch}."
+            );
+            exit(1);
+        }
+        if starting_epoch != working_bank_epoch {
+            eprintln!(
+                "The range of slots between --starting-slot and --ending-slot is in a \
+                different epoch than the working bank. The specified range is in epoch \
+                {starting_epoch}, but the working bank is in {working_bank_epoch}."
+            );
+            exit(1);
+        }
+
+        let mock_poh_config = MockPohConfig {
+            num_hashes_per_tick: bank.hashes_per_tick().unwrap_or(0),
+            num_ticks_per_slot: bank.ticks_per_slot(),
+        };
+        ShredConfig {
+            shred_version,
+            poh_generation_mode: ShredPohGenerationMode::AllowMockPoh(mock_poh_config),
+        }
+    } else {
+        ShredConfig {
+            shred_version,
+            poh_generation_mode: ShredPohGenerationMode::RequireEntryData,
+        }
+    }
 }

 async fn shreds(
@@ -205,34 +274,13 @@ async fn shreds(
     // shreds being signed with the "dummy" keypair can still be inserted and
     // later read/replayed/etc
     let keypair = keypair_from_seed(&[0; 64])?;
-    let ShredConfig {
-        shred_version,
-        num_hashes_per_tick,
-        num_ticks_per_slot,
-        allow_mock_poh,
-    } = shred_config;

     for slot in slots.iter() {
         let block = bigtable.get_confirmed_block(*slot).await?;
-        let entry_summaries = match bigtable.get_entries(*slot).await {
-            Ok(summaries) => Some(summaries),
-            Err(err) => {
-                let err_msg = format!("Failed to get PoH entries for {slot}: {err}");
-
-                if allow_mock_poh {
-                    warn!("{err_msg}. Will create mock PoH entries instead.");
-                } else {
-                    return Err(format!(
-                        "{err_msg}. Try passing --allow-mock-poh to allow \
-                        creation of shreds with mocked PoH entries"
-                    ))?;
-                }
-                None
-            }
-        };
+        let entry_summaries = bigtable.get_entries(*slot).await;

         let entries = match entry_summaries {
-            Some(entry_summaries) => entry_summaries
+            Ok(entry_summaries) => entry_summaries
                 .enumerate()
                 .map(|(i, entry_summary)| {
                     let num_hashes = entry_summary.num_hashes;
@@ -263,7 +311,25 @@ async fn shreds(
                     })
                 })
                 .collect::<Result<Vec<_>, std::string::String>>()?,
-            None => {
+            Err(err) => {
+                let err_msg = format!("Failed to get PoH entries for {slot}: {err}");
+                let (num_hashes_per_tick, num_ticks_per_slot) =
+                    match shred_config.poh_generation_mode {
+                        ShredPohGenerationMode::RequireEntryData => {
+                            return Err(format!(
+                                "{err_msg}. Try passing --allow-mock-poh to allow creation of \
+                                shreds with mocked PoH entries"
+                            ))?;
+                        }
+                        ShredPohGenerationMode::AllowMockPoh(ref mock_poh_config) => {
+                            warn!("{err_msg}. Will create mock PoH entries instead.");
+                            (
+                                mock_poh_config.num_hashes_per_tick,
+                                mock_poh_config.num_ticks_per_slot,
+                            )
+                        }
+                    };
+
                 let num_total_ticks = ((slot - block.parent_slot) * num_ticks_per_slot) as usize;
                 let num_total_entries = num_total_ticks + block.transactions.len();
                 let mut entries = Vec::with_capacity(num_total_entries);
@@ -320,7 +386,7 @@ async fn shreds(
             }
         };

-        let shredder = Shredder::new(*slot, block.parent_slot, 0, shred_version)?;
+        let shredder = Shredder::new(*slot, block.parent_slot, 0, shred_config.shred_version)?;
         let (data_shreds, _coding_shreds) = shredder.entries_to_shreds(
             &keypair,
             &entries,
@@ -1058,6 +1124,17 @@ impl BigTableSubCommand for App<'_, '_> {
                         the shredded block(s) to be replayable if PoH verification is \
                         disabled.",
                     ),
+                )
+                .arg(
+                    Arg::with_name("shred_version")
+                        .long("shred-version")
+                        .validator(is_parsable::<u16>)
+                        .takes_value(true)
+                        .conflicts_with("allow_mock_poh")
+                        .help(
+                            "The version to encode in created shreds. 
Specifying this \ + value will avoid determining the value from a rebuilt Bank.", + ), ), ) .subcommand( @@ -1365,59 +1442,29 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { exit(1); } let allow_mock_poh = arg_matches.is_present("allow_mock_poh"); + let shred_version = value_t!(arg_matches, "shred_version", u16).ok(); let ledger_path = canonicalize_ledger_path(ledger_path); - let process_options = parse_process_options(&ledger_path, arg_matches); - let genesis_config = open_genesis_config_by(&ledger_path, arg_matches); let blockstore = Arc::new(crate::open_blockstore( &ledger_path, arg_matches, AccessType::Primary, )); - let LoadAndProcessLedgerOutput { bank_forks, .. } = load_and_process_ledger_or_exit( - arg_matches, - &genesis_config, - blockstore.clone(), - process_options, - None, - ); - let bank = bank_forks.read().unwrap().working_bank(); - // If mock PoH is allowed, ensure that the requested slots are in - // the same epoch as the working bank. This will ensure the values - // extracted from the Bank are accurate for the slot range - if allow_mock_poh { - let working_bank_epoch = bank.epoch(); - let epoch_schedule = bank.epoch_schedule(); - let starting_epoch = epoch_schedule.get_epoch(starting_slot); - let ending_epoch = epoch_schedule.get_epoch(ending_slot); - if starting_epoch != ending_epoch { - eprintln!( - "The specified --starting-slot and --ending-slot must be in the\ - same epoch. --starting-slot {starting_slot} is in epoch {starting_epoch},\ - but --ending-slot {ending_slot} is in epoch {ending_epoch}." - ); - exit(1); - } - if starting_epoch != working_bank_epoch { - eprintln!( - "The range of slots between --starting-slot and --ending-slot are in a \ - different epoch than the working bank. The specified range is in epoch \ - {starting_epoch}, but the working bank is in {working_bank_epoch}." - ); - exit(1); + let shred_config = if let Some(shred_version) = shred_version { + ShredConfig { + shred_version, + poh_generation_mode: ShredPohGenerationMode::RequireEntryData, } - } - - let shred_version = - compute_shred_version(&genesis_config.hash(), Some(&bank.hard_forks())); - let num_hashes_per_tick = bank.hashes_per_tick().unwrap_or(0); - let num_ticks_per_slot = bank.ticks_per_slot(); - let shred_config = ShredConfig { - shred_version, - num_hashes_per_tick, - num_ticks_per_slot, - allow_mock_poh, + } else { + get_shred_config_from_ledger( + arg_matches, + &ledger_path, + blockstore.clone(), + allow_mock_poh, + starting_slot, + ending_slot, + ) }; let config = solana_storage_bigtable::LedgerStorageConfig { diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs index 98a647e21f2851..152ec84ed66f70 100644 --- a/ledger-tool/src/ledger_utils.rs +++ b/ledger-tool/src/ledger_utils.rs @@ -25,7 +25,7 @@ use { }, use_snapshot_archives_at_startup::UseSnapshotArchivesAtStartup, }, - solana_measure::measure, + solana_measure::measure_time, solana_rpc::transaction_status_service::TransactionStatusService, solana_runtime::{ accounts_background_service::{ @@ -245,7 +245,7 @@ pub fn load_and_process_ledger( // From now on, use run/ paths in the same way as the previous account_paths. 
     let account_paths = account_run_paths;

-    let (_, measure_clean_account_paths) = measure!(
+    let (_, measure_clean_account_paths) = measure_time!(
         account_paths.iter().for_each(|path| {
             if path.exists() {
                 info!("Cleaning contents of account path: {}", path.display());
@@ -365,7 +365,6 @@ pub fn load_and_process_ledger(
         exit.clone(),
         abs_request_handler,
         process_options.accounts_db_test_hash_calculation,
-        None,
     );

     let enable_rpc_transaction_history = arg_matches.is_present("enable_rpc_transaction_history");
diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs
index 8339c0a14d07ff..78c36acf910ca7 100644
--- a/ledger-tool/src/main.rs
+++ b/ledger-tool/src/main.rs
@@ -42,7 +42,7 @@ use {
             ProcessSlotCallback, TransactionStatusMessage, TransactionStatusSender,
         },
     },
-    solana_measure::{measure, measure::Measure},
+    solana_measure::{measure::Measure, measure_time},
     solana_runtime::{
         bank::{
             bank_hash_details::{self, SlotDetails, TransactionDetails},
@@ -94,6 +94,7 @@ use {
             atomic::{AtomicBool, Ordering},
             Arc, Mutex, RwLock,
         },
+        thread::JoinHandle,
     },
 };

@@ -522,7 +523,7 @@ fn minimize_bank_for_snapshot(
     snapshot_slot: Slot,
     ending_slot: Slot,
 ) -> bool {
-    let ((transaction_account_set, possibly_incomplete), transaction_accounts_measure) = measure!(
+    let ((transaction_account_set, possibly_incomplete), transaction_accounts_measure) = measure_time!(
         blockstore.get_accounts_used_in_range(bank, snapshot_slot, ending_slot),
         "get transaction accounts"
     );
@@ -538,6 +539,209 @@ fn assert_capitalization(bank: &Bank) {
     assert!(bank.calculate_and_verify_capitalization(debug_verify));
 }

+struct SlotRecorderConfig {
+    transaction_recorder: Option<JoinHandle<()>>,
+    transaction_status_sender: Option<TransactionStatusSender>,
+    slot_details: Arc<Mutex<Vec<SlotDetails>>>,
+    file: File,
+}
+
+fn setup_slot_recording(
+    arg_matches: &ArgMatches,
+) -> (Option<ProcessSlotCallback>, Option<SlotRecorderConfig>) {
+    let record_slots = arg_matches.occurrences_of("record_slots") > 0;
+    let verify_slots = arg_matches.occurrences_of("verify_slots") > 0;
+    match (record_slots, verify_slots) {
+        (false, false) => (None, None),
+        (true, true) => {
+            // .default_value() does not work with .conflicts_with() in clap 2.33
+            // .conflicts_with("verify_slots")
+            // https://github.com/clap-rs/clap/issues/1605#issuecomment-722326915
+            // So open-code the conflicts_with() here
+            eprintln!(
+                "error: The argument '--verify-slots <FILENAME>' cannot be used with \
+                '--record-slots <FILENAME>'"
+            );
+            exit(1);
+        }
+        (true, false) => {
+            let filename = Path::new(arg_matches.value_of_os("record_slots").unwrap());
+            let file = File::create(filename).unwrap_or_else(|err| {
+                eprintln!("Unable to write to file: {}: {:#}", filename.display(), err);
+                exit(1);
+            });
+
+            let mut include_bank = false;
+            let mut include_tx = false;
+            if let Some(args) = arg_matches.values_of("record_slots_config") {
+                for arg in args {
+                    match arg {
+                        "tx" => include_tx = true,
+                        "accounts" => include_bank = true,
+                        _ => unreachable!(),
+                    }
+                }
+            }
+
+            let slot_details = Arc::new(Mutex::new(Vec::new()));
+            let (transaction_status_sender, transaction_recorder) = if include_tx {
+                let (sender, receiver) = crossbeam_channel::unbounded();
+
+                let slots = Arc::clone(&slot_details);
+                let transaction_recorder = Some(std::thread::spawn(move || {
+                    record_transactions(receiver, slots);
+                }));
+
+                (
+                    Some(TransactionStatusSender { sender }),
+                    transaction_recorder,
+                )
+            } else {
+                (None, None)
+            };
+
+            let slot_callback = Arc::new({
+                let slots = Arc::clone(&slot_details);
+                move |bank: &Bank| {
+                    let mut details = if include_bank {
+                        bank_hash_details::SlotDetails::try_from(bank).unwrap()
+                    } else {
+                        bank_hash_details::SlotDetails {
+                            slot: bank.slot(),
+                            bank_hash: bank.hash().to_string(),
+                            ..Default::default()
+                        }
+                    };
+
+                    let mut slots = slots.lock().unwrap();
+
+                    if let Some(recorded_slot) = slots.iter_mut().find(|f| f.slot == details.slot) {
+                        // copy all fields except transactions
+                        swap(&mut recorded_slot.transactions, &mut details.transactions);
+
+                        *recorded_slot = details;
+                    } else {
+                        slots.push(details);
+                    }
+                }
+            });
+
+            (
+                Some(slot_callback as ProcessSlotCallback),
+                Some(SlotRecorderConfig {
+                    transaction_recorder,
+                    transaction_status_sender,
+                    slot_details,
+                    file,
+                }),
+            )
+        }
+        (false, true) => {
+            let filename = Path::new(arg_matches.value_of_os("verify_slots").unwrap());
+            let file = File::open(filename).unwrap_or_else(|err| {
+                eprintln!("Unable to read file: {}: {err:#}", filename.display());
+                exit(1);
+            });
+            let reader = std::io::BufReader::new(file);
+            let details: bank_hash_details::BankHashDetails = serde_json::from_reader(reader)
+                .unwrap_or_else(|err| {
+                    eprintln!("Error loading slots file: {err:#}");
+                    exit(1);
+                });

+            let slots = Arc::new(Mutex::new(details.bank_hash_details));
+            let slot_callback = Arc::new(move |bank: &Bank| {
+                if slots.lock().unwrap().is_empty() {
+                    error!(
+                        "Expected slot: not found got slot: {} hash: {}",
+                        bank.slot(),
+                        bank.hash()
+                    );
+                } else {
+                    let bank_hash_details::SlotDetails {
+                        slot: expected_slot,
+                        bank_hash: expected_hash,
+                        ..
+                    } = slots.lock().unwrap().remove(0);
+                    if bank.slot() != expected_slot || bank.hash().to_string() != expected_hash {
+                        error!("Expected slot: {expected_slot} hash: {expected_hash} got slot: {} hash: {}",
+                            bank.slot(), bank.hash());
+                    } else {
+                        info!("Expected slot: {expected_slot} hash: {expected_hash} correct");
+                    }
+                }
+            });

+            (Some(slot_callback as ProcessSlotCallback), None)
+        }
+    }
+}
+
+fn record_transactions(
+    recv: crossbeam_channel::Receiver<TransactionStatusMessage>,
+    slots: Arc<Mutex<Vec<SlotDetails>>>,
+) {
+    for tsm in recv {
+        if let TransactionStatusMessage::Batch(batch) = tsm {
+            let slot = batch.bank.slot();
+
+            assert_eq!(batch.transactions.len(), batch.commit_results.len());
+
+            let transactions: Vec<_> = batch
+                .transactions
+                .iter()
+                .zip(batch.commit_results)
+                .zip(batch.transaction_indexes)
+                .map(|((tx, commit_result), index)| {
+                    let message = tx.message();
+
+                    let accounts: Vec<String> = message
+                        .account_keys()
+                        .iter()
+                        .map(|acc| acc.to_string())
+                        .collect();
+
+                    let instructions = message
+                        .instructions()
+                        .iter()
+                        .map(|ix| UiInstruction::parse(ix, &message.account_keys(), None))
+                        .collect();
+
+                    let is_simple_vote_tx = tx.is_simple_vote_transaction();
+                    let execution_results = commit_result
+                        .ok()
+                        .map(|committed_tx| committed_tx.execution_details);
+
+                    TransactionDetails {
+                        signature: tx.signature().to_string(),
+                        accounts,
+                        instructions,
+                        is_simple_vote_tx,
+                        execution_results,
+                        index,
+                    }
+                })
+                .collect();
+
+            let mut slots = slots.lock().unwrap();
+
+            if let Some(recorded_slot) = slots.iter_mut().find(|f| f.slot == slot) {
+                recorded_slot.transactions.extend(transactions);
+            } else {
+                slots.push(SlotDetails {
+                    slot,
+                    transactions,
+                    ..Default::default()
+                });
+            }
+        }
+    }
+
+    for slot in slots.lock().unwrap().iter_mut() {
+        slot.transactions.sort_by(|a, b| a.index.cmp(&b.index));
+    }
+}
+
 #[cfg(not(target_env = "msvc"))]
 use jemallocator::Jemalloc;

@@ -665,9 +869,11 @@ fn main() {
     let matches = App::new(crate_name!())
         .about(crate_description!())
         .version(solana_version::version!())
-        .setting(AppSettings::InferSubcommands)
+        .global_setting(AppSettings::ColoredHelp)
+        .global_setting(AppSettings::InferSubcommands)
+        .global_setting(AppSettings::UnifiedHelpMessage)
+        .global_setting(AppSettings::VersionlessSubcommands)
         .setting(AppSettings::SubcommandRequiredElseHelp)
-        .setting(AppSettings::VersionlessSubcommands)
         .arg(
             Arg::with_name("ledger_path")
                 .short("l")
@@ -990,6 +1196,7 @@ fn main() {
         .subcommand(
             SubCommand::with_name("create-snapshot")
                 .about("Create a new ledger snapshot")
+                .arg(&os_memory_stats_reporting_arg)
                 .arg(&load_genesis_config_arg)
                 .args(&accounts_db_config_args)
                 .args(&snapshot_config_args)
@@ -1467,147 +1674,11 @@ fn main() {
             );

             let mut process_options = parse_process_options(&ledger_path, arg_matches);
-
-            // .default_value() does not work with .conflicts_with() in clap 2.33
-            // .conflicts_with("verify_slots")
-            // https://github.com/clap-rs/clap/issues/1605#issuecomment-722326915
-            // So open-code the conflicts_with() here
-            if arg_matches.occurrences_of("record_slots") > 0
-                && arg_matches.occurrences_of("verify_slots") > 0
-            {
-                eprintln!(
-                    "error: The argument '--verify-slots <FILENAME>' cannot be used with '--record-slots <FILENAME>'"
-                );
-                exit(1);
-            }
-
-            let mut transaction_status_sender = None;
-            let mut tx_receiver = None;
-
-            let (slot_callback, record_slots_file, recorded_slots) = if arg_matches
-                .occurrences_of("record_slots")
-                > 0
-            {
-                let filename = Path::new(arg_matches.value_of_os("record_slots").unwrap());
-
-                let file = File::create(filename).unwrap_or_else(|err| {
-                    eprintln!("Unable to write to file: {}: {:#}", filename.display(), err);
-                    exit(1);
-                });
-
-                let mut include_bank = false;
-                let mut include_tx = false;
-
-                if let Some(args) = arg_matches.values_of("record_slots_config") {
-                    for arg in args {
-                        match arg {
-                            "tx" => include_tx = true,
-                            "accounts" => include_bank = true,
-                            _ => unreachable!(),
-                        }
-                    }
-                }
-
-                let slot_hashes = Arc::new(Mutex::new(Vec::new()));
-
-                if include_tx {
-                    let (sender, receiver) = crossbeam_channel::unbounded();
-
-                    transaction_status_sender = Some(TransactionStatusSender { sender });
-
-                    let slots = Arc::clone(&slot_hashes);
-
-                    tx_receiver = Some(std::thread::spawn(move || {
-                        record_transactions(receiver, slots);
-                    }));
-                }
-
-                let slot_callback = Arc::new({
-                    let slots = Arc::clone(&slot_hashes);
-                    move |bank: &Bank| {
-                        let mut details = if include_bank {
-                            bank_hash_details::SlotDetails::try_from(bank).unwrap()
-                        } else {
-                            bank_hash_details::SlotDetails {
-                                slot: bank.slot(),
-                                bank_hash: bank.hash().to_string(),
-                                ..Default::default()
-                            }
-                        };
-
-                        let mut slots = slots.lock().unwrap();
-
-                        if let Some(recorded_slot) =
-                            slots.iter_mut().find(|f| f.slot == details.slot)
-                        {
-                            // copy all fields except transactions
-                            swap(
-                                &mut recorded_slot.transactions,
-                                &mut details.transactions,
-                            );
-
-                            *recorded_slot = details;
-                        } else {
-                            slots.push(details);
-                        }
-                    }
-                });
-
-                (
-                    Some(slot_callback as ProcessSlotCallback),
-                    Some(file),
-                    Some(slot_hashes),
-                )
-            } else if arg_matches.occurrences_of("verify_slots") > 0 {
-                let filename = Path::new(arg_matches.value_of_os("verify_slots").unwrap());
-
-                let file = File::open(filename).unwrap_or_else(|err| {
-                    eprintln!("Unable to read file: {}: {err:#}", filename.display());
-                    exit(1);
-                });
-
-                let reader = std::io::BufReader::new(file);
-
-                let details: bank_hash_details::BankHashDetails =
-                    serde_json::from_reader(reader).unwrap_or_else(|err| {
-                        eprintln!("Error loading slots file: {err:#}");
-                        exit(1);
-                    });
-
-                let slots = Arc::new(Mutex::new(details.bank_hash_details));
-
-                let slot_callback = Arc::new(move |bank: &Bank| {
-                    if slots.lock().unwrap().is_empty() {
-                        error!(
-                            "Expected slot: not found got slot: {} hash: {}",
-                            bank.slot(),
-                            bank.hash()
-                        );
-                    } else {
-                        let bank_hash_details::SlotDetails {
-                            slot: expected_slot,
-                            bank_hash: expected_hash,
-                            ..
-                        } = slots.lock().unwrap().remove(0);
-                        if bank.slot() != expected_slot
-                            || bank.hash().to_string() != expected_hash
-                        {
-                            error!("Expected slot: {expected_slot} hash: {expected_hash} got slot: {} hash: {}",
-                                bank.slot(), bank.hash());
-                        } else {
-                            info!(
-                                "Expected slot: {expected_slot} hash: {expected_hash} correct"
-                            );
-                        }
-                    }
-                });
-
-                (Some(slot_callback as ProcessSlotCallback), None, None)
-            } else {
-                (None, None, None)
-            };
-
+            let (slot_callback, slot_recorder_config) = setup_slot_recording(arg_matches);
             process_options.slot_callback = slot_callback;
+            let transaction_status_sender = slot_recorder_config
+                .as_ref()
+                .and_then(|config| config.transaction_status_sender.clone());

             let output_format =
                 OutputFormat::from_matches(arg_matches, "output_format", false);
@@ -1651,22 +1722,26 @@ fn main() {
                     .ok();
             }

-            if let Some(tx_receiver) = tx_receiver {
-                tx_receiver.join().unwrap();
-            }
-
-            if let Some(recorded_slots_file) = record_slots_file {
-                if let Ok(recorded_slots) = recorded_slots.clone().unwrap().lock() {
-                    let bank_hashes =
-                        bank_hash_details::BankHashDetails::new(recorded_slots.to_vec());
-
-                    // writing the json file ends up with a syscall for each number, comma, indentation etc.
-                    // use BufWriter to speed things up
+            if let Some(mut slot_recorder_config) = slot_recorder_config {
+                // Drop transaction_status_sender to break transaction_recorder
+                // out of its receive loop
+                let transaction_status_sender =
+                    slot_recorder_config.transaction_status_sender.take();
+                drop(transaction_status_sender);
+                if let Some(transaction_recorder) =
+                    slot_recorder_config.transaction_recorder
+                {
+                    transaction_recorder.join().unwrap();
+                }

-                    let writer = std::io::BufWriter::new(recorded_slots_file);
+                let slot_details = slot_recorder_config.slot_details.lock().unwrap();
+                let bank_hashes =
+                    bank_hash_details::BankHashDetails::new(slot_details.to_vec());

-                    serde_json::to_writer_pretty(writer, &bank_hashes).unwrap();
-                }
+                // writing the json file ends up with a syscall for each number, comma, indentation etc.
+                // use BufWriter to speed things up
+                let writer = std::io::BufWriter::new(slot_recorder_config.file);
+                serde_json::to_writer_pretty(writer, &bank_hashes).unwrap();
             }

             exit_signal.store(true, Ordering::Relaxed);
@@ -1716,6 +1791,21 @@ fn main() {
             }
         }
         ("create-snapshot", Some(arg_matches)) => {
+            let exit_signal = Arc::new(AtomicBool::new(false));
+            let system_monitor_service = arg_matches
+                .is_present("os_memory_stats_reporting")
+                .then(|| {
+                    SystemMonitorService::new(
+                        Arc::clone(&exit_signal),
+                        SystemMonitorStatsReportConfig {
+                            report_os_memory_stats: true,
+                            report_os_network_stats: false,
+                            report_os_cpu_stats: false,
+                            report_os_disk_stats: false,
+                        },
+                    )
+                });
+
             let is_incremental = arg_matches.is_present("incremental");
             let is_minimized = arg_matches.is_present("minimized");
             let output_directory = value_t!(arg_matches, "output_directory", PathBuf)
@@ -1871,11 +1961,6 @@ fn main() {
                 process_options,
                 None,
             );
-            // Snapshot creation will implicitly perform AccountsDb
-            // flush and clean operations. These operations cannot be
-            // run concurrently, so ensure ABS is stopped to avoid that
-            // possibility.
-            accounts_background_service.join().unwrap();

             let mut bank = bank_forks
                 .read()
                 .unwrap()
                 .get(snapshot_slot)
                 .unwrap_or_else(|| {
                     eprintln!("Error: Slot {snapshot_slot} is not available");
                     exit(1);
                 });

+            // Snapshot creation will implicitly perform AccountsDb
+            // flush and clean operations. These operations cannot be
+            // run concurrently, so ensure ABS is stopped to avoid that
+            // possibility.
+            accounts_background_service.join().unwrap();
+
+            // Similar to waiting for ABS to stop, we also wait for the initial startup
+            // verification to complete. The startup verification runs in the background
+            // and verifies the snapshot's accounts hashes are correct. We only want a
+            // single accounts hash calculation to run at a time, and since snapshot
+            // creation below will calculate the accounts hash, we wait for the startup
+            // verification to complete before proceeding.
+            bank.rc
+                .accounts
+                .accounts_db
+                .verify_accounts_hash_in_bg
+                .wait_for_complete();
+
             let child_bank_required = rent_burn_percentage.is_ok()
                 || hashes_per_tick.is_some()
                 || remove_stake_accounts
@@ -2249,6 +2352,11 @@ fn main() {
                 "Shred version: {}",
                 compute_shred_version(&genesis_config.hash(), Some(&bank.hard_forks()))
             );
+
+            if let Some(system_monitor_service) = system_monitor_service {
+                exit_signal.store(true, Ordering::Relaxed);
+                system_monitor_service.join().unwrap();
+            }
         }
         ("accounts", Some(arg_matches)) => {
             let process_options = parse_process_options(&ledger_path, arg_matches);
@@ -2294,7 +2402,7 @@ fn main() {
             let accounts_streamer =
                 AccountsOutputStreamer::new(bank, output_format, config);

-            let (_, scan_time) = measure!(
+            let (_, scan_time) = measure_time!(
                 accounts_streamer
                     .output()
                     .map_err(|err| error!("Error while outputting accounts: {err}")),
@@ -2839,64 +2947,3 @@ fn main() {
     measure_total_execution_time.stop();
     info!("{}", measure_total_execution_time);
 }
-
-fn record_transactions(
-    recv: crossbeam_channel::Receiver<TransactionStatusMessage>,
-    slots: Arc<Mutex<Vec<SlotDetails>>>,
-) {
-    for tsm in recv {
-        if let TransactionStatusMessage::Batch(batch) = tsm {
-            let slot = batch.bank.slot();
-
-            assert_eq!(batch.transactions.len(), batch.execution_results.len());
-
-            let transactions: Vec<_> = batch
-                .transactions
-                .iter()
-                .zip(batch.execution_results)
-                .zip(batch.transaction_indexes)
-                .map(|((tx, execution_results), index)| {
-                    let message = tx.message();
-
-                    let accounts: Vec<String> = message
-                        .account_keys()
-                        .iter()
-                        .map(|acc| acc.to_string())
-                        .collect();
-
-                    let instructions = message
-                        .instructions()
-                        .iter()
-                        .map(|ix| UiInstruction::parse(ix, &message.account_keys(), None))
-                        .collect();
-
-                    let is_simple_vote_tx = tx.is_simple_vote_transaction();
-
-                    TransactionDetails {
-                        accounts,
-                        instructions,
-                        is_simple_vote_tx,
-                        execution_results,
-                        index,
-                    }
-                })
-                .collect();
-
-            let mut slots = slots.lock().unwrap();
-
-            if let Some(recorded_slot) = slots.iter_mut().find(|f| f.slot == slot) {
-                recorded_slot.transactions.extend(transactions);
-            } else {
-                slots.push(SlotDetails {
-                    slot,
-                    transactions,
-                    ..Default::default()
-                });
-            }
-        }
-    }
-
-    for slot in slots.lock().unwrap().iter_mut() {
-        slot.transactions.sort_by(|a, b| a.index.cmp(&b.index));
-    }
-}
diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml
index 8ac69167384bf2..61ab02470b5f05 100644
--- a/ledger/Cargo.toml
+++ b/ledger/Cargo.toml
@@ -31,6 +31,7 @@ mockall = { workspace = true }
 num_cpus = { workspace = true }
 num_enum = { workspace = true }
 prost = { workspace = true }
+qualifier_attr = { workspace = true }
 rand = { workspace = true }
 rand_chacha = { workspace = true }
 rayon = { workspace = true }
@@ -57,6 +58,7 @@
 solana-stake-program = { workspace = true }
 solana-storage-bigtable = { workspace = true }
 solana-storage-proto = { workspace = true }
 solana-svm = { workspace = true }
+solana-timings = { workspace = true }
 solana-transaction-status = { workspace = true }
 solana-vote = { workspace = true }
 solana-vote-program = { workspace = true }
diff --git a/ledger/benches/blockstore_processor.rs b/ledger/benches/blockstore_processor.rs
index 489da8f432cfd2..e72f75186ca6f2 100644
--- a/ledger/benches/blockstore_processor.rs
+++ b/ledger/benches/blockstore_processor.rs
@@ -10,9 +10,8 @@ use {
         blockstore_processor::{execute_batch, TransactionBatchWithIndexes},
         genesis_utils::{create_genesis_config, GenesisConfigInfo},
     },
-    solana_program_runtime::timings::ExecuteTimings,
     solana_runtime::{
-        bank::Bank, prioritization_fee_cache::PrioritizationFeeCache,
+        bank::Bank, bank_forks::BankForks, prioritization_fee_cache::PrioritizationFeeCache,
         transaction_batch::TransactionBatch,
     },
     solana_sdk::{
@@ -24,7 +23,11 @@ use {
         system_program, system_transaction,
         transaction::SanitizedTransaction,
     },
-    std::{borrow::Cow, sync::Arc},
+    solana_timings::ExecuteTimings,
+    std::{
+        borrow::Cow,
+        sync::{Arc, RwLock},
+    },
     test::Bencher,
 };

@@ -74,6 +77,7 @@ fn create_transactions(bank: &Bank, num: usize) -> Vec<Transaction> {

 struct BenchFrame {
     bank: Arc<Bank>,
+    _bank_forks: Arc<RwLock<BankForks>>,
     prioritization_fee_cache: PrioritizationFeeCache,
 }

@@ -100,10 +104,11 @@ fn setup(apply_cost_tracker_during_replay: bool) -> BenchFrame {
     bank.write_cost_tracker()
         .unwrap()
         .set_limits(u64::MAX, u64::MAX, u64::MAX);
-    let bank = bank.wrap_with_bank_forks_for_tests().0;
+    let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests();

     let prioritization_fee_cache = PrioritizationFeeCache::default();
     BenchFrame {
         bank,
+        _bank_forks: bank_forks,
         prioritization_fee_cache,
     }
 }
@@ -124,6 +129,7 @@ fn bench_execute_batch(

     let BenchFrame {
         bank,
+        _bank_forks,
         prioritization_fee_cache,
     } = setup(apply_cost_tracker_during_replay);
     let transactions = create_transactions(&bank, 2_usize.pow(20));
diff --git a/ledger/src/bank_forks_utils.rs b/ledger/src/bank_forks_utils.rs
index 6a48ebd2cebc9d..10a82e25ee2a53 100644
--- a/ledger/src/bank_forks_utils.rs
+++ b/ledger/src/bank_forks_utils.rs
@@ -336,6 +336,14 @@ fn bank_forks_from_snapshot(
         bank
     };

+    // We must inform accounts-db of the latest full snapshot slot, which is used by the background
+    // processes to handle zero lamport accounts. Since we've now successfully loaded the bank
+    // from snapshots, this is a good time to do that update.
+ bank.rc + .accounts + .accounts_db + .set_latest_full_snapshot_slot(full_snapshot_archive_info.slot()); + let full_snapshot_hash = FullSnapshotHash(( full_snapshot_archive_info.slot(), *full_snapshot_archive_info.hash(), diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 0342a323905876..21d5418b7c6038 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -15,6 +15,7 @@ use { AccessType, BlockstoreOptions, LedgerColumnOptions, BLOCKSTORE_DIRECTORY_ROCKS_FIFO, BLOCKSTORE_DIRECTORY_ROCKS_LEVEL, }, + blockstore_processor::BlockstoreProcessorError, leader_schedule_cache::LeaderScheduleCache, next_slots_iterator::NextSlotsIterator, shred::{ @@ -47,6 +48,7 @@ use { account::ReadableAccount, address_lookup_table::state::AddressLookupTable, clock::{Slot, UnixTimestamp, DEFAULT_TICKS_PER_SECOND}, + feature_set::FeatureSet, genesis_config::{GenesisConfig, DEFAULT_GENESIS_ARCHIVE, DEFAULT_GENESIS_FILE}, hash::Hash, pubkey::Pubkey, @@ -170,6 +172,31 @@ impl AsRef for WorkingEntry { } } +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct LastFECSetCheckResults { + last_fec_set_merkle_root: Option, + is_retransmitter_signed: bool, +} + +impl LastFECSetCheckResults { + fn get_last_fec_set_merkle_root( + &self, + feature_set: &FeatureSet, + ) -> std::result::Result, BlockstoreProcessorError> { + if feature_set.is_active(&solana_sdk::feature_set::vote_only_full_fec_sets::id()) + && self.last_fec_set_merkle_root.is_none() + { + return Err(BlockstoreProcessorError::IncompleteFinalFecSet); + } else if feature_set + .is_active(&solana_sdk::feature_set::vote_only_retransmitter_signed_fec_sets::id()) + && !self.is_retransmitter_signed + { + return Err(BlockstoreProcessorError::InvalidRetransmitterSignatureFinalFecSet); + } + Ok(self.last_fec_set_merkle_root) + } +} + pub struct InsertResults { completed_data_set_infos: Vec, duplicate_shreds: Vec, @@ -2412,7 +2439,7 @@ impl Blockstore { DEFAULT_TICKS_PER_SECOND * timestamp().saturating_sub(first_timestamp) / 1000; // Seek to the first shred with index >= start_index - db_iterator.seek(&C::key((slot, start_index))); + db_iterator.seek(C::key((slot, start_index))); // The index of the first missing shred in the slot let mut prev_index = start_index; @@ -3680,15 +3707,53 @@ impl Blockstore { self.get_slot_entries_in_block(slot, vec![(start_index, end_index)], slot_meta) } - /// Returns true if the last `DATA_SHREDS_PER_FEC_BLOCK` data shreds of a - /// slot have the same merkle root, indicating they are a part of the same - /// FEC set. - /// Will fail if: + /// Performs checks on the last fec set of a replayed slot, and returns the block_id. 
+ /// Returns: + /// - BlockstoreProcessorError::IncompleteFinalFecSet + /// if the last fec set is not full + /// - BlockstoreProcessorError::InvalidRetransmitterSignatureFinalFecSet + /// if the last fec set is not signed by retransmitters + pub fn check_last_fec_set_and_get_block_id( + &self, + slot: Slot, + bank_hash: Hash, + feature_set: &FeatureSet, + ) -> std::result::Result, BlockstoreProcessorError> { + let results = self.check_last_fec_set(slot); + let Ok(results) = results else { + warn!( + "Unable to check the last fec set for slot {} {}, + marking as dead: {results:?}", + slot, bank_hash, + ); + if feature_set.is_active(&solana_sdk::feature_set::vote_only_full_fec_sets::id()) { + return Err(BlockstoreProcessorError::IncompleteFinalFecSet); + } + return Ok(None); + }; + // Update metrics + if results.last_fec_set_merkle_root.is_none() { + datapoint_warn!("incomplete_final_fec_set", ("slot", slot, i64),); + } + // Return block id / error based on feature flags + results.get_last_fec_set_merkle_root(feature_set) + } + + /// Performs checks on the last FEC set for this slot. + /// - `block_id` will be `Some(mr)` if the last `DATA_SHREDS_PER_FEC_BLOCK` data shreds of + /// `slot` have the same merkle root of `mr`, indicating they are a part of the same FEC set. + /// This indicates that the last FEC set is sufficiently sized. + /// - `is_retransmitter_signed` will be true if the last `DATA_SHREDS_PER_FEC_BLOCK` + /// data shreds of `slot` are of the retransmitter variant. Since we already discard + /// invalid signatures on ingestion, this indicates that the last FEC set is properly + /// signed by retransmitters. + /// + /// Will error if: /// - Slot meta is missing /// - LAST_SHRED_IN_SLOT flag has not been received /// - There are missing shreds in the last fec set /// - The block contains legacy shreds - pub fn is_last_fec_set_full(&self, slot: Slot) -> Result { + fn check_last_fec_set(&self, slot: Slot) -> Result { // We need to check if the last FEC set index contains at least `DATA_SHREDS_PER_FEC_BLOCK` data shreds. // We compare the merkle roots of the last `DATA_SHREDS_PER_FEC_BLOCK` shreds in this block. 
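// (A sketch of the shape of the check below, eliding error handling: map each
// of those shreds to a (merkle_root, is_retransmitter_signed) pair, dedup
// adjacent equal pairs, and require exactly one surviving pair.)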
// Since the merkle root contains the fec_set_index, if all of them match, we know that the last fec set has @@ -3703,11 +3768,14 @@ impl Blockstore { const_assert_eq!(MINIMUM_INDEX, 31); let Some(start_index) = last_shred_index.checked_sub(MINIMUM_INDEX) else { warn!("Slot {slot} has only {} shreds, fewer than the {DATA_SHREDS_PER_FEC_BLOCK} required", last_shred_index + 1); - return Ok(false); + return Ok(LastFECSetCheckResults { + last_fec_set_merkle_root: None, + is_retransmitter_signed: false, + }); }; let keys = (start_index..=last_shred_index).map(|index| (slot, index)); - let last_merkle_roots: Vec = self + let deduped_shred_checks: Vec<(Hash, bool)> = self .data_shred_cf .multi_get_bytes(keys) .into_iter() @@ -3718,17 +3786,34 @@ impl Blockstore { warn!("Missing shred for {slot} index {shred_index}"); BlockstoreError::MissingShred(slot, shred_index) })?; - shred::layout::get_merkle_root(&shred_bytes).ok_or_else(|| { - let shred_index = start_index + u64::try_from(offset).unwrap(); - warn!("Found legacy shred for {slot}, index {shred_index}"); - BlockstoreError::LegacyShred(slot, shred_index) - }) + let is_retransmitter_signed = + shred::layout::is_retransmitter_signed_variant(&shred_bytes).map_err(|_| { + let shred_index = start_index + u64::try_from(offset).unwrap(); + warn!("Found legacy shred for {slot}, index {shred_index}"); + BlockstoreError::LegacyShred(slot, shred_index) + })?; + let merkle_root = + shred::layout::get_merkle_root(&shred_bytes).ok_or_else(|| { + let shred_index = start_index + u64::try_from(offset).unwrap(); + warn!("Unable to read merkle root for {slot}, index {shred_index}"); + BlockstoreError::MissingMerkleRoot(slot, shred_index) + })?; + Ok((merkle_root, is_retransmitter_signed)) }) .dedup_by(|res1, res2| res1.as_ref().ok() == res2.as_ref().ok()) - .collect::>>()?; + .collect::>>()?; - // After the dedup there should be exactly one Hash left if the shreds were part of the same FEC set. 
- Ok(last_merkle_roots.len() == 1) + // After the dedup there should be exactly one Hash left and one true value + let &[(block_id, is_retransmitter_signed)] = deduped_shred_checks.as_slice() else { + return Ok(LastFECSetCheckResults { + last_fec_set_merkle_root: None, + is_retransmitter_signed: false, + }); + }; + Ok(LastFECSetCheckResults { + last_fec_set_merkle_root: Some(block_id), + is_retransmitter_signed, + }) } /// Returns a mapping from each elements of `slots` to a list of the @@ -5224,6 +5309,7 @@ pub mod tests { solana_runtime::bank::{Bank, RewardType}, solana_sdk::{ clock::{DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT}, + feature_set::{vote_only_full_fec_sets, vote_only_retransmitter_signed_fec_sets}, hash::{self, hash, Hash}, instruction::CompiledInstruction, message::v0::LoadedAddresses, @@ -11870,7 +11956,7 @@ pub mod tests { } #[test] - fn test_is_last_fec_set_full() { + fn test_check_last_fec_set() { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); @@ -11887,7 +11973,7 @@ pub mod tests { // Missing slot meta assert_matches!( - blockstore.is_last_fec_set_full(0), + blockstore.check_last_fec_set(0), Err(BlockstoreError::SlotUnavailable) ); @@ -11902,7 +11988,7 @@ pub mod tests { let meta = blockstore.meta(slot).unwrap().unwrap(); assert!(meta.last_index.is_none()); assert_matches!( - blockstore.is_last_fec_set_full(slot), + blockstore.check_last_fec_set(slot), Err(BlockstoreError::UnknownLastIndex(_)) ); blockstore.run_purge(slot, slot, PurgeType::Exact).unwrap(); @@ -11914,14 +12000,17 @@ pub mod tests { let meta = blockstore.meta(slot).unwrap().unwrap(); assert_eq!(meta.last_index, Some(total_shreds - 1)); assert_matches!( - blockstore.is_last_fec_set_full(slot), + blockstore.check_last_fec_set(slot), Err(BlockstoreError::MissingShred(_, _)) ); blockstore.run_purge(slot, slot, PurgeType::Exact).unwrap(); // Full slot + let block_id = data_shreds[0].merkle_root().unwrap(); blockstore.insert_shreds(data_shreds, None, false).unwrap(); - assert!(blockstore.is_last_fec_set_full(slot).unwrap()); + let results = blockstore.check_last_fec_set(slot).unwrap(); + assert_eq!(results.last_fec_set_merkle_root, Some(block_id)); + assert!(results.is_retransmitter_signed); blockstore.run_purge(slot, slot, PurgeType::Exact).unwrap(); // Slot has less than DATA_SHREDS_PER_FEC_BLOCK shreds in total @@ -11959,7 +12048,9 @@ pub mod tests { let mut slot_meta = blockstore.meta(slot).unwrap().unwrap(); slot_meta.last_index = Some(last_index as u64); blockstore.put_meta(slot, &slot_meta).unwrap(); - assert!(!blockstore.is_last_fec_set_full(slot).unwrap()); + let results = blockstore.check_last_fec_set(slot).unwrap(); + assert!(results.last_fec_set_merkle_root.is_none()); + assert!(!results.is_retransmitter_signed); blockstore.run_purge(slot, slot, PurgeType::Exact).unwrap(); // Slot has more than DATA_SHREDS_PER_FEC_BLOCK in total, but last FEC set has less @@ -11998,6 +12089,123 @@ pub mod tests { let mut slot_meta = blockstore.meta(slot).unwrap().unwrap(); slot_meta.last_index = Some(last_index as u64); blockstore.put_meta(slot, &slot_meta).unwrap(); - assert!(!blockstore.is_last_fec_set_full(slot).unwrap()); + let results = blockstore.check_last_fec_set(slot).unwrap(); + assert!(results.last_fec_set_merkle_root.is_none()); + assert!(!results.is_retransmitter_signed); + blockstore.run_purge(slot, slot, PurgeType::Exact).unwrap(); + + // Slot is full, but does not contain retransmitter shreds + let fec_set_index = 0; + let 
(first_data_shreds, _, _) = + setup_erasure_shreds_with_index_and_chained_merkle_and_last_in_slot( + slot, + parent_slot, + 200, + fec_set_index, + // Do not set merkle root, so shreds are not signed + None, + true, + ); + assert!(first_data_shreds.len() > DATA_SHREDS_PER_FEC_BLOCK); + let block_id = first_data_shreds[0].merkle_root().unwrap(); + blockstore + .insert_shreds(first_data_shreds, None, false) + .unwrap(); + let results = blockstore.check_last_fec_set(slot).unwrap(); + assert_eq!(results.last_fec_set_merkle_root, Some(block_id)); + assert!(!results.is_retransmitter_signed); + } + + #[test] + fn test_last_fec_set_check_results() { + let enabled_feature_set = FeatureSet::all_enabled(); + let disabled_feature_set = FeatureSet::default(); + let mut full_only = FeatureSet::default(); + full_only.activate(&vote_only_full_fec_sets::id(), 0); + let mut retransmitter_only = FeatureSet::default(); + retransmitter_only.activate(&vote_only_retransmitter_signed_fec_sets::id(), 0); + + let results = LastFECSetCheckResults { + last_fec_set_merkle_root: None, + is_retransmitter_signed: false, + }; + assert_matches!( + results.get_last_fec_set_merkle_root(&enabled_feature_set), + Err(BlockstoreProcessorError::IncompleteFinalFecSet) + ); + assert_matches!( + results.get_last_fec_set_merkle_root(&full_only), + Err(BlockstoreProcessorError::IncompleteFinalFecSet) + ); + assert_matches!( + results.get_last_fec_set_merkle_root(&retransmitter_only), + Err(BlockstoreProcessorError::InvalidRetransmitterSignatureFinalFecSet) + ); + assert!(results + .get_last_fec_set_merkle_root(&disabled_feature_set) + .unwrap() + .is_none()); + + let block_id = Hash::new_unique(); + let results = LastFECSetCheckResults { + last_fec_set_merkle_root: Some(block_id), + is_retransmitter_signed: false, + }; + assert_matches!( + results.get_last_fec_set_merkle_root(&enabled_feature_set), + Err(BlockstoreProcessorError::InvalidRetransmitterSignatureFinalFecSet) + ); + assert_eq!( + results.get_last_fec_set_merkle_root(&full_only).unwrap(), + Some(block_id) + ); + assert_matches!( + results.get_last_fec_set_merkle_root(&retransmitter_only), + Err(BlockstoreProcessorError::InvalidRetransmitterSignatureFinalFecSet) + ); + assert_eq!( + results + .get_last_fec_set_merkle_root(&disabled_feature_set) + .unwrap(), + Some(block_id) + ); + + let results = LastFECSetCheckResults { + last_fec_set_merkle_root: None, + is_retransmitter_signed: true, + }; + assert_matches!( + results.get_last_fec_set_merkle_root(&enabled_feature_set), + Err(BlockstoreProcessorError::IncompleteFinalFecSet) + ); + assert_matches!( + results.get_last_fec_set_merkle_root(&full_only), + Err(BlockstoreProcessorError::IncompleteFinalFecSet) + ); + assert!(results + .get_last_fec_set_merkle_root(&retransmitter_only) + .unwrap() + .is_none()); + assert!(results + .get_last_fec_set_merkle_root(&disabled_feature_set) + .unwrap() + .is_none()); + + let block_id = Hash::new_unique(); + let results = LastFECSetCheckResults { + last_fec_set_merkle_root: Some(block_id), + is_retransmitter_signed: true, + }; + for feature_set in [ + enabled_feature_set, + disabled_feature_set, + full_only, + retransmitter_only, + ] { + assert_eq!( + results.get_last_fec_set_merkle_root(&feature_set).unwrap(), + Some(block_id) + ); + } } } diff --git a/ledger/src/blockstore_db.rs b/ledger/src/blockstore_db.rs index dca8d9b524c20e..00eea6f811ebcb 100644 --- a/ledger/src/blockstore_db.rs +++ b/ledger/src/blockstore_db.rs @@ -159,6 +159,8 @@ pub enum BlockstoreError { MissingShred(Slot, u64), 
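// (These variants are surfaced by blockstore's last-FEC-set checks; the
// MissingMerkleRoot variant added just below covers shreds whose merkle root
// cannot be read.)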
#[error("legacy shred slot {0}, index {1}")] LegacyShred(Slot, u64), + #[error("unable to read merkle root slot {0}, index {1}")] + MissingMerkleRoot(Slot, u64), } pub type Result = std::result::Result; diff --git a/ledger/src/blockstore_meta.rs b/ledger/src/blockstore_meta.rs index 002d4970dd5403..f3940fe618444d 100644 --- a/ledger/src/blockstore_meta.rs +++ b/ledger/src/blockstore_meta.rs @@ -122,7 +122,11 @@ pub struct ShredIndex { /// Erasure coding information pub struct ErasureMeta { /// Which erasure set in the slot this is - fec_set_index: u64, + #[serde( + serialize_with = "serde_compat_cast::serialize::<_, u64, _>", + deserialize_with = "serde_compat_cast::deserialize::<_, u64, _>" + )] + fec_set_index: u32, /// First coding index in the FEC set first_coding_index: u64, /// Index of the first received coding shred in the FEC set @@ -131,6 +135,39 @@ pub struct ErasureMeta { config: ErasureConfig, } +// Helper module to serde values by type-casting to an intermediate +// type for backward compatibility. +mod serde_compat_cast { + use super::*; + + // Serializes a value of type T by first type-casting to type R. + pub(super) fn serialize( + &val: &T, + serializer: S, + ) -> Result + where + R: TryFrom + Serialize, + >::Error: std::fmt::Display, + { + R::try_from(val) + .map_err(serde::ser::Error::custom)? + .serialize(serializer) + } + + // Deserializes a value of type R and type-casts it to type T. + pub(super) fn deserialize<'de, D, R, T>(deserializer: D) -> Result + where + D: Deserializer<'de>, + R: Deserialize<'de>, + T: TryFrom, + >::Error: std::fmt::Display, + { + R::deserialize(deserializer) + .map(T::try_from)? + .map_err(serde::de::Error::custom) + } +} + #[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)] pub(crate) struct ErasureConfig { num_data: usize, @@ -349,7 +386,7 @@ impl ErasureMeta { let first_coding_index = u64::from(shred.first_coding_index()?); let first_received_coding_index = u64::from(shred.index()); let erasure_meta = ErasureMeta { - fec_set_index: u64::from(shred.fec_set_index()), + fec_set_index: shred.fec_set_index(), config, first_coding_index, first_received_coding_index, @@ -384,7 +421,8 @@ impl ErasureMeta { pub(crate) fn data_shreds_indices(&self) -> Range { let num_data = self.config.num_data as u64; - self.fec_set_index..self.fec_set_index + num_data + let fec_set_index = u64::from(self.fec_set_index); + fec_set_index..fec_set_index + num_data } pub(crate) fn coding_shreds_indices(&self) -> Range { @@ -397,11 +435,8 @@ impl ErasureMeta { } pub(crate) fn next_fec_set_index(&self) -> Option { - let num_data = u64::try_from(self.config.num_data).ok()?; - self.fec_set_index - .checked_add(num_data) - .map(u32::try_from)? 
- .ok() + let num_data = u32::try_from(self.config.num_data).ok()?; + self.fec_set_index.checked_add(num_data) } pub(crate) fn status(&self, index: &Index) -> ErasureMetaStatus { @@ -575,7 +610,7 @@ mod test { }; let e_meta = ErasureMeta { fec_set_index, - first_coding_index: fec_set_index, + first_coding_index: u64::from(fec_set_index), config: erasure_config, first_received_coding_index: 0, }; @@ -754,7 +789,7 @@ mod test { config: erasure_config, }; let mut new_erasure_meta = ErasureMeta { - fec_set_index: set_index, + fec_set_index: u32::try_from(set_index).unwrap(), first_coding_index: set_index, first_received_coding_index: 0, config: erasure_config, diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index c21ebda1cd55c1..24ff5ce8fb7baa 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -1,3 +1,5 @@ +#[cfg(feature = "dev-context-only-utils")] +use qualifier_attr::qualifiers; use { crate::{ block_error::BlockError, @@ -25,12 +27,8 @@ use { solana_entry::entry::{ self, create_ticks, Entry, EntrySlice, EntryType, EntryVerificationStatus, VerifyRecyclers, }, - solana_measure::{measure, measure::Measure}, + solana_measure::{measure::Measure, measure_us}, solana_metrics::datapoint_error, - solana_program_runtime::{ - report_execute_timings, - timings::{ExecuteTimingType, ExecuteTimings}, - }, solana_rayon_threadlimit::{get_max_thread_count, get_thread_count}, solana_runtime::{ accounts_background_service::{AbsRequestSender, SnapshotRequestKind}, @@ -50,7 +48,6 @@ use { genesis_config::GenesisConfig, hash::Hash, pubkey::Pubkey, - rent_debits::RentDebits, saturating_add_assign, signature::{Keypair, Signature}, timing, @@ -60,12 +57,10 @@ use { }, }, solana_svm::{ + transaction_commit_result::TransactionCommitResult, transaction_processor::ExecutionRecordingConfig, - transaction_results::{ - TransactionExecutionDetails, TransactionExecutionResult, - TransactionLoadedAccountsStats, TransactionResults, - }, }, + solana_timings::{report_execute_timings, ExecuteTimingType, ExecuteTimings}, solana_transaction_status::token_balances::TransactionTokenBalancesSet, solana_vote::vote_account::VoteAccountsHashMap, std::{ @@ -106,16 +101,13 @@ fn first_err(results: &[Result<()>]) -> Result<()> { // Includes transaction signature for unit-testing fn get_first_error( batch: &TransactionBatch, - fee_collection_results: Vec>, + commit_results: &[TransactionCommitResult], ) -> Option<(Result<()>, Signature)> { let mut first_err = None; - for (result, transaction) in fee_collection_results - .iter() - .zip(batch.sanitized_transactions()) - { - if let Err(ref err) = result { + for (commit_result, transaction) in commit_results.iter().zip(batch.sanitized_transactions()) { + if let Err(err) = commit_result { if first_err.is_none() { - first_err = Some((result.clone(), *transaction.signature())); + first_err = Some((Err(err.clone()), *transaction.signature())); } warn!( "Unexpected validator error: {:?}, transaction: {:?}", @@ -165,7 +157,7 @@ pub fn execute_batch( vec![] }; - let (tx_results, balances) = batch.bank().load_execute_and_commit_transactions( + let (commit_results, balances) = batch.bank().load_execute_and_commit_transactions( batch, MAX_PROCESSING_AGE, transaction_status_sender.is_some(), @@ -176,45 +168,33 @@ pub fn execute_batch( bank_utils::find_and_send_votes( batch.sanitized_transactions(), - &tx_results, + &commit_results, replay_vote_sender, ); - let TransactionResults { - fee_collection_results, - 
loaded_accounts_stats, - execution_results, - rent_debits, - .. - } = tx_results; - - let (check_block_cost_limits_result, check_block_cost_limits_time): (Result<()>, Measure) = - measure!(if bank - .feature_set - .is_active(&feature_set::apply_cost_tracker_during_replay::id()) - { - check_block_cost_limits( - bank, - &loaded_accounts_stats, - &execution_results, - batch.sanitized_transactions(), - ) - } else { - Ok(()) - }); + let (check_block_cost_limits_result, check_block_cost_limits_us) = measure_us!(if bank + .feature_set + .is_active(&feature_set::apply_cost_tracker_during_replay::id()) + { + check_block_cost_limits(bank, &commit_results, batch.sanitized_transactions()) + } else { + Ok(()) + }); timings.saturating_add_in_place( ExecuteTimingType::CheckBlockLimitsUs, - check_block_cost_limits_time.as_us(), + check_block_cost_limits_us, ); check_block_cost_limits_result?; - let executed_transactions = execution_results + let committed_transactions = commit_results .iter() .zip(batch.sanitized_transactions()) - .filter_map(|(execution_result, tx)| execution_result.was_executed().then_some(tx)) + .filter_map(|(commit_result, tx)| commit_result.is_ok().then_some(tx)) .collect_vec(); + let first_err = get_first_error(batch, &commit_results); + if let Some(transaction_status_sender) = transaction_status_sender { let transactions = batch.sanitized_transactions().to_vec(); let post_token_balances = if record_token_balances { @@ -229,17 +209,15 @@ pub fn execute_batch( transaction_status_sender.send_transaction_status_batch( bank.clone(), transactions, - execution_results, + commit_results, balances, token_balances, - rent_debits, transaction_indexes.to_vec(), ); } - prioritization_fee_cache.update(bank, executed_transactions.into_iter()); + prioritization_fee_cache.update(bank, committed_transactions.into_iter()); - let first_err = get_first_error(batch, fee_collection_results); first_err.map(|(result, _)| result).unwrap_or(Ok(())) } @@ -248,27 +226,22 @@ pub fn execute_batch( // reported to metric `replay-stage-mark_dead_slot` fn check_block_cost_limits( bank: &Bank, - loaded_accounts_stats: &[Result], - execution_results: &[TransactionExecutionResult], + commit_results: &[TransactionCommitResult], sanitized_transactions: &[SanitizedTransaction], ) -> Result<()> { - assert_eq!(loaded_accounts_stats.len(), execution_results.len()); + assert_eq!(sanitized_transactions.len(), commit_results.len()); - let tx_costs_with_actual_execution_units: Vec<_> = execution_results + let tx_costs_with_actual_execution_units: Vec<_> = commit_results .iter() - .zip(loaded_accounts_stats) .zip(sanitized_transactions) - .filter_map(|((execution_result, loaded_accounts_stats), tx)| { - if let Some(details) = execution_result.details() { - let tx_cost = CostModel::calculate_cost_for_executed_transaction( + .filter_map(|(commit_result, tx)| { + if let Ok(committed_tx) = commit_result { + Some(CostModel::calculate_cost_for_executed_transaction( tx, - details.executed_units, - loaded_accounts_stats - .as_ref() - .map_or(0, |stats| stats.loaded_accounts_data_size), + committed_tx.execution_details.executed_units, + committed_tx.loaded_account_stats.loaded_accounts_data_size, &bank.feature_set, - ); - Some(tx_cost) + )) } else { None } @@ -329,20 +302,15 @@ fn execute_batches_internal( let transaction_count = transaction_batch.batch.sanitized_transactions().len() as u64; let mut timings = ExecuteTimings::default(); - let (result, execute_batches_time): (Result<()>, Measure) = measure!( - { - execute_batch( - 
transaction_batch, - bank, - transaction_status_sender, - replay_vote_sender, - &mut timings, - log_messages_bytes_limit, - prioritization_fee_cache, - ) - }, - "execute_batch", - ); + let (result, execute_batches_us) = measure_us!(execute_batch( + transaction_batch, + bank, + transaction_status_sender, + replay_vote_sender, + &mut timings, + log_messages_bytes_limit, + prioritization_fee_cache, + )); let thread_index = replay_tx_thread_pool.current_thread_index().unwrap(); execution_timings_per_thread @@ -355,14 +323,14 @@ fn execute_batches_internal( total_transactions_executed, execute_timings: total_thread_execute_timings, } = thread_execution_time; - *total_thread_us += execute_batches_time.as_us(); + *total_thread_us += execute_batches_us; *total_transactions_executed += transaction_count; total_thread_execute_timings .saturating_add_in_place(ExecuteTimingType::TotalBatchesLen, 1); total_thread_execute_timings.accumulate(&timings); }) .or_insert(ThreadExecuteTimings { - total_thread_us: execute_batches_time.as_us(), + total_thread_us: execute_batches_us, total_transactions_executed: transaction_count, execute_timings: timings, }); @@ -769,6 +737,9 @@ pub enum BlockstoreProcessorError { #[error("incomplete final fec set")] IncompleteFinalFecSet, + + #[error("invalid retransmitter signature final fec set")] + InvalidRetransmitterSignatureFinalFecSet, } /// Callback for accessing bank state after each slot is confirmed while @@ -1078,6 +1049,7 @@ fn verify_ticks( } #[allow(clippy::too_many_arguments)] +#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] fn confirm_full_slot( blockstore: &Blockstore, bank: &BankWithScheduler, @@ -1684,6 +1656,7 @@ fn confirm_slot_entries( } // Special handling required for processing the entries in slot 0 +#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] fn process_bank_0( bank0: &BankWithScheduler, blockstore: &Blockstore, @@ -1726,7 +1699,7 @@ fn process_next_slots( blockstore: &Blockstore, leader_schedule_cache: &LeaderScheduleCache, pending_slots: &mut Vec<(SlotMeta, Bank, Hash)>, - halt_at_slot: Option, + opts: &ProcessOptions, ) -> result::Result<(), BlockstoreProcessorError> { if meta.next_slots.is_empty() { return Ok(()); @@ -1734,10 +1707,13 @@ fn process_next_slots( // This is a fork point if there are multiple children, create a new child bank for each fork for next_slot in &meta.next_slots { - let skip_next_slot = halt_at_slot - .map(|halt_at_slot| *next_slot > halt_at_slot) - .unwrap_or(false); - if skip_next_slot { + if opts + .halt_at_slot + .is_some_and(|halt_at_slot| *next_slot > halt_at_slot) + { + continue; + } + if !opts.allow_dead_slots && blockstore.is_dead(*next_slot) { continue; } @@ -1816,7 +1792,7 @@ fn load_frozen_forks( blockstore, leader_schedule_cache, &mut pending_slots, - opts.halt_at_slot, + opts, )?; let on_halt_store_hash_raw_data_for_debug = opts.on_halt_store_hash_raw_data_for_debug; @@ -1995,7 +1971,7 @@ fn load_frozen_forks( blockstore, leader_schedule_cache, &mut pending_slots, - opts.halt_at_slot, + opts, )?; } } else if on_halt_store_hash_raw_data_for_debug { @@ -2134,10 +2110,9 @@ pub enum TransactionStatusMessage { pub struct TransactionStatusBatch { pub bank: Arc, pub transactions: Vec, - pub execution_results: Vec>, + pub commit_results: Vec, pub balances: TransactionBalancesSet, pub token_balances: TransactionTokenBalancesSet, - pub rent_debits: Vec, pub transaction_indexes: Vec, } @@ -2151,10 +2126,9 @@ impl TransactionStatusSender { &self, bank: Arc, transactions: Vec, - 
execution_results: Vec, + commit_results: Vec, balances: TransactionBalancesSet, token_balances: TransactionTokenBalancesSet, - rent_debits: Vec, transaction_indexes: Vec, ) { let slot = bank.slot(); @@ -2164,16 +2138,9 @@ impl TransactionStatusSender { .send(TransactionStatusMessage::Batch(TransactionStatusBatch { bank, transactions, - execution_results: execution_results - .into_iter() - .map(|result| match result { - TransactionExecutionResult::Executed { details, .. } => Some(details), - TransactionExecutionResult::NotExecuted(_) => None, - }) - .collect(), + commit_results, balances, token_balances, - rent_debits, transaction_indexes, })) { @@ -2265,16 +2232,24 @@ pub mod tests { solana_sdk::{ account::{AccountSharedData, WritableAccount}, epoch_schedule::EpochSchedule, + fee::FeeDetails, hash::Hash, instruction::{Instruction, InstructionError}, native_token::LAMPORTS_PER_SOL, pubkey::Pubkey, + rent_debits::RentDebits, signature::{Keypair, Signer}, system_instruction::SystemError, system_transaction, transaction::{Transaction, TransactionError}, }, - solana_svm::transaction_processor::ExecutionRecordingConfig, + solana_svm::{ + transaction_commit_result::CommittedTransaction, + transaction_execution_result::{ + TransactionExecutionDetails, TransactionLoadedAccountsStats, + }, + transaction_processor::ExecutionRecordingConfig, + }, solana_vote::vote_account::VoteAccount, solana_vote_program::{ self, @@ -3007,7 +2982,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(2); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let keypair = Keypair::new(); let slot_entries = create_ticks(genesis_config.ticks_per_slot, 1, genesis_config.hash()); let tx = system_transaction::transfer( @@ -3172,7 +3147,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1000); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); @@ -3209,7 +3184,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1000); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -3269,7 +3244,7 @@ pub mod tests { mint_keypair, .. 
} = create_genesis_config(1000); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -3419,12 +3394,11 @@ pub mod tests { let mock_program_id = solana_sdk::pubkey::new_rand(); - let bank = Bank::new_with_mockup_builtin_for_tests( + let (bank, _bank_forks) = Bank::new_with_mockup_builtin_for_tests( &genesis_config, mock_program_id, MockBuiltinOk::vm, - ) - .0; + ); let tx = Transaction::new_signed_with_payer( &[Instruction::new_with_bincode( @@ -3463,12 +3437,11 @@ pub mod tests { let mut bankhash_err = None; (0..get_instruction_errors().len()).for_each(|err| { - let bank = Bank::new_with_mockup_builtin_for_tests( + let (bank, _bank_forks) = Bank::new_with_mockup_builtin_for_tests( &genesis_config, mock_program_id, MockBuiltinErr::vm, - ) - .0; + ); let tx = Transaction::new_signed_with_payer( &[Instruction::new_with_bincode( @@ -3504,7 +3477,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1000); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -3598,7 +3571,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1000); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -3644,7 +3617,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1_000_000_000); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); const NUM_TRANSFERS_PER_ENTRY: usize = 8; const NUM_TRANSFERS: usize = NUM_TRANSFERS_PER_ENTRY * 32; @@ -3711,7 +3684,7 @@ pub mod tests { .. } = create_genesis_config((num_accounts + 1) as u64 * initial_lamports); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut keypairs: Vec = vec![]; @@ -3778,7 +3751,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1000); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let keypair3 = Keypair::new(); @@ -3840,7 +3813,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(11_000); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let pubkey = solana_sdk::pubkey::new_rand(); bank.transfer(1_000, &mint_keypair, &pubkey).unwrap(); assert_eq!(bank.transaction_count(), 1); @@ -3881,7 +3854,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(11_000); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let keypair1 = Keypair::new(); let keypair2 = Keypair::new(); let success_tx = system_transaction::transfer( @@ -4181,7 +4154,7 @@ pub mod tests { mint_keypair, .. 
} = create_genesis_config(100); - let bank0 = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank0, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let genesis_hash = genesis_config.hash(); let keypair = Keypair::new(); @@ -4245,7 +4218,7 @@ pub mod tests { mint_keypair, .. } = create_genesis_config(1_000_000_000); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let present_account_key = Keypair::new(); let present_account = AccountSharedData::new(1, 10, &Pubkey::default()); @@ -4269,13 +4242,7 @@ pub mod tests { ); let txs = vec![account_not_found_tx, invalid_blockhash_tx]; let batch = bank.prepare_batch_for_tests(txs); - let ( - TransactionResults { - fee_collection_results, - .. - }, - _balances, - ) = batch.bank().load_execute_and_commit_transactions( + let (commit_results, _) = batch.bank().load_execute_and_commit_transactions( &batch, MAX_PROCESSING_AGE, false, @@ -4283,7 +4250,7 @@ pub mod tests { &mut ExecuteTimings::default(), None, ); - let (err, signature) = get_first_error(&batch, fee_collection_results).unwrap(); + let (err, signature) = get_first_error(&batch, &commit_results).unwrap(); assert_eq!(err.unwrap_err(), TransactionError::AccountNotFound); assert_eq!(signature, account_not_found_sig); } @@ -4714,9 +4681,8 @@ pub mod tests { .. } = create_genesis_config(100 * LAMPORTS_PER_SOL); let genesis_hash = genesis_config.hash(); - let bank = BankWithScheduler::new_without_scheduler( - Bank::new_with_bank_forks_for_tests(&genesis_config).0, - ); + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let bank = BankWithScheduler::new_without_scheduler(bank); let replay_tx_thread_pool = create_thread_pool(1); let mut timing = ConfirmationTiming::default(); let mut progress = ConfirmationProgress::new(genesis_hash); @@ -5111,33 +5077,30 @@ pub mod tests { .unwrap() .set_limits(u64::MAX, block_limit, u64::MAX); let txs = vec![tx.clone(), tx]; - let results = vec![ - TransactionExecutionResult::Executed { - details: TransactionExecutionDetails { + let commit_results = vec![ + Ok(CommittedTransaction { + loaded_account_stats: TransactionLoadedAccountsStats { + loaded_accounts_data_size: actual_loaded_accounts_data_size, + loaded_accounts_count: 2, + }, + execution_details: TransactionExecutionDetails { status: Ok(()), log_messages: None, inner_instructions: None, - fee_details: solana_sdk::fee::FeeDetails::default(), return_data: None, executed_units: actual_execution_cu, accounts_data_len_delta: 0, }, - programs_modified_by_tx: HashMap::new(), - }, - TransactionExecutionResult::NotExecuted(TransactionError::AccountNotFound), - ]; - let loaded_accounts_stats = vec![ - Ok(TransactionLoadedAccountsStats { - loaded_accounts_data_size: actual_loaded_accounts_data_size, - loaded_accounts_count: 2 - }); - 2 + fee_details: FeeDetails::default(), + rent_debits: RentDebits::default(), + }), + Err(TransactionError::AccountNotFound), ]; - assert!(check_block_cost_limits(&bank, &loaded_accounts_stats, &results, &txs).is_ok()); + assert!(check_block_cost_limits(&bank, &commit_results, &txs).is_ok()); assert_eq!( Err(TransactionError::WouldExceedMaxBlockCostLimit), - check_block_cost_limits(&bank, &loaded_accounts_stats, &results, &txs) + check_block_cost_limits(&bank, &commit_results, &txs) ); } } diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index f2b7a84ed1e2ca..814ec2b5bf303a 100644 --- a/ledger/src/shred.rs +++ 
b/ledger/src/shred.rs @@ -786,6 +786,22 @@ pub mod layout { .ok_or(Error::InvalidPayloadSize(shred.len())) } + pub fn is_retransmitter_signed_variant(shred: &[u8]) -> Result { + match get_shred_variant(shred)? { + ShredVariant::LegacyCode | ShredVariant::LegacyData => Ok(false), + ShredVariant::MerkleCode { + proof_size: _, + chained: _, + resigned, + } => Ok(resigned), + ShredVariant::MerkleData { + proof_size: _, + chained: _, + resigned, + } => Ok(resigned), + } + } + pub(crate) fn set_retransmitter_signature( shred: &mut [u8], signature: &Signature, diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index 656dbb83725f23..9374e93770ba90 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -8,7 +8,7 @@ use { itertools::izip, log::*, solana_accounts_db::utils::create_accounts_run_and_snapshot_dirs, - solana_client::{connection_cache::ConnectionCache, rpc_client::RpcClient}, + solana_client::connection_cache::ConnectionCache, solana_core::{ consensus::tower_storage::FileTowerStorage, validator::{Validator, ValidatorConfig, ValidatorStartProgress}, @@ -19,6 +19,7 @@ use { gossip_service::discover_cluster, }, solana_ledger::{create_new_tmp_ledger, shred::Shred}, + solana_rpc_client::rpc_client::RpcClient, solana_runtime::{ genesis_utils::{ create_genesis_config_with_vote_accounts_and_cluster_type, GenesisConfigInfo, diff --git a/local-cluster/src/validator_configs.rs b/local-cluster/src/validator_configs.rs index c01106bda4d4e5..a2366eb41489c8 100644 --- a/local-cluster/src/validator_configs.rs +++ b/local-cluster/src/validator_configs.rs @@ -64,6 +64,7 @@ pub fn safe_clone_config(config: &ValidatorConfig) -> ValidatorConfig { banking_trace_dir_byte_limit: config.banking_trace_dir_byte_limit, block_verification_method: config.block_verification_method.clone(), block_production_method: config.block_production_method.clone(), + enable_block_production_forwarding: config.enable_block_production_forwarding, generator_config: config.generator_config.clone(), use_snapshot_archives_at_startup: config.use_snapshot_archives_at_startup, wen_restart_proto_path: config.wen_restart_proto_path.clone(), diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index 95ee93df416b70..62f7fd32435205 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -1794,12 +1794,9 @@ fn test_validator_saves_tower() { // Wait for the first new root let last_replayed_root = loop { - #[allow(deprecated)] - // This test depends on knowing the immediate root, without any delay from the commitment - // service, so the deprecated CommitmentConfig::root() is retained if let Ok(root) = validator_client .rpc_client() - .get_slot_with_commitment(CommitmentConfig::root()) + .get_slot_with_commitment(CommitmentConfig::finalized()) { trace!("current root: {}", root); if root > 0 { @@ -1826,12 +1823,9 @@ fn test_validator_saves_tower() { // Wait for a new root, demonstrating the validator was able to make progress from the older `tower1` let new_root = loop { - #[allow(deprecated)] - // This test depends on knowing the immediate root, without any delay from the commitment - // service, so the deprecated CommitmentConfig::root() is retained if let Ok(root) = validator_client .rpc_client() - .get_slot_with_commitment(CommitmentConfig::root()) + .get_slot_with_commitment(CommitmentConfig::finalized()) { trace!( "current root: {}, last_replayed_root: {}", @@ -1862,12 +1856,9 @@ fn 
test_validator_saves_tower() { // Wait for another new root let new_root = loop { - #[allow(deprecated)] - // This test depends on knowing the immediate root, without any delay from the commitment - // service, so the deprecated CommitmentConfig::root() is retained if let Ok(root) = validator_client .rpc_client() - .get_slot_with_commitment(CommitmentConfig::root()) + .get_slot_with_commitment(CommitmentConfig::finalized()) { trace!("current root: {}, last tower root: {}", root, tower3_root); if root > tower3_root { @@ -4623,6 +4614,7 @@ fn test_slot_hash_expiry() { // #[test] #[serial] +#[ignore] fn test_duplicate_with_pruned_ancestor() { solana_logger::setup_with("info,solana_metrics=off"); solana_core::repair::duplicate_repair_status::set_ancestor_hash_repair_sample_size_for_tests_only(3); @@ -4968,22 +4960,11 @@ fn test_boot_from_local_state() { info!("Waiting for validator2 to create a new bank snapshot..."); let timer = Instant::now(); let bank_snapshot = loop { - if let Some(full_snapshot_slot) = snapshot_utils::get_highest_full_snapshot_archive_slot( - &validator2_config.full_snapshot_archives_dir, - ) { - if let Some(incremental_snapshot_slot) = - snapshot_utils::get_highest_incremental_snapshot_archive_slot( - &validator2_config.incremental_snapshot_archives_dir, - full_snapshot_slot, - ) - { - if let Some(bank_snapshot) = snapshot_utils::get_highest_bank_snapshot_post( - &validator2_config.bank_snapshots_dir, - ) { - if bank_snapshot.slot > incremental_snapshot_slot { - break bank_snapshot; - } - } + if let Some(bank_snapshot) = + snapshot_utils::get_highest_bank_snapshot_post(&validator2_config.bank_snapshots_dir) + { + if bank_snapshot.slot > incremental_snapshot_archive.slot() { + break bank_snapshot; } } assert!( diff --git a/log-collector/Cargo.toml b/log-collector/Cargo.toml new file mode 100644 index 00000000000000..e22c14c09f65e4 --- /dev/null +++ b/log-collector/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "solana-log-collector" +description = "Solana log collector" +documentation = "https://docs.rs/solana-log-collector" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +log = { workspace = true } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/program-runtime/src/log_collector.rs b/log-collector/src/lib.rs similarity index 97% rename from program-runtime/src/log_collector.rs rename to log-collector/src/lib.rs index 0a06bd168df753..612300f8e94bfc 100644 --- a/program-runtime/src/log_collector.rs +++ b/log-collector/src/lib.rs @@ -64,7 +64,7 @@ impl LogCollector { #[macro_export] macro_rules! ic_logger_msg { ($log_collector:expr, $message:expr) => { - $crate::log_collector::log::debug!( + $crate::log::debug!( target: "solana_runtime::message_processor::stable_log", "{}", $message @@ -76,7 +76,7 @@ macro_rules! ic_logger_msg { } }; ($log_collector:expr, $fmt:expr, $($arg:tt)*) => { - $crate::log_collector::log::debug!( + $crate::log::debug!( target: "solana_runtime::message_processor::stable_log", $fmt, $($arg)* diff --git a/measure/src/macros.rs b/measure/src/macros.rs index de1d74ebf1ceec..1dddcbb4074072 100644 --- a/measure/src/macros.rs +++ b/measure/src/macros.rs @@ -1,6 +1,6 @@ /// Measure this expression /// -/// Use `measure!()` when you have an expression that you want to measure. 
`measure!()` will start +/// Use `measure_time!()` when you have an expression that you want to measure. `measure_time!()` will start /// a new [`Measure`], evaluate your expression, stop the [`Measure`], and then return the /// [`Measure`] object along with your expression's return value. /// @@ -12,20 +12,20 @@ /// /// ``` /// // Measure functions -/// # use solana_measure::{measure, measure_us}; +/// # use solana_measure::{measure_time, measure_us}; /// # fn foo() {} /// # fn bar(x: i32) {} /// # fn add(x: i32, y: i32) -> i32 {x + y} -/// let (result, measure) = measure!(foo(), "foo takes no parameters"); -/// let (result, measure) = measure!(bar(42), "bar takes one parameter"); -/// let (result, measure) = measure!(add(1, 2), "add takes two parameters and returns a value"); +/// let (result, measure) = measure_time!(foo(), "foo takes no parameters"); +/// let (result, measure) = measure_time!(bar(42), "bar takes one parameter"); +/// let (result, measure) = measure_time!(add(1, 2), "add takes two parameters and returns a value"); /// let (result, measure_us) = measure_us!(add(1, 2)); /// # assert_eq!(result, 1 + 2); /// ``` /// /// ``` /// // Measure methods -/// # use solana_measure::{measure, measure_us}; +/// # use solana_measure::{measure_time, measure_us}; /// # struct Foo { /// # f: i32, /// # } @@ -35,18 +35,18 @@ /// # } /// # } /// let foo = Foo { f: 42 }; -/// let (result, measure) = measure!(foo.frobnicate(2), "measure methods"); +/// let (result, measure) = measure_time!(foo.frobnicate(2), "measure methods"); /// let (result, measure_us) = measure_us!(foo.frobnicate(2)); /// # assert_eq!(result, 42 * 2); /// ``` /// /// ``` /// // Measure expression blocks -/// # use solana_measure::measure; +/// # use solana_measure::measure_time; /// # fn complex_calculation() -> i32 { 42 } /// # fn complex_transform(x: i32) -> i32 { x + 3 } /// # fn record_result(y: i32) {} -/// let (result, measure) = measure!( +/// let (result, measure) = measure_time!( /// { /// let x = complex_calculation(); /// # assert_eq!(x, 42); @@ -62,13 +62,13 @@ /// /// ``` /// // The `name` parameter is optional -/// # use solana_measure::{measure, measure_us}; +/// # use solana_measure::{measure_time, measure_us}; /// # fn meow() {}; -/// let (result, measure) = measure!(meow()); +/// let (result, measure) = measure_time!(meow()); /// let (result, measure_us) = measure_us!(meow()); /// ``` #[macro_export] -macro_rules! measure { +macro_rules! measure_time { ($val:expr, $name:tt $(,)?) => {{ let mut measure = $crate::measure::Measure::start($name); let result = $val; @@ -76,7 +76,7 @@ macro_rules! 
measure { (result, measure) }}; ($val:expr) => { - measure!($val, "") + measure_time!($val, "") }; } @@ -114,7 +114,7 @@ mod tests { fn test_measure_macro() { // Ensure that the measurement side actually works { - let (_result, measure) = measure!(sleep(Duration::from_secs(1)), "test"); + let (_result, measure) = measure_time!(sleep(Duration::from_secs(1)), "test"); assert!(measure.as_s() >= 0.99f32 && measure.as_s() <= 1.01f32); assert!(measure.as_ms() >= 990 && measure.as_ms() <= 1_010); assert!(measure.as_us() >= 999_000 && measure.as_us() <= 1_010_000); @@ -122,35 +122,35 @@ mod tests { // Ensure that the macro can be called with functions { - let (result, _measure) = measure!(my_multiply(3, 4), "test"); + let (result, _measure) = measure_time!(my_multiply(3, 4), "test"); assert_eq!(result, 3 * 4); - let (result, _measure) = measure!(square(5), "test"); + let (result, _measure) = measure_time!(square(5), "test"); assert_eq!(result, 5 * 5) } // Ensure that the macro can be called with methods { let some_struct = SomeStruct { x: 42 }; - let (result, _measure) = measure!(some_struct.add_to(4), "test"); + let (result, _measure) = measure_time!(some_struct.add_to(4), "test"); assert_eq!(result, 42 + 4); } // Ensure that the macro can be called with blocks { - let (result, _measure) = measure!({ 1 + 2 }, "test"); + let (result, _measure) = measure_time!({ 1 + 2 }, "test"); assert_eq!(result, 3); } // Ensure that the macro can be called with a trailing comma { - let (result, _measure) = measure!(square(5), "test",); + let (result, _measure) = measure_time!(square(5), "test",); assert_eq!(result, 5 * 5) } // Ensure that the macro can be called without a name { - let (result, _measure) = measure!(square(5)); + let (result, _measure) = measure_time!(square(5)); assert_eq!(result, 5 * 5) } } diff --git a/multinode-demo/validator.sh b/multinode-demo/validator.sh index efb7a6afd56ea0..77082f6589245b 100755 --- a/multinode-demo/validator.sh +++ b/multinode-demo/validator.sh @@ -164,9 +164,6 @@ while [[ -n $1 ]]; do elif [[ $1 = --known-validator ]]; then args+=("$1" "$2") shift 2 - elif [[ $1 = --halt-on-known-validators-accounts-hash-mismatch ]]; then - args+=("$1") - shift elif [[ $1 = --max-genesis-archive-unpacked-size ]]; then args+=("$1" "$2") shift 2 diff --git a/net-utils/Cargo.toml b/net-utils/Cargo.toml index 1ba1f95ee4b46f..f1e0f892aa0964 100644 --- a/net-utils/Cargo.toml +++ b/net-utils/Cargo.toml @@ -11,7 +11,7 @@ edition = { workspace = true } [dependencies] bincode = { workspace = true } -clap = { version = "3.1.5", features = ["cargo"] } +clap = { version = "3.1.5", features = ["cargo"], optional = true } crossbeam-channel = { workspace = true } log = { workspace = true } nix = { workspace = true, features = ["socket"] } @@ -19,23 +19,31 @@ rand = { workspace = true } serde = { workspace = true } serde_derive = { workspace = true } socket2 = { workspace = true } -solana-logger = { workspace = true } +solana-logger = { workspace = true, optional = true } solana-sdk = { workspace = true } -solana-version = { workspace = true } -static_assertions = { workspace = true } +solana-version = { workspace = true, optional = true } tokio = { workspace = true, features = ["full"] } url = { workspace = true } +[dev-dependencies] +solana-logger = { workspace = true } + +[features] +default = [] +clap = ["dep:clap", "dep:solana-logger", "dep:solana-version"] + [lib] name = "solana_net_utils" [[bin]] name = "solana-ip-address" path = "src/bin/ip_address.rs" +required-features = ["clap"] [[bin]] name 
= "solana-ip-address-server" path = "src/bin/ip_address_server.rs" +required-features = ["clap"] [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/perf/Cargo.toml b/perf/Cargo.toml index 8b7b4c6cb12747..71a213e7693d5c 100644 --- a/perf/Cargo.toml +++ b/perf/Cargo.toml @@ -26,6 +26,7 @@ solana-frozen-abi-macro = { workspace = true, optional = true } solana-metrics = { workspace = true } solana-rayon-threadlimit = { workspace = true } solana-sdk = { workspace = true } +solana-short-vec = { workspace = true } solana-vote-program = { workspace = true } [target."cfg(target_os = \"linux\")".dependencies] @@ -50,6 +51,7 @@ frozen-abi = [ "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-sdk/frozen-abi", + "solana-short-vec/frozen-abi", "solana-vote-program/frozen-abi", ] diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs index 6078961d42db71..0d29bfe571b81b 100644 --- a/perf/src/sigverify.rs +++ b/perf/src/sigverify.rs @@ -17,9 +17,9 @@ use { hash::Hash, message::{MESSAGE_HEADER_LENGTH, MESSAGE_VERSION_PREFIX}, pubkey::Pubkey, - short_vec::decode_shortu16_len, signature::Signature, }, + solana_short_vec::decode_shortu16_len, std::{convert::TryFrom, mem::size_of}, }; diff --git a/poh/src/poh_recorder.rs b/poh/src/poh_recorder.rs index 3ac35335c75107..f9a22a9c27afb1 100644 --- a/poh/src/poh_recorder.rs +++ b/poh/src/poh_recorder.rs @@ -21,7 +21,7 @@ use { poh::Poh, }, solana_ledger::{blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache}, - solana_measure::{measure, measure_us}, + solana_measure::measure_us, solana_metrics::poh_timing_point::{send_poh_timing_point, PohTimingSender, SlotPohTimingInfo}, solana_runtime::{bank::Bank, installed_scheduler_pool::BankWithScheduler}, solana_sdk::{ @@ -452,15 +452,8 @@ impl PohRecorder { }) } - fn prev_slot_was_mine(&self, my_pubkey: &Pubkey, current_slot: Slot) -> bool { - if let Some(leader_id) = self - .leader_schedule_cache - .slot_leader_at(current_slot.saturating_sub(1), None) - { - &leader_id == my_pubkey - } else { - false - } + fn start_slot_was_mine(&self, my_pubkey: &Pubkey) -> bool { + self.start_bank.collector_id() == my_pubkey } // Active descendants of the last reset bank that are smaller than the @@ -471,57 +464,63 @@ impl PohRecorder { .any(|pending_slot| *pending_slot < next_slot) } + fn can_skip_grace_ticks(&self, my_pubkey: &Pubkey) -> bool { + let next_tick_height = self.tick_height.saturating_add(1); + let next_slot = self.slot_for_tick_height(next_tick_height); + + if self.start_slot_was_mine(my_pubkey) { + // Building off my own block. No need to wait. + return true; + } + + if self.is_same_fork_as_previous_leader(next_slot) { + // Planning to build off block produced by the leader previous to + // me. Need to wait. + return false; + } + + if !self.is_new_reset_bank_pending(next_slot) { + // No pending blocks from previous leader have been observed. No + // need to wait. + return true; + } + + self.report_pending_fork_was_detected(next_slot); + if !self.delay_leader_block_for_pending_fork { + // Not configured to wait for pending blocks from previous leader. + return true; + } + + // Wait for grace ticks + false + } + fn reached_leader_tick( &self, my_pubkey: &Pubkey, leader_first_tick_height_including_grace_ticks: u64, ) -> bool { - // Check if PoH was reset to run immediately if self.start_tick_height + self.grace_ticks == leader_first_tick_height_including_grace_ticks { + // PoH was reset to run immediately. 
return true; } - // Check if we have finished waiting for grace ticks let target_tick_height = leader_first_tick_height_including_grace_ticks.saturating_sub(1); if self.tick_height >= target_tick_height { + // We have finished waiting for grace ticks. return true; } - // Check if we ticked to our leader slot and can skip grace ticks let ideal_target_tick_height = target_tick_height.saturating_sub(self.grace_ticks); - if self.tick_height >= ideal_target_tick_height { - let next_tick_height = self.tick_height.saturating_add(1); - let next_slot = self.slot_for_tick_height(next_tick_height); - - // If the previous slot was mine, skip grace ticks - if self.prev_slot_was_mine(my_pubkey, next_slot) { - return true; - } - - // If we are not reset to a bank by the previous leader, skip grace - // ticks if there isn't a pending reset bank or we aren't configured - // to apply grace ticks when we detect a pending reset bank. - if !self.is_same_fork_as_previous_leader(next_slot) { - // If there is no pending reset bank, skip grace ticks - if !self.is_new_reset_bank_pending(next_slot) { - return true; - } - - // If we aren't configured to wait for pending forks, skip grace ticks - self.report_pending_fork_was_detected(next_slot); - if !self.delay_leader_block_for_pending_fork { - return true; - } - } - - // Wait for grace ticks + if self.tick_height < ideal_target_tick_height { + // We haven't ticked to our leader slot yet. return false; } - // Keep ticking - false + // We're in the grace tick zone. Check if we can skip grace ticks. + self.can_skip_grace_ticks(my_pubkey) } // Report metrics when poh recorder detects a pending fork that could @@ -565,20 +564,25 @@ impl PohRecorder { let next_tick_height = self.tick_height + 1; let next_poh_slot = self.slot_for_tick_height(next_tick_height); - if let Some(leader_first_tick_height_including_grace_ticks) = + let Some(leader_first_tick_height_including_grace_ticks) = self.leader_first_tick_height_including_grace_ticks - { - if self.reached_leader_tick(my_pubkey, leader_first_tick_height_including_grace_ticks) { - assert!(next_tick_height >= self.start_tick_height); - let poh_slot = next_poh_slot; - let parent_slot = self.start_slot(); - return PohLeaderStatus::Reached { - poh_slot, - parent_slot, - }; - } + else { + // No next leader slot, so no leader slot has been reached. + return PohLeaderStatus::NotReached; + }; + + if !self.reached_leader_tick(my_pubkey, leader_first_tick_height_including_grace_ticks) { + // PoH hasn't ticked far enough yet. 
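+ // (Decision order after this refactor: no scheduled leader tick height =>
+ // NotReached; reached_leader_tick() false => NotReached; otherwise Reached
+ // with poh_slot = slot_for_tick_height(tick_height + 1) and
+ // parent_slot = start_slot().)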
+ return PohLeaderStatus::NotReached; + } + + assert!(next_tick_height >= self.start_tick_height); + let poh_slot = next_poh_slot; + let parent_slot = self.start_slot(); + PohLeaderStatus::Reached { + poh_slot, + parent_slot, } - PohLeaderStatus::NotReached } // returns (leader_first_tick_height_including_grace_ticks, leader_last_tick_height, grace_ticks) given the next @@ -848,20 +852,17 @@ impl PohRecorder { } pub fn tick(&mut self) { - let ((poh_entry, target_time), tick_lock_contention_time) = measure!( - { - let mut poh_l = self.poh.lock().unwrap(); - let poh_entry = poh_l.tick(); - let target_time = if poh_entry.is_some() { - Some(poh_l.target_poh_time(self.target_ns_per_tick)) - } else { - None - }; - (poh_entry, target_time) - }, - "tick_lock_contention", - ); - self.tick_lock_contention_us += tick_lock_contention_time.as_us(); + let ((poh_entry, target_time), tick_lock_contention_us) = measure_us!({ + let mut poh_l = self.poh.lock().unwrap(); + let poh_entry = poh_l.tick(); + let target_time = if poh_entry.is_some() { + Some(poh_l.target_poh_time(self.target_ns_per_tick)) + } else { + None + }; + (poh_entry, target_time) + }); + self.tick_lock_contention_us += tick_lock_contention_us; if let Some(poh_entry) = poh_entry { self.tick_height += 1; @@ -884,24 +885,19 @@ impl PohRecorder { self.tick_height, )); - let (_flush_res, flush_cache_and_tick_time) = - measure!(self.flush_cache(true), "flush_cache_and_tick"); - self.flush_cache_tick_us += flush_cache_and_tick_time.as_us(); - - let sleep_time = measure!( - { - let target_time = target_time.unwrap(); - // sleep is not accurate enough to get a predictable time. - // Kernel can not schedule the thread for a while. - while Instant::now() < target_time { - // TODO: a caller could possibly desire to reset or record while we're spinning here - std::hint::spin_loop(); - } - }, - "poh_sleep", - ) - .1; - self.total_sleep_us += sleep_time.as_us(); + let (_flush_res, flush_cache_and_tick_us) = measure_us!(self.flush_cache(true)); + self.flush_cache_tick_us += flush_cache_and_tick_us; + + let (_, sleep_us) = measure_us!({ + let target_time = target_time.unwrap(); + // sleep is not accurate enough to get a predictable time. + // Kernel can not schedule the thread for a while. 
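+ // (The spin below trades CPU for tick-timing precision. A sketch of the
+ // alternative this comment argues against, using the same names:
+ //     std::thread::sleep(target_time.saturating_duration_since(Instant::now()));
+ // sleep() only guarantees a *minimum* delay, hence the busy-wait.)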
+ while Instant::now() < target_time { + // TODO: a caller could possibly desire to reset or record while we're spinning here + std::hint::spin_loop(); + } + }); + self.total_sleep_us += sleep_us; } } @@ -949,13 +945,12 @@ impl PohRecorder { // cannot be generated by `record()` assert!(!transactions.is_empty(), "No transactions provided"); - let ((), report_metrics_time) = measure!(self.report_metrics(bank_slot), "report_metrics"); - self.report_metrics_us += report_metrics_time.as_us(); + let ((), report_metrics_us) = measure_us!(self.report_metrics(bank_slot)); + self.report_metrics_us += report_metrics_us; loop { - let (flush_cache_res, flush_cache_time) = - measure!(self.flush_cache(false), "flush_cache"); - self.flush_cache_no_tick_us += flush_cache_time.as_us(); + let (flush_cache_res, flush_cache_us) = measure_us!(self.flush_cache(false)); + self.flush_cache_no_tick_us += flush_cache_us; flush_cache_res?; let working_bank = self @@ -966,30 +961,26 @@ impl PohRecorder { return Err(PohRecorderError::MaxHeightReached); } - let (mut poh_lock, poh_lock_time) = measure!(self.poh.lock().unwrap(), "poh_lock"); - self.record_lock_contention_us += poh_lock_time.as_us(); + let (mut poh_lock, poh_lock_us) = measure_us!(self.poh.lock().unwrap()); + self.record_lock_contention_us += poh_lock_us; - let (record_mixin_res, record_mixin_time) = - measure!(poh_lock.record(mixin), "record_mixin"); - self.record_us += record_mixin_time.as_us(); + let (record_mixin_res, record_mixin_us) = measure_us!(poh_lock.record(mixin)); + self.record_us += record_mixin_us; drop(poh_lock); if let Some(poh_entry) = record_mixin_res { let num_transactions = transactions.len(); - let (send_entry_res, send_entry_time) = measure!( - { - let entry = Entry { - num_hashes: poh_entry.num_hashes, - hash: poh_entry.hash, - transactions, - }; - let bank_clone = working_bank.bank.clone(); - self.sender.send((bank_clone, (entry, self.tick_height))) - }, - "send_poh_entry", - ); - self.send_entry_us += send_entry_time.as_us(); + let (send_entry_res, send_entry_us) = measure_us!({ + let entry = Entry { + num_hashes: poh_entry.num_hashes, + hash: poh_entry.hash, + transactions, + }; + let bank_clone = working_bank.bank.clone(); + self.sender.send((bank_clone, (entry, self.tick_height))) + }); + self.send_entry_us += send_entry_us; send_entry_res?; let starting_transaction_index = working_bank.transaction_index.map(|transaction_index| { @@ -1874,17 +1865,38 @@ mod tests { fn test_reached_leader_tick() { solana_logger::setup(); - let ledger_path = get_tmp_ledger_path_auto_delete!(); - let blockstore = Blockstore::open(ledger_path.path()) - .expect("Expected to be able to open database ledger"); + // Setup genesis. let GenesisConfigInfo { genesis_config, validator_pubkey, .. } = create_genesis_config(2); + + // Setup start bank. let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let prev_hash = bank.last_blockhash(); - let leader_schedule_cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank)); + + // Setup leader schedule. 
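+ // (The fixed schedule built below gives leaders A, B, and C
+ // NUM_CONSECUTIVE_LEADER_SLOTS slots each, in order, so the tick arithmetic
+ // in the assertions that follow is deterministic rather than stake-derived.)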
+ let leader_a_pubkey = validator_pubkey; + let leader_b_pubkey = Pubkey::new_unique(); + let leader_c_pubkey = Pubkey::new_unique(); + let consecutive_leader_slots = NUM_CONSECUTIVE_LEADER_SLOTS as usize; + let mut slot_leaders = Vec::with_capacity(consecutive_leader_slots * 3); + slot_leaders.extend(std::iter::repeat(leader_a_pubkey).take(consecutive_leader_slots)); + slot_leaders.extend(std::iter::repeat(leader_b_pubkey).take(consecutive_leader_slots)); + slot_leaders.extend(std::iter::repeat(leader_c_pubkey).take(consecutive_leader_slots)); + let mut leader_schedule_cache = LeaderScheduleCache::new_from_bank(&bank); + let fixed_schedule = solana_ledger::leader_schedule::FixedSchedule { + leader_schedule: Arc::new( + solana_ledger::leader_schedule::LeaderSchedule::new_from_schedule(slot_leaders), + ), + }; + leader_schedule_cache.set_fixed_leader_schedule(Some(fixed_schedule)); + + // Setup PoH recorder. + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()) + .expect("Expected to be able to open database ledger"); let (mut poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new( 0, prev_hash, @@ -1892,48 +1904,82 @@ None, bank.ticks_per_slot(), Arc::new(blockstore), - &leader_schedule_cache, + &Arc::new(leader_schedule_cache), &PohConfig::default(), Arc::new(AtomicBool::default()), ); + let grace_ticks = bank.ticks_per_slot() * MAX_GRACE_SLOTS; + poh_recorder.grace_ticks = grace_ticks; - assert!(poh_recorder.reached_leader_tick(&validator_pubkey, 0)); + // Setup leader start ticks. + let ticks_in_leader_slot_set = bank.ticks_per_slot() * NUM_CONSECUTIVE_LEADER_SLOTS; + let leader_a_start_tick = 0; + let leader_b_start_tick = leader_a_start_tick + ticks_in_leader_slot_set; + let leader_c_start_tick = leader_b_start_tick + ticks_in_leader_slot_set; - let grace_ticks = bank.ticks_per_slot() * MAX_GRACE_SLOTS; - let new_tick_height = NUM_CONSECUTIVE_LEADER_SLOTS * bank.ticks_per_slot(); - for _ in 0..new_tick_height { + // True, because we've ticked through all the grace ticks + assert!(poh_recorder.reached_leader_tick(&leader_a_pubkey, leader_a_start_tick)); + + // True, because from Leader A's perspective, the previous slot was also + // its own slot, and validators don't give grace periods if the previous + // slot was also their own. + assert!( + poh_recorder.reached_leader_tick(&leader_a_pubkey, leader_a_start_tick + grace_ticks) + ); + + // False, because we haven't ticked to our slot yet. + assert!(!poh_recorder.reached_leader_tick(&leader_b_pubkey, leader_b_start_tick)); + + // Tick through Leader A's slots. + for _ in 0..ticks_in_leader_slot_set { poh_recorder.tick(); } - poh_recorder.grace_ticks = grace_ticks; + // False, because the Poh was reset on slot 0, which is a block produced + // by the previous leader, A, so a grace period must be given. + assert!( + !poh_recorder.reached_leader_tick(&leader_b_pubkey, leader_b_start_tick + grace_ticks) + ); - // False, because the Poh was reset on slot 0, which - // is a block produced by the previous leader, so a grace - // period must be given - let test_validator_pubkey = Pubkey::new_unique(); - assert!(!poh_recorder - .reached_leader_tick(&test_validator_pubkey, new_tick_height + grace_ticks)); + // Tick through Leader B's grace period. + for _ in 0..grace_ticks { + poh_recorder.tick(); + } + + // True, because we've ticked through all the grace ticks + assert!( + poh_recorder.reached_leader_tick(&leader_b_pubkey, leader_b_start_tick + grace_ticks) + ); - // Tick `NUM_CONSECUTIVE_LEADER_SLOTS` more times - let new_tick_height = 2 * NUM_CONSECUTIVE_LEADER_SLOTS * bank.ticks_per_slot(); - for _ in 0..new_tick_height { + // Tick through Leader B's remaining slots. + for _ in 0..ticks_in_leader_slot_set - grace_ticks { poh_recorder.tick(); } - // True, because - // 1) the Poh was reset on slot 0 - // 2) Our slot starts at 2 * NUM_CONSECUTIVE_LEADER_SLOTS, which means - // none of the previous leader's `NUM_CONSECUTIVE_LEADER_SLOTS` were slots - // this Poh built on (previous leader was on different fork). Thus, skip the - // grace period. + + // True, because Leader C is not building on any of Leader B's slots. + // The Poh was reset on slot 0, built by Leader A. + assert!( + poh_recorder.reached_leader_tick(&leader_c_pubkey, leader_c_start_tick + grace_ticks) + ); + + // Add some active (partially received) blocks to the active fork. + let active_descendants = vec![NUM_CONSECUTIVE_LEADER_SLOTS]; + poh_recorder.update_start_bank_active_descendants(&active_descendants); + + // True, because there are pending blocks from Leader B on the active + // fork, but the config to delay for these is not set. assert!( - poh_recorder.reached_leader_tick(&test_validator_pubkey, new_tick_height + grace_ticks) + poh_recorder.reached_leader_tick(&leader_c_pubkey, leader_c_start_tick + grace_ticks) ); - // From the bootstrap validator's perspective, it should have reached - // the tick because the previous slot was also it's own slot (all slots - // belong to the bootstrap leader b/c it's the only staked node!), and - // validators don't give grace periods if previous slot was also their own. - assert!(poh_recorder.reached_leader_tick(&validator_pubkey, new_tick_height + grace_ticks)); + // Flip the config to delay for pending blocks. + poh_recorder.delay_leader_block_for_pending_fork = true; + + // False, because there are pending blocks from Leader B on the active + // fork, and the config to delay for these is set. + assert!( + !poh_recorder.reached_leader_tick(&leader_c_pubkey, leader_c_start_tick + grace_ticks) + ); } #[test] @@ -2040,7 +2086,7 @@ mod tests { ); // Check that if prev slot was mine, grace ticks are ignored assert_eq!( - poh_recorder.reached_leader_slot(&validator_pubkey), + poh_recorder.reached_leader_slot(bank1.collector_id()), PohLeaderStatus::Reached { poh_slot: 3, parent_slot: 1 diff --git a/poh/src/poh_service.rs b/poh/src/poh_service.rs index 8cd0d40266b37d..231ec623fd454a 100644 --- a/poh/src/poh_service.rs +++ b/poh/src/poh_service.rs @@ -5,7 +5,7 @@ use { crossbeam_channel::Receiver, log::*, solana_entry::poh::Poh, - solana_measure::{measure, measure::Measure}, + solana_measure::{measure::Measure, measure_us}, solana_sdk::poh_config::PohConfig, std::{ sync::{ @@ -260,21 +260,16 @@ impl PohService { record.mixin, std::mem::take(&mut record.transactions), ); - // what do we do on failure here? Ignore for now.
- let (_send_res, send_record_result_time) = - measure!(record.sender.send(res), "send_record_result"); - timing.total_send_record_result_us += send_record_result_time.as_us(); - timing.num_hashes += 1; // note: may have also ticked inside record + let (send_res, send_record_result_us) = measure_us!(record.sender.send(res)); + debug_assert!(send_res.is_ok(), "Record wasn't sent."); - let new_record_result = record_receiver.try_recv(); - match new_record_result { - Ok(new_record) => { - // we already have second request to record, so record again while we still have the mutex - record = new_record; - } - Err(_) => { - break; - } + timing.total_send_record_result_us += send_record_result_us; + timing.num_hashes += 1; // note: may have also ticked inside record + if let Ok(new_record) = record_receiver.try_recv() { + // we already have a second request to record, so record again while we still have the mutex + record = new_record; + } else { + break; } } record_time.stop(); @@ -400,7 +395,7 @@ mod tests { fn test_poh_service() { solana_logger::setup(); let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(2); - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let prev_hash = bank.last_blockhash(); let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()) diff --git a/program-runtime/Cargo.toml b/program-runtime/Cargo.toml index c714945419ca9b..0f9a22f7cd8d4a 100644 --- a/program-runtime/Cargo.toml +++ b/program-runtime/Cargo.toml @@ -12,7 +12,6 @@ edition = { workspace = true } [dependencies] base64 = { workspace = true } bincode = { workspace = true } -eager = { workspace = true } enum-iterator = { workspace = true } itertools = { workspace = true } libc = { workspace = true } @@ -25,9 +24,11 @@ serde = { workspace = true } solana-compute-budget = { workspace = true } solana-frozen-abi = { workspace = true, optional = true } solana-frozen-abi-macro = { workspace = true, optional = true } +solana-log-collector = { workspace = true } solana-measure = { workspace = true } solana-metrics = { workspace = true } solana-sdk = { workspace = true } +solana-timings = { workspace = true } solana-type-overrides = { workspace = true } solana-vote = { workspace = true } solana_rbpf = { workspace = true } @@ -56,4 +57,4 @@ frozen-abi = [ "solana-compute-budget/frozen-abi", "solana-sdk/frozen-abi", ] -shuttle-test = ["solana-type-overrides/shuttle-test"] +shuttle-test = ["solana-type-overrides/shuttle-test", "solana_rbpf/shuttle-test"] diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index 05404aa51c2c98..5c732463c9c353 100644 --- a/program-runtime/src/invoke_context.rs +++ b/program-runtime/src/invoke_context.rs @@ -1,16 +1,14 @@ use { crate::{ - ic_msg, loaded_programs::{ ProgramCacheEntry, ProgramCacheEntryType, ProgramCacheForTxBatch, ProgramRuntimeEnvironments, }, - log_collector::LogCollector, stable_log, sysvar_cache::SysvarCache, - timings::{ExecuteDetailsTimings, ExecuteTimings}, }, solana_compute_budget::compute_budget::ComputeBudget, + solana_log_collector::{ic_msg, LogCollector}, solana_measure::measure::Measure, solana_rbpf::{ ebpf::MM_HEAP_START, @@ -36,6 +34,7 @@ use { IndexOfAccount, InstructionAccount, TransactionAccount, TransactionContext, }, }, + solana_timings::{ExecuteDetailsTimings, ExecuteTimings}, solana_type_overrides::sync::{atomic::Ordering, Arc}, solana_vote::vote_account::VoteAccountsHashMap, std::{
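The import hunks above finish extracting `LogCollector` plus the `ic_msg!`/`ic_logger_msg!` macros into the standalone `solana-log-collector` crate, and `ExecuteTimings`/`ExecuteDetailsTimings` into `solana-timings`; call sites only change their `use` paths. A small usage sketch of the collector, illustrative only and assuming the extracted crate keeps the old module's API (`new_ref`, `log`, and the `get_recorded_content` accessor):

```rust
// Illustrative sketch, not part of the patch.
use solana_log_collector::LogCollector;

fn main() {
    // Shared, size-limited buffer of log lines, handed around as Rc<RefCell<_>>.
    let collector = LogCollector::new_ref();
    collector.borrow_mut().log("Program log: hello");
    assert_eq!(
        collector.borrow().get_recorded_content().to_vec(),
        vec!["Program log: hello".to_string()]
    );
}
```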
@@ -476,6 +475,7 @@ impl<'a> InvokeContext<'a> { let process_executable_chain_time = Measure::start("process_executable_chain_time"); let builtin_id = { + debug_assert!(instruction_context.get_number_of_program_accounts() <= 1); let borrowed_root_account = instruction_context .try_borrow_program_account(self.transaction_context, 0) .map_err(|_| InstructionError::UnsupportedProgramId)?; @@ -677,6 +677,7 @@ macro_rules! with_mock_invoke_context { ) => { use { solana_compute_budget::compute_budget::ComputeBudget, + solana_log_collector::LogCollector, solana_sdk::{ account::ReadableAccount, feature_set::FeatureSet, hash::Hash, sysvar::rent::Rent, transaction_context::TransactionContext, @@ -685,7 +686,6 @@ macro_rules! with_mock_invoke_context { $crate::{ invoke_context::{EnvironmentConfig, InvokeContext}, loaded_programs::ProgramCacheForTxBatch, - log_collector::LogCollector, sysvar_cache::SysvarCache, }, }; @@ -768,9 +768,11 @@ pub fn mock_process_instruction { /// Statistics counters pub stats: ProgramCacheStats, /// Reference to the block store - pub fork_graph: Option<Arc<RwLock<FG>>>, + pub fork_graph: Option<Weak<RwLock<FG>>>, /// Coordinates TX batches waiting for others to complete their task during cooperative loading pub loading_task_waiter: Arc<LoadingTaskWaiter>, } @@ -825,7 +824,7 @@ impl<FG: ForkGraph> ProgramCache<FG> { } } - pub fn set_fork_graph(&mut self, fork_graph: Arc<RwLock<FG>>) { + pub fn set_fork_graph(&mut self, fork_graph: Weak<RwLock<FG>>) { self.fork_graph = Some(fork_graph); } @@ -948,6 +947,7 @@ impl<FG: ForkGraph> ProgramCache<FG> { error!("Program cache doesn't have fork graph."); return; }; + let fork_graph = fork_graph.upgrade().unwrap(); let Ok(fork_graph) = fork_graph.read() else { error!("Failed to lock fork graph for reading."); return; @@ -1059,7 +1059,8 @@ impl<FG: ForkGraph> ProgramCache<FG> { is_first_round: bool, ) -> Option<(Pubkey, u64)> { debug_assert!(self.fork_graph.is_some()); - let locked_fork_graph = self.fork_graph.as_ref().unwrap().read().unwrap(); + let fork_graph = self.fork_graph.as_ref().unwrap().upgrade().unwrap(); + let locked_fork_graph = fork_graph.read().unwrap(); let mut cooperative_loading_task = None; match &self.index { IndexImplementation::V1 { @@ -1166,6 +1167,8 @@ self.fork_graph .as_ref() .unwrap() + .upgrade() + .unwrap() .read() .unwrap() .relationship(loaded_program.deployment_slot, slot), @@ -2026,7 +2029,7 @@ mod tests { relation: BlockRelation::Unrelated, })); - cache.set_fork_graph(fork_graph); + cache.set_fork_graph(Arc::downgrade(&fork_graph)); cache.prune(0, 0); assert!(cache.get_flattened_entries_for_tests().is_empty()); @@ -2039,7 +2042,7 @@ relation: BlockRelation::Ancestor, })); - cache.set_fork_graph(fork_graph); + cache.set_fork_graph(Arc::downgrade(&fork_graph)); cache.prune(0, 0); assert!(cache.get_flattened_entries_for_tests().is_empty()); @@ -2052,7 +2055,7 @@ relation: BlockRelation::Descendant, })); - cache.set_fork_graph(fork_graph); + cache.set_fork_graph(Arc::downgrade(&fork_graph)); cache.prune(0, 0); assert!(cache.get_flattened_entries_for_tests().is_empty()); @@ -2064,7 +2067,7 @@ let fork_graph = Arc::new(RwLock::new(TestForkGraph { relation: BlockRelation::Unknown, })); - cache.set_fork_graph(fork_graph); + cache.set_fork_graph(Arc::downgrade(&fork_graph)); cache.prune(0, 0); assert!(cache.get_flattened_entries_for_tests().is_empty()); @@ -2081,7 +2084,7 @@ relation: BlockRelation::Ancestor, })); - cache.set_fork_graph(fork_graph); + cache.set_fork_graph(Arc::downgrade(&fork_graph)); let program1 = Pubkey::new_unique(); cache.assign_program(program1, new_test_entry(10, 10));
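The `Arc` to `Weak` switch above breaks the strong reference cycle between the program cache and the fork graph: the cache now holds a non-owning handle, and every reader upgrades it before locking, as the new `.upgrade().unwrap()` call sites show. A self-contained sketch of that ownership pattern, illustrative only and using a stand-in graph type rather than the real agave types:

```rust
// Illustrative sketch, not part of the patch.
use std::sync::{Arc, RwLock, Weak};

struct ForkGraph; // stand-in for the real fork graph

struct Cache {
    fork_graph: Option<Weak<RwLock<ForkGraph>>>,
}

fn main() {
    let graph = Arc::new(RwLock::new(ForkGraph));
    let cache = Cache {
        fork_graph: Some(Arc::downgrade(&graph)),
    };

    // Readers upgrade the weak handle before locking, mirroring the
    // new `.upgrade().unwrap().read()` call sites in the patch.
    let strong = cache.fork_graph.as_ref().unwrap().upgrade().unwrap();
    assert!(strong.read().is_ok());

    // Once the owner drops its Arc, the cache no longer keeps the graph alive.
    drop(strong);
    drop(graph);
    assert!(cache.fork_graph.as_ref().unwrap().upgrade().is_none());
}
```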
@@ -2171,7 +2174,8 @@ loading_slot: Slot, keys: &[Pubkey], ) -> Vec<(Pubkey, (ProgramCacheMatchCriteria, u64))> { - let locked_fork_graph = cache.fork_graph.as_ref().unwrap().read().unwrap(); + let fork_graph = cache.fork_graph.as_ref().unwrap().upgrade().unwrap(); + let locked_fork_graph = fork_graph.read().unwrap(); let entries = cache.get_flattened_entries_for_tests(); keys.iter() .filter_map(|key| { @@ -2239,7 +2243,7 @@ fork_graph.insert_fork(&[0, 5, 11, 25, 27]); let fork_graph = Arc::new(RwLock::new(fork_graph)); - cache.set_fork_graph(fork_graph); + cache.set_fork_graph(Arc::downgrade(&fork_graph)); let program1 = Pubkey::new_unique(); cache.assign_program(program1, new_test_entry(0, 1)); @@ -2434,7 +2438,7 @@ fork_graph.insert_fork(&[0, 5, 11, 25, 27]); let fork_graph = Arc::new(RwLock::new(fork_graph)); - cache.set_fork_graph(fork_graph); + cache.set_fork_graph(Arc::downgrade(&fork_graph)); let program1 = Pubkey::new_unique(); cache.assign_program(program1, new_test_entry(0, 1)); @@ -2491,7 +2495,7 @@ fork_graph.insert_fork(&[0, 5, 11, 25, 27]); let fork_graph = Arc::new(RwLock::new(fork_graph)); - cache.set_fork_graph(fork_graph); + cache.set_fork_graph(Arc::downgrade(&fork_graph)); let program1 = Pubkey::new_unique(); cache.assign_program(program1, new_test_entry(0, 1)); @@ -2547,7 +2551,7 @@ let mut cache = new_mock_cache::<TestForkGraphSpecific>(); let fork_graph = TestForkGraphSpecific::default(); let fork_graph = Arc::new(RwLock::new(fork_graph)); - cache.set_fork_graph(fork_graph); + cache.set_fork_graph(Arc::downgrade(&fork_graph)); let program1 = Pubkey::new_unique(); let mut missing = vec![(program1, (ProgramCacheMatchCriteria::NoCriteria, 1))]; @@ -2619,7 +2623,7 @@ fork_graph.insert_fork(&[0, 10, 20]); fork_graph.insert_fork(&[0, 5]); let fork_graph = Arc::new(RwLock::new(fork_graph)); - cache.set_fork_graph(fork_graph); + cache.set_fork_graph(Arc::downgrade(&fork_graph)); let program1 = Pubkey::new_unique(); cache.assign_program(program1, new_test_entry(0, 1)); @@ -2659,7 +2663,7 @@ fork_graph.insert_fork(&[0, 10, 20]); fork_graph.insert_fork(&[0, 5, 6]); let fork_graph = Arc::new(RwLock::new(fork_graph)); - cache.set_fork_graph(fork_graph); + cache.set_fork_graph(Arc::downgrade(&fork_graph)); let program1 = Pubkey::new_unique(); cache.assign_program(program1, new_test_entry(0, 1)); diff --git a/program-runtime/src/mem_pool.rs b/program-runtime/src/mem_pool.rs index 398de5bfa64c57..a92ec1603ddb27 100644 --- a/program-runtime/src/mem_pool.rs +++ b/program-runtime/src/mem_pool.rs @@ -1,7 +1,7 @@ use { solana_compute_budget::{ compute_budget::{MAX_CALL_DEPTH, MAX_INSTRUCTION_STACK_DEPTH, STACK_FRAME_SIZE}, - compute_budget_processor::{MAX_HEAP_FRAME_BYTES, MIN_HEAP_FRAME_BYTES}, + compute_budget_limits::{MAX_HEAP_FRAME_BYTES, MIN_HEAP_FRAME_BYTES}, }, solana_rbpf::{aligned_memory::AlignedMemory, ebpf::HOST_ALIGN}, std::array, diff --git a/program-runtime/src/stable_log.rs b/program-runtime/src/stable_log.rs index 748c4d7639214a..134c7f4f883f59 100644 --- a/program-runtime/src/stable_log.rs +++ b/program-runtime/src/stable_log.rs @@ -3,9 +3,9 @@ //! The format of these log messages should not be modified to avoid breaking downstream consumers //!
of program logging use { - crate::{ic_logger_msg, log_collector::LogCollector}, base64::{prelude::BASE64_STANDARD, Engine}, itertools::Itertools, + solana_log_collector::{ic_logger_msg, LogCollector}, solana_sdk::pubkey::Pubkey, std::{cell::RefCell, rc::Rc}, }; diff --git a/program-runtime/src/sysvar_cache.rs b/program-runtime/src/sysvar_cache.rs index 1a270484410531..79124bd93f379e 100644 --- a/program-runtime/src/sysvar_cache.rs +++ b/program-runtime/src/sysvar_cache.rs @@ -7,7 +7,7 @@ use { instruction::InstructionError, pubkey::Pubkey, sysvar::{ - clock::Clock, epoch_rewards::EpochRewards, epoch_schedule::EpochSchedule, + self, clock::Clock, epoch_rewards::EpochRewards, epoch_schedule::EpochSchedule, last_restart_slot::LastRestartSlot, rent::Rent, slot_hashes::SlotHashes, stake_history::StakeHistory, Sysvar, SysvarId, }, @@ -48,7 +48,60 @@ pub struct SysvarCache { recent_blockhashes: Option<RecentBlockhashes>, } +// declare_deprecated_sysvar_id doesn't support const. +// These sysvars are going away anyway. +const FEES_ID: Pubkey = solana_sdk::pubkey!("SysvarFees111111111111111111111111111111111"); +const RECENT_BLOCKHASHES_ID: Pubkey = + solana_sdk::pubkey!("SysvarRecentB1ockHashes11111111111111111111"); + impl SysvarCache { + /// Overwrite a sysvar. For testing purposes only. + #[allow(deprecated)] + pub fn set_sysvar_for_tests<T: Sysvar + SysvarId>(&mut self, sysvar: &T) { + let data = bincode::serialize(sysvar).expect("Failed to serialize sysvar."); + let sysvar_id = T::id(); + match sysvar_id { + sysvar::clock::ID => { + self.clock = Some(data); + } + sysvar::epoch_rewards::ID => { + self.epoch_rewards = Some(data); + } + sysvar::epoch_schedule::ID => { + self.epoch_schedule = Some(data); + } + FEES_ID => { + let fees: Fees = + bincode::deserialize(&data).expect("Failed to deserialize Fees sysvar."); + self.fees = Some(fees); + } + sysvar::last_restart_slot::ID => { + self.last_restart_slot = Some(data); + } + RECENT_BLOCKHASHES_ID => { + let recent_blockhashes: RecentBlockhashes = bincode::deserialize(&data) + .expect("Failed to deserialize RecentBlockhashes sysvar."); + self.recent_blockhashes = Some(recent_blockhashes); + } + sysvar::rent::ID => { + self.rent = Some(data); + } + sysvar::slot_hashes::ID => { + let slot_hashes: SlotHashes = + bincode::deserialize(&data).expect("Failed to deserialize SlotHashes sysvar."); + self.slot_hashes = Some(data); + self.slot_hashes_obj = Some(Arc::new(slot_hashes)); + } + sysvar::stake_history::ID => { + let stake_history: StakeHistory = bincode::deserialize(&data) + .expect("Failed to deserialize StakeHistory sysvar."); + self.stake_history = Some(data); + self.stake_history_obj = Some(Arc::new(stake_history)); + } + _ => panic!("Unrecognized Sysvar ID: {sysvar_id}"), + } + } + // this is exposed for SyscallGetSysvar and should not otherwise be used pub fn sysvar_id_to_buffer(&self, sysvar_id: &Pubkey) -> &Option<Vec<u8>> { if Clock::check_id(sysvar_id) {
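The new `set_sysvar_for_tests` helper serializes the given sysvar and stores it in the matching slot of the cache, special-casing the deprecated `Fees` and `RecentBlockhashes` entries that keep deserialized copies. A usage sketch, illustrative only; it assumes `SysvarCache` implements `Default` and that the existing `get_clock` accessor is used to read the value back:

```rust
// Illustrative sketch, not part of the patch.
use {
    solana_program_runtime::sysvar_cache::SysvarCache,
    solana_sdk::sysvar::clock::Clock,
};

fn main() {
    let mut cache = SysvarCache::default();
    let clock = Clock {
        slot: 42,
        ..Clock::default()
    };
    // Overwrite the cached Clock, then read it back through the cache.
    cache.set_sysvar_for_tests(&clock);
    assert_eq!(cache.get_clock().unwrap().slot, 42);
}
```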
diff --git a/program-test/Cargo.toml b/program-test/Cargo.toml index 8c4acfa970edb4..b31e0330a34f92 100644 --- a/program-test/Cargo.toml +++ b/program-test/Cargo.toml @@ -24,11 +24,13 @@ solana-banks-server = { workspace = true } solana-bpf-loader-program = { workspace = true } solana-compute-budget = { workspace = true } solana-inline-spl = { workspace = true } +solana-log-collector = { workspace = true } solana-logger = { workspace = true } solana-program-runtime = { workspace = true } solana-runtime = { workspace = true } solana-sdk = { workspace = true } solana-svm = { workspace = true } +solana-timings = { workspace = true } solana-vote-program = { workspace = true } solana_rbpf = { workspace = true } thiserror = { workspace = true } diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index f3c06d6b0fe94b..be8e763ef60f62 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -16,9 +16,9 @@ use { solana_banks_server::banks_server::start_local_server, solana_bpf_loader_program::serialization::serialize_parameters, solana_compute_budget::compute_budget::ComputeBudget, + solana_log_collector::ic_msg, solana_program_runtime::{ - ic_msg, invoke_context::BuiltinFunctionWithContext, loaded_programs::ProgramCacheEntry, - stable_log, timings::ExecuteTimings, + invoke_context::BuiltinFunctionWithContext, loaded_programs::ProgramCacheEntry, stable_log, }, solana_runtime::{ accounts_background_service::{AbsRequestSender, SnapshotRequestKind}, @@ -47,6 +47,7 @@ use { stable_layout::stable_instruction::StableInstruction, sysvar::{Sysvar, SysvarId}, }, + solana_timings::ExecuteTimings, solana_vote_program::vote_state::{self, VoteState, VoteStateVersions}, std::{ cell::RefCell, @@ -545,13 +546,6 @@ impl ProgramTest { self.transaction_account_lock_limit = Some(transaction_account_lock_limit); } - /// Override the SBF compute budget - #[allow(deprecated)] - #[deprecated(since = "1.8.0", note = "please use `set_compute_max_units` instead")] - pub fn set_bpf_compute_max_units(&mut self, bpf_compute_max_units: u64) { - self.set_compute_max_units(bpf_compute_max_units); - } - /// Add an account to the test environment's genesis config. pub fn add_genesis_account(&mut self, address: Pubkey, account: Account) { self.genesis_accounts diff --git a/program-test/tests/stake.rs b/program-test/tests/stake.rs deleted file mode 100644 index 1ad7a756b32631..00000000000000 --- a/program-test/tests/stake.rs +++ /dev/null @@ -1,193 +0,0 @@ -#![allow(clippy::arithmetic_side_effects)] - -mod setup; - -use { - setup::{setup_stake, setup_vote}, - solana_program_test::ProgramTest, - solana_sdk::{ - instruction::InstructionError, - signature::{Keypair, Signer}, - stake::{instruction as stake_instruction, instruction::StakeError}, - transaction::{Transaction, TransactionError}, - }, - test_case::test_case, -}; - -#[derive(PartialEq)] -enum PendingStakeActivationTestFlag { - MergeActive, - MergeInactive, - NoMerge, -} - -#[test_case(PendingStakeActivationTestFlag::NoMerge; "test that redelegate stake then deactivate it then withdraw from it is not permitted")] -#[test_case(PendingStakeActivationTestFlag::MergeActive; "test that redelegate stake then merge it with another active stake then deactivate it then withdraw from it is not permitted")] -#[test_case(PendingStakeActivationTestFlag::MergeInactive; "test that redelegate stake then merge it with another inactive stake then deactivate it then withdraw from it is not permitted")] -#[tokio::test] -async fn test_stake_redelegation_pending_activation(merge_flag: PendingStakeActivationTestFlag) { - let program_test = ProgramTest::default(); - let mut context = program_test.start_with_context().await; - - // 1.
create first vote accounts - context.warp_to_slot(100).unwrap(); - let vote_address = setup_vote(&mut context).await; - - // 1.1 advance to normal epoch - let first_normal_slot = context.genesis_config().epoch_schedule.first_normal_slot; - let slots_per_epoch = context.genesis_config().epoch_schedule.slots_per_epoch; - let mut current_slot = first_normal_slot + slots_per_epoch; - context.warp_to_slot(current_slot).unwrap(); - context.warp_forward_force_reward_interval_end().unwrap(); - - // 2. create first stake account and delegate to first vote_address - let stake_lamports = 50_000_000_000; - let user_keypair = Keypair::new(); - let stake_address = - setup_stake(&mut context, &user_keypair, &vote_address, stake_lamports).await; - - // 2.1 advance to new epoch so that the stake is activated. - current_slot += slots_per_epoch; - context.warp_to_slot(current_slot).unwrap(); - context.warp_forward_force_reward_interval_end().unwrap(); - - // 2.2 stake is now activated and can't withdrawal directly - let transaction = Transaction::new_signed_with_payer( - &[stake_instruction::withdraw( - &stake_address, - &user_keypair.pubkey(), - &solana_sdk::pubkey::new_rand(), - 1, - None, - )], - Some(&context.payer.pubkey()), - &vec![&context.payer, &user_keypair], - context.last_blockhash, - ); - let r = context.banks_client.process_transaction(transaction).await; - assert_eq!( - r.unwrap_err().unwrap(), - TransactionError::InstructionError(0, InstructionError::InsufficientFunds) - ); - - // 3. create 2nd vote account - let vote_address2 = setup_vote(&mut context).await; - - // 3.1 relegate stake account to 2nd vote account, which creates 2nd stake account - let stake_keypair2 = Keypair::new(); - let stake_address2 = stake_keypair2.pubkey(); - let transaction = Transaction::new_signed_with_payer( - &stake_instruction::redelegate( - &stake_address, - &user_keypair.pubkey(), - &vote_address2, - &stake_address2, - ), - Some(&context.payer.pubkey()), - &vec![&context.payer, &user_keypair, &stake_keypair2], - context.last_blockhash, - ); - context - .banks_client - .process_transaction(transaction) - .await - .unwrap(); - - if merge_flag != PendingStakeActivationTestFlag::NoMerge { - // 3.2 create 3rd to-merge stake account - let stake_address3 = - setup_stake(&mut context, &user_keypair, &vote_address2, stake_lamports).await; - - // 3.2.1 deactivate merge stake account - if merge_flag == PendingStakeActivationTestFlag::MergeInactive { - let transaction = Transaction::new_signed_with_payer( - &[stake_instruction::deactivate_stake( - &stake_address3, - &user_keypair.pubkey(), - )], - Some(&context.payer.pubkey()), - &vec![&context.payer, &user_keypair], - context.last_blockhash, - ); - context - .banks_client - .process_transaction(transaction) - .await - .unwrap(); - } - - // 3.2.2 merge 3rd stake account to 2nd stake account. However, it should not clear the pending stake activation flags on stake_account2. - let transaction = Transaction::new_signed_with_payer( - &stake_instruction::merge(&stake_address2, &stake_address3, &user_keypair.pubkey()), - Some(&context.payer.pubkey()), - &vec![&context.payer, &user_keypair], - context.last_blockhash, - ); - context - .banks_client - .process_transaction(transaction) - .await - .unwrap(); - } - - // 3.3 deactivate 2nd stake account should fail because of pending stake activation. 
- let transaction = Transaction::new_signed_with_payer( - &[stake_instruction::deactivate_stake( - &stake_address2, - &user_keypair.pubkey(), - )], - Some(&context.payer.pubkey()), - &vec![&context.payer, &user_keypair], - context.last_blockhash, - ); - let r = context.banks_client.process_transaction(transaction).await; - assert_eq!( - r.unwrap_err().unwrap(), - TransactionError::InstructionError( - 0, - InstructionError::Custom( - StakeError::RedelegatedStakeMustFullyActivateBeforeDeactivationIsPermitted as u32 - ) - ) - ); - - // 3.4 withdraw from 2nd stake account should also fail because of pending stake activation. - let transaction = Transaction::new_signed_with_payer( - &[stake_instruction::withdraw( - &stake_address2, - &user_keypair.pubkey(), - &solana_sdk::pubkey::new_rand(), - 1, - None, - )], - Some(&context.payer.pubkey()), - &vec![&context.payer, &user_keypair], - context.last_blockhash, - ); - let r = context.banks_client.process_transaction(transaction).await; - assert_eq!( - r.unwrap_err().unwrap(), - TransactionError::InstructionError(0, InstructionError::InsufficientFunds) - ); - - // 4. advance to new epoch so that the 2nd stake account is fully activated - current_slot += slots_per_epoch; - context.warp_to_slot(current_slot).unwrap(); - context.warp_forward_force_reward_interval_end().unwrap(); - - // 4.1 Now deactivate 2nd stake account should succeed because there is no pending stake activation. - let transaction = Transaction::new_signed_with_payer( - &[stake_instruction::deactivate_stake( - &stake_address2, - &user_keypair.pubkey(), - )], - Some(&context.payer.pubkey()), - &vec![&context.payer, &user_keypair], - context.last_blockhash, - ); - context - .banks_client - .process_transaction(transaction) - .await - .unwrap(); -} diff --git a/programs/address-lookup-table/Cargo.toml b/programs/address-lookup-table/Cargo.toml index f11b127b541898..f721fcef11430d 100644 --- a/programs/address-lookup-table/Cargo.toml +++ b/programs/address-lookup-table/Cargo.toml @@ -19,6 +19,7 @@ solana-program = { workspace = true } thiserror = { workspace = true } [target.'cfg(not(target_os = "solana"))'.dependencies] +solana-log-collector = { workspace = true } solana-program-runtime = { workspace = true } solana-sdk = { workspace = true } diff --git a/programs/address-lookup-table/src/lib.rs b/programs/address-lookup-table/src/lib.rs index 737c35e4c4b2f4..e146dd184b5385 100644 --- a/programs/address-lookup-table/src/lib.rs +++ b/programs/address-lookup-table/src/lib.rs @@ -3,13 +3,3 @@ #[cfg(not(target_os = "solana"))] pub mod processor; - -#[deprecated( - since = "1.17.0", - note = "Please use `solana_program::address_lookup_table` instead" -)] -pub use solana_program::address_lookup_table::{ - error, instruction, - program::{check_id, id, ID}, - state, -}; diff --git a/programs/address-lookup-table/src/processor.rs b/programs/address-lookup-table/src/processor.rs index 7becdbd047e7c9..6274606228b4bd 100644 --- a/programs/address-lookup-table/src/processor.rs +++ b/programs/address-lookup-table/src/processor.rs @@ -1,5 +1,6 @@ use { - solana_program_runtime::{declare_process_instruction, ic_msg, invoke_context::InvokeContext}, + solana_log_collector::ic_msg, + solana_program_runtime::{declare_process_instruction, invoke_context::InvokeContext}, solana_sdk::{ address_lookup_table::{ instruction::ProgramInstruction, diff --git a/programs/bpf_loader/Cargo.toml b/programs/bpf_loader/Cargo.toml index 148c0c92333995..ace6f2ed9c0b83 100644 --- a/programs/bpf_loader/Cargo.toml +++ 
b/programs/bpf_loader/Cargo.toml @@ -15,12 +15,16 @@ byteorder = { workspace = true } libsecp256k1 = { workspace = true } log = { workspace = true } scopeguard = { workspace = true } +solana-bn254 = { workspace = true } solana-compute-budget = { workspace = true } solana-curve25519 = { workspace = true } +solana-log-collector = { workspace = true } solana-measure = { workspace = true } solana-poseidon = { workspace = true } +solana-program-memory = { workspace = true } solana-program-runtime = { workspace = true } solana-sdk = { workspace = true } +solana-timings = { workspace = true } solana-type-overrides = { workspace = true } solana_rbpf = { workspace = true } thiserror = { workspace = true } @@ -41,4 +45,8 @@ name = "solana_bpf_loader_program" targets = ["x86_64-unknown-linux-gnu"] [features] -shuttle-test = ["solana-type-overrides/shuttle-test", "solana-program-runtime/shuttle-test"] +shuttle-test = [ + "solana-type-overrides/shuttle-test", + "solana-program-runtime/shuttle-test", + "solana_rbpf/shuttle-test" +] diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 2c2b16245c88ce..58ef907746f5ac 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -6,15 +6,14 @@ pub mod syscalls; use { solana_compute_budget::compute_budget::MAX_INSTRUCTION_STACK_DEPTH, + solana_log_collector::{ic_logger_msg, ic_msg, LogCollector}, solana_measure::measure::Measure, solana_program_runtime::{ - ic_logger_msg, ic_msg, invoke_context::{BpfAllocator, InvokeContext, SerializedAccountMetadata, SyscallContext}, loaded_programs::{ LoadProgramMetrics, ProgramCacheEntry, ProgramCacheEntryOwner, ProgramCacheEntryType, DELAY_VISIBILITY_SLOT_OFFSET, }, - log_collector::LogCollector, mem_pool::VmMemoryPool, stable_log, sysvar_cache::get_sysvar_with_account_check, @@ -3578,7 +3577,7 @@ mod tests { program_account = accounts.get(3).unwrap().clone(); process_instruction( &loader_id, - &[0, 1], + &[1], &[], vec![ (programdata_address, programdata_account.clone()), diff --git a/programs/bpf_loader/src/syscalls/mod.rs b/programs/bpf_loader/src/syscalls/mod.rs index 304359a67cf895..e70a266f340917 100644 --- a/programs/bpf_loader/src/syscalls/mod.rs +++ b/programs/bpf_loader/src/syscalls/mod.rs @@ -12,11 +12,16 @@ pub use self::{ }; #[allow(deprecated)] use { + solana_bn254::prelude::{ + alt_bn128_addition, alt_bn128_multiplication, alt_bn128_pairing, AltBn128Error, + ALT_BN128_ADDITION_OUTPUT_LEN, ALT_BN128_MULTIPLICATION_OUTPUT_LEN, + ALT_BN128_PAIRING_ELEMENT_LEN, ALT_BN128_PAIRING_OUTPUT_LEN, + }, solana_compute_budget::compute_budget::ComputeBudget, + solana_log_collector::{ic_logger_msg, ic_msg}, solana_poseidon as poseidon, - solana_program_runtime::{ - ic_logger_msg, ic_msg, invoke_context::InvokeContext, stable_log, timings::ExecuteTimings, - }, + solana_program_memory::is_nonoverlapping, + solana_program_runtime::{invoke_context::InvokeContext, stable_log}, solana_rbpf::{ declare_builtin_function, memory_region::{AccessType, MemoryMapping}, @@ -25,11 +30,6 @@ use { }, solana_sdk::{ account_info::AccountInfo, - alt_bn128::prelude::{ - alt_bn128_addition, alt_bn128_multiplication, alt_bn128_pairing, AltBn128Error, - ALT_BN128_ADDITION_OUTPUT_LEN, ALT_BN128_MULTIPLICATION_OUTPUT_LEN, - ALT_BN128_PAIRING_ELEMENT_LEN, ALT_BN128_PAIRING_OUTPUT_LEN, - }, big_mod_exp::{big_mod_exp, BigModExpParams}, blake3, bpf_loader, bpf_loader_deprecated, bpf_loader_upgradeable, entrypoint::{BPF_ALIGN_OF_U128, MAX_PERMITTED_DATA_INCREASE, SUCCESS}, @@ -43,14 +43,12 @@ use { 
enable_partitioned_epoch_reward, enable_poseidon_syscall, error_on_syscall_bpf_function_hash_collisions, get_sysvar_syscall_enabled, last_restart_slot_sysvar, reject_callx_r10, remaining_compute_units_syscall_enabled, - switch_to_new_elf_parser, }, hash::{Hash, Hasher}, instruction::{AccountMeta, InstructionError, ProcessedSiblingInstruction}, keccak, native_loader, precompiles::is_precompile, program::MAX_RETURN_DATA, - program_stubs::is_nonoverlapping, pubkey::{Pubkey, PubkeyError, MAX_SEEDS, MAX_SEED_LEN, PUBKEY_BYTES}, secp256k1_recover::{ Secp256k1RecoverError, SECP256K1_PUBLIC_KEY_LENGTH, SECP256K1_SIGNATURE_LENGTH, @@ -58,6 +56,7 @@ use { sysvar::{Sysvar, SysvarId}, transaction_context::{IndexOfAccount, InstructionAccount}, }, + solana_timings::ExecuteTimings, solana_type_overrides::sync::Arc, std::{ alloc::Layout, @@ -307,7 +306,6 @@ pub fn create_program_runtime_environment_v1<'a>( enable_sbpf_v1: true, enable_sbpf_v2: false, optimize_rodata: false, - new_elf_parser: feature_set.is_active(&switch_to_new_elf_parser::id()), aligned_memory_mapping: !feature_set.is_active(&bpf_account_data_direct_mapping::id()), // Warning, do not use `Config::default()` so that configuration here is explicit. }; @@ -1574,7 +1572,7 @@ declare_builtin_function!( _arg5: u64, memory_mapping: &mut MemoryMapping, ) -> Result<u64, Error> { - use solana_sdk::alt_bn128::prelude::{ALT_BN128_ADD, ALT_BN128_MUL, ALT_BN128_PAIRING}; + use solana_bn254::prelude::{ALT_BN128_ADD, ALT_BN128_MUL, ALT_BN128_PAIRING}; let budget = invoke_context.get_compute_budget(); let (cost, output): (u64, usize) = match group_op { ALT_BN128_ADD => ( @@ -1841,7 +1839,7 @@ declare_builtin_function!( _arg5: u64, memory_mapping: &mut MemoryMapping, ) -> Result<u64, Error> { - use solana_sdk::alt_bn128::compression::prelude::{ + use solana_bn254::compression::prelude::{ alt_bn128_g1_compress, alt_bn128_g1_decompress, alt_bn128_g2_compress, alt_bn128_g2_decompress, ALT_BN128_G1_COMPRESS, ALT_BN128_G1_DECOMPRESS, ALT_BN128_G2_COMPRESS, ALT_BN128_G2_DECOMPRESS, G1, G1_COMPRESSED, G2, G2_COMPRESSED, diff --git a/programs/config/Cargo.toml b/programs/config/Cargo.toml index d81d84e8c4505e..48bd3157c6e0a6 100644 --- a/programs/config/Cargo.toml +++ b/programs/config/Cargo.toml @@ -14,8 +14,10 @@ bincode = { workspace = true } chrono = { workspace = true, features = ["default", "serde"] } serde = { workspace = true } serde_derive = { workspace = true } +solana-log-collector = { workspace = true } solana-program-runtime = { workspace = true } solana-sdk = { workspace = true } +solana-short-vec = { workspace = true } [dev-dependencies] solana-logger = { workspace = true } diff --git a/programs/config/src/config_processor.rs b/programs/config/src/config_processor.rs index fd4b806567180d..fdbe830f0e3e16 100644 --- a/programs/config/src/config_processor.rs +++ b/programs/config/src/config_processor.rs @@ -3,7 +3,8 @@ use { crate::ConfigKeys, bincode::deserialize, - solana_program_runtime::{declare_process_instruction, ic_msg}, + solana_log_collector::ic_msg, + solana_program_runtime::declare_process_instruction, solana_sdk::{ instruction::InstructionError, program_utils::limited_deserialize, pubkey::Pubkey, transaction_context::IndexOfAccount, diff --git a/programs/config/src/lib.rs b/programs/config/src/lib.rs index 731682b9e8d198..3aabed96ef32c9 100644 --- a/programs/config/src/lib.rs +++ b/programs/config/src/lib.rs @@ -16,8 +16,8 @@ use { solana_sdk::{ account::{Account, AccountSharedData}, pubkey::Pubkey, - short_vec, }, + solana_short_vec as short_vec, }; pub trait
ConfigState: serde::Serialize + Default { diff --git a/programs/loader-v4/Cargo.toml b/programs/loader-v4/Cargo.toml index 79c078f29785b5..952b3bf1cfe511 100644 --- a/programs/loader-v4/Cargo.toml +++ b/programs/loader-v4/Cargo.toml @@ -11,6 +11,7 @@ edition = { workspace = true } [dependencies] log = { workspace = true } solana-compute-budget = { workspace = true } +solana-log-collector = { workspace = true } solana-measure = { workspace = true } solana-program-runtime = { workspace = true } solana-sdk = { workspace = true } @@ -28,4 +29,4 @@ name = "solana_loader_v4_program" targets = ["x86_64-unknown-linux-gnu"] [features] -shuttle-test = ["solana-type-overrides/shuttle-test", "solana-program-runtime/shuttle-test"] +shuttle-test = ["solana-type-overrides/shuttle-test", "solana-program-runtime/shuttle-test", "solana_rbpf/shuttle-test"] diff --git a/programs/loader-v4/src/lib.rs b/programs/loader-v4/src/lib.rs index 161f72a478cc07..f44031f6f15021 100644 --- a/programs/loader-v4/src/lib.rs +++ b/programs/loader-v4/src/lib.rs @@ -1,14 +1,13 @@ use { solana_compute_budget::compute_budget::ComputeBudget, + solana_log_collector::{ic_logger_msg, LogCollector}, solana_measure::measure::Measure, solana_program_runtime::{ - ic_logger_msg, invoke_context::InvokeContext, loaded_programs::{ LoadProgramMetrics, ProgramCacheEntry, ProgramCacheEntryType, DELAY_VISIBILITY_SLOT_OFFSET, }, - log_collector::LogCollector, stable_log, }, solana_rbpf::{ @@ -85,7 +84,6 @@ pub fn create_program_runtime_environment_v2<'a>( enable_sbpf_v1: false, enable_sbpf_v2: true, optimize_rodata: true, - new_elf_parser: true, aligned_memory_mapping: true, // Warning, do not use `Config::default()` so that configuration here is explicit. }; @@ -1591,7 +1589,7 @@ mod tests { &[0, 1, 2, 3], transaction_accounts.clone(), &[(1, false, true)], - Err(InstructionError::InvalidAccountOwner), + Err(InstructionError::UnsupportedProgramId), ); // Error: Program is uninitialized diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index 2851a373c75521..16a475c9d77ac0 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -79,7 +79,7 @@ version = "2.1.0" dependencies = [ "agave-geyser-plugin-interface", "chrono", - "clap 2.33.3", + "clap", "console", "core_affinity", "crossbeam-channel", @@ -376,9 +376,9 @@ checksum = "9ad284aeb45c13f2fb4f084de4a420ebf447423bdf9386c0540ce33cb3ef4b8c" [[package]] name = "arrayref" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" +checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" [[package]] name = "arrayvec" @@ -495,9 +495,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.80" +version = "0.1.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" +checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", @@ -847,9 +847,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.16.1" +version = "1.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b236fc92302c97ed75b38da1f4917b5cdda4984745740f153a5d3059e48d725e" +checksum = "102087e286b4677862ea56cf8fc58bb2cdfa8725c40ffb80fe3a008eb7f2fc83" dependencies = [ "bytemuck_derive", ] @@ -879,9 +879,9 @@ checksum = 
"1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "bzip2" @@ -1012,36 +1012,11 @@ dependencies = [ "atty", "bitflags 1.3.2", "strsim 0.8.0", - "textwrap 0.11.0", + "textwrap", "unicode-width", "vec_map", ] -[[package]] -name = "clap" -version = "3.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123" -dependencies = [ - "atty", - "bitflags 1.3.2", - "clap_lex", - "indexmap 1.9.3", - "once_cell", - "strsim 0.10.0", - "termcolor", - "textwrap 0.16.1", -] - -[[package]] -name = "clap_lex" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" -dependencies = [ - "os_str_bytes", -] - [[package]] name = "combine" version = "3.8.1" @@ -1928,17 +1903,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "goblin" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c955ab4e0ad8c843ea653a3d143048b87490d9be56bd7132a435c2407846ac8f" -dependencies = [ - "log", - "plain", - "scroll", -] - [[package]] name = "h2" version = "0.3.26" @@ -1951,7 +1915,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.2.6", + "indexmap 2.3.0", "slab", "tokio", "tokio-util 0.7.1", @@ -2118,9 +2082,9 @@ checksum = "3c1ad908cc71012b7bea4d0c53ba96a8cba9962f048fa68d143376143d863b7a" [[package]] name = "hyper" -version = "0.14.29" +version = "0.14.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ "bytes", "futures-channel", @@ -2280,9 +2244,9 @@ dependencies = [ [[package]] name = "index_list" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb725b6505e51229de32027e0cfcd9db29da4d89156f9747b0a5195643fa3e1" +checksum = "4e6ba961c14e98151cd6416dd3685efe786a94c38bc1a535c06ceff0a1600813" [[package]] name = "indexmap" @@ -2296,9 +2260,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.6" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +checksum = "de3fc2e30ba82dd1b3911c8de1ffc143c74a914a14e99514d7637e3099df5ea0" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -2751,9 +2715,9 @@ dependencies = [ [[package]] name = "lz4" -version = "1.25.0" +version = "1.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6eab492fe7f8651add23237ea56dbf11b3c4ff762ab83d40a47f11433421f91" +checksum = "958b4caa893816eea05507c20cfe47574a43d9a697138a7872990bba8a0ece68" dependencies = [ "libc", "lz4-sys", @@ -2761,9 +2725,9 @@ dependencies = [ [[package]] name = "lz4-sys" -version = "1.9.5" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9764018d143cc854c9f17f0b907de70f14393b1f502da6375dce70f00514eb3" +checksum = "109de74d5d2353660401699a4174a4ff23fcc649caf553df71933c7fb45ad868" dependencies = [ 
"cc", "libc", @@ -3109,18 +3073,18 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" +checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" dependencies = [ "num_enum_derive", ] [[package]] name = "num_enum_derive" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" +checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", @@ -3166,9 +3130,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.64" +version = "0.10.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" +checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" dependencies = [ "bitflags 2.6.0", "cfg-if 1.0.0", @@ -3207,9 +3171,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.102" +version = "0.9.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" +checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" dependencies = [ "cc", "libc", @@ -3237,12 +3201,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "os_str_bytes" -version = "6.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" - [[package]] name = "parity-tokio-ipc" version = "0.9.0" @@ -3452,12 +3410,6 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05da548ad6865900e60eaba7f589cc0783590a92e940c26953ff81ddbab2d677" -[[package]] -name = "plain" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" - [[package]] name = "polyval" version = "0.6.2" @@ -4167,20 +4119,6 @@ name = "scroll" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04c565b551bafbef4157586fa379538366e4385d42082f255bfd96e4fe8519da" -dependencies = [ - "scroll_derive", -] - -[[package]] -name = "scroll_derive" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdbda6ac5cd1321e724fa9cee216f3a61885889b896f073b8f82322789c5250e" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] [[package]] name = "sct" @@ -4232,9 +4170,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" dependencies = [ "serde_derive", ] @@ -4250,9 +4188,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.204" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" dependencies = [ 
"proc-macro2", "quote", @@ -4261,11 +4199,12 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.119" +version = "1.0.122" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8eddb61f0697cc3989c5d64b452f5488e2b8a60fd7d5076a3045076ffef8cb0" +checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] @@ -4310,7 +4249,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.3.0", "itoa", "ryu", "serde", @@ -4545,6 +4484,7 @@ dependencies = [ name = "solana-accounts-db" version = "2.1.0" dependencies = [ + "ahash 0.8.10", "bincode", "blake3", "bv", @@ -4554,7 +4494,7 @@ dependencies = [ "crossbeam-channel", "dashmap", "index_list", - "indexmap 2.2.6", + "indexmap 2.3.0", "itertools 0.12.1", "lazy_static", "log", @@ -4572,12 +4512,13 @@ dependencies = [ "smallvec", "solana-bucket-map", "solana-inline-spl", + "solana-lattice-hash", "solana-measure", "solana-metrics", "solana-nohash-hasher", "solana-rayon-threadlimit", "solana-sdk", - "solana-svm", + "solana-svm-transaction", "static_assertions", "tar", "tempfile", @@ -4594,12 +4535,20 @@ dependencies = [ "num-derive", "num-traits", "rustc_version", + "solana-log-collector", "solana-program", "solana-program-runtime", "solana-sdk", "thiserror", ] +[[package]] +name = "solana-atomic-u64" +version = "2.1.0" +dependencies = [ + "parking_lot 0.12.2", +] + [[package]] name = "solana-banks-client" version = "2.1.0" @@ -4651,7 +4600,6 @@ dependencies = [ "fnv", "log", "rand 0.8.5", - "rayon", "rustc_version", "serde", "serde_derive", @@ -4659,6 +4607,19 @@ dependencies = [ "solana-sdk", ] +[[package]] +name = "solana-bn254" +version = "2.1.0" +dependencies = [ + "ark-bn254", + "ark-ec", + "ark-ff", + "ark-serialize", + "bytemuck", + "solana-program", + "thiserror", +] + [[package]] name = "solana-bpf-loader-program" version = "2.1.0" @@ -4668,12 +4629,16 @@ dependencies = [ "libsecp256k1 0.6.0", "log", "scopeguard", + "solana-bn254", "solana-compute-budget", "solana-curve25519", + "solana-log-collector", "solana-measure", "solana-poseidon", + "solana-program-memory", "solana-program-runtime", "solana-sdk", + "solana-timings", "solana-type-overrides", "solana_rbpf", "thiserror", @@ -4696,12 +4661,31 @@ dependencies = [ "tempfile", ] +[[package]] +name = "solana-builtins-default-costs" +version = "2.1.0" +dependencies = [ + "ahash 0.8.10", + "lazy_static", + "log", + "rustc_version", + "solana-address-lookup-table-program", + "solana-bpf-loader-program", + "solana-compute-budget-program", + "solana-config-program", + "solana-loader-v4-program", + "solana-sdk", + "solana-stake-program", + "solana-system-program", + "solana-vote-program", +] + [[package]] name = "solana-clap-utils" version = "2.1.0" dependencies = [ "chrono", - "clap 2.33.3", + "clap", "rpassword", "solana-remote-wallet", "solana-sdk", @@ -4732,7 +4716,7 @@ dependencies = [ "Inflector", "base64 0.22.1", "chrono", - "clap 2.33.3", + "clap", "console", "humantime", "indicatif", @@ -4759,7 +4743,7 @@ dependencies = [ "dashmap", "futures 0.3.30", "futures-util", - "indexmap 2.2.6", + "indexmap 2.3.0", "indicatif", "log", "quinn", @@ -4805,8 +4789,10 @@ dependencies = [ "chrono", "serde", "serde_derive", + "solana-log-collector", "solana-program-runtime", "solana-sdk", + "solana-short-vec", ] [[package]] @@ 
-4817,7 +4803,7 @@ dependencies = [ "bincode", "crossbeam-channel", "futures-util", - "indexmap 2.2.6", + "indexmap 2.3.0", "log", "rand 0.8.5", "rayon", @@ -4833,6 +4819,7 @@ name = "solana-core" version = "2.1.0" dependencies = [ "ahash 0.8.10", + "anyhow", "arrayvec", "base64 0.22.1", "bincode", @@ -4864,10 +4851,13 @@ dependencies = [ "serde_derive", "solana-accounts-db", "solana-bloom", + "solana-builtins-default-costs", "solana-client", "solana-compute-budget", + "solana-connection-cache", "solana-cost-model", "solana-entry", + "solana-fee", "solana-geyser-plugin-manager", "solana-gossip", "solana-ledger", @@ -4876,17 +4866,19 @@ dependencies = [ "solana-net-utils", "solana-perf", "solana-poh", - "solana-program-runtime", "solana-quic-client", "solana-rayon-threadlimit", "solana-rpc", "solana-rpc-client-api", "solana-runtime", + "solana-runtime-transaction", "solana-sanitize", "solana-sdk", "solana-send-transaction-service", + "solana-short-vec", "solana-streamer", "solana-svm", + "solana-timings", "solana-tpu-client", "solana-transaction-status", "solana-turbine", @@ -4913,16 +4905,11 @@ dependencies = [ "lazy_static", "log", "rustc_version", - "solana-address-lookup-table-program", - "solana-bpf-loader-program", + "solana-builtins-default-costs", "solana-compute-budget", - "solana-compute-budget-program", - "solana-config-program", - "solana-loader-v4-program", "solana-metrics", + "solana-runtime-transaction", "solana-sdk", - "solana-stake-program", - "solana-system-program", "solana-vote-program", ] @@ -4937,6 +4924,13 @@ dependencies = [ "thiserror", ] +[[package]] +name = "solana-decode-error" +version = "2.1.0" +dependencies = [ + "num-traits", +] + [[package]] name = "solana-define-syscall" version = "2.1.0" @@ -4979,7 +4973,7 @@ version = "2.1.0" dependencies = [ "bincode", "byteorder 1.5.0", - "clap 2.33.3", + "clap", "crossbeam-channel", "log", "serde", @@ -4995,6 +4989,14 @@ dependencies = [ "tokio", ] +[[package]] +name = "solana-fee" +version = "2.1.0" +dependencies = [ + "solana-sdk", + "solana-svm-transaction", +] + [[package]] name = "solana-genesis-utils" version = "2.1.0" @@ -5038,10 +5040,10 @@ dependencies = [ "assert_matches", "bincode", "bv", - "clap 2.33.3", + "clap", "crossbeam-channel", "flate2", - "indexmap 2.2.6", + "indexmap 2.3.0", "itertools 0.12.1", "log", "lru", @@ -5065,9 +5067,11 @@ dependencies = [ "solana-net-utils", "solana-perf", "solana-rayon-threadlimit", + "solana-rpc-client", "solana-runtime", "solana-sanitize", "solana-sdk", + "solana-short-vec", "solana-streamer", "solana-tpu-client", "solana-version", @@ -5082,8 +5086,16 @@ name = "solana-inline-spl" version = "2.1.0" dependencies = [ "bytemuck", - "rustc_version", - "solana-sdk", + "solana-program", +] + +[[package]] +name = "solana-lattice-hash" +version = "2.1.0" +dependencies = [ + "base64 0.22.1", + "blake3", + "bytemuck", ] [[package]] @@ -5111,6 +5123,7 @@ dependencies = [ "num_cpus", "num_enum", "prost", + "qualifier_attr", "rand 0.8.5", "rand_chacha 0.3.1", "rayon", @@ -5137,6 +5150,7 @@ dependencies = [ "solana-storage-bigtable", "solana-storage-proto", "solana-svm", + "solana-timings", "solana-transaction-status", "solana-vote", "solana-vote-program", @@ -5158,6 +5172,7 @@ version = "2.1.0" dependencies = [ "log", "solana-compute-budget", + "solana-log-collector", "solana-measure", "solana-program-runtime", "solana-sdk", @@ -5165,6 +5180,13 @@ dependencies = [ "solana_rbpf", ] +[[package]] +name = "solana-log-collector" +version = "2.1.0" +dependencies = [ + "log", +] + 
[[package]] name = "solana-logger" version = "2.1.0" @@ -5203,12 +5225,18 @@ dependencies = [ "thiserror", ] +[[package]] +name = "solana-msg" +version = "2.1.0" +dependencies = [ + "solana-define-syscall", +] + [[package]] name = "solana-net-utils" version = "2.1.0" dependencies = [ "bincode", - "clap 3.2.25", "crossbeam-channel", "log", "nix", @@ -5216,10 +5244,7 @@ dependencies = [ "serde", "serde_derive", "socket2 0.5.7", - "solana-logger", "solana-sdk", - "solana-version", - "static_assertions", "tokio", "url 2.5.2", ] @@ -5252,6 +5277,7 @@ dependencies = [ "solana-metrics", "solana-rayon-threadlimit", "solana-sdk", + "solana-short-vec", "solana-vote-program", ] @@ -5285,10 +5311,6 @@ dependencies = [ name = "solana-program" version = "2.1.0" dependencies = [ - "ark-bn254", - "ark-ec", - "ark-ff", - "ark-serialize", "base64 0.22.1", "bincode", "bitflags 2.6.0", @@ -5305,7 +5327,6 @@ dependencies = [ "getrandom 0.2.10", "js-sys", "lazy_static", - "libsecp256k1 0.6.0", "log", "memoffset 0.9.0", "num-bigint 0.4.6", @@ -5319,20 +5340,33 @@ dependencies = [ "serde_derive", "sha2 0.10.8", "sha3 0.10.8", + "solana-atomic-u64", + "solana-decode-error", "solana-define-syscall", + "solana-msg", + "solana-program-memory", "solana-sanitize", "solana-sdk-macro", + "solana-secp256k1-recover", + "solana-short-vec", "thiserror", "wasm-bindgen", ] +[[package]] +name = "solana-program-memory" +version = "2.1.0" +dependencies = [ + "num-traits", + "solana-define-syscall", +] + [[package]] name = "solana-program-runtime" version = "2.1.0" dependencies = [ "base64 0.22.1", "bincode", - "eager", "enum-iterator", "itertools 0.12.1", "libc", @@ -5344,9 +5378,11 @@ dependencies = [ "rustc_version", "serde", "solana-compute-budget", + "solana-log-collector", "solana-measure", "solana-metrics", "solana-sdk", + "solana-timings", "solana-type-overrides", "solana-vote", "solana_rbpf", @@ -5372,11 +5408,13 @@ dependencies = [ "solana-bpf-loader-program", "solana-compute-budget", "solana-inline-spl", + "solana-log-collector", "solana-logger", "solana-program-runtime", "solana-runtime", "solana-sdk", "solana-svm", + "solana-timings", "solana-vote-program", "solana_rbpf", "thiserror", @@ -5563,8 +5601,6 @@ dependencies = [ name = "solana-rpc-client-nonce-utils" version = "2.1.0" dependencies = [ - "clap 2.33.3", - "solana-clap-utils", "solana-rpc-client", "solana-sdk", "thiserror", @@ -5619,6 +5655,7 @@ dependencies = [ "solana-compute-budget-program", "solana-config-program", "solana-cost-model", + "solana-fee", "solana-inline-spl", "solana-loader-v4-program", "solana-measure", @@ -5626,10 +5663,12 @@ dependencies = [ "solana-perf", "solana-program-runtime", "solana-rayon-threadlimit", + "solana-runtime-transaction", "solana-sdk", "solana-stake-program", "solana-svm", "solana-system-program", + "solana-timings", "solana-transaction-status", "solana-version", "solana-vote", @@ -5649,12 +5688,20 @@ dependencies = [ ] [[package]] -name = "solana-sanitize" +name = "solana-runtime-transaction" version = "2.1.0" dependencies = [ + "log", + "rustc_version", + "solana-compute-budget", + "solana-sdk", "thiserror", ] +[[package]] +name = "solana-sanitize" +version = "2.1.0" + [[package]] name = "solana-sbf-programs" version = "2.1.0" @@ -5672,17 +5719,21 @@ dependencies = [ "solana-bpf-loader-program", "solana-cli-output", "solana-compute-budget", + "solana-fee", "solana-ledger", + "solana-log-collector", "solana-logger", "solana-measure", "solana-program", "solana-program-runtime", "solana-runtime", + 
"solana-runtime-transaction", "solana-sbf-rust-invoke-dep", "solana-sbf-rust-realloc-dep", "solana-sbf-rust-realloc-invoke-dep", "solana-sdk", "solana-svm", + "solana-timings", "solana-transaction-status", "solana-type-overrides", "solana_rbpf", @@ -5715,6 +5766,7 @@ name = "solana-sbf-rust-alt-bn128" version = "2.1.0" dependencies = [ "array-bytes", + "solana-bn254", "solana-program", ] @@ -5723,6 +5775,7 @@ name = "solana-sbf-rust-alt-bn128-compression" version = "2.1.0" dependencies = [ "array-bytes", + "solana-bn254", "solana-program", ] @@ -5795,6 +5848,7 @@ version = "2.1.0" dependencies = [ "num-derive", "num-traits", + "solana-decode-error", "solana-program", "thiserror", ] @@ -6045,6 +6099,7 @@ version = "2.1.0" dependencies = [ "libsecp256k1 0.7.0", "solana-program", + "solana-secp256k1-recover", ] [[package]] @@ -6151,9 +6206,14 @@ dependencies = [ "sha2 0.10.8", "sha3 0.10.8", "siphasher", + "solana-bn254", + "solana-decode-error", "solana-program", + "solana-program-memory", "solana-sanitize", "solana-sdk-macro", + "solana-secp256k1-recover", + "solana-short-vec", "thiserror", "uriparse", "wasm-bindgen", @@ -6169,6 +6229,17 @@ dependencies = [ "syn 2.0.58", ] +[[package]] +name = "solana-secp256k1-recover" +version = "2.1.0" +dependencies = [ + "borsh 1.5.1", + "libsecp256k1 0.6.0", + "rustc_version", + "solana-define-syscall", + "thiserror", +] + [[package]] name = "solana-security-txt" version = "1.1.1" @@ -6182,6 +6253,7 @@ dependencies = [ "crossbeam-channel", "log", "solana-client", + "solana-connection-cache", "solana-measure", "solana-metrics", "solana-runtime", @@ -6189,6 +6261,14 @@ dependencies = [ "solana-tpu-client", ] +[[package]] +name = "solana-short-vec" +version = "2.1.0" +dependencies = [ + "rustc_version", + "serde", +] + [[package]] name = "solana-stake-program" version = "2.1.0" @@ -6197,6 +6277,7 @@ dependencies = [ "log", "rustc_version", "solana-config-program", + "solana-log-collector", "solana-program-runtime", "solana-sdk", "solana-type-overrides", @@ -6261,7 +6342,7 @@ dependencies = [ "futures 0.3.30", "futures-util", "histogram", - "indexmap 2.2.6", + "indexmap 2.3.0", "itertools 0.12.1", "libc", "log", @@ -6273,6 +6354,7 @@ dependencies = [ "rand 0.8.5", "rustls", "smallvec", + "socket2 0.5.7", "solana-measure", "solana-metrics", "solana-perf", @@ -6290,19 +6372,33 @@ dependencies = [ "itertools 0.12.1", "log", "percentage", + "qualifier_attr", "rustc_version", "serde", "serde_derive", "solana-bpf-loader-program", "solana-compute-budget", + "solana-fee", "solana-loader-v4-program", + "solana-log-collector", "solana-measure", "solana-metrics", "solana-program-runtime", + "solana-runtime-transaction", "solana-sdk", + "solana-svm-transaction", "solana-system-program", + "solana-timings", "solana-type-overrides", "solana-vote", + "thiserror", +] + +[[package]] +name = "solana-svm-transaction" +version = "2.1.0" +dependencies = [ + "solana-sdk", ] [[package]] @@ -6313,6 +6409,7 @@ dependencies = [ "log", "serde", "serde_derive", + "solana-log-collector", "solana-program-runtime", "solana-sdk", "solana-type-overrides", @@ -6330,7 +6427,6 @@ dependencies = [ "serde_json", "solana-accounts-db", "solana-cli-output", - "solana-client", "solana-compute-budget", "solana-core", "solana-geyser-plugin-manager", @@ -6341,6 +6437,7 @@ dependencies = [ "solana-program-test", "solana-rpc", "solana-rpc-client", + "solana-rpc-client-api", "solana-runtime", "solana-sdk", "solana-streamer", @@ -6361,6 +6458,15 @@ dependencies = [ "solana-sdk", ] +[[package]] +name = 
"solana-timings" +version = "2.1.0" +dependencies = [ + "eager", + "enum-iterator", + "solana-sdk", +] + [[package]] name = "solana-tpu-client" version = "2.1.0" @@ -6368,7 +6474,7 @@ dependencies = [ "async-trait", "bincode", "futures-util", - "indexmap 2.2.6", + "indexmap 2.3.0", "indicatif", "log", "rayon", @@ -6387,7 +6493,6 @@ dependencies = [ name = "solana-transaction-metrics-tracker" version = "2.1.0" dependencies = [ - "Inflector", "base64 0.22.1", "bincode", "lazy_static", @@ -6395,6 +6500,7 @@ dependencies = [ "rand 0.8.5", "solana-perf", "solana-sdk", + "solana-short-vec", ] [[package]] @@ -6499,9 +6605,9 @@ dependencies = [ "qualifier_attr", "scopeguard", "solana-ledger", - "solana-program-runtime", "solana-runtime", "solana-sdk", + "solana-timings", "solana-unified-scheduler-logic", "vec_extract_if_polyfill", ] @@ -6565,11 +6671,10 @@ dependencies = [ "solana-entry", "solana-gossip", "solana-ledger", - "solana-logger", "solana-program", - "solana-program-runtime", "solana-runtime", "solana-sdk", + "solana-timings", "solana-vote-program", ] @@ -6580,6 +6685,7 @@ dependencies = [ "bytemuck", "num-derive", "num-traits", + "solana-log-collector", "solana-program-runtime", "solana-sdk", "solana-zk-sdk", @@ -6619,6 +6725,7 @@ dependencies = [ "bytemuck", "num-derive", "num-traits", + "solana-log-collector", "solana-program-runtime", "solana-sdk", "solana-zk-token-sdk", @@ -6655,13 +6762,12 @@ dependencies = [ [[package]] name = "solana_rbpf" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06beab07f4104d6ad70d47baa67ad1bcd501a2345a983e20c389bade7c72305e" +checksum = "381f595f78accb55aeea018a90e3acf6048f960d932002737d249e3294bd58fe" dependencies = [ "byteorder 1.5.0", "combine", - "goblin", "hash32", "libc", "log", @@ -7142,26 +7248,20 @@ dependencies = [ "unicode-width", ] -[[package]] -name = "textwrap" -version = "0.16.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" - [[package]] name = "thiserror" -version = "1.0.61" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.61" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", @@ -7418,7 +7518,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.3.0", "toml_datetime", "winnow", ] diff --git a/programs/sbf/Cargo.toml b/programs/sbf/Cargo.toml index 453542998d9104..44ca0165600fd1 100644 --- a/programs/sbf/Cargo.toml +++ b/programs/sbf/Cargo.toml @@ -22,22 +22,27 @@ net2 = "0.2.37" num-derive = "0.4.2" num-traits = "0.2" rand = "0.8" -serde = "1.0.112" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 -serde_derive = "1.0.112" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 +serde = 
"1.0.112" # must match the serde_derive version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 +serde_derive = "1.0.112" # must match the serde version, see https://github.com/serde-rs/serde/issues/2584#issuecomment-1685252251 serde_json = "1.0.56" solana-account-decoder = { path = "../../account-decoder", version = "=2.1.0" } solana-accounts-db = { path = "../../accounts-db", version = "=2.1.0" } +solana-bn254 = { path = "../../curves/bn254", version = "=2.1.0" } solana-bpf-loader-program = { path = "../bpf_loader", version = "=2.1.0" } solana-cli-output = { path = "../../cli-output", version = "=2.1.0" } solana-compute-budget = { path = "../../compute-budget", version = "=2.1.0" } solana-curve25519 = { path = "../../curves/curve25519", version = "=2.1.0" } +solana-decode-error = { path = "../../sdk/decode-error", version = "=2.1.0" } +solana-fee = { path = "../../fee", version = "=2.1.0" } solana-ledger = { path = "../../ledger", version = "=2.1.0" } +solana-log-collector = { path = "../../log-collector", version = "=2.1.0" } solana-logger = { path = "../../logger", version = "=2.1.0" } solana-measure = { path = "../../measure", version = "=2.1.0" } solana-poseidon = { path = "../../poseidon/", version = "=2.1.0" } solana-program = { path = "../../sdk/program", version = "=2.1.0" } solana-program-runtime = { path = "../../program-runtime", version = "=2.1.0" } solana-runtime = { path = "../../runtime", version = "=2.1.0" } +solana-runtime-transaction = { path = "../../runtime-transaction", version = "=2.1.0" } solana-sbf-rust-128bit-dep = { path = "rust/128bit_dep", version = "=2.1.0" } solana-sbf-rust-invoke-dep = { path = "rust/invoke_dep", version = "=2.1.0" } solana-sbf-rust-invoked-dep = { path = "rust/invoked_dep", version = "=2.1.0" } @@ -47,12 +52,14 @@ solana-sbf-rust-param-passing-dep = { path = "rust/param_passing_dep", version = solana-sbf-rust-realloc-dep = { path = "rust/realloc_dep", version = "=2.1.0" } solana-sbf-rust-realloc-invoke-dep = { path = "rust/realloc_invoke_dep", version = "=2.1.0" } solana-sdk = { path = "../../sdk", version = "=2.1.0" } +solana-secp256k1-recover = { path = "../../curves/secp256k1-recover", version = "=2.1.0" } solana-svm = { path = "../../svm", version = "=2.1.0" } +solana-timings = { path = "../../timings", version = "=2.1.0" } solana-transaction-status = { path = "../../transaction-status", version = "=2.1.0" } solana-type-overrides = { path = "../../type-overrides", version = "=2.1.0" } agave-validator = { path = "../../validator", version = "=2.1.0" } solana-zk-token-sdk = { path = "../../zk-token-sdk", version = "=2.1.0" } -solana_rbpf = "=0.8.1" +solana_rbpf = "=0.8.2" thiserror = "1.0" [package] @@ -77,6 +84,8 @@ strip = true sbf_c = [] sbf_rust = [] dummy-for-ci-check = ["sbf_c", "sbf_rust"] +# This was needed for ci +frozen-abi = [] [dev-dependencies] agave-validator = { workspace = true } @@ -92,17 +101,21 @@ solana-accounts-db = { workspace = true } solana-bpf-loader-program = { workspace = true } solana-cli-output = { workspace = true } solana-compute-budget = { workspace = true } +solana-fee = { workspace = true } solana-ledger = { workspace = true } +solana-log-collector = { workspace = true } solana-logger = { workspace = true } solana-measure = { workspace = true } solana-program = { workspace = true } solana-program-runtime = { workspace = true } solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } +solana-runtime-transaction = { workspace = true } 
solana-sbf-rust-invoke-dep = { workspace = true } solana-sbf-rust-realloc-dep = { workspace = true } solana-sbf-rust-realloc-invoke-dep = { workspace = true } solana-sdk = { workspace = true, features = ["dev-context-only-utils"] } solana-svm = { workspace = true } +solana-timings = { workspace = true } solana-transaction-status = { workspace = true } solana-type-overrides = { workspace = true } solana_rbpf = { workspace = true } diff --git a/programs/sbf/c/src/invoke/invoke.c b/programs/sbf/c/src/invoke/invoke.c index 1ff4e6b69a096c..1e5691ffdc8e3b 100644 --- a/programs/sbf/c/src/invoke/invoke.c +++ b/programs/sbf/c/src/invoke/invoke.c @@ -13,30 +13,31 @@ static const uint8_t TEST_SUCCESS = 1; static const uint8_t TEST_PRIVILEGE_ESCALATION_SIGNER = 2; static const uint8_t TEST_PRIVILEGE_ESCALATION_WRITABLE = 3; -static const uint8_t TEST_PPROGRAM_NOT_EXECUTABLE = 4; -static const uint8_t TEST_EMPTY_ACCOUNTS_SLICE = 5; -static const uint8_t TEST_CAP_SEEDS = 6; -static const uint8_t TEST_CAP_SIGNERS = 7; -static const uint8_t TEST_ALLOC_ACCESS_VIOLATION = 8; -static const uint8_t TEST_MAX_INSTRUCTION_DATA_LEN_EXCEEDED = 9; -static const uint8_t TEST_MAX_INSTRUCTION_ACCOUNTS_EXCEEDED = 10; -static const uint8_t TEST_RETURN_ERROR = 11; -static const uint8_t TEST_PRIVILEGE_DEESCALATION_ESCALATION_SIGNER = 12; -static const uint8_t TEST_PRIVILEGE_DEESCALATION_ESCALATION_WRITABLE = 13; -static const uint8_t TEST_WRITABLE_DEESCALATION_WRITABLE = 14; -static const uint8_t TEST_NESTED_INVOKE_TOO_DEEP = 15; -static const uint8_t TEST_CALL_PRECOMPILE = 16; -static const uint8_t ADD_LAMPORTS = 17; -static const uint8_t TEST_RETURN_DATA_TOO_LARGE = 18; -static const uint8_t TEST_DUPLICATE_PRIVILEGE_ESCALATION_SIGNER = 19; -static const uint8_t TEST_DUPLICATE_PRIVILEGE_ESCALATION_WRITABLE = 20; -static const uint8_t TEST_MAX_ACCOUNT_INFOS_EXCEEDED = 21; +static const uint8_t TEST_PPROGRAM_NOT_OWNED_BY_LOADER = 4; +static const uint8_t TEST_PPROGRAM_NOT_EXECUTABLE = 5; +static const uint8_t TEST_EMPTY_ACCOUNTS_SLICE = 6; +static const uint8_t TEST_CAP_SEEDS = 7; +static const uint8_t TEST_CAP_SIGNERS = 8; +static const uint8_t TEST_ALLOC_ACCESS_VIOLATION = 9; +static const uint8_t TEST_MAX_INSTRUCTION_DATA_LEN_EXCEEDED = 10; +static const uint8_t TEST_MAX_INSTRUCTION_ACCOUNTS_EXCEEDED = 11; +static const uint8_t TEST_RETURN_ERROR = 12; +static const uint8_t TEST_PRIVILEGE_DEESCALATION_ESCALATION_SIGNER = 13; +static const uint8_t TEST_PRIVILEGE_DEESCALATION_ESCALATION_WRITABLE = 14; +static const uint8_t TEST_WRITABLE_DEESCALATION_WRITABLE = 15; +static const uint8_t TEST_NESTED_INVOKE_TOO_DEEP = 16; +static const uint8_t TEST_CALL_PRECOMPILE = 17; +static const uint8_t ADD_LAMPORTS = 18; +static const uint8_t TEST_RETURN_DATA_TOO_LARGE = 19; +static const uint8_t TEST_DUPLICATE_PRIVILEGE_ESCALATION_SIGNER = 20; +static const uint8_t TEST_DUPLICATE_PRIVILEGE_ESCALATION_WRITABLE = 21; +static const uint8_t TEST_MAX_ACCOUNT_INFOS_EXCEEDED = 22; // TEST_CPI_INVALID_* must match the definitions in // https://github.com/solana-labs/solana/blob/master/programs/sbf/rust/invoke/src/instructions.rs -static const uint8_t TEST_CPI_INVALID_KEY_POINTER = 34; -static const uint8_t TEST_CPI_INVALID_OWNER_POINTER = 35; -static const uint8_t TEST_CPI_INVALID_LAMPORTS_POINTER = 36; -static const uint8_t TEST_CPI_INVALID_DATA_POINTER = 37; +static const uint8_t TEST_CPI_INVALID_KEY_POINTER = 35; +static const uint8_t TEST_CPI_INVALID_OWNER_POINTER = 36; +static const uint8_t TEST_CPI_INVALID_LAMPORTS_POINTER = 37; +static 
const uint8_t TEST_CPI_INVALID_DATA_POINTER = 38; static const int MINT_INDEX = 0; static const int ARGUMENT_INDEX = 1; @@ -51,6 +52,7 @@ static const int SYSTEM_PROGRAM_INDEX = 9; static const int FROM_INDEX = 10; static const int ED25519_PROGRAM_INDEX = 11; static const int INVOKE_PROGRAM_INDEX = 12; +static const int UNEXECUTABLE_PROGRAM_INDEX = 13; uint64_t do_nested_invokes(uint64_t num_nested_invokes, SolAccountInfo *accounts, uint64_t num_accounts) { @@ -84,7 +86,7 @@ uint64_t do_nested_invokes(uint64_t num_nested_invokes, extern uint64_t entrypoint(const uint8_t *input) { sol_log("invoke C program"); - SolAccountInfo accounts[13]; + SolAccountInfo accounts[14]; SolParameters params = (SolParameters){.ka = accounts}; if (!sol_deserialize(input, &params, SOL_ARRAY_SIZE(accounts))) { @@ -374,12 +376,22 @@ extern uint64_t entrypoint(const uint8_t *input) { sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts)); break; } + case TEST_PPROGRAM_NOT_OWNED_BY_LOADER: { + sol_log("Test program not owned by loader"); + SolAccountMeta arguments[] = { + {accounts[INVOKED_ARGUMENT_INDEX].key, false, false}}; + uint8_t data[] = {RETURN_OK}; + const SolInstruction instruction = {accounts[ARGUMENT_INDEX].key, arguments, + SOL_ARRAY_SIZE(arguments), data, + SOL_ARRAY_SIZE(data)}; + return sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts)); + } case TEST_PPROGRAM_NOT_EXECUTABLE: { sol_log("Test program not executable"); SolAccountMeta arguments[] = { - {accounts[DERIVED_KEY3_INDEX].key, false, false}}; - uint8_t data[] = {VERIFY_PRIVILEGE_ESCALATION}; - const SolInstruction instruction = {accounts[ARGUMENT_INDEX].key, arguments, + {accounts[INVOKED_ARGUMENT_INDEX].key, false, false}}; + uint8_t data[] = {RETURN_OK}; + const SolInstruction instruction = {accounts[UNEXECUTABLE_PROGRAM_INDEX].key, arguments, SOL_ARRAY_SIZE(arguments), data, SOL_ARRAY_SIZE(data)}; return sol_invoke(&instruction, accounts, SOL_ARRAY_SIZE(accounts)); diff --git a/programs/sbf/rust/alt_bn128/Cargo.toml b/programs/sbf/rust/alt_bn128/Cargo.toml index f80c24bf4b2f3f..03e523e58b9797 100644 --- a/programs/sbf/rust/alt_bn128/Cargo.toml +++ b/programs/sbf/rust/alt_bn128/Cargo.toml @@ -10,6 +10,7 @@ edition = { workspace = true } [dependencies] array-bytes = { workspace = true } +solana-bn254 = { workspace = true } solana-program = { workspace = true } [lib] diff --git a/programs/sbf/rust/alt_bn128/src/lib.rs b/programs/sbf/rust/alt_bn128/src/lib.rs index 54a3c1fd29a3e5..7a546dc248fba3 100644 --- a/programs/sbf/rust/alt_bn128/src/lib.rs +++ b/programs/sbf/rust/alt_bn128/src/lib.rs @@ -1,9 +1,9 @@ //!
Alt_bn128 Syscalls tests extern crate solana_program; -use solana_program::{ - alt_bn128::prelude::{alt_bn128_addition, alt_bn128_multiplication, alt_bn128_pairing}, - custom_heap_default, custom_panic_default, msg, +use { + solana_bn254::prelude::{alt_bn128_addition, alt_bn128_multiplication, alt_bn128_pairing}, + solana_program::{custom_heap_default, custom_panic_default, msg}, }; fn alt_bn128_addition_test() { diff --git a/programs/sbf/rust/alt_bn128_compression/Cargo.toml b/programs/sbf/rust/alt_bn128_compression/Cargo.toml index 53fb783d9981c4..7f80554f592e31 100644 --- a/programs/sbf/rust/alt_bn128_compression/Cargo.toml +++ b/programs/sbf/rust/alt_bn128_compression/Cargo.toml @@ -10,6 +10,7 @@ edition = { workspace = true } [dependencies] array-bytes = { workspace = true } +solana-bn254 = { workspace = true } solana-program = { workspace = true } [lib] diff --git a/programs/sbf/rust/alt_bn128_compression/src/lib.rs b/programs/sbf/rust/alt_bn128_compression/src/lib.rs index 7545788fc3cf4a..a5ca6645142ec3 100644 --- a/programs/sbf/rust/alt_bn128_compression/src/lib.rs +++ b/programs/sbf/rust/alt_bn128_compression/src/lib.rs @@ -1,12 +1,12 @@ //! Alt_bn128 compression Syscalls tests extern crate solana_program; -use solana_program::{ - alt_bn128::compression::prelude::{ +use { + solana_bn254::compression::prelude::{ alt_bn128_g1_compress, alt_bn128_g1_decompress, alt_bn128_g2_compress, alt_bn128_g2_decompress, }, - custom_heap_default, custom_panic_default, msg, + solana_program::{custom_heap_default, custom_panic_default, msg}, }; fn alt_bn128_compression_g1() { diff --git a/programs/sbf/rust/deprecated_loader/src/lib.rs b/programs/sbf/rust/deprecated_loader/src/lib.rs index c2573fdcc771e6..ec7719f559412e 100644 --- a/programs/sbf/rust/deprecated_loader/src/lib.rs +++ b/programs/sbf/rust/deprecated_loader/src/lib.rs @@ -17,8 +17,8 @@ use solana_program::{ pub const REALLOC: u8 = 1; pub const REALLOC_EXTEND_FROM_SLICE: u8 = 12; -pub const TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS: u8 = 28; -pub const TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_NESTED: u8 = 29; +pub const TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS: u8 = 29; +pub const TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_NESTED: u8 = 30; #[derive(Debug, PartialEq)] struct SStruct { diff --git a/programs/sbf/rust/error_handling/Cargo.toml b/programs/sbf/rust/error_handling/Cargo.toml index d95e9d0eb925b0..a623d0163bc44a 100644 --- a/programs/sbf/rust/error_handling/Cargo.toml +++ b/programs/sbf/rust/error_handling/Cargo.toml @@ -11,6 +11,7 @@ edition = { workspace = true } [dependencies] num-derive = { workspace = true } num-traits = { workspace = true } +solana-decode-error = { workspace = true } solana-program = { workspace = true } thiserror = { workspace = true } diff --git a/programs/sbf/rust/error_handling/src/lib.rs b/programs/sbf/rust/error_handling/src/lib.rs index ffd045ef1b3962..75aa233d867d12 100644 --- a/programs/sbf/rust/error_handling/src/lib.rs +++ b/programs/sbf/rust/error_handling/src/lib.rs @@ -4,9 +4,9 @@ extern crate solana_program; use { num_derive::FromPrimitive, num_traits::FromPrimitive, + solana_decode_error::DecodeError, solana_program::{ account_info::AccountInfo, - decode_error::DecodeError, entrypoint::ProgramResult, msg, program_error::{PrintProgramError, ProgramError}, diff --git a/programs/sbf/rust/invoke/src/lib.rs b/programs/sbf/rust/invoke/src/lib.rs index be29d6760bec25..d663f960a01842 100644 --- a/programs/sbf/rust/invoke/src/lib.rs +++ b/programs/sbf/rust/invoke/src/lib.rs @@ -461,11 +461,20 @@ fn 
process_instruction<'a>( invoked_instruction.accounts[0].is_writable = true; invoke(&invoked_instruction, accounts)?; } + TEST_PPROGRAM_NOT_OWNED_BY_LOADER => { + msg!("Test program not owned by loader"); + let instruction = create_instruction( + *accounts[ARGUMENT_INDEX].key, + &[(accounts[INVOKED_ARGUMENT_INDEX].key, false, false)], + vec![RETURN_OK], + ); + invoke(&instruction, accounts)?; + } TEST_PPROGRAM_NOT_EXECUTABLE => { msg!("Test program not executable"); let instruction = create_instruction( - *accounts[ARGUMENT_INDEX].key, - &[(accounts[ARGUMENT_INDEX].key, true, true)], + *accounts[UNEXECUTABLE_PROGRAM_INDEX].key, + &[(accounts[INVOKED_ARGUMENT_INDEX].key, false, false)], vec![RETURN_OK], ); invoke(&instruction, accounts)?; @@ -1353,14 +1362,36 @@ fn process_instruction<'a>( TEST_CALLEE_ACCOUNT_UPDATES => { msg!("TEST_CALLEE_ACCOUNT_UPDATES"); - if instruction_data.len() < 2 + 2 * std::mem::size_of::<usize>() { + if instruction_data.len() < 3 + 3 * std::mem::size_of::<usize>() { return Ok(()); } let writable = instruction_data[1] != 0; - let resize = usize::from_le_bytes(instruction_data[2..10].try_into().unwrap()); - let write_offset = usize::from_le_bytes(instruction_data[10..18].try_into().unwrap()); - let invoke_struction = &instruction_data[18..]; + let clone_data = instruction_data[2] != 0; + let resize = usize::from_le_bytes(instruction_data[3..11].try_into().unwrap()); + let pre_write_offset = + usize::from_le_bytes(instruction_data[11..19].try_into().unwrap()); + let post_write_offset = + usize::from_le_bytes(instruction_data[19..27].try_into().unwrap()); + let invoke_struction = &instruction_data[27..]; + + let old_data = if clone_data { + let prev = accounts[ARGUMENT_INDEX].try_borrow_data().unwrap().as_ptr(); + + let data = accounts[ARGUMENT_INDEX].try_borrow_data().unwrap().to_vec(); + + let old = accounts[ARGUMENT_INDEX].data.replace(data.leak()); + + let post = accounts[ARGUMENT_INDEX].try_borrow_data().unwrap().as_ptr(); + + if prev == post { + panic!("failed to clone the data"); + } + + Some(old) + } else { + None + }; let account = &accounts[ARGUMENT_INDEX]; @@ -1368,6 +1399,11 @@ fn process_instruction<'a>( account.realloc(resize, false).unwrap(); } + if pre_write_offset != 0 { + // Ensure we still have access to the correct account + account.data.borrow_mut()[pre_write_offset] ^= 0xe5; + } + if !invoke_struction.is_empty() { // Invoke another program. With direct mapping, before CPI the callee will update the accounts (incl resizing) // so the pointer may change.
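The `clone_data` branch above is the subtle part: it deep-copies the account bytes, leaks the copy, and swaps it into the account's data cell, so the original allocation stays reachable (and writable) through `old_data` while the account itself now points at the clone. A self-contained sketch of that swap, using a plain `RefCell<&mut [u8]>` as a hypothetical stand-in for the `AccountInfo` data field:

use std::cell::RefCell;

fn main() {
    // Stand-in for the account's data cell.
    let backing: &'static mut [u8] = Box::leak(vec![0u8; 4].into_boxed_slice());
    let data: RefCell<&'static mut [u8]> = RefCell::new(backing);

    let prev = data.borrow().as_ptr();

    // Clone the bytes, leak the clone, and swap it in -- mirroring
    // `accounts[ARGUMENT_INDEX].data.replace(data.leak())` above.
    let clone: &'static mut [u8] = Box::leak(data.borrow().to_vec().into_boxed_slice());
    let old = data.replace(clone);

    assert_ne!(prev, data.borrow().as_ptr(), "failed to clone the data");

    // Writes through the old allocation no longer show up in `data`,
    // which is what the post-CPI write checks in this test exploit.
    old[0] ^= 0xe5;
    assert_eq!(data.borrow()[0], 0);
}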
@@ -1388,9 +1424,13 @@ fn process_instruction<'a>( .unwrap(); } - if write_offset != 0 { - // Ensure we still have access to the correct account - account.data.borrow_mut()[write_offset] ^= 0xe5; + if post_write_offset != 0 { + if let Some(old) = old_data { + old[post_write_offset] ^= 0xe5; + } else { + // Ensure we still have access to the correct account + account.data.borrow_mut()[post_write_offset] ^= 0xe5; + } } } TEST_STACK_HEAP_ZEROED => { diff --git a/programs/sbf/rust/invoke_dep/src/lib.rs b/programs/sbf/rust/invoke_dep/src/lib.rs index 066e900b7f9d2e..19acfc1db6fcf9 100644 --- a/programs/sbf/rust/invoke_dep/src/lib.rs +++ b/programs/sbf/rust/invoke_dep/src/lib.rs @@ -3,44 +3,45 @@ pub const TEST_SUCCESS: u8 = 1; pub const TEST_PRIVILEGE_ESCALATION_SIGNER: u8 = 2; pub const TEST_PRIVILEGE_ESCALATION_WRITABLE: u8 = 3; -pub const TEST_PPROGRAM_NOT_EXECUTABLE: u8 = 4; -pub const TEST_EMPTY_ACCOUNTS_SLICE: u8 = 5; -pub const TEST_CAP_SEEDS: u8 = 6; -pub const TEST_CAP_SIGNERS: u8 = 7; -pub const TEST_ALLOC_ACCESS_VIOLATION: u8 = 8; -pub const TEST_MAX_INSTRUCTION_DATA_LEN_EXCEEDED: u8 = 9; -pub const TEST_MAX_INSTRUCTION_ACCOUNTS_EXCEEDED: u8 = 10; -pub const TEST_RETURN_ERROR: u8 = 11; -pub const TEST_PRIVILEGE_DEESCALATION_ESCALATION_SIGNER: u8 = 12; -pub const TEST_PRIVILEGE_DEESCALATION_ESCALATION_WRITABLE: u8 = 13; -pub const TEST_WRITABLE_DEESCALATION_WRITABLE: u8 = 14; -pub const TEST_NESTED_INVOKE_TOO_DEEP: u8 = 15; -pub const TEST_CALL_PRECOMPILE: u8 = 16; -pub const ADD_LAMPORTS: u8 = 17; -pub const TEST_RETURN_DATA_TOO_LARGE: u8 = 18; -pub const TEST_DUPLICATE_PRIVILEGE_ESCALATION_SIGNER: u8 = 19; -pub const TEST_DUPLICATE_PRIVILEGE_ESCALATION_WRITABLE: u8 = 20; -pub const TEST_MAX_ACCOUNT_INFOS_EXCEEDED: u8 = 21; -pub const TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLEE: u8 = 22; -pub const TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLEE_NESTED: u8 = 23; -pub const TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLER: u8 = 24; -pub const TEST_FORBID_LEN_UPDATE_AFTER_OWNERSHIP_CHANGE_MOVING_DATA_POINTER: u8 = 25; -pub const TEST_FORBID_LEN_UPDATE_AFTER_OWNERSHIP_CHANGE: u8 = 26; -pub const TEST_ALLOW_WRITE_AFTER_OWNERSHIP_CHANGE_TO_CALLER: u8 = 27; -pub const TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS: u8 = 28; -pub const TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_NESTED: u8 = 29; -pub const TEST_CPI_ACCOUNT_UPDATE_CALLEE_GROWS: u8 = 30; -pub const TEST_CPI_ACCOUNT_UPDATE_CALLEE_SHRINKS_SMALLER_THAN_ORIGINAL_LEN: u8 = 31; -pub const TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_CALLEE_SHRINKS: u8 = 32; -pub const TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_CALLEE_SHRINKS_NESTED: u8 = 33; -pub const TEST_CPI_INVALID_KEY_POINTER: u8 = 34; -pub const TEST_CPI_INVALID_OWNER_POINTER: u8 = 35; -pub const TEST_CPI_INVALID_LAMPORTS_POINTER: u8 = 36; -pub const TEST_CPI_INVALID_DATA_POINTER: u8 = 37; -pub const TEST_CPI_CHANGE_ACCOUNT_DATA_MEMORY_ALLOCATION: u8 = 38; -pub const TEST_WRITE_ACCOUNT: u8 = 39; -pub const TEST_CALLEE_ACCOUNT_UPDATES: u8 = 40; -pub const TEST_STACK_HEAP_ZEROED: u8 = 41; +pub const TEST_PPROGRAM_NOT_OWNED_BY_LOADER: u8 = 4; +pub const TEST_PPROGRAM_NOT_EXECUTABLE: u8 = 5; +pub const TEST_EMPTY_ACCOUNTS_SLICE: u8 = 6; +pub const TEST_CAP_SEEDS: u8 = 7; +pub const TEST_CAP_SIGNERS: u8 = 8; +pub const TEST_ALLOC_ACCESS_VIOLATION: u8 = 9; +pub const TEST_MAX_INSTRUCTION_DATA_LEN_EXCEEDED: u8 = 10; +pub const TEST_MAX_INSTRUCTION_ACCOUNTS_EXCEEDED: u8 = 11; +pub const TEST_RETURN_ERROR: u8 = 12; +pub const TEST_PRIVILEGE_DEESCALATION_ESCALATION_SIGNER: u8 = 13; +pub const 
TEST_PRIVILEGE_DEESCALATION_ESCALATION_WRITABLE: u8 = 14; +pub const TEST_WRITABLE_DEESCALATION_WRITABLE: u8 = 15; +pub const TEST_NESTED_INVOKE_TOO_DEEP: u8 = 16; +pub const TEST_CALL_PRECOMPILE: u8 = 17; +pub const ADD_LAMPORTS: u8 = 18; +pub const TEST_RETURN_DATA_TOO_LARGE: u8 = 19; +pub const TEST_DUPLICATE_PRIVILEGE_ESCALATION_SIGNER: u8 = 20; +pub const TEST_DUPLICATE_PRIVILEGE_ESCALATION_WRITABLE: u8 = 21; +pub const TEST_MAX_ACCOUNT_INFOS_EXCEEDED: u8 = 22; +pub const TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLEE: u8 = 23; +pub const TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLEE_NESTED: u8 = 24; +pub const TEST_FORBID_WRITE_AFTER_OWNERSHIP_CHANGE_IN_CALLER: u8 = 25; +pub const TEST_FORBID_LEN_UPDATE_AFTER_OWNERSHIP_CHANGE_MOVING_DATA_POINTER: u8 = 26; +pub const TEST_FORBID_LEN_UPDATE_AFTER_OWNERSHIP_CHANGE: u8 = 27; +pub const TEST_ALLOW_WRITE_AFTER_OWNERSHIP_CHANGE_TO_CALLER: u8 = 28; +pub const TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS: u8 = 29; +pub const TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_NESTED: u8 = 30; +pub const TEST_CPI_ACCOUNT_UPDATE_CALLEE_GROWS: u8 = 31; +pub const TEST_CPI_ACCOUNT_UPDATE_CALLEE_SHRINKS_SMALLER_THAN_ORIGINAL_LEN: u8 = 32; +pub const TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_CALLEE_SHRINKS: u8 = 33; +pub const TEST_CPI_ACCOUNT_UPDATE_CALLER_GROWS_CALLEE_SHRINKS_NESTED: u8 = 34; +pub const TEST_CPI_INVALID_KEY_POINTER: u8 = 35; +pub const TEST_CPI_INVALID_OWNER_POINTER: u8 = 36; +pub const TEST_CPI_INVALID_LAMPORTS_POINTER: u8 = 37; +pub const TEST_CPI_INVALID_DATA_POINTER: u8 = 38; +pub const TEST_CPI_CHANGE_ACCOUNT_DATA_MEMORY_ALLOCATION: u8 = 39; +pub const TEST_WRITE_ACCOUNT: u8 = 40; +pub const TEST_CALLEE_ACCOUNT_UPDATES: u8 = 41; +pub const TEST_STACK_HEAP_ZEROED: u8 = 42; pub const MINT_INDEX: usize = 0; pub const ARGUMENT_INDEX: usize = 1; @@ -55,3 +56,4 @@ pub const SYSTEM_PROGRAM_INDEX: usize = 9; pub const FROM_INDEX: usize = 10; pub const ED25519_PROGRAM_INDEX: usize = 11; pub const INVOKE_PROGRAM_INDEX: usize = 12; +pub const UNEXECUTABLE_PROGRAM_INDEX: usize = 13; diff --git a/programs/sbf/rust/secp256k1_recover/Cargo.toml b/programs/sbf/rust/secp256k1_recover/Cargo.toml index 8a7e473e5b6a65..199558865c1d24 100644 --- a/programs/sbf/rust/secp256k1_recover/Cargo.toml +++ b/programs/sbf/rust/secp256k1_recover/Cargo.toml @@ -11,6 +11,7 @@ edition = { workspace = true } [dependencies] libsecp256k1 = { workspace = true } solana-program = { workspace = true } +solana-secp256k1-recover = { workspace = true } [lib] crate-type = ["cdylib"] diff --git a/programs/sbf/rust/secp256k1_recover/src/lib.rs b/programs/sbf/rust/secp256k1_recover/src/lib.rs index c954dda4144f05..09b239e090a147 100644 --- a/programs/sbf/rust/secp256k1_recover/src/lib.rs +++ b/programs/sbf/rust/secp256k1_recover/src/lib.rs @@ -2,8 +2,9 @@ //! 
Secp256k1Recover Syscall test extern crate solana_program; -use solana_program::{ - custom_heap_default, custom_panic_default, msg, secp256k1_recover::secp256k1_recover, +use { + solana_program::{custom_heap_default, custom_panic_default, msg}, + solana_secp256k1_recover::secp256k1_recover, }; fn test_secp256k1_recover() { diff --git a/programs/sbf/rust/sysvar/src/lib.rs b/programs/sbf/rust/sysvar/src/lib.rs index 72e3563caf5364..50f6891d85e3ed 100644 --- a/programs/sbf/rust/sysvar/src/lib.rs +++ b/programs/sbf/rust/sysvar/src/lib.rs @@ -2,8 +2,6 @@ extern crate solana_program; #[allow(deprecated)] -use solana_program::sysvar::fees::Fees; -#[allow(deprecated)] use solana_program::sysvar::recent_blockhashes::RecentBlockhashes; use solana_program::{ account_info::AccountInfo, @@ -31,7 +29,7 @@ pub fn process_instruction( sysvar::clock::id().log(); let clock = Clock::from_account_info(&accounts[2]).unwrap(); assert_ne!(clock, Clock::default()); - let got_clock = Clock::get().unwrap(); + let got_clock = Clock::get()?; assert_eq!(clock, got_clock); } @@ -41,7 +39,7 @@ pub fn process_instruction( sysvar::epoch_schedule::id().log(); let epoch_schedule = EpochSchedule::from_account_info(&accounts[3]).unwrap(); assert_eq!(epoch_schedule, EpochSchedule::default()); - let got_epoch_schedule = EpochSchedule::get().unwrap(); + let got_epoch_schedule = EpochSchedule::get()?; assert_eq!(epoch_schedule, got_epoch_schedule); } @@ -49,9 +47,8 @@ pub fn process_instruction( msg!("Instructions identifier:"); sysvar::instructions::id().log(); assert_eq!(*accounts[4].owner, sysvar::id()); - let index = instructions::load_current_index_checked(&accounts[4]).unwrap(); - let instruction = - instructions::load_instruction_at_checked(index as usize, &accounts[4]).unwrap(); + let index = instructions::load_current_index_checked(&accounts[4])?; + let instruction = instructions::load_instruction_at_checked(index as usize, &accounts[4])?; assert_eq!(0, index); assert_eq!( instruction, @@ -70,7 +67,6 @@ pub fn process_instruction( AccountMeta::new_readonly(*accounts[8].key, false), AccountMeta::new_readonly(*accounts[9].key, false), AccountMeta::new_readonly(*accounts[10].key, false), - AccountMeta::new_readonly(*accounts[11].key, false), ], ) ); @@ -89,7 +85,7 @@ pub fn process_instruction( msg!("Rent identifier:"); sysvar::rent::id().log(); let rent = Rent::from_account_info(&accounts[6]).unwrap(); - let got_rent = Rent::get().unwrap(); + let got_rent = Rent::get()?; assert_eq!(rent, got_rent); } @@ -114,22 +110,12 @@ pub fn process_instruction( sysvar::stake_history::id().log(); let _ = StakeHistory::from_account_info(&accounts[9]).unwrap(); - // Fees - #[allow(deprecated)] - if instruction_data[0] == 1 { - msg!("Fee identifier:"); - sysvar::fees::id().log(); - let fees = Fees::from_account_info(&accounts[10]).unwrap(); - let got_fees = Fees::get().unwrap(); - assert_eq!(fees, got_fees); - } - // Epoch Rewards { msg!("EpochRewards identifier:"); sysvar::epoch_rewards::id().log(); - let epoch_rewards = EpochRewards::from_account_info(&accounts[11]).unwrap(); - let got_epoch_rewards = EpochRewards::get().unwrap(); + let epoch_rewards = EpochRewards::from_account_info(&accounts[10]).unwrap(); + let got_epoch_rewards = EpochRewards::get()?; assert_eq!(epoch_rewards, got_epoch_rewards); } diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index b26c2e74e230ff..c9c5f99b05cd2b 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -13,16 +13,14 @@ use { 
solana_account_decoder::parse_bpf_loader::{ parse_bpf_upgradeable_loader, BpfUpgradeableLoaderAccountType, }, - solana_compute_budget::{ - compute_budget::ComputeBudget, - compute_budget_processor::process_compute_budget_instructions, - }, + solana_compute_budget::compute_budget::ComputeBudget, solana_ledger::token_balances::collect_token_balances, - solana_program_runtime::{invoke_context::mock_process_instruction, timings::ExecuteTimings}, + solana_program_runtime::invoke_context::mock_process_instruction, solana_rbpf::vm::ContextObject, solana_runtime::{ bank::{Bank, TransactionBalancesSet}, bank_client::BankClient, + bank_forks::BankForks, genesis_utils::{ bootstrap_validator_stake_lamports, create_genesis_config, create_genesis_config_with_leader_ex, GenesisConfigInfo, @@ -33,6 +31,7 @@ use { load_upgradeable_program_wrapper, set_upgrade_authority, upgrade_program, }, }, + solana_runtime_transaction::instructions_processor::process_compute_budget_instructions, solana_sbf_rust_invoke_dep::*, solana_sbf_rust_realloc_dep::*, solana_sbf_rust_realloc_invoke_dep::*, @@ -45,7 +44,7 @@ use { compute_budget::ComputeBudgetInstruction, entrypoint::MAX_PERMITTED_DATA_INCREASE, feature_set::{self, FeatureSet}, - fee::FeeStructure, + fee::{FeeBudgetLimits, FeeStructure}, fee_calculator::FeeRateGovernor, genesis_config::ClusterType, hash::Hash, @@ -62,18 +61,22 @@ use { transaction::{SanitizedTransaction, Transaction, TransactionError, VersionedTransaction}, }, solana_svm::{ + transaction_commit_result::CommittedTransaction, + transaction_execution_result::{InnerInstruction, TransactionExecutionDetails}, transaction_processor::ExecutionRecordingConfig, - transaction_results::{ - InnerInstruction, TransactionExecutionDetails, TransactionExecutionResult, - TransactionResults, - }, }, + solana_timings::ExecuteTimings, solana_transaction_status::{ map_inner_instructions, ConfirmedTransactionWithStatusMeta, TransactionStatusMeta, TransactionWithStatusMeta, VersionedTransactionWithStatusMeta, }, std::{ - assert_eq, cell::RefCell, collections::HashMap, str::FromStr, sync::Arc, time::Duration, + assert_eq, + cell::RefCell, + collections::HashMap, + str::FromStr, + sync::{Arc, RwLock}, + time::Duration, }, }; @@ -86,10 +89,9 @@ fn process_transaction_and_record_inner( Vec<Vec<InnerInstruction>>, Vec<String>, ) { - let signature = tx.signatures.first().unwrap().clone(); let txs = vec![tx]; let tx_batch = bank.prepare_batch_for_tests(txs); - let mut results = bank + let mut commit_results = bank .load_execute_and_commit_transactions( &tx_batch, MAX_PROCESSING_AGE, @@ -103,23 +105,15 @@ None, ) .0; - let result = results - .fee_collection_results - .swap_remove(0) - .and_then(|_| bank.get_signature_status(&signature).unwrap()); - let execution_details = results - .execution_results - .swap_remove(0) - .details() - .expect("tx should be executed") - .clone(); - let inner_instructions = execution_details - .inner_instructions - .expect("cpi recording should be enabled"); - let log_messages = execution_details - .log_messages - .expect("log recording should be enabled"); - (result, inner_instructions, log_messages) + let TransactionExecutionDetails { + inner_instructions, + log_messages, + status, + ..
+ } = commit_results.swap_remove(0).unwrap().execution_details; + let inner_instructions = inner_instructions.expect("cpi recording should be enabled"); + let log_messages = log_messages.expect("log recording should be enabled"); + (status, inner_instructions, log_messages) } #[cfg(feature = "sbf_rust")] @@ -132,9 +126,7 @@ fn execute_transactions( let mut mint_decimals = HashMap::new(); let tx_pre_token_balances = collect_token_balances(&bank, &batch, &mut mint_decimals); let ( - TransactionResults { - execution_results, .. - }, + commit_results, TransactionBalancesSet { pre_balances, post_balances, @@ -152,7 +144,7 @@ fn execute_transactions( izip!( txs.iter(), - execution_results.into_iter(), + commit_results.into_iter(), pre_balances.into_iter(), post_balances.into_iter(), tx_pre_token_balances.into_iter(), @@ -161,56 +153,56 @@ fn execute_transactions( .map( |( tx, - execution_result, + commit_result, pre_balances, post_balances, pre_token_balances, post_token_balances, )| { - match execution_result { - TransactionExecutionResult::Executed { details, .. } => { - let TransactionExecutionDetails { - status, - log_messages, - inner_instructions, - fee_details, - return_data, - executed_units, - .. - } = details; - - let inner_instructions = inner_instructions.map(|inner_instructions| { - map_inner_instructions(inner_instructions).collect() - }); - - let tx_status_meta = TransactionStatusMeta { - status, - fee: fee_details.total_fee(), - pre_balances, - post_balances, - pre_token_balances: Some(pre_token_balances), - post_token_balances: Some(post_token_balances), - inner_instructions, - log_messages, - rewards: None, - loaded_addresses: LoadedAddresses::default(), - return_data, - compute_units_consumed: Some(executed_units), - }; - - Ok(ConfirmedTransactionWithStatusMeta { - slot: bank.slot(), - tx_with_meta: TransactionWithStatusMeta::Complete( - VersionedTransactionWithStatusMeta { - transaction: VersionedTransaction::from(tx.clone()), - meta: tx_status_meta, - }, - ), - block_time: None, - }) + commit_result.map(|committed_tx| { + let CommittedTransaction { + fee_details, + execution_details: + TransactionExecutionDetails { + status, + log_messages, + inner_instructions, + return_data, + executed_units, + .. + }, + .. 
+ } = committed_tx; + + let inner_instructions = inner_instructions + .map(|inner_instructions| map_inner_instructions(inner_instructions).collect()); + + let tx_status_meta = TransactionStatusMeta { + status, + fee: fee_details.total_fee(), + pre_balances, + post_balances, + pre_token_balances: Some(pre_token_balances), + post_token_balances: Some(post_token_balances), + inner_instructions, + log_messages, + rewards: None, + loaded_addresses: LoadedAddresses::default(), + return_data, + compute_units_consumed: Some(executed_units), + }; + + ConfirmedTransactionWithStatusMeta { + slot: bank.slot(), + tx_with_meta: TransactionWithStatusMeta::Complete( + VersionedTransactionWithStatusMeta { + transaction: VersionedTransaction::from(tx.clone()), + meta: tx_status_meta, + }, + ), + block_time: None, } - TransactionExecutionResult::NotExecuted(err) => Err(err.clone()), - } + }) }, ) .collect() @@ -741,6 +733,10 @@ fn test_program_sbf_invoke_sanity() { let account = AccountSharedData::new(84, 0, &system_program::id()); bank.store_account(&from_keypair.pubkey(), &account); + let unexecutable_program_keypair = Keypair::new(); + let account = AccountSharedData::new(1, 0, &bpf_loader::id()); + bank.store_account(&unexecutable_program_keypair.pubkey(), &account); + let (derived_key1, bump_seed1) = Pubkey::find_program_address(&[b"You pass butter"], &invoke_program_id); let (derived_key2, bump_seed2) = @@ -763,6 +759,7 @@ fn test_program_sbf_invoke_sanity() { AccountMeta::new(from_keypair.pubkey(), true), AccountMeta::new_readonly(solana_sdk::ed25519_program::id(), false), AccountMeta::new_readonly(invoke_program_id, false), + AccountMeta::new_readonly(unexecutable_program_keypair.pubkey(), false), ]; // success cases @@ -914,6 +911,13 @@ fn test_program_sbf_invoke_sanity() { None, ); + do_invoke_failure_test_local( + TEST_PPROGRAM_NOT_OWNED_BY_LOADER, + TransactionError::InstructionError(0, InstructionError::AccountNotExecutable), + &[], + None, + ); + do_invoke_failure_test_local( TEST_PPROGRAM_NOT_EXECUTABLE, TransactionError::InstructionError(0, InstructionError::AccountNotExecutable), @@ -2530,7 +2534,7 @@ fn test_program_upgradeable_locks() { payer_keypair: &Keypair, buffer_keypair: &Keypair, program_keypair: &Keypair, - ) -> (Arc<Bank>, Transaction, Transaction) { + ) -> (Arc<Bank>, Arc<RwLock<BankForks>>, Transaction, Transaction) { solana_logger::setup(); let GenesisConfigInfo { @@ -2600,7 +2604,7 @@ fn test_program_upgradeable_locks() { bank.last_blockhash(), ); - (bank, invoke_tx, upgrade_tx) + (bank, bank_forks, invoke_tx, upgrade_tx) } let payer_keypair = keypair_from_seed(&[56u8; 32]).unwrap(); let buffer_keypair = keypair_from_seed(&[11u8; 32]).unwrap(); let program_keypair = keypair_from_seed(&[77u8; 32]).unwrap(); let results1 = { - let (bank, invoke_tx, upgrade_tx) = + let (bank, _bank_forks, invoke_tx, upgrade_tx) = setup_program_upgradeable_locks(&payer_keypair, &buffer_keypair, &program_keypair); execute_transactions(&bank, vec![upgrade_tx, invoke_tx]) }; let results2 = { - let (bank, invoke_tx, upgrade_tx) = + let (bank, _bank_forks, invoke_tx, upgrade_tx) = setup_program_upgradeable_locks(&payer_keypair, &buffer_keypair, &program_keypair); execute_transactions(&bank, vec![invoke_tx, upgrade_tx]) }; @@ -3871,13 +3875,15 @@ fn test_program_fees() { &ReservedAccountKeys::empty_key_set(), ) .unwrap(); - let expected_normal_fee = fee_structure.calculate_fee( + let fee_budget_limits = FeeBudgetLimits::from( + process_compute_budget_instructions(sanitized_message.program_instructions_iter()) + .unwrap_or_default(),
+ ); + let expected_normal_fee = solana_fee::calculate_fee( &sanitized_message, - congestion_multiplier, - &process_compute_budget_instructions(sanitized_message.program_instructions_iter()) - .unwrap_or_default() - .into(), - false, + congestion_multiplier == 0, + fee_structure.lamports_per_signature, + fee_budget_limits.prioritization_fee, true, ); bank_client @@ -3899,13 +3905,15 @@ fn test_program_fees() { &ReservedAccountKeys::empty_key_set(), ) .unwrap(); - let expected_prioritized_fee = fee_structure.calculate_fee( + let fee_budget_limits = FeeBudgetLimits::from( + process_compute_budget_instructions(sanitized_message.program_instructions_iter()) + .unwrap_or_default(), + ); + let expected_prioritized_fee = solana_fee::calculate_fee( &sanitized_message, - congestion_multiplier, - &process_compute_budget_instructions(sanitized_message.program_instructions_iter()) - .unwrap_or_default() - .into(), - false, + congestion_multiplier == 0, + fee_structure.lamports_per_signature, + fee_budget_limits.prioritization_fee, true, ); assert!(expected_normal_fee < expected_prioritized_fee); @@ -4688,15 +4696,18 @@ fn test_update_callee_account() { bank.store_account(&account_keypair.pubkey(), &account); - let mut instruction_data = vec![TEST_CALLEE_ACCOUNT_UPDATES, 0]; + let mut instruction_data = vec![TEST_CALLEE_ACCOUNT_UPDATES, 0, 0]; instruction_data.extend_from_slice(20480usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); instruction_data.extend_from_slice(16384usize.to_le_bytes().as_ref()); // instruction data for inner CPI (2x) - instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 0]); + instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 0, 0]); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); - instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 0]); + instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 0, 0]); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); @@ -4731,12 +4742,14 @@ fn test_update_callee_account() { account.set_data(data); bank.store_account(&account_keypair.pubkey(), &account); - let mut instruction_data = vec![TEST_CALLEE_ACCOUNT_UPDATES, 1]; + let mut instruction_data = vec![TEST_CALLEE_ACCOUNT_UPDATES, 1, 0]; instruction_data.extend_from_slice(20480usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); instruction_data.extend_from_slice(16384usize.to_le_bytes().as_ref()); // instruction data for inner CPI - instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 0]); + instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 0, 0]); instruction_data.extend_from_slice(19480usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); instruction_data.extend_from_slice(8129usize.to_le_bytes().as_ref()); let instruction = Instruction::new_with_bytes( @@ -4771,12 +4784,14 @@ fn test_update_callee_account() { account.set_data(data); bank.store_account(&account_keypair.pubkey(), &account); - let mut instruction_data = vec![TEST_CALLEE_ACCOUNT_UPDATES, 1]; + let mut instruction_data = vec![TEST_CALLEE_ACCOUNT_UPDATES, 1, 0]; 
instruction_data.extend_from_slice(16384usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); instruction_data.extend_from_slice(16384usize.to_le_bytes().as_ref()); // instruction data for inner CPI - instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 0]); + instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 0, 0]); instruction_data.extend_from_slice(20480usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); instruction_data.extend_from_slice(16385usize.to_le_bytes().as_ref()); let instruction = Instruction::new_with_bytes( @@ -4810,20 +4825,24 @@ fn test_update_callee_account() { account.set_data(data); bank.store_account(&account_keypair.pubkey(), &account); - let mut instruction_data = vec![TEST_CALLEE_ACCOUNT_UPDATES, 1]; + let mut instruction_data = vec![TEST_CALLEE_ACCOUNT_UPDATES, 1, 0]; instruction_data.extend_from_slice(16384usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); instruction_data.extend_from_slice(16384usize.to_le_bytes().as_ref()); // instruction data for inner CPI (2x) - instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 1]); + instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 1, 0]); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); - instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 1]); + instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 1, 0]); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); // instruction data for inner CPI - instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 0]); + instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 0, 0]); instruction_data.extend_from_slice(20480usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); instruction_data.extend_from_slice(16385usize.to_le_bytes().as_ref()); let instruction = Instruction::new_with_bytes( @@ -4850,9 +4869,198 @@ fn test_update_callee_account() { assert_eq!(*v, expected, "offset:{i} {v:#x} != {expected:#x}"); }); + + // V. 
clone data, modify and CPI + let mut account = AccountSharedData::new(42, 10240, &invoke_program_id); + let data: Vec<u8> = (0..10240).map(|n| n as u8).collect(); + account.set_data(data); + + bank.store_account(&account_keypair.pubkey(), &account); + + let mut instruction_data = vec![TEST_CALLEE_ACCOUNT_UPDATES, 1, 1]; + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(8190usize.to_le_bytes().as_ref()); + + // instruction data for inner CPI + instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 1, 0]); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(8191usize.to_le_bytes().as_ref()); + + let instruction = Instruction::new_with_bytes( + invoke_program_id, + &instruction_data, + account_metas.clone(), + ); + let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); + + if direct_mapping { + // changing the data pointer is not permitted + assert!(result.is_err()); + } else { + assert!(result.is_ok()); + + let data = bank_client + .get_account_data(&account_keypair.pubkey()) + .unwrap() + .unwrap(); + + assert_eq!(data.len(), 10240); + + data.iter().enumerate().for_each(|(i, v)| { + let expected = match i { + // since the data was cloned, the write to 8191 was lost + 8190 => (i as u8) ^ 0xe5, + ..=10240 => i as u8, + _ => 0, + }; + + assert_eq!(*v, expected, "offset:{i} {v:#x} != {expected:#x}"); + }); + } } } +#[test] +fn test_clone_account_data() { + // Test cloning account data works as expected + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(100_123_456_789); + + let mut bank = Bank::new_for_tests(&genesis_config); + let feature_set = Arc::make_mut(&mut bank.feature_set); + + feature_set.deactivate(&feature_set::bpf_account_data_direct_mapping::id()); + + let (bank, bank_forks) = bank.wrap_with_bank_forks_for_tests(); + let mut bank_client = BankClient::new_shared(bank.clone()); + let authority_keypair = Keypair::new(); + + let (_, invoke_program_id) = load_upgradeable_program_and_advance_slot( + &mut bank_client, + bank_forks.as_ref(), + &mint_keypair, + &authority_keypair, + "solana_sbf_rust_invoke", + ); + + let (bank, invoke_program_id2) = load_upgradeable_program_and_advance_slot( + &mut bank_client, + bank_forks.as_ref(), + &mint_keypair, + &authority_keypair, + "solana_sbf_rust_invoke", + ); + + assert_ne!(invoke_program_id, invoke_program_id2); + + println!("invoke_program_id:{invoke_program_id}"); + println!("invoke_program_id2:{invoke_program_id2}"); + + let account_keypair = Keypair::new(); + + let mint_pubkey = mint_keypair.pubkey(); + + let account_metas = vec![ + AccountMeta::new(mint_pubkey, true), + AccountMeta::new(account_keypair.pubkey(), false), + AccountMeta::new_readonly(invoke_program_id2, false), + AccountMeta::new_readonly(invoke_program_id, false), + ]; + + // I. clone data and CPI; modify data in callee.
+ // Now the original data in the caller is unmodified, and we get an "instruction modified data of an account it does not own" + // error in the caller + let mut account = AccountSharedData::new(42, 10240, &invoke_program_id2); + let data: Vec<u8> = (0..10240).map(|n| n as u8).collect(); + account.set_data(data); + + bank.store_account(&account_keypair.pubkey(), &account); + + let mut instruction_data = vec![TEST_CALLEE_ACCOUNT_UPDATES, 1, 1]; + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + + // instruction data for inner CPI: modify account + instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 0, 0]); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(8190usize.to_le_bytes().as_ref()); + + let instruction = + Instruction::new_with_bytes(invoke_program_id, &instruction_data, account_metas.clone()); + + let message = Message::new(&[instruction], Some(&mint_pubkey)); + let tx = Transaction::new(&[&mint_keypair], message.clone(), bank.last_blockhash()); + let (result, _, logs) = process_transaction_and_record_inner(&bank, tx); + assert!(result.is_err(), "{result:?}"); + let error = format!("Program {invoke_program_id} failed: instruction modified data of an account it does not own"); + assert!(logs.iter().any(|log| log.contains(&error)), "{logs:?}"); + + // II. clone data, modify and then CPI + // The deserialize checks should verify that we're not allowed to modify an account we don't own, even though + // we have only modified a copy of the data. Fails in caller + let mut account = AccountSharedData::new(42, 10240, &invoke_program_id2); + let data: Vec<u8> = (0..10240).map(|n| n as u8).collect(); + account.set_data(data); + + bank.store_account(&account_keypair.pubkey(), &account); + + let mut instruction_data = vec![TEST_CALLEE_ACCOUNT_UPDATES, 1, 1]; + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(8190usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + + // instruction data for inner CPI + instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 0, 0]); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + + let instruction = + Instruction::new_with_bytes(invoke_program_id, &instruction_data, account_metas.clone()); + + let message = Message::new(&[instruction], Some(&mint_pubkey)); + let tx = Transaction::new(&[&mint_keypair], message.clone(), bank.last_blockhash()); + let (result, _, logs) = process_transaction_and_record_inner(&bank, tx); + assert!(result.is_err(), "{result:?}"); + let error = format!("Program {invoke_program_id} failed: instruction modified data of an account it does not own"); + assert!(logs.iter().any(|log| log.contains(&error)), "{logs:?}"); + + // III.
Clone data, call, modify in callee and then make the same change in the caller - transaction succeeds + // Note the caller needs to modify the original account data, not the copy + let mut account = AccountSharedData::new(42, 10240, &invoke_program_id2); + let data: Vec<u8> = (0..10240).map(|n| n as u8).collect(); + account.set_data(data); + + bank.store_account(&account_keypair.pubkey(), &account); + + let mut instruction_data = vec![TEST_CALLEE_ACCOUNT_UPDATES, 1, 1]; + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(8190usize.to_le_bytes().as_ref()); + + // instruction data for inner CPI + instruction_data.extend_from_slice(&[TEST_CALLEE_ACCOUNT_UPDATES, 0, 0]); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(0usize.to_le_bytes().as_ref()); + instruction_data.extend_from_slice(8190usize.to_le_bytes().as_ref()); + + let instruction = + Instruction::new_with_bytes(invoke_program_id, &instruction_data, account_metas.clone()); + let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction); + + // works because the account is exactly the same in caller as callee + assert!(result.is_ok(), "{result:?}"); +} + #[test] fn test_stack_heap_zeroed() { solana_logger::setup(); diff --git a/programs/sbf/tests/simulation.rs b/programs/sbf/tests/simulation.rs index 6b799c05690c0b..a1a9f6a6ce8ab0 100644 --- a/programs/sbf/tests/simulation.rs +++ b/programs/sbf/tests/simulation.rs @@ -1,3 +1,5 @@ +#![cfg(feature = "sbf_rust")] + use { agave_validator::test_validator::*, solana_runtime::{ @@ -17,7 +19,6 @@ use { }; #[test] -#[cfg(feature = "sbf_rust")] fn test_no_panic_banks_client() { solana_logger::setup(); @@ -55,7 +56,6 @@ } #[test] -#[cfg(feature = "sbf_rust")] fn test_no_panic_rpc_client() { solana_logger::setup(); diff --git a/programs/sbf/tests/sysvar.rs b/programs/sbf/tests/sysvar.rs index 9fd35adcaf82c4..92213bb36d33bb 100644 --- a/programs/sbf/tests/sysvar.rs +++ b/programs/sbf/tests/sysvar.rs @@ -1,3 +1,5 @@ +#![cfg(feature = "sbf_rust")] + use { solana_runtime::{ bank::Bank, @@ -12,7 +14,7 @@ use { pubkey::Pubkey, signature::{Keypair, Signer}, sysvar::{ - clock, epoch_rewards, epoch_schedule, fees, instructions, recent_blockhashes, rent, + clock, epoch_rewards, epoch_schedule, instructions, recent_blockhashes, rent, slot_hashes, slot_history, stake_history, }, transaction::{SanitizedTransaction, Transaction}, @@ -20,7 +22,6 @@ }; #[test] -#[cfg(feature = "sbf_rust")] fn test_sysvar_syscalls() { solana_logger::setup(); @@ -67,8 +68,6 @@ fn test_sysvar_syscalls() { AccountMeta::new_readonly(slot_hashes::id(), false), AccountMeta::new_readonly(slot_history::id(), false), AccountMeta::new_readonly(stake_history::id(), false), - #[allow(deprecated)] - AccountMeta::new_readonly(fees::id(), false), AccountMeta::new_readonly(epoch_rewards::id(), false), ], ); diff --git a/programs/stake-tests/tests/test_move_stake_and_lamports.rs b/programs/stake-tests/tests/test_move_stake_and_lamports.rs index 7c67db2d5520b9..0ef36753337c83 100644 --- a/programs/stake-tests/tests/test_move_stake_and_lamports.rs +++ b/programs/stake-tests/tests/test_move_stake_and_lamports.rs @@ -10,7 +10,7 @@ use { solana_sdk::{ account::Account as SolanaAccount, entrypoint::ProgramResult, - feature_set::move_stake_and_move_lamports_ixs, + feature_set::{move_stake_and_move_lamports_ixs,
diff --git a/programs/stake-tests/tests/test_move_stake_and_lamports.rs b/programs/stake-tests/tests/test_move_stake_and_lamports.rs index 7c67db2d5520b9..0ef36753337c83 100644 --- a/programs/stake-tests/tests/test_move_stake_and_lamports.rs +++ b/programs/stake-tests/tests/test_move_stake_and_lamports.rs @@ -10,7 +10,7 @@ use { solana_sdk::{ account::Account as SolanaAccount, entrypoint::ProgramResult, - feature_set::move_stake_and_move_lamports_ixs, + feature_set::{move_stake_and_move_lamports_ixs, stake_raise_minimum_delegation_to_1_sol}, instruction::Instruction, program_error::ProgramError, pubkey::Pubkey, @@ -403,6 +403,7 @@ impl StakeLifecycle { } #[test_matrix( + [program_test(), program_test_without_features(&[stake_raise_minimum_delegation_to_1_sol::id()])], [StakeLifecycle::Initialized, StakeLifecycle::Activating, StakeLifecycle::Active, StakeLifecycle::Deactivating, StakeLifecycle::Deactive], [StakeLifecycle::Initialized, StakeLifecycle::Activating, StakeLifecycle::Active, @@ -412,12 +413,13 @@ )] #[tokio::test] async fn test_move_stake( + program_test: ProgramTest, move_source_type: StakeLifecycle, move_dest_type: StakeLifecycle, full_move: bool, has_lockup: bool, ) { - let mut context = program_test().start_with_context().await; + let mut context = program_test.start_with_context().await; let accounts = Accounts::default(); accounts.initialize(&mut context).await; @@ -542,14 +544,32 @@ async fn test_move_stake( } } - // source has 2x minimum (always 2 sol because these tests dont have featuresets) - // so first for inactive accounts lets undershoot and fail for underfunded dest - if move_dest_type != StakeLifecycle::Active { + // the below checks are conceptually incoherent with a 1 lamport minimum + // the undershoot would still fail, but because it's a zero-lamport move, not because the destination ends underfunded + // and the overshoot would wrongly succeed, because it's a full move, so the "underfunded" source is simply closed + if minimum_delegation > 1 { + // source has 2x minimum (2 sol when the raised 1 sol minimum delegation feature is active) + // so first for inactive accounts let's undershoot and fail for underfunded dest + if move_dest_type != StakeLifecycle::Active { + let instruction = ixn::move_stake( + &move_source, + &move_dest, + &staker_keypair.pubkey(), + minimum_delegation - 1, + ); + + let e = process_instruction(&mut context, &instruction, &vec![&staker_keypair]) + .await + .unwrap_err(); + assert_eq!(e, ProgramError::InvalidArgument); + } + + // now let's overshoot and fail for underfunded source let instruction = ixn::move_stake( &move_source, &move_dest, &staker_keypair.pubkey(), - minimum_delegation - 1, + minimum_delegation + 1, ); let e = process_instruction(&mut context, &instruction, &vec![&staker_keypair]) @@ -558,19 +578,6 @@ assert_eq!(e, ProgramError::InvalidArgument); } - // now lets overshoot and fail for underfunded source - let instruction = ixn::move_stake( - &move_source, - &move_dest, - &staker_keypair.pubkey(), - minimum_delegation + 1, - ); - - let e = process_instruction(&mut context, &instruction, &vec![&staker_keypair]) - .await - .unwrap_err(); - assert_eq!(e, ProgramError::InvalidArgument); - // now we do it juuust right let instruction = ixn::move_stake( &move_source, &move_dest, @@ -648,6 +655,7 @@ } #[test_matrix( + [program_test(), program_test_without_features(&[stake_raise_minimum_delegation_to_1_sol::id()])], [StakeLifecycle::Initialized, StakeLifecycle::Activating, StakeLifecycle::Active, StakeLifecycle::Deactivating, StakeLifecycle::Deactive], [StakeLifecycle::Initialized, StakeLifecycle::Activating, StakeLifecycle::Active, @@ -657,12 +665,13 @@ )] #[tokio::test] async fn test_move_lamports( + program_test: ProgramTest, move_source_type: StakeLifecycle, move_dest_type: StakeLifecycle, different_votes: bool, has_lockup: bool, ) { - let mut context = program_test().start_with_context().await; + let mut context = 
program_test.start_with_context().await; let accounts = Accounts::default(); accounts.initialize(&mut context).await; @@ -860,6 +869,7 @@ async fn test_move_lamports( } #[test_matrix( + [program_test(), program_test_without_features(&[stake_raise_minimum_delegation_to_1_sol::id()])], [(StakeLifecycle::Active, StakeLifecycle::Uninitialized), (StakeLifecycle::Uninitialized, StakeLifecycle::Initialized), (StakeLifecycle::Uninitialized, StakeLifecycle::Uninitialized)], @@ -867,10 +877,11 @@ async fn test_move_lamports( )] #[tokio::test] async fn test_move_uninitialized_fail( + program_test: ProgramTest, move_types: (StakeLifecycle, StakeLifecycle), move_lamports: bool, ) { - let mut context = program_test().start_with_context().await; + let mut context = program_test.start_with_context().await; let accounts = Accounts::default(); accounts.initialize(&mut context).await; @@ -931,12 +942,14 @@ async fn test_move_uninitialized_fail( } #[test_matrix( + [program_test(), program_test_without_features(&[stake_raise_minimum_delegation_to_1_sol::id()])], [StakeLifecycle::Initialized, StakeLifecycle::Active, StakeLifecycle::Deactive], [StakeLifecycle::Initialized, StakeLifecycle::Activating, StakeLifecycle::Active, StakeLifecycle::Deactive], [false, true] )] #[tokio::test] async fn test_move_general_fail( + program_test: ProgramTest, move_source_type: StakeLifecycle, move_dest_type: StakeLifecycle, move_lamports: bool, @@ -952,7 +965,7 @@ async fn test_move_general_fail( return; } - let mut context = program_test().start_with_context().await; + let mut context = program_test.start_with_context().await; let accounts = Accounts::default(); accounts.initialize(&mut context).await; @@ -1212,12 +1225,15 @@ async fn test_move_general_fail( // this test is only to be sure the feature gate is safe // once the feature has been activated, this can all be deleted #[test_matrix( + [program_test_without_features(&[move_stake_and_move_lamports_ixs::id()]), + program_test_without_features(&[move_stake_and_move_lamports_ixs::id(), stake_raise_minimum_delegation_to_1_sol::id()])], [StakeLifecycle::Initialized, StakeLifecycle::Active, StakeLifecycle::Deactive], [StakeLifecycle::Initialized, StakeLifecycle::Activating, StakeLifecycle::Active, StakeLifecycle::Deactive], [false, true] )] #[tokio::test] async fn test_move_feature_gate_fail( + program_test: ProgramTest, move_source_type: StakeLifecycle, move_dest_type: StakeLifecycle, move_lamports: bool, @@ -1233,10 +1249,7 @@ async fn test_move_feature_gate_fail( return; } - let mut context = program_test_without_features(&[move_stake_and_move_lamports_ixs::id()]) - .start_with_context() - .await; - + let mut context = program_test.start_with_context().await; let accounts = Accounts::default(); accounts.initialize(&mut context).await; diff --git a/programs/stake/Cargo.toml b/programs/stake/Cargo.toml index b55c904b4b17a6..41eb6e6b43b9fb 100644 --- a/programs/stake/Cargo.toml +++ b/programs/stake/Cargo.toml @@ -13,6 +13,7 @@ edition = { workspace = true } bincode = { workspace = true } log = { workspace = true } solana-config-program = { workspace = true } +solana-log-collector = { workspace = true } solana-program-runtime = { workspace = true } solana-sdk = { workspace = true } solana-type-overrides = { workspace = true } diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index 1b874cd9750596..30671825108904 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -1,8 +1,8 @@ use { 
crate::stake_state::{ authorize, authorize_with_seed, deactivate, deactivate_delinquent, delegate, initialize, - merge, move_lamports, move_stake, new_warmup_cooldown_rate_epoch, redelegate, set_lockup, - split, withdraw, + merge, move_lamports, move_stake, new_warmup_cooldown_rate_epoch, set_lockup, split, + withdraw, }, log::*, solana_program_runtime::{ @@ -324,7 +324,6 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| let clock = invoke_context.get_sysvar_cache().get_clock()?; deactivate_delinquent( - invoke_context, transaction_context, instruction_context, &mut me, @@ -333,25 +332,10 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| clock.epoch, ) } + #[allow(deprecated)] StakeInstruction::Redelegate => { - let mut me = get_stake_account()?; - if invoke_context - .get_feature_set() - .is_active(&feature_set::stake_redelegate_instruction::id()) - { - instruction_context.check_number_of_instruction_accounts(3)?; - redelegate( - invoke_context, - transaction_context, - instruction_context, - &mut me, - 1, - 2, - &signers, - ) - } else { - Err(InstructionError::InvalidInstructionData) - } + let _ = get_stake_account()?; + Err(InstructionError::InvalidInstructionData) } StakeInstruction::MoveStake(lamports) => { if invoke_context @@ -428,9 +412,7 @@ mod tests { LockupArgs, StakeError, }, stake_flags::StakeFlags, - state::{ - warmup_cooldown_rate, Authorized, Lockup, StakeActivationStatus, StakeAuthorize, - }, + state::{warmup_cooldown_rate, Authorized, Lockup, StakeAuthorize}, MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION, }, stake_history::{StakeHistory, StakeHistoryEntry}, @@ -849,16 +831,6 @@ mod tests { ), Err(InstructionError::InvalidAccountOwner), ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::redelegate( - &spoofed_stake_state_pubkey(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - )[2], - Err(InstructionError::InvalidAccountOwner), - ); } #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] @@ -7305,618 +7277,6 @@ mod tests { ); } - #[test_case(feature_set_no_minimum_delegation(); "no_min_delegation")] - #[test_case(feature_set_all_enabled(); "all_enabled")] - fn test_redelegate(feature_set: Arc<FeatureSet>) { - let feature_set = Arc::new(feature_set); - - let minimum_delegation = crate::get_minimum_delegation(&feature_set); - let rent = Rent::default(); - let rent_exempt_reserve = rent.minimum_balance(StakeStateV2::size_of()); - let stake_history = StakeHistory::default(); - let current_epoch = 100; - - let authorized_staker = Pubkey::new_unique(); - let vote_address = Pubkey::new_unique(); - let new_vote_address = Pubkey::new_unique(); - let stake_address = Pubkey::new_unique(); - let uninitialized_stake_address = Pubkey::new_unique(); - - let prepare_stake_account = |activation_epoch, expected_stake_activation_status| { - let initial_stake_delegation = minimum_delegation + rent_exempt_reserve; - let initial_stake_state = StakeStateV2::Stake( - Meta { - authorized: Authorized { - staker: authorized_staker, - withdrawer: Pubkey::new_unique(), - }, - rent_exempt_reserve, - ..Meta::default() - }, - new_stake( - initial_stake_delegation, - &vote_address, - &VoteState::default(), - activation_epoch, - ), - StakeFlags::empty(), - ); - - if let Some(expected_stake_activation_status) = expected_stake_activation_status { - assert_eq!( - expected_stake_activation_status, - initial_stake_state - .delegation() - .unwrap() - 
.stake_activating_and_deactivating(current_epoch, &stake_history, None) - ); - } - - AccountSharedData::new_data_with_space( - rent_exempt_reserve + initial_stake_delegation, /* lamports */ - &initial_stake_state, - StakeStateV2::size_of(), - &id(), - ) - .unwrap() - }; - - let new_vote_account = AccountSharedData::new_data_with_space( - 1, /* lamports */ - &VoteStateVersions::new_current(VoteState::default()), - VoteState::size_of(), - &solana_vote_program::id(), - ) - .unwrap(); - - let process_instruction_redelegate = - |stake_address: &Pubkey, - stake_account: &AccountSharedData, - authorized_staker: &Pubkey, - vote_address: &Pubkey, - vote_account: &AccountSharedData, - uninitialized_stake_address: &Pubkey, - uninitialized_stake_account: &AccountSharedData, - expected_result| { - #[allow(deprecated)] - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::Redelegate).unwrap(), - vec![ - (*stake_address, stake_account.clone()), - ( - *uninitialized_stake_address, - uninitialized_stake_account.clone(), - ), - (*vote_address, vote_account.clone()), - (*authorized_staker, AccountSharedData::default()), - ( - stake_config::id(), - config::create_account(0, &stake_config::Config::default()), - ), - ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ), - (rent::id(), create_account_shared_data_for_test(&rent)), - ( - clock::id(), - create_account_shared_data_for_test(&Clock { - epoch: current_epoch, - ..Clock::default() - }), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ], - vec![ - AccountMeta { - pubkey: *stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: *uninitialized_stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: *vote_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_config::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: *authorized_staker, - is_signer: true, - is_writable: false, - }, - ], - expected_result, - ) - }; - - // - // Failure: incorrect authorized staker - // - let stake_account = prepare_stake_account(0 /*activation_epoch*/, None); - let uninitialized_stake_account = - AccountSharedData::new(0 /* lamports */, StakeStateV2::size_of(), &id()); - - let _ = process_instruction_redelegate( - &stake_address, - &stake_account, - &Pubkey::new_unique(), // <-- Incorrect authorized staker - &new_vote_address, - &new_vote_account, - &uninitialized_stake_address, - &uninitialized_stake_account, - Err(InstructionError::MissingRequiredSignature), - ); - - // - // Success: normal case - // - let output_accounts = process_instruction_redelegate( - &stake_address, - &stake_account, - &authorized_staker, - &new_vote_address, - &new_vote_account, - &uninitialized_stake_address, - &uninitialized_stake_account, - Ok(()), - ); - - assert_eq!(output_accounts[0].lamports(), rent_exempt_reserve); - if let StakeStateV2::Stake(meta, stake, _) = output_accounts[0].deserialize_data().unwrap() - { - assert_eq!(meta.rent_exempt_reserve, rent_exempt_reserve); - assert_eq!( - stake.delegation.stake, - minimum_delegation + rent_exempt_reserve - ); - assert_eq!(stake.delegation.activation_epoch, 0); - assert_eq!(stake.delegation.deactivation_epoch, current_epoch); - } else { - panic!("Invalid output_accounts[0] data"); - } - assert_eq!( - output_accounts[1].lamports(), - minimum_delegation + rent_exempt_reserve - ); - if let 
StakeStateV2::Stake(meta, stake, _) = output_accounts[1].deserialize_data().unwrap() - { - assert_eq!(meta.rent_exempt_reserve, rent_exempt_reserve); - assert_eq!(stake.delegation.stake, minimum_delegation); - assert_eq!(stake.delegation.activation_epoch, current_epoch); - assert_eq!(stake.delegation.deactivation_epoch, u64::MAX); - } else { - panic!("Invalid output_accounts[1] data"); - } - - // - // Variations of rescinding the deactivation of `stake_account` - // - let deactivated_stake_accounts = [ - ( - // Failure: insufficient stake in `stake_account` to even delegate normally - { - let mut deactivated_stake_account = output_accounts[0].clone(); - deactivated_stake_account - .checked_add_lamports(minimum_delegation - 1) - .unwrap(); - deactivated_stake_account - }, - Err(StakeError::InsufficientDelegation.into()), - ), - ( - // Failure: `stake_account` holds the "virtual stake" that's cooling now, with the - // real stake now warming up in `uninitialized_stake_account` - { - let mut deactivated_stake_account = output_accounts[0].clone(); - deactivated_stake_account - .checked_add_lamports(minimum_delegation) - .unwrap(); - deactivated_stake_account - }, - Err(StakeError::TooSoonToRedelegate.into()), - ), - ( - // Success: `stake_account` has been replenished with additional lamports to - // fully realize its "virtual stake" - { - let mut deactivated_stake_account = output_accounts[0].clone(); - deactivated_stake_account - .checked_add_lamports(minimum_delegation + rent_exempt_reserve) - .unwrap(); - deactivated_stake_account - }, - Ok(()), - ), - ( - // Failure: `stake_account` has been replenished with 1 lamport less than what's - // necessary to fully realize its "virtual stake" - { - let mut deactivated_stake_account = output_accounts[0].clone(); - deactivated_stake_account - .checked_add_lamports(minimum_delegation + rent_exempt_reserve - 1) - .unwrap(); - deactivated_stake_account - }, - Err(StakeError::TooSoonToRedelegate.into()), - ), - ]; - for (deactivated_stake_account, expected_result) in deactivated_stake_accounts { - #[allow(deprecated)] - process_instruction( - Arc::clone(&feature_set), - &serialize(&StakeInstruction::DelegateStake).unwrap(), - vec![ - (stake_address, deactivated_stake_account), - (vote_address, new_vote_account.clone()), - (authorized_staker, AccountSharedData::default()), - ( - stake_config::id(), - config::create_account(0, &stake_config::Config::default()), - ), - ( - stake_history::id(), - create_account_shared_data_for_test(&stake_history), - ), - (rent::id(), create_account_shared_data_for_test(&rent)), - ( - clock::id(), - create_account_shared_data_for_test(&Clock { - epoch: current_epoch, - ..Clock::default() - }), - ), - ( - epoch_schedule::id(), - create_account_shared_data_for_test(&EpochSchedule::default()), - ), - ], - vec![ - AccountMeta { - pubkey: stake_address, - is_signer: false, - is_writable: true, - }, - AccountMeta { - pubkey: vote_address, - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: clock::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_history::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: stake_config::id(), - is_signer: false, - is_writable: false, - }, - AccountMeta { - pubkey: authorized_staker, - is_signer: true, - is_writable: false, - }, - ], - expected_result, - ); - } - - // - // Success: `uninitialized_stake_account` starts with 42 extra lamports - // - let uninitialized_stake_account_with_extra_lamports = - 
AccountSharedData::new(42 /* lamports */, StakeStateV2::size_of(), &id()); - let output_accounts = process_instruction_redelegate( - &stake_address, - &stake_account, - &authorized_staker, - &new_vote_address, - &new_vote_account, - &uninitialized_stake_address, - &uninitialized_stake_account_with_extra_lamports, - Ok(()), - ); - - assert_eq!(output_accounts[0].lamports(), rent_exempt_reserve); - assert_eq!( - output_accounts[1].lamports(), - minimum_delegation + rent_exempt_reserve + 42 - ); - if let StakeStateV2::Stake(meta, stake, _) = output_accounts[1].deserialize_data().unwrap() - { - assert_eq!(meta.rent_exempt_reserve, rent_exempt_reserve); - assert_eq!(stake.delegation.stake, minimum_delegation + 42); - assert_eq!(stake.delegation.activation_epoch, current_epoch); - assert_eq!(stake.delegation.deactivation_epoch, u64::MAX); - } else { - panic!("Invalid output_accounts[1] data"); - } - - // - // Success: `stake_account` is over-allocated and holds a greater than required `rent_exempt_reserve` - // - let mut stake_account_over_allocated = - prepare_stake_account(0 /*activation_epoch:*/, None); - if let StakeStateV2::Stake(mut meta, stake, stake_flags) = - stake_account_over_allocated.deserialize_data().unwrap() - { - meta.rent_exempt_reserve += 42; - stake_account_over_allocated - .set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) - .unwrap(); - } - stake_account_over_allocated - .checked_add_lamports(42) - .unwrap(); - assert_eq!( - stake_account_over_allocated.lamports(), - (minimum_delegation + rent_exempt_reserve) + (rent_exempt_reserve + 42), - ); - assert_eq!(uninitialized_stake_account.lamports(), 0); - let output_accounts = process_instruction_redelegate( - &stake_address, - &stake_account_over_allocated, - &authorized_staker, - &new_vote_address, - &new_vote_account, - &uninitialized_stake_address, - &uninitialized_stake_account, - Ok(()), - ); - - assert_eq!(output_accounts[0].lamports(), rent_exempt_reserve + 42); - if let StakeStateV2::Stake(meta, _stake, _) = output_accounts[0].deserialize_data().unwrap() - { - assert_eq!(meta.rent_exempt_reserve, rent_exempt_reserve + 42); - } else { - panic!("Invalid output_accounts[0] data"); - } - assert_eq!( - output_accounts[1].lamports(), - minimum_delegation + rent_exempt_reserve, - ); - if let StakeStateV2::Stake(meta, stake, _) = output_accounts[1].deserialize_data().unwrap() - { - assert_eq!(meta.rent_exempt_reserve, rent_exempt_reserve); - assert_eq!(stake.delegation.stake, minimum_delegation); - } else { - panic!("Invalid output_accounts[1] data"); - } - - // - // Failure: `uninitialized_stake_account` with invalid program id - // - let _ = process_instruction_redelegate( - &stake_address, - &stake_account, - &authorized_staker, - &new_vote_address, - &new_vote_account, - &uninitialized_stake_address, - &AccountSharedData::new( - 0, /* lamports */ - StakeStateV2::size_of(), - &Pubkey::new_unique(), // <-- Invalid program id - ), - Err(InstructionError::IncorrectProgramId), - ); - - // - // Failure: `uninitialized_stake_account` with size too small - // - let _ = process_instruction_redelegate( - &stake_address, - &stake_account, - &authorized_staker, - &new_vote_address, - &new_vote_account, - &uninitialized_stake_address, - &AccountSharedData::new(0 /* lamports */, StakeStateV2::size_of() - 1, &id()), // <-- size too small - Err(InstructionError::InvalidAccountData), - ); - - // - // Failure: `uninitialized_stake_account` with size too large - // - let _ = process_instruction_redelegate( - &stake_address, - 
&stake_account, - &authorized_staker, - &new_vote_address, - &new_vote_account, - &uninitialized_stake_address, - &AccountSharedData::new(0 /* lamports */, StakeStateV2::size_of() + 1, &id()), // <-- size too large - Err(InstructionError::InvalidAccountData), - ); - - // - // Failure: `uninitialized_stake_account` with initialized stake account - // - let _ = process_instruction_redelegate( - &stake_address, - &stake_account, - &authorized_staker, - &new_vote_address, - &new_vote_account, - &uninitialized_stake_address, - &stake_account.clone(), // <-- Initialized stake account - Err(InstructionError::AccountAlreadyInitialized), - ); - - // - // Failure: invalid `new_vote_account` - // - let _ = process_instruction_redelegate( - &stake_address, - &stake_account, - &authorized_staker, - &new_vote_address, - &uninitialized_stake_account, // <-- Invalid vote account - &uninitialized_stake_address, - &uninitialized_stake_account, - Err(InstructionError::IncorrectProgramId), - ); - - // - // Failure: invalid `stake_account` - // - let _ = process_instruction_redelegate( - &stake_address, - &uninitialized_stake_account, // <-- Uninitialized stake account - &authorized_staker, - &new_vote_address, - &new_vote_account, - &uninitialized_stake_address, - &uninitialized_stake_account, - Err(InstructionError::InvalidAccountData), - ); - - // - // Failure: stake is inactive, activating or deactivating - // - let inactive_stake_account = prepare_stake_account( - current_epoch + 1, /*activation_epoch*/ - Some(StakeActivationStatus { - effective: 0, - activating: 0, - deactivating: 0, - }), - ); - let _ = process_instruction_redelegate( - &stake_address, - &inactive_stake_account, - &authorized_staker, - &new_vote_address, - &new_vote_account, - &uninitialized_stake_address, - &uninitialized_stake_account, - Err(StakeError::RedelegateTransientOrInactiveStake.into()), - ); - - let activating_stake_account = prepare_stake_account( - current_epoch, /*activation_epoch*/ - Some(StakeActivationStatus { - effective: 0, - activating: minimum_delegation + rent_exempt_reserve, - deactivating: 0, - }), - ); - let _ = process_instruction_redelegate( - &stake_address, - &activating_stake_account, - &authorized_staker, - &new_vote_address, - &new_vote_account, - &uninitialized_stake_address, - &uninitialized_stake_account, - Err(StakeError::RedelegateTransientOrInactiveStake.into()), - ); - - let mut deactivating_stake_account = - prepare_stake_account(0 /*activation_epoch:*/, None); - if let StakeStateV2::Stake(meta, mut stake, _stake_flags) = - deactivating_stake_account.deserialize_data().unwrap() - { - stake.deactivate(current_epoch).unwrap(); - assert_eq!( - StakeActivationStatus { - effective: minimum_delegation + rent_exempt_reserve, - activating: 0, - deactivating: minimum_delegation + rent_exempt_reserve, - }, - stake.delegation.stake_activating_and_deactivating( - current_epoch, - &stake_history, - None - ) - ); - - deactivating_stake_account - .set_state(&StakeStateV2::Stake(meta, stake, StakeFlags::empty())) - .unwrap(); - } - let _ = process_instruction_redelegate( - &stake_address, - &deactivating_stake_account, - &authorized_staker, - &new_vote_address, - &new_vote_account, - &uninitialized_stake_address, - &uninitialized_stake_account, - Err(StakeError::RedelegateTransientOrInactiveStake.into()), - ); - - // - // Failure: `stake_account` has insufficient stake - // (less than `minimum_delegation + rent_exempt_reserve`) - // - let mut stake_account_too_few_lamports = stake_account.clone(); - if let 
StakeStateV2::Stake(meta, mut stake, stake_flags) = - stake_account_too_few_lamports.deserialize_data().unwrap() - { - stake.delegation.stake -= 1; - assert_eq!( - stake.delegation.stake, - minimum_delegation + rent_exempt_reserve - 1 - ); - stake_account_too_few_lamports - .set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) - .unwrap(); - } else { - panic!("Invalid stake_account"); - } - stake_account_too_few_lamports - .checked_sub_lamports(1) - .unwrap(); - assert_eq!( - stake_account_too_few_lamports.lamports(), - minimum_delegation + 2 * rent_exempt_reserve - 1 - ); - - let _ = process_instruction_redelegate( - &stake_address, - &stake_account_too_few_lamports, - &authorized_staker, - &new_vote_address, - &new_vote_account, - &uninitialized_stake_address, - &uninitialized_stake_account, - Err(StakeError::InsufficientDelegation.into()), - ); - - // - // Failure: redelegate to same vote address - // - let _ = process_instruction_redelegate( - &stake_address, - &stake_account, - &authorized_staker, - &vote_address, // <-- Same vote address - &new_vote_account, - &uninitialized_stake_address, - &uninitialized_stake_account, - Err(StakeError::RedelegateToSameVoteAccount.into()), - ); - } - #[test] fn test_stake_process_instruction_with_epoch_rewards_active() { let feature_set = feature_set_all_enabled(); @@ -8080,16 +7440,6 @@ mod tests { ), Err(StakeError::EpochRewardsActive.into()), ); - process_instruction_as_one_arg( - Arc::clone(&feature_set), - &instruction::redelegate( - &Pubkey::new_unique(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - &Pubkey::new_unique(), - )[2], - Err(StakeError::EpochRewardsActive.into()), - ); process_instruction_as_one_arg( Arc::clone(&feature_set), &instruction::move_stake( diff --git a/programs/stake/src/stake_state.rs b/programs/stake/src/stake_state.rs index 2d3388d70d281d..6bb26e288db8f2 100644 --- a/programs/stake/src/stake_state.rs +++ b/programs/stake/src/stake_state.rs @@ -9,12 +9,13 @@ )] pub use solana_sdk::stake::state::*; use { - solana_program_runtime::{ic_msg, invoke_context::InvokeContext}, + solana_log_collector::ic_msg, + solana_program_runtime::invoke_context::InvokeContext, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, account_utils::StateMut, clock::{Clock, Epoch}, - feature_set::{self, FeatureSet}, + feature_set::FeatureSet, instruction::{checked_add, InstructionError}, pubkey::Pubkey, rent::Rent, @@ -93,25 +94,11 @@ fn redelegate_stake( let new_rate_activation_epoch = new_warmup_cooldown_rate_epoch(invoke_context); // If stake is currently active: if stake.stake(clock.epoch, stake_history, new_rate_activation_epoch) != 0 { - let stake_lamports_ok = if invoke_context - .get_feature_set() - .is_active(&feature_set::stake_redelegate_instruction::id()) - { - // When a stake account is redelegated, the delegated lamports from the source stake - // account are transferred to a new stake account. Do not permit the deactivation of - // the source stake account to be rescinded, by more generally requiring the delegation - // be configured with the expected amount of stake lamports before rescinding. 
- stake_lamports >= stake.delegation.stake - } else { - true - }; - // If pubkey of new voter is the same as current, // and we are scheduled to start deactivating this epoch, // we rescind deactivation if stake.delegation.voter_pubkey == *voter_pubkey && clock.epoch == stake.delegation.deactivation_epoch - && stake_lamports_ok { stake.delegation.deactivation_epoch = u64::MAX; return Ok(()); @@ -374,56 +361,15 @@ pub fn delegate( } } -fn deactivate_stake( - invoke_context: &InvokeContext, - stake: &mut Stake, - stake_flags: &mut StakeFlags, - epoch: Epoch, -) -> Result<(), InstructionError> { - if invoke_context - .get_feature_set() - .is_active(&feature_set::stake_redelegate_instruction::id()) - { - if stake_flags.contains(StakeFlags::MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED) { - let stake_history = invoke_context.get_sysvar_cache().get_stake_history()?; - // when MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED flag is set on stake_flags, - // deactivation is only permitted when the stake delegation activating amount is zero. - let status = stake.delegation.stake_activating_and_deactivating( - epoch, - stake_history.as_ref(), - new_warmup_cooldown_rate_epoch(invoke_context), - ); - if status.activating != 0 { - Err(InstructionError::from( - StakeError::RedelegatedStakeMustFullyActivateBeforeDeactivationIsPermitted, - )) - } else { - stake.deactivate(epoch)?; - // After deactivation, need to clear `MustFullyActivateBeforeDeactivationIsPermitted` flag if any. - // So that future activation and deactivation are not subject to that restriction. - stake_flags - .remove(StakeFlags::MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED); - Ok(()) - } - } else { - stake.deactivate(epoch)?; - Ok(()) - } - } else { - stake.deactivate(epoch)?; - Ok(()) - } -} - pub fn deactivate( - invoke_context: &InvokeContext, + _invoke_context: &InvokeContext, stake_account: &mut BorrowedAccount, clock: &Clock, signers: &HashSet<Pubkey>, ) -> Result<(), InstructionError> { - if let StakeStateV2::Stake(meta, mut stake, mut stake_flags) = stake_account.get_state()? { + if let StakeStateV2::Stake(meta, mut stake, stake_flags) = stake_account.get_state()? 
{ meta.authorized.check(signers, StakeAuthorize::Staker)?; - deactivate_stake(invoke_context, &mut stake, &mut stake_flags, clock.epoch)?; + stake.deactivate(clock.epoch)?; stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) } else { Err(InstructionError::InvalidAccountData) } @@ -668,123 +614,6 @@ pub fn merge( Ok(()) } -pub fn redelegate( - invoke_context: &InvokeContext, - transaction_context: &TransactionContext, - instruction_context: &InstructionContext, - stake_account: &mut BorrowedAccount, - uninitialized_stake_account_index: IndexOfAccount, - vote_account_index: IndexOfAccount, - signers: &HashSet<Pubkey>, -) -> Result<(), InstructionError> { - let clock = invoke_context.get_sysvar_cache().get_clock()?; - - // ensure `uninitialized_stake_account_index` is in the uninitialized state - let mut uninitialized_stake_account = instruction_context - .try_borrow_instruction_account(transaction_context, uninitialized_stake_account_index)?; - if *uninitialized_stake_account.get_owner() != id() { - ic_msg!( - invoke_context, - "expected uninitialized stake account owner to be {}, not {}", - id(), - *uninitialized_stake_account.get_owner() - ); - return Err(InstructionError::IncorrectProgramId); - } - if uninitialized_stake_account.get_data().len() != StakeStateV2::size_of() { - ic_msg!( - invoke_context, - "expected uninitialized stake account data len to be {}, not {}", - StakeStateV2::size_of(), - uninitialized_stake_account.get_data().len() - ); - return Err(InstructionError::InvalidAccountData); - } - if !matches!( - uninitialized_stake_account.get_state()?, - StakeStateV2::Uninitialized - ) { - ic_msg!( - invoke_context, - "expected uninitialized stake account to be uninitialized", - ); - return Err(InstructionError::AccountAlreadyInitialized); - } - - // validate the provided vote account - let vote_account = instruction_context - .try_borrow_instruction_account(transaction_context, vote_account_index)?; - if *vote_account.get_owner() != solana_vote_program::id() { - ic_msg!( - invoke_context, - "expected vote account owner to be {}, not {}", - solana_vote_program::id(), - *vote_account.get_owner() - ); - return Err(InstructionError::IncorrectProgramId); - } - let vote_pubkey = *vote_account.get_key(); - let vote_state = vote_account.get_state::<VoteStateVersions>()?; - - let (stake_meta, effective_stake) = - if let StakeStateV2::Stake(meta, stake, _stake_flags) = stake_account.get_state()? { - let status = get_stake_status(invoke_context, &stake, &clock)?; - if status.effective == 0 || status.activating != 0 || status.deactivating != 0 { - ic_msg!(invoke_context, "stake is not active"); - return Err(StakeError::RedelegateTransientOrInactiveStake.into()); - } - - // Deny redelegating to the same vote account. 
This is nonsensical and could be used to - // grief the global stake warm-up/cool-down rate - if stake.delegation.voter_pubkey == vote_pubkey { - ic_msg!( - invoke_context, - "redelegating to the same vote account not permitted" - ); - return Err(StakeError::RedelegateToSameVoteAccount.into()); - } - - (meta, status.effective) - } else { - ic_msg!(invoke_context, "invalid stake account data",); - return Err(InstructionError::InvalidAccountData); - }; - - // deactivate `stake_account` - // - // Note: This function also ensures `signers` contains the `StakeAuthorize::Staker` - deactivate(invoke_context, stake_account, &clock, signers)?; - - // transfer the effective stake to the uninitialized stake account - stake_account.checked_sub_lamports(effective_stake)?; - uninitialized_stake_account.checked_add_lamports(effective_stake)?; - - // initialize and schedule `uninitialized_stake_account` for activation - let sysvar_cache = invoke_context.get_sysvar_cache(); - let rent = sysvar_cache.get_rent()?; - let mut uninitialized_stake_meta = stake_meta; - uninitialized_stake_meta.rent_exempt_reserve = - rent.minimum_balance(uninitialized_stake_account.get_data().len()); - - let ValidatedDelegatedInfo { stake_amount } = validate_delegated_amount( - &uninitialized_stake_account, - &uninitialized_stake_meta, - invoke_context.get_feature_set(), - )?; - uninitialized_stake_account.set_state(&StakeStateV2::Stake( - uninitialized_stake_meta, - new_stake( - stake_amount, - &vote_pubkey, - &vote_state.convert_to_current(), - clock.epoch, - ), - StakeFlags::MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED, - ))?; - - Ok(()) -} - pub fn move_stake( invoke_context: &InvokeContext, transaction_context: &TransactionContext, @@ -861,8 +690,6 @@ pub fn move_stake( source_stake.credits_observed, )?; - // StakeFlags::empty() is valid here because the only existing stake flag, - // MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED, does not apply to active stakes destination_account.set_state(&StakeStateV2::Stake( destination_meta, destination_stake, @@ -880,8 +707,6 @@ pub fn move_stake( let mut destination_stake = source_stake; destination_stake.delegation.stake = lamports; - // StakeFlags::empty() is valid here because the only existing stake flag, - // MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED, is cleared when a stake is activated destination_account.set_state(&StakeStateV2::Stake( destination_meta, destination_stake, @@ -898,8 +723,6 @@ pub fn move_stake( } else { source_stake.delegation.stake = source_final_stake; - // StakeFlags::empty() is valid here because the only existing stake flag, - // MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED, does not apply to active stakes source_account.set_state(&StakeStateV2::Stake( source_meta, source_stake, @@ -1078,7 +901,6 @@ pub fn withdraw( } pub(crate) fn deactivate_delinquent( - invoke_context: &InvokeContext, transaction_context: &TransactionContext, instruction_context: &InstructionContext, stake_account: &mut BorrowedAccount, @@ -1112,7 +934,7 @@ pub(crate) fn deactivate_delinquent( return Err(StakeError::InsufficientReferenceVotes.into()); } - if let StakeStateV2::Stake(meta, mut stake, mut stake_flags) = stake_account.get_state()? { + if let StakeStateV2::Stake(meta, mut stake, stake_flags) = stake_account.get_state()? 
{ if stake.delegation.voter_pubkey != *delinquent_vote_account_pubkey { return Err(StakeError::VoteAddressMismatch.into()); } @@ -1120,7 +942,7 @@ pub(crate) fn deactivate_delinquent( // Deactivate the stake account if its delegated vote account has never voted or has not // voted in the last `MINIMUM_DELINQUENT_EPOCHS_FOR_DEACTIVATION` if eligible_for_deactivate_delinquent(&delinquent_vote_state.epoch_credits, current_epoch) { - deactivate_stake(invoke_context, &mut stake, &mut stake_flags, current_epoch)?; + stake.deactivate(current_epoch)?; stake_account.set_state(&StakeStateV2::Stake(meta, stake, stake_flags)) } else { Err(StakeError::MinimumDelinquentEpochsForDeactivationNotMet.into()) diff --git a/programs/system/Cargo.toml b/programs/system/Cargo.toml index 1e5643587c8f88..a7e464fe5c3f9a 100644 --- a/programs/system/Cargo.toml +++ b/programs/system/Cargo.toml @@ -14,6 +14,7 @@ bincode = { workspace = true } log = { workspace = true } serde = { workspace = true } serde_derive = { workspace = true } +solana-log-collector = { workspace = true } solana-program-runtime = { workspace = true } solana-sdk = { workspace = true } solana-type-overrides = { workspace = true } diff --git a/programs/system/src/system_instruction.rs b/programs/system/src/system_instruction.rs index 57353e73d7e21e..c75f8d7d0c9f86 100644 --- a/programs/system/src/system_instruction.rs +++ b/programs/system/src/system_instruction.rs @@ -1,5 +1,6 @@ use { - solana_program_runtime::{ic_msg, invoke_context::InvokeContext}, + solana_log_collector::ic_msg, + solana_program_runtime::invoke_context::InvokeContext, solana_sdk::{ instruction::{checked_add, InstructionError}, nonce::{ diff --git a/programs/system/src/system_processor.rs b/programs/system/src/system_processor.rs index d455fb84ba5c12..528f4dc222674a 100644 --- a/programs/system/src/system_processor.rs +++ b/programs/system/src/system_processor.rs @@ -4,8 +4,9 @@ use { withdraw_nonce_account, }, log::*, + solana_log_collector::ic_msg, solana_program_runtime::{ - declare_process_instruction, ic_msg, invoke_context::InvokeContext, + declare_process_instruction, invoke_context::InvokeContext, sysvar_cache::get_sysvar_with_account_check, }, solana_sdk::{ @@ -542,7 +543,10 @@ declare_process_instruction!(Entrypoint, DEFAULT_COMPUTE_UNITS, |invoke_context| mod tests { #[allow(deprecated)] use solana_sdk::{ - account::{self, Account, AccountSharedData, ReadableAccount}, + account::{ + self, create_account_shared_data_with_fields, to_account, Account, AccountSharedData, + ReadableAccount, DUMMY_INHERITABLE_ACCOUNT_FIELDS, + }, fee_calculator::FeeCalculator, hash::{hash, Hash}, instruction::{AccountMeta, Instruction, InstructionError}, @@ -552,8 +556,12 @@ mod tests { Data as NonceData, DurableNonce, State as NonceState, Versions as NonceVersions, }, }, - nonce_account, recent_blockhashes_account, system_instruction, system_program, - sysvar::{self, recent_blockhashes::IterItem, rent::Rent}, + nonce_account, system_instruction, system_program, + sysvar::{ + self, + recent_blockhashes::{IntoIterSorted, IterItem, RecentBlockhashes, MAX_ENTRIES}, + rent::Rent, + }, }; use { super::*, @@ -562,6 +570,7 @@ mod tests { solana_program_runtime::{ invoke_context::mock_process_instruction, with_mock_invoke_context, }, + std::collections::BinaryHeap, }; impl From<Pubkey> for Address { @@ -595,11 +604,30 @@ fn create_default_account() -> AccountSharedData { AccountSharedData::new(0, 0, &Pubkey::new_unique()) } + #[allow(deprecated)] + fn 
create_recent_blockhashes_account_for_test<'a, I>( + recent_blockhash_iter: I, + ) -> AccountSharedData + where + I: IntoIterator<Item = IterItem<'a>>, + { + let mut account = create_account_shared_data_with_fields::<RecentBlockhashes>( + &RecentBlockhashes::default(), + DUMMY_INHERITABLE_ACCOUNT_FIELDS, + ); + let sorted = BinaryHeap::from_iter(recent_blockhash_iter); + let sorted_iter = IntoIterSorted::new(sorted); + let recent_blockhash_iter = sorted_iter.take(MAX_ENTRIES); + let recent_blockhashes: RecentBlockhashes = recent_blockhash_iter.collect(); + to_account(&recent_blockhashes, &mut account); + account + } fn create_default_recent_blockhashes_account() -> AccountSharedData { #[allow(deprecated)] - recent_blockhashes_account::create_account_with_data_for_test( - vec![IterItem(0u64, &Hash::default(), 0); sysvar::recent_blockhashes::MAX_ENTRIES], - ) + create_recent_blockhashes_account_for_test(vec![ + IterItem(0u64, &Hash::default(), 0); + sysvar::recent_blockhashes::MAX_ENTRIES + ]) } fn create_default_rent_account() -> AccountSharedData { account::create_account_shared_data_for_test(&Rent::free()) @@ -1551,10 +1579,10 @@ mod tests { ); let blockhash = hash(&serialize(&0).unwrap()); #[allow(deprecated)] - let new_recent_blockhashes_account = - solana_sdk::recent_blockhashes_account::create_account_with_data_for_test( - vec![IterItem(0u64, &blockhash, 0); sysvar::recent_blockhashes::MAX_ENTRIES], - ); + let new_recent_blockhashes_account = create_recent_blockhashes_account_for_test(vec![ + IterItem(0u64, &blockhash, 0); + sysvar::recent_blockhashes::MAX_ENTRIES + ]); mock_process_instruction( &system_program::id(), Vec::new(), @@ -1837,8 +1865,7 @@ mod tests { #[allow(deprecated)] let blockhash_id = sysvar::recent_blockhashes::id(); #[allow(deprecated)] - let new_recent_blockhashes_account = - solana_sdk::recent_blockhashes_account::create_account_with_data_for_test(vec![]); + let new_recent_blockhashes_account = create_recent_blockhashes_account_for_test(vec![]); process_instruction( &serialize(&SystemInstruction::InitializeNonceAccount(nonce_address)).unwrap(), vec![ @@ -1900,8 +1927,7 @@ mod tests { Ok(()), ); #[allow(deprecated)] - let new_recent_blockhashes_account = - solana_sdk::recent_blockhashes_account::create_account_with_data_for_test(vec![]); + let new_recent_blockhashes_account = create_recent_blockhashes_account_for_test(vec![]); mock_process_instruction( &system_program::id(), Vec::new(), diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index 8d8ec0cf24b255..817441da321a75 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -31,7 +31,7 @@ use { #[cfg_attr( feature = "frozen-abi", derive(AbiExample, AbiEnumVisitor), - frozen_abi(digest = "EcS3xgfomytEAQ1eVd8R76ZejwyHp2Ed8dHqQWh6zi5v") + frozen_abi(digest = "3R2hRL3FM6jovbYubq2UWeiVDEVzrhH6M1ihoCPZWLsk") )] #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] pub enum VoteTransaction { diff --git a/programs/zk-elgamal-proof/Cargo.toml b/programs/zk-elgamal-proof/Cargo.toml index 059f5481e91460..e3dbcde0d5b18a 100644 --- a/programs/zk-elgamal-proof/Cargo.toml +++ b/programs/zk-elgamal-proof/Cargo.toml @@ -12,6 +12,7 @@ edition = { workspace = true } bytemuck = { workspace = true } num-derive = { workspace = true } num-traits = { workspace = true } +solana-log-collector = { workspace = true } solana-program-runtime = { workspace = true } solana-sdk = { workspace = true } solana-zk-sdk = { workspace = true }
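Several Cargo.toml files in this diff gain a solana-log-collector dependency because the ic_msg! macro now lives there rather than in solana-program-runtime. A call-site sketch; the macro invocation itself is assumed unchanged, with only the import path moving:

    use solana_log_collector::ic_msg; // was: solana_program_runtime::ic_msg
    use solana_program_runtime::invoke_context::InvokeContext;

    fn log_example(invoke_context: &InvokeContext) {
        // logs through the transaction's log collector, as before
        ic_msg!(invoke_context, "processed {} lamports", 42);
    }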
diff --git a/programs/zk-elgamal-proof/src/lib.rs b/programs/zk-elgamal-proof/src/lib.rs index 2514ab0c0d655d..2516cbbadf0d08 100644 --- a/programs/zk-elgamal-proof/src/lib.rs +++ b/programs/zk-elgamal-proof/src/lib.rs @@ -2,7 +2,8 @@ use { bytemuck::Pod, - solana_program_runtime::{declare_process_instruction, ic_msg, invoke_context::InvokeContext}, + solana_log_collector::ic_msg, + solana_program_runtime::{declare_process_instruction, invoke_context::InvokeContext}, solana_sdk::{instruction::InstructionError, system_program}, solana_zk_sdk::zk_elgamal_proof_program::{ id, diff --git a/programs/zk-token-proof-tests/tests/process_transaction.rs b/programs/zk-token-proof-tests/tests/process_transaction.rs index 4576421130af61..71d1761d2222e0 100644 --- a/programs/zk-token-proof-tests/tests/process_transaction.rs +++ b/programs/zk-token-proof-tests/tests/process_transaction.rs @@ -1717,7 +1717,7 @@ impl WithMaxComputeUnitLimit for Vec<Instruction> { fn with_max_compute_unit_limit(mut self) -> Self { self.push( solana_sdk::compute_budget::ComputeBudgetInstruction::set_compute_unit_limit( - solana_compute_budget::compute_budget_processor::MAX_COMPUTE_UNIT_LIMIT, + solana_compute_budget::compute_budget_limits::MAX_COMPUTE_UNIT_LIMIT, ), ); self diff --git a/programs/zk-token-proof/Cargo.toml b/programs/zk-token-proof/Cargo.toml index 29f53ec069209f..afbda6f1b8161c 100644 --- a/programs/zk-token-proof/Cargo.toml +++ b/programs/zk-token-proof/Cargo.toml @@ -12,6 +12,7 @@ edition = { workspace = true } bytemuck = { workspace = true } num-derive = { workspace = true } num-traits = { workspace = true } +solana-log-collector = { workspace = true } solana-program-runtime = { workspace = true } solana-sdk = { workspace = true } solana-zk-token-sdk = { workspace = true } diff --git a/programs/zk-token-proof/src/lib.rs b/programs/zk-token-proof/src/lib.rs index 32b7b69ed00ae4..f1fc5bca28c1fc 100644 --- a/programs/zk-token-proof/src/lib.rs +++ b/programs/zk-token-proof/src/lib.rs @@ -2,7 +2,8 @@ use { bytemuck::Pod, - solana_program_runtime::{declare_process_instruction, ic_msg, invoke_context::InvokeContext}, + solana_log_collector::ic_msg, + solana_program_runtime::{declare_process_instruction, invoke_context::InvokeContext}, solana_sdk::{ feature_set, instruction::{InstructionError, TRANSACTION_LEVEL_STACK_HEIGHT},
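The process_transaction.rs hunk above tracks a module rename: MAX_COMPUTE_UNIT_LIMIT moved from compute_budget_processor to compute_budget_limits. Usage is otherwise unchanged; a sketch assuming solana-sdk and solana-compute-budget as dependencies:

    use solana_sdk::{compute_budget::ComputeBudgetInstruction, instruction::Instruction};

    // Request the maximum compute unit limit; only the constant's module path
    // changed in this diff.
    fn max_cu_limit_ix() -> Instruction {
        ComputeBudgetInstruction::set_compute_unit_limit(
            solana_compute_budget::compute_budget_limits::MAX_COMPUTE_UNIT_LIMIT,
        )
    }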
diff --git a/pubsub-client/src/nonblocking/pubsub_client.rs b/pubsub-client/src/nonblocking/pubsub_client.rs index b79e91f681b97f..44663b3372cb2c 100644 --- a/pubsub-client/src/nonblocking/pubsub_client.rs +++ b/pubsub-client/src/nonblocking/pubsub_client.rs @@ -183,10 +183,9 @@ use { RpcTransactionLogsFilter, }, error_object::RpcErrorObject, - filter::maybe_map_filters, response::{ Response as RpcResponse, RpcBlockUpdate, RpcKeyedAccount, RpcLogsResponse, - RpcSignatureResult, RpcVersionInfo, RpcVote, SlotInfo, SlotUpdate, + RpcSignatureResult, RpcVote, SlotInfo, SlotUpdate, }, }, solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature}, @@ -194,7 +193,7 @@ use { thiserror::Error, tokio::{ net::TcpStream, - sync::{mpsc, oneshot, RwLock}, + sync::{mpsc, oneshot}, task::JoinHandle, time::{sleep, Duration}, }, @@ -265,9 +264,8 @@ type RequestMsg = ( #[derive(Debug)] pub struct PubsubClient { subscribe_sender: mpsc::UnboundedSender<SubscribeRequestMsg>, - request_sender: mpsc::UnboundedSender<RequestMsg>, + _request_sender: mpsc::UnboundedSender<RequestMsg>, shutdown_sender: oneshot::Sender<()>, - node_version: RwLock<Option<semver::Version>>, ws: JoinHandle<PubsubClientResult>, } impl PubsubClient { pub async fn new(url: &str) -> PubsubClientResult<Self> { .map_err(PubsubClientError::ConnectionError)?; let (subscribe_sender, subscribe_receiver) = mpsc::unbounded_channel(); - let (request_sender, request_receiver) = mpsc::unbounded_channel(); + let (_request_sender, request_receiver) = mpsc::unbounded_channel(); let (shutdown_sender, shutdown_receiver) = oneshot::channel(); + #[allow(clippy::used_underscore_binding)] Ok(Self { subscribe_sender, - request_sender, + _request_sender, shutdown_sender, - node_version: RwLock::new(None), ws: tokio::spawn(PubsubClient::run_ws( ws, subscribe_receiver, @@ -301,43 +299,11 @@ self.ws.await.unwrap() // WS future should not be cancelled or panicked } - pub async fn set_node_version(&self, version: semver::Version) -> Result<(), ()> { - let mut w_node_version = self.node_version.write().await; - *w_node_version = Some(version); + #[deprecated(since = "2.0.2", note = "PubsubClient::node_version is no longer used")] + pub async fn set_node_version(&self, _version: semver::Version) -> Result<(), ()> { Ok(()) } - async fn get_node_version(&self) -> PubsubClientResult<semver::Version> { - let r_node_version = self.node_version.read().await; - if let Some(version) = &*r_node_version { - Ok(version.clone()) - } else { - drop(r_node_version); - let mut w_node_version = self.node_version.write().await; - let node_version = self.get_version().await?; - *w_node_version = Some(node_version.clone()); - Ok(node_version) - } - } - - async fn get_version(&self) -> PubsubClientResult<semver::Version> { - let (response_sender, response_receiver) = oneshot::channel(); - self.request_sender - .send(("getVersion".to_string(), Value::Null, response_sender)) - .map_err(|err| PubsubClientError::ConnectionClosed(err.to_string()))?; - let result = response_receiver - .await - .map_err(|err| PubsubClientError::ConnectionClosed(err.to_string()))??; - let node_version: RpcVersionInfo = serde_json::from_value(result)?; - let node_version = semver::Version::parse(&node_version.solana_core).map_err(|e| { - PubsubClientError::RequestFailed { - reason: format!("failed to parse cluster version: {e}"), - message: "getVersion".to_string(), - } - })?; - Ok(node_version) - } - async fn subscribe<'a, T>(&self, operation: &str, params: Value) -> SubscribeResult<'a, T> where T: DeserializeOwned + Send + 'a, @@ -426,22 +392,8 @@ pub async fn program_subscribe( &self, pubkey: &Pubkey, - mut config: Option<RpcProgramAccountsConfig>, + config: Option<RpcProgramAccountsConfig>, ) -> SubscribeResult<'_, RpcResponse<RpcKeyedAccount>> { - if let Some(ref mut config) = config { - if let Some(ref mut filters) = config.filters { - let node_version = self.get_node_version().await.ok(); - // If node does not support the pubsub `getVersion` method, assume version is old - // and filters should be mapped (node_version.is_none()). - maybe_map_filters(node_version, filters).map_err(|e| { - PubsubClientError::RequestFailed { - reason: e, - message: "maybe_map_filters".to_string(), - } - })?; - } - } - let params = json!([pubkey.to_string(), config]); self.subscribe("program", params).await }
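With the version probe gone, the nonblocking client now forwards program-subscription filters to the node exactly as given. A hedged usage sketch; the URL, offset, and filter bytes are placeholders:

    use solana_pubsub_client::nonblocking::pubsub_client::PubsubClient;
    use solana_rpc_client_api::{
        config::RpcProgramAccountsConfig,
        filter::{Memcmp, MemcmpEncodedBytes, RpcFilterType},
    };
    use solana_sdk::pubkey::Pubkey;

    async fn subscribe_example(program_id: &Pubkey) -> Result<(), Box<dyn std::error::Error>> {
        let client = PubsubClient::new("wss://example.invalid/").await?;
        let config = RpcProgramAccountsConfig {
            // base64 filters are sent as-is; there is no longer any client-side
            // remapping for old node versions
            filters: Some(vec![RpcFilterType::Memcmp(Memcmp::new(
                0,
                MemcmpEncodedBytes::Base64("AAAA".to_string()),
            ))]),
            ..Default::default()
        };
        let (_updates, _unsubscribe) = client.program_subscribe(program_id, Some(config)).await?;
        Ok(())
    }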
diff --git a/pubsub-client/src/pubsub_client.rs b/pubsub-client/src/pubsub_client.rs index 70769619db1f4d..5247bdb8b9e263 100644 --- a/pubsub-client/src/pubsub_client.rs +++ b/pubsub-client/src/pubsub_client.rs @@ -103,7 +103,6 @@ use { RpcProgramAccountsConfig, RpcSignatureSubscribeConfig, RpcTransactionLogsConfig, RpcTransactionLogsFilter, }, - filter, response::{ Response as RpcResponse, RpcBlockUpdate, RpcKeyedAccount, RpcLogsResponse, RpcSignatureResult, RpcVote, SlotInfo, SlotUpdate, @@ -207,35 +206,6 @@ where .map_err(|err| err.into()) } - fn get_version( - writable_socket: &Arc<RwLock<WebSocket<MaybeTlsStream<TcpStream>>>>, - ) -> Result<semver::Version, PubsubClientError> { - writable_socket.write().unwrap().send(Message::Text( - json!({ - "jsonrpc":"2.0","id":1,"method":"getVersion", - }) - .to_string(), - ))?; - let message = writable_socket.write().unwrap().read()?; - let message_text = &message.into_text()?; - - if let Ok(json_msg) = serde_json::from_str::<Map<String, Value>>(message_text) { - if let Some(Object(version_map)) = json_msg.get("result") { - if let Some(node_version) = version_map.get("solana-core") { - if let Some(node_version) = node_version.as_str() { - if let Ok(parsed) = semver::Version::parse(node_version) { - return Ok(parsed); - } - } - } - } - } - - Err(PubsubClientError::UnexpectedGetVersionResponse(format!( - "msg={message_text}" - ))) - } - fn read_message( writable_socket: &Arc<RwLock<WebSocket<MaybeTlsStream<TcpStream>>>>, ) -> Result<Option<Message>, PubsubClientError> { @@ -523,7 +493,7 @@ impl PubsubClient { pub fn program_subscribe( url: &str, pubkey: &Pubkey, - mut config: Option<RpcProgramAccountsConfig>, + config: Option<RpcProgramAccountsConfig>, ) -> Result<ProgramSubscription, PubsubClientError> { let url = Url::parse(url)?; let socket = connect_with_retry(url)?; @@ -534,16 +504,6 @@ let exit = Arc::new(AtomicBool::new(false)); let exit_clone = exit.clone(); - if let Some(ref mut config) = config { - if let Some(ref mut filters) = config.filters { - let node_version = PubsubProgramClientSubscription::get_version(&socket_clone).ok(); - // If node does not support the pubsub `getVersion` method, assume version is old - // and filters should be mapped (node_version.is_none()). 
- filter::maybe_map_filters(node_version, filters) - .map_err(PubsubClientError::RequestError)?; - } - } - let body = json!({ "jsonrpc":"2.0", "id":1, diff --git a/rpc-client-api/Cargo.toml b/rpc-client-api/Cargo.toml index c8d1eaad8b7959..22a883244c709e 100644 --- a/rpc-client-api/Cargo.toml +++ b/rpc-client-api/Cargo.toml @@ -28,6 +28,7 @@ solana-version = { workspace = true } thiserror = { workspace = true } [dev-dependencies] +const_format = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/rpc-client-api/src/filter.rs b/rpc-client-api/src/filter.rs index 9af8cc0784c1ff..4c65f4249e3aed 100644 --- a/rpc-client-api/src/filter.rs +++ b/rpc-client-api/src/filter.rs @@ -1,6 +1,6 @@ -#![allow(deprecated)] use { - crate::version_req::VersionReq, + base64::{prelude::BASE64_STANDARD, Engine}, + serde::Deserialize, solana_inline_spl::{token::GenericTokenAccount, token_2022::Account}, solana_sdk::account::{AccountSharedData, ReadableAccount}, std::borrow::Cow, @@ -24,55 +24,36 @@ impl RpcFilterType { match self { RpcFilterType::DataSize(_) => Ok(()), RpcFilterType::Memcmp(compare) => { - let encoding = compare.encoding.as_ref().unwrap_or(&MemcmpEncoding::Binary); - match encoding { - MemcmpEncoding::Binary => { - use MemcmpEncodedBytes::*; - match &compare.bytes { - // DEPRECATED - Binary(bytes) => { - if bytes.len() > MAX_DATA_BASE58_SIZE { - return Err(RpcFilterError::Base58DataTooLarge); - } - let bytes = bs58::decode(&bytes) - .into_vec() - .map_err(RpcFilterError::DecodeError)?; - if bytes.len() > MAX_DATA_SIZE { - Err(RpcFilterError::Base58DataTooLarge) - } else { - Ok(()) - } - } - Base58(bytes) => { - if bytes.len() > MAX_DATA_BASE58_SIZE { - return Err(RpcFilterError::DataTooLarge); - } - let bytes = bs58::decode(&bytes).into_vec()?; - if bytes.len() > MAX_DATA_SIZE { - Err(RpcFilterError::DataTooLarge) - } else { - Ok(()) - } - } - Base64(bytes) => { - if bytes.len() > MAX_DATA_BASE64_SIZE { - return Err(RpcFilterError::DataTooLarge); - } - let bytes = base64::decode(bytes)?; - if bytes.len() > MAX_DATA_SIZE { - Err(RpcFilterError::DataTooLarge) - } else { - Ok(()) - } - } - Bytes(bytes) => { - if bytes.len() > MAX_DATA_SIZE { - return Err(RpcFilterError::DataTooLarge); - } - Ok(()) - } + use MemcmpEncodedBytes::*; + match &compare.bytes { + Base58(bytes) => { + if bytes.len() > MAX_DATA_BASE58_SIZE { + return Err(RpcFilterError::DataTooLarge); + } + let bytes = bs58::decode(&bytes).into_vec()?; + if bytes.len() > MAX_DATA_SIZE { + Err(RpcFilterError::DataTooLarge) + } else { + Ok(()) + } + } + Base64(bytes) => { + if bytes.len() > MAX_DATA_BASE64_SIZE { + return Err(RpcFilterError::DataTooLarge); + } + let bytes = BASE64_STANDARD.decode(bytes)?; + if bytes.len() > MAX_DATA_SIZE { + Err(RpcFilterError::DataTooLarge) + } else { + Ok(()) } } + Bytes(bytes) => { + if bytes.len() > MAX_DATA_SIZE { + return Err(RpcFilterError::DataTooLarge); + } + Ok(()) + } } } RpcFilterType::TokenAccountState => Ok(()), @@ -96,65 +77,69 @@ impl RpcFilterType { pub enum RpcFilterError { #[error("encoded binary data should be less than 129 bytes")] DataTooLarge, - #[deprecated( - since = "1.8.1", - note = "Error for MemcmpEncodedBytes::Binary which is deprecated" - )] - #[error("encoded binary (base 58) data should be less than 129 bytes")] - Base58DataTooLarge, - #[deprecated( - since = "1.8.1", - note = "Error for MemcmpEncodedBytes::Binary which is deprecated" - )] - #[error("bs58 decode error")] - DecodeError(bs58::decode::Error), #[error("base58 decode 
error")] Base58DecodeError(#[from] bs58::decode::Error), #[error("base64 decode error")] Base64DecodeError(#[from] base64::DecodeError), } -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum MemcmpEncoding { - Binary, -} - -#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[serde(rename_all = "camelCase", untagged)] +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize)] +#[serde(rename_all = "camelCase", tag = "encoding", content = "bytes")] pub enum MemcmpEncodedBytes { - #[deprecated( - since = "1.8.1", - note = "Please use MemcmpEncodedBytes::Base58 instead" - )] - Binary(String), Base58(String), Base64(String), Bytes(Vec), } +impl<'de> Deserialize<'de> for MemcmpEncodedBytes { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + #[derive(Deserialize)] + #[serde(untagged)] + enum DataType { + Encoded(String), + Raw(Vec), + } + + #[derive(Deserialize)] + #[serde(rename_all = "camelCase")] + enum RpcMemcmpEncoding { + Base58, + Base64, + Bytes, + } + + #[derive(Deserialize)] + struct RpcMemcmpInner { + bytes: DataType, + encoding: Option, + } + + let data = RpcMemcmpInner::deserialize(deserializer)?; + + let memcmp_encoded_bytes = match data.bytes { + DataType::Encoded(bytes) => match data.encoding.unwrap_or(RpcMemcmpEncoding::Base58) { + RpcMemcmpEncoding::Base58 | RpcMemcmpEncoding::Bytes => { + MemcmpEncodedBytes::Base58(bytes) + } + RpcMemcmpEncoding::Base64 => MemcmpEncodedBytes::Base64(bytes), + }, + DataType::Raw(bytes) => MemcmpEncodedBytes::Bytes(bytes), + }; + + Ok(memcmp_encoded_bytes) + } +} + #[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[serde(into = "RpcMemcmp", from = "RpcMemcmp")] pub struct Memcmp { /// Data offset to begin match - #[deprecated( - since = "1.15.0", - note = "Field will be made private in future. Please use a constructor method instead." - )] - pub offset: usize, - /// Bytes, encoded with specified encoding, or default Binary - #[deprecated( - since = "1.15.0", - note = "Field will be made private in future. Please use a constructor method instead." - )] - pub bytes: MemcmpEncodedBytes, - /// Optional encoding specification - #[deprecated( - since = "1.11.2", - note = "Field has no server-side effect. Specify encoding with `MemcmpEncodedBytes` variant instead. \ - Field will be made private in future. Please use a constructor method instead." 
-    )]
-    pub encoding: Option<MemcmpEncoding>,
+    offset: usize,
+    /// Bytes, encoded with specified encoding
+    #[serde(flatten)]
+    bytes: MemcmpEncodedBytes,
 }
 
 impl Memcmp {
@@ -162,7 +147,6 @@ impl Memcmp {
         Self {
             offset,
             bytes: encoded_bytes,
-            encoding: None,
         }
     }
 
@@ -170,7 +154,6 @@ impl Memcmp {
         Self {
             offset,
             bytes: MemcmpEncodedBytes::Bytes(bytes),
-            encoding: None,
         }
     }
 
@@ -178,15 +161,18 @@ impl Memcmp {
         Self {
             offset,
             bytes: MemcmpEncodedBytes::Base58(bs58::encode(bytes).into_string()),
-            encoding: None,
         }
     }
 
+    pub fn offset(&self) -> usize {
+        self.offset
+    }
+
     pub fn bytes(&self) -> Option<Cow<Vec<u8>>> {
         use MemcmpEncodedBytes::*;
         match &self.bytes {
-            Binary(bytes) | Base58(bytes) => bs58::decode(bytes).into_vec().ok().map(Cow::Owned),
-            Base64(bytes) => base64::decode(bytes).ok().map(Cow::Owned),
+            Base58(bytes) => bs58::decode(bytes).into_vec().ok().map(Cow::Owned),
+            Base64(bytes) => BASE64_STANDARD.decode(bytes).ok().map(Cow::Owned),
             Bytes(bytes) => Some(Cow::Borrowed(bytes)),
         }
     }
@@ -194,13 +180,13 @@ impl Memcmp {
     pub fn convert_to_raw_bytes(&mut self) -> Result<(), RpcFilterError> {
         use MemcmpEncodedBytes::*;
         match &self.bytes {
-            Binary(bytes) | Base58(bytes) => {
+            Base58(bytes) => {
                 let bytes = bs58::decode(bytes).into_vec()?;
                 self.bytes = Bytes(bytes);
                 Ok(())
             }
             Base64(bytes) => {
-                let bytes = base64::decode(bytes)?;
+                let bytes = BASE64_STANDARD.decode(bytes)?;
                 self.bytes = Bytes(bytes);
                 Ok(())
             }
@@ -222,120 +208,34 @@ impl Memcmp {
             None => false,
         }
     }
-}
-
-// Internal struct to hold Memcmp filter data as either encoded String or raw Bytes
-#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
-#[serde(untagged)]
-enum DataType {
-    Encoded(String),
-    Raw(Vec<u8>),
-}
-
-// Internal struct used to specify explicit Base58 and Base64 encoding
-#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
-#[serde(rename_all = "camelCase")]
-enum RpcMemcmpEncoding {
-    Base58,
-    Base64,
-    // This variant exists only to preserve backward compatibility with generic `Memcmp` serde
-    #[serde(other)]
-    Binary,
-}
-
-// Internal struct to enable Memcmp filters with explicit Base58 and Base64 encoding. The From
-// implementations emulate `#[serde(tag = "encoding", content = "bytes")]` for
-// `MemcmpEncodedBytes`. On the next major version, all these internal elements should be removed
-// and replaced with adjacent tagging of `MemcmpEncodedBytes`.
-#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
-struct RpcMemcmp {
-    offset: usize,
-    bytes: DataType,
-    encoding: Option<RpcMemcmpEncoding>,
-}
-
-impl From<Memcmp> for RpcMemcmp {
-    fn from(memcmp: Memcmp) -> RpcMemcmp {
-        let (bytes, encoding) = match memcmp.bytes {
-            MemcmpEncodedBytes::Binary(string) => {
-                (DataType::Encoded(string), Some(RpcMemcmpEncoding::Binary))
-            }
-            MemcmpEncodedBytes::Base58(string) => {
-                (DataType::Encoded(string), Some(RpcMemcmpEncoding::Base58))
-            }
-            MemcmpEncodedBytes::Base64(string) => {
-                (DataType::Encoded(string), Some(RpcMemcmpEncoding::Base64))
-            }
-            MemcmpEncodedBytes::Bytes(vector) => (DataType::Raw(vector), None),
-        };
-        RpcMemcmp {
-            offset: memcmp.offset,
-            bytes,
-            encoding,
-        }
-    }
-}
-
-impl From<RpcMemcmp> for Memcmp {
-    fn from(memcmp: RpcMemcmp) -> Memcmp {
-        let encoding = memcmp.encoding.unwrap_or(RpcMemcmpEncoding::Binary);
-        let bytes = match (encoding, memcmp.bytes) {
-            (RpcMemcmpEncoding::Binary, DataType::Encoded(string))
-            | (RpcMemcmpEncoding::Base58, DataType::Encoded(string)) => {
-                MemcmpEncodedBytes::Base58(string)
-            }
-            (RpcMemcmpEncoding::Binary, DataType::Raw(vector)) => MemcmpEncodedBytes::Bytes(vector),
-            (RpcMemcmpEncoding::Base64, DataType::Encoded(string)) => {
-                MemcmpEncodedBytes::Base64(string)
-            }
-            _ => unreachable!(),
-        };
-        Memcmp {
-            offset: memcmp.offset,
-            bytes,
-            encoding: None,
-        }
-    }
-}
-
-pub fn maybe_map_filters(
-    node_version: Option<semver::Version>,
-    filters: &mut [RpcFilterType],
-) -> Result<(), String> {
-    let version_reqs = VersionReq::from_strs(&["<1.11.2", "~1.13"])?;
-    let needs_mapping = node_version
-        .map(|version| version_reqs.matches_any(&version))
-        .unwrap_or(true);
-    if needs_mapping {
-        for filter in filters.iter_mut() {
-            if let RpcFilterType::Memcmp(memcmp) = filter {
-                match &memcmp.bytes {
-                    MemcmpEncodedBytes::Base58(string) => {
-                        memcmp.bytes = MemcmpEncodedBytes::Binary(string.clone());
-                    }
-                    MemcmpEncodedBytes::Base64(_) => {
-                        return Err("RPC node on old version does not support base64 \
-                                    encoding for memcmp filters"
-                            .to_string());
-                    }
-                    _ => {}
-                }
-            }
+    /// Returns reference to bytes if variant is MemcmpEncodedBytes::Bytes;
+    /// otherwise returns None. Used exclusively by solana-rpc to check
+    /// SPL-token filters.
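+    ///
+    /// Illustrative sketch, assuming the raw-bytes constructor shown above is
+    /// the crate's `new_raw_bytes`:
+    ///
+    /// ```
+    /// use solana_rpc_client_api::filter::Memcmp;
+    ///
+    /// // A raw-bytes filter exposes its bytes; encoded variants return None.
+    /// let raw = Memcmp::new_raw_bytes(6, vec![1, 2, 3]);
+    /// assert_eq!(raw.raw_bytes_as_ref(), Some(&[1u8, 2, 3][..]));
+    /// ```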
+ pub fn raw_bytes_as_ref(&self) -> Option<&[u8]> { + use MemcmpEncodedBytes::*; + if let Bytes(bytes) = &self.bytes { + Some(bytes) + } else { + None } } - Ok(()) } #[cfg(test)] mod tests { - use super::*; + use { + super::*, + const_format::formatcp, + serde_json::{json, Value}, + }; #[test] fn test_worst_case_encoded_tx_goldens() { let ff_data = vec![0xffu8; MAX_DATA_SIZE]; let data58 = bs58::encode(&ff_data).into_string(); assert_eq!(data58.len(), MAX_DATA_BASE58_SIZE); - let data64 = base64::encode(&ff_data); + let data64 = BASE64_STANDARD.encode(&ff_data); assert_eq!(data64.len(), MAX_DATA_BASE64_SIZE); } @@ -347,7 +247,6 @@ mod tests { assert!(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Base58(bs58::encode(vec![1, 2, 3, 4, 5]).into_string()), - encoding: None, } .bytes_match(&data)); @@ -355,7 +254,6 @@ mod tests { assert!(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Base58(bs58::encode(vec![1, 2]).into_string()), - encoding: None, } .bytes_match(&data)); @@ -363,7 +261,6 @@ mod tests { assert!(Memcmp { offset: 2, bytes: MemcmpEncodedBytes::Base58(bs58::encode(vec![3, 4]).into_string()), - encoding: None, } .bytes_match(&data)); @@ -371,7 +268,6 @@ mod tests { assert!(!Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Base58(bs58::encode(vec![2]).into_string()), - encoding: None, } .bytes_match(&data)); @@ -379,7 +275,6 @@ mod tests { assert!(!Memcmp { offset: 2, bytes: MemcmpEncodedBytes::Base58(bs58::encode(vec![3, 4, 5, 6]).into_string()), - encoding: None, } .bytes_match(&data)); @@ -387,7 +282,6 @@ mod tests { assert!(!Memcmp { offset: 6, bytes: MemcmpEncodedBytes::Base58(bs58::encode(vec![5]).into_string()), - encoding: None, } .bytes_match(&data)); @@ -395,7 +289,6 @@ mod tests { assert!(!Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Base58("III".to_string()), - encoding: None, } .bytes_match(&data)); } @@ -410,7 +303,6 @@ mod tests { RpcFilterType::Memcmp(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Base58(base58_bytes.to_string()), - encoding: None, }) .verify(), Ok(()) @@ -425,10 +317,142 @@ mod tests { RpcFilterType::Memcmp(Memcmp { offset: 0, bytes: MemcmpEncodedBytes::Base58(base58_bytes.to_string()), - encoding: None, }) .verify(), Err(RpcFilterError::DataTooLarge) ); } + + const BASE58_STR: &str = "Bpf4ERpEvSFmCSTNh1PzTWTkALrKXvMXEdthxHuwCQcf"; + const BASE64_STR: &str = "oMoycDvJzrjQpCfukbO4VW/FLGLfnbqBEc9KUEVgj2g="; + const BYTES: [u8; 4] = [0, 1, 2, 3]; + const OFFSET: usize = 42; + const DEFAULT_ENCODING_FILTER: &str = + formatcp!(r#"{{"bytes":"{BASE58_STR}","offset":{OFFSET}}}"#); + const BINARY_FILTER: &str = + formatcp!(r#"{{"bytes":"{BASE58_STR}","offset":{OFFSET},"encoding":"binary"}}"#); + const BASE58_FILTER: &str = + formatcp!(r#"{{"bytes":"{BASE58_STR}","offset":{OFFSET},"encoding":"base58"}}"#); + const BASE64_FILTER: &str = + formatcp!(r#"{{"bytes":"{BASE64_STR}","offset":{OFFSET},"encoding":"base64"}}"#); + const MISMATCHED_BASE64_FILTER: &str = + formatcp!(r#"{{"bytes":[0, 1, 2, 3],"offset":{OFFSET},"encoding":"base64"}}"#); + const BYTES_FILTER: &str = + formatcp!(r#"{{"bytes":[0, 1, 2, 3],"offset":{OFFSET},"encoding":null}}"#); + const BYTES_FILTER_WITH_ENCODING: &str = + formatcp!(r#"{{"bytes":[0, 1, 2, 3],"offset":{OFFSET},"encoding":"bytes"}}"#); + const MISMATCHED_BYTES_FILTER_WITH_ENCODING: &str = + formatcp!(r#"{{"bytes":"{BASE58_STR}","offset":{OFFSET},"encoding":"bytes"}}"#); + + #[test] + fn test_filter_deserialize() { + // Base58 is the default encoding + let default: Memcmp = serde_json::from_str(DEFAULT_ENCODING_FILTER).unwrap(); + 
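// No "encoding" field in DEFAULT_ENCODING_FILTER above: the custom
+        // Deserialize impl falls back to base58, so this parses as Base58.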
+        assert_eq!(
+            default,
+            Memcmp {
+                offset: OFFSET,
+                bytes: MemcmpEncodedBytes::Base58(BASE58_STR.to_string()),
+            }
+        );
+
+        // Binary input is no longer supported
+        let binary = serde_json::from_str::<Memcmp>(BINARY_FILTER);
+        assert!(binary.is_err());
+
+        // Base58 input
+        let base58_filter: Memcmp = serde_json::from_str(BASE58_FILTER).unwrap();
+        assert_eq!(
+            base58_filter,
+            Memcmp {
+                offset: OFFSET,
+                bytes: MemcmpEncodedBytes::Base58(BASE58_STR.to_string()),
+            }
+        );
+
+        // Base64 input
+        let base64_filter: Memcmp = serde_json::from_str(BASE64_FILTER).unwrap();
+        assert_eq!(
+            base64_filter,
+            Memcmp {
+                offset: OFFSET,
+                bytes: MemcmpEncodedBytes::Base64(BASE64_STR.to_string()),
+            }
+        );
+
+        // Raw bytes input
+        let bytes_filter: Memcmp = serde_json::from_str(BYTES_FILTER).unwrap();
+        assert_eq!(
+            bytes_filter,
+            Memcmp {
+                offset: OFFSET,
+                bytes: MemcmpEncodedBytes::Bytes(BYTES.to_vec()),
+            }
+        );
+
+        let bytes_filter: Memcmp = serde_json::from_str(BYTES_FILTER_WITH_ENCODING).unwrap();
+        assert_eq!(
+            bytes_filter,
+            Memcmp {
+                offset: OFFSET,
+                bytes: MemcmpEncodedBytes::Bytes(BYTES.to_vec()),
+            }
+        );
+
+        // Mismatched input
+        let base64_filter: Memcmp = serde_json::from_str(MISMATCHED_BASE64_FILTER).unwrap();
+        assert_eq!(
+            base64_filter,
+            Memcmp {
+                offset: OFFSET,
+                bytes: MemcmpEncodedBytes::Bytes(BYTES.to_vec()),
+            }
+        );
+
+        let bytes_filter: Memcmp =
+            serde_json::from_str(MISMATCHED_BYTES_FILTER_WITH_ENCODING).unwrap();
+        assert_eq!(
+            bytes_filter,
+            Memcmp {
+                offset: OFFSET,
+                bytes: MemcmpEncodedBytes::Base58(BASE58_STR.to_string()),
+            }
+        );
+    }
+
+    #[test]
+    fn test_filter_serialize() {
+        // Base58
+        let base58 = Memcmp {
+            offset: OFFSET,
+            bytes: MemcmpEncodedBytes::Base58(BASE58_STR.to_string()),
+        };
+        let serialized_json = json!(base58);
+        assert_eq!(
+            serialized_json,
+            serde_json::from_str::<Value>(BASE58_FILTER).unwrap()
+        );
+
+        // Base64
+        let base64 = Memcmp {
+            offset: OFFSET,
+            bytes: MemcmpEncodedBytes::Base64(BASE64_STR.to_string()),
+        };
+        let serialized_json = json!(base64);
+        assert_eq!(
+            serialized_json,
+            serde_json::from_str::<Value>(BASE64_FILTER).unwrap()
+        );
+
+        // Bytes
+        let bytes = Memcmp {
+            offset: OFFSET,
+            bytes: MemcmpEncodedBytes::Bytes(BYTES.to_vec()),
+        };
+        let serialized_json = json!(bytes);
+        assert_eq!(
+            serialized_json,
+            serde_json::from_str::<Value>(BYTES_FILTER_WITH_ENCODING).unwrap()
+        );
+    }
 }
diff --git a/rpc-client-api/src/lib.rs b/rpc-client-api/src/lib.rs
index 9615efe24ba3a2..b2484637766ce7 100644
--- a/rpc-client-api/src/lib.rs
+++ b/rpc-client-api/src/lib.rs
@@ -7,7 +7,6 @@ pub mod error_object;
 pub mod filter;
 pub mod request;
 pub mod response;
-pub mod version_req;
 
 #[macro_use]
 extern crate serde_derive;
diff --git a/rpc-client-api/src/request.rs b/rpc-client-api/src/request.rs
index 1bcc42179d96ec..fe032a858deb47 100644
--- a/rpc-client-api/src/request.rs
+++ b/rpc-client-api/src/request.rs
@@ -48,7 +48,6 @@ pub enum RpcRequest {
     GetStorageTurn,
     GetStorageTurnRate,
     GetSlotsPerSegment,
-    GetStakeActivation,
     GetStakeMinimumDelegation,
     GetStoragePubkeysForSlot,
     GetSupply,
@@ -111,7 +110,6 @@ impl fmt::Display for RpcRequest {
             RpcRequest::GetSlot => "getSlot",
             RpcRequest::GetSlotLeader => "getSlotLeader",
             RpcRequest::GetSlotLeaders => "getSlotLeaders",
-            RpcRequest::GetStakeActivation => "getStakeActivation",
             RpcRequest::GetStakeMinimumDelegation => "getStakeMinimumDelegation",
             RpcRequest::GetStorageTurn => "getStorageTurn",
             RpcRequest::GetStorageTurnRate => "getStorageTurnRate",
diff --git a/rpc-client-api/src/response.rs
b/rpc-client-api/src/response.rs
index 5d2b115fcbb6ad..fcb330103057e4 100644
--- a/rpc-client-api/src/response.rs
+++ b/rpc-client-api/src/response.rs
@@ -5,7 +5,6 @@ use {
     solana_sdk::{
         clock::{Epoch, Slot, UnixTimestamp},
         fee_calculator::{FeeCalculator, FeeRateGovernor},
-        hash::Hash,
         inflation::Inflation,
         transaction::{Result, TransactionError},
     },
@@ -119,31 +118,6 @@ pub struct RpcBlockhash {
     pub last_valid_block_height: u64,
 }
 
-#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
-#[serde(rename_all = "camelCase")]
-pub struct RpcFees {
-    pub blockhash: String,
-    pub fee_calculator: FeeCalculator,
-    pub last_valid_slot: Slot,
-    pub last_valid_block_height: u64,
-}
-
-#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
-#[serde(rename_all = "camelCase")]
-pub struct DeprecatedRpcFees {
-    pub blockhash: String,
-    pub fee_calculator: FeeCalculator,
-    pub last_valid_slot: Slot,
-}
-
-#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
-#[serde(rename_all = "camelCase")]
-pub struct Fees {
-    pub blockhash: Hash,
-    pub fee_calculator: FeeCalculator,
-    pub last_valid_block_height: u64,
-}
-
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcFeeCalculator {
@@ -469,14 +443,6 @@ pub enum StakeActivationState {
     Inactive,
 }
 
-#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
-#[serde(rename_all = "camelCase")]
-pub struct RpcStakeActivation {
-    pub state: StakeActivationState,
-    pub active: u64,
-    pub inactive: u64,
-}
-
 #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
 #[serde(rename_all = "camelCase")]
 pub struct RpcTokenAccountBalance {
diff --git a/rpc-client-api/src/version_req.rs b/rpc-client-api/src/version_req.rs
deleted file mode 100644
index 8c8d57e35c2610..00000000000000
--- a/rpc-client-api/src/version_req.rs
+++ /dev/null
@@ -1,20 +0,0 @@
-pub(crate) struct VersionReq(Vec<semver::VersionReq>);
-
-impl VersionReq {
-    pub(crate) fn from_strs<T>(versions: &[T]) -> Result<Self, String>
-    where
-        T: AsRef<str> + std::fmt::Debug,
-    {
-        let mut version_reqs = vec![];
-        for version in versions {
-            let version_req = semver::VersionReq::parse(version.as_ref())
-                .map_err(|err| format!("Could not parse version {version:?}: {err:?}"))?;
-            version_reqs.push(version_req);
-        }
-        Ok(Self(version_reqs))
-    }
-
-    pub(crate) fn matches_any(&self, version: &semver::Version) -> bool {
-        self.0.iter().any(|r| r.matches(version))
-    }
-}
diff --git a/rpc-client-nonce-utils/Cargo.toml b/rpc-client-nonce-utils/Cargo.toml
index 476616a0b1211e..f939d06762c53c 100644
--- a/rpc-client-nonce-utils/Cargo.toml
+++ b/rpc-client-nonce-utils/Cargo.toml
@@ -10,8 +10,8 @@ license = { workspace = true }
 edition = { workspace = true }
 
 [dependencies]
-clap = "2.33.0"
-solana-clap-utils = { workspace = true }
+clap = { version = "2.33.0", optional = true }
+solana-clap-utils = { workspace = true, optional = true }
 solana-rpc-client = { workspace = true }
 solana-sdk = { workspace = true }
 thiserror = { workspace = true }
@@ -24,5 +24,9 @@ solana-account-decoder = { workspace = true }
 solana-rpc-client-api = { workspace = true }
 tokio = { workspace = true, features = ["full"] }
 
+[features]
+default = []
+clap = ["dep:clap", "dep:solana-clap-utils"]
+
 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
diff --git a/rpc-client-nonce-utils/src/blockhash_query.rs b/rpc-client-nonce-utils/src/blockhash_query.rs
index 7a6c1c1f8441b2..179ec378f4fecc 100644
--- a/rpc-client-nonce-utils/src/blockhash_query.rs
+++
b/rpc-client-nonce-utils/src/blockhash_query.rs @@ -1,3 +1,4 @@ +#[cfg(feature = "clap")] use { clap::ArgMatches, solana_clap_utils::{ @@ -5,6 +6,8 @@ use { nonce::*, offline::*, }, +}; +use { solana_rpc_client::rpc_client::RpcClient, solana_sdk::{commitment_config::CommitmentConfig, hash::Hash, pubkey::Pubkey}, }; @@ -71,6 +74,7 @@ impl BlockhashQuery { } } + #[cfg(feature = "clap")] pub fn new_from_matches(matches: &ArgMatches<'_>) -> Self { let blockhash = value_of(matches, BLOCKHASH_ARG.name); let sign_only = matches.is_present(SIGN_ONLY_ARG.name); @@ -104,10 +108,11 @@ impl Default for BlockhashQuery { #[cfg(test)] mod tests { + #[cfg(feature = "clap")] + use clap::App; use { super::*, crate::blockhash_query, - clap::App, serde_json::{self, json}, solana_account_decoder::{UiAccount, UiAccountEncoding}, solana_rpc_client_api::{ @@ -172,6 +177,7 @@ mod tests { BlockhashQuery::new(None, true, Some(nonce_pubkey)); } + #[cfg(feature = "clap")] #[test] fn test_blockhash_query_new_from_matches_ok() { let test_commands = App::new("blockhash_query_test") @@ -240,6 +246,7 @@ mod tests { ); } + #[cfg(feature = "clap")] #[test] #[should_panic] fn test_blockhash_query_new_from_matches_without_nonce_fail() { @@ -253,6 +260,7 @@ mod tests { BlockhashQuery::new_from_matches(&matches); } + #[cfg(feature = "clap")] #[test] #[should_panic] fn test_blockhash_query_new_from_matches_with_nonce_fail() { diff --git a/rpc-client-nonce-utils/src/nonblocking/blockhash_query.rs b/rpc-client-nonce-utils/src/nonblocking/blockhash_query.rs index 07f03cae68cbe5..f31313f69c93bb 100644 --- a/rpc-client-nonce-utils/src/nonblocking/blockhash_query.rs +++ b/rpc-client-nonce-utils/src/nonblocking/blockhash_query.rs @@ -1,13 +1,16 @@ use { crate::nonblocking, + solana_rpc_client::nonblocking::rpc_client::RpcClient, + solana_sdk::{commitment_config::CommitmentConfig, hash::Hash, pubkey::Pubkey}, +}; +#[cfg(feature = "clap")] +use { clap::ArgMatches, solana_clap_utils::{ input_parsers::{pubkey_of, value_of}, nonce::*, offline::*, }, - solana_rpc_client::nonblocking::rpc_client::RpcClient, - solana_sdk::{commitment_config::CommitmentConfig, hash::Hash, pubkey::Pubkey}, }; #[derive(Debug, PartialEq, Eq)] @@ -76,6 +79,7 @@ impl BlockhashQuery { } } + #[cfg(feature = "clap")] pub fn new_from_matches(matches: &ArgMatches<'_>) -> Self { let blockhash = value_of(matches, BLOCKHASH_ARG.name); let sign_only = matches.is_present(SIGN_ONLY_ARG.name); @@ -112,10 +116,11 @@ impl Default for BlockhashQuery { #[cfg(test)] mod tests { + #[cfg(feature = "clap")] + use clap::App; use { super::*, crate::nonblocking::blockhash_query, - clap::App, serde_json::{self, json}, solana_account_decoder::{UiAccount, UiAccountEncoding}, solana_rpc_client_api::{ @@ -180,6 +185,7 @@ mod tests { BlockhashQuery::new(None, true, Some(nonce_pubkey)); } + #[cfg(feature = "clap")] #[test] fn test_blockhash_query_new_from_matches_ok() { let test_commands = App::new("blockhash_query_test") @@ -248,6 +254,7 @@ mod tests { ); } + #[cfg(feature = "clap")] #[test] #[should_panic] fn test_blockhash_query_new_from_matches_without_nonce_fail() { @@ -261,6 +268,7 @@ mod tests { BlockhashQuery::new_from_matches(&matches); } + #[cfg(feature = "clap")] #[test] #[should_panic] fn test_blockhash_query_new_from_matches_with_nonce_fail() { diff --git a/rpc-client/src/mock_sender.rs b/rpc-client/src/mock_sender.rs index 8c5c58086e6fe6..9730a6ff24a983 100644 --- a/rpc-client/src/mock_sender.rs +++ b/rpc-client/src/mock_sender.rs @@ -12,18 +12,17 @@ use { request::RpcRequest, 
response::{ Response, RpcAccountBalance, RpcBlockProduction, RpcBlockProductionRange, RpcBlockhash, - RpcConfirmedTransactionStatusWithSignature, RpcContactInfo, RpcFees, RpcIdentity, + RpcConfirmedTransactionStatusWithSignature, RpcContactInfo, RpcIdentity, RpcInflationGovernor, RpcInflationRate, RpcInflationReward, RpcKeyedAccount, RpcPerfSample, RpcPrioritizationFee, RpcResponseContext, RpcSimulateTransactionResult, - RpcSnapshotSlotInfo, RpcStakeActivation, RpcSupply, RpcVersionInfo, RpcVoteAccountInfo, - RpcVoteAccountStatus, StakeActivationState, + RpcSnapshotSlotInfo, RpcSupply, RpcVersionInfo, RpcVoteAccountInfo, + RpcVoteAccountStatus, }, }, solana_sdk::{ account::Account, clock::{Slot, UnixTimestamp}, epoch_info::EpochInfo, - fee_calculator::{FeeCalculator, FeeRateGovernor}, instruction::InstructionError, message::MessageHeader, pubkey::Pubkey, @@ -117,13 +116,6 @@ impl RpcSender for MockSender { context: RpcResponseContext { slot: 1, api_version: None }, value: Value::Number(Number::from(50)), })?, - "getRecentBlockhash" => serde_json::to_value(Response { - context: RpcResponseContext { slot: 1, api_version: None }, - value: ( - Value::String(PUBKEY.to_string()), - serde_json::to_value(FeeCalculator::default()).unwrap(), - ), - })?, "getEpochInfo" => serde_json::to_value(EpochInfo { epoch: 1, slot_index: 2, @@ -132,31 +124,6 @@ impl RpcSender for MockSender { block_height: 34, transaction_count: Some(123), })?, - "getFeeCalculatorForBlockhash" => { - let value = if self.url == "blockhash_expired" { - Value::Null - } else { - serde_json::to_value(Some(FeeCalculator::default())).unwrap() - }; - serde_json::to_value(Response { - context: RpcResponseContext { slot: 1, api_version: None }, - value, - })? - } - "getFeeRateGovernor" => serde_json::to_value(Response { - context: RpcResponseContext { slot: 1, api_version: None }, - value: serde_json::to_value(FeeRateGovernor::default()).unwrap(), - })?, - "getFees" => serde_json::to_value(Response { - context: RpcResponseContext { slot: 1, api_version: None }, - value: serde_json::to_value(RpcFees { - blockhash: PUBKEY.to_string(), - fee_calculator: FeeCalculator::default(), - last_valid_slot: 42, - last_valid_block_height: 42, - }) - .unwrap(), - })?, "getSignatureStatuses" => { let status: transaction::Result<()> = if self.url == "account_in_use" { Err(TransactionError::AccountInUse) @@ -242,7 +209,6 @@ impl RpcSender for MockSender { "getSlot" => json![0], "getMaxShredInsertSlot" => json![0], "requestAirdrop" => Value::String(Signature::from([8; 64]).to_string()), - "getSnapshotSlot" => Value::Number(Number::from(0)), "getHighestSnapshotSlot" => json!(RpcSnapshotSlotInfo { full: 100, incremental: Some(110), @@ -287,11 +253,6 @@ impl RpcSender for MockSender { }) } } - "getStakeActivation" => json!(RpcStakeActivation { - state: StakeActivationState::Activating, - active: 123, - inactive: 12, - }), "getStakeMinimumDelegation" => json!(Response { context: RpcResponseContext { slot: 1, api_version: None }, value: 123_456_789, diff --git a/rpc-client/src/nonblocking/rpc_client.rs b/rpc-client/src/nonblocking/rpc_client.rs index 9194012224d4f0..0ca5f76a49f829 100644 --- a/rpc-client/src/nonblocking/rpc_client.rs +++ b/rpc-client/src/nonblocking/rpc_client.rs @@ -32,14 +32,13 @@ use { Error as ClientError, ErrorKind as ClientErrorKind, Result as ClientResult, }, config::{RpcAccountInfoConfig, *}, - filter::{self, RpcFilterType}, request::{RpcError, RpcRequest, RpcResponseErrorData, TokenAccountsFilter}, response::*, }, solana_sdk::{ 
        account::Account,
         clock::{Epoch, Slot, UnixTimestamp, DEFAULT_MS_PER_SLOT},
-        commitment_config::{CommitmentConfig, CommitmentLevel},
+        commitment_config::CommitmentConfig,
         epoch_info::EpochInfo,
         epoch_schedule::EpochSchedule,
         hash::Hash,
@@ -57,7 +56,7 @@ use {
         str::FromStr,
         time::{Duration, Instant},
     },
-    tokio::{sync::RwLock, time::sleep},
+    tokio::time::sleep,
 };
 
 /// A client of a remote Solana node.
@@ -141,7 +140,6 @@ use {
 pub struct RpcClient {
     sender: Box<dyn RpcSender + Send + Sync + 'static>,
     config: RpcClientConfig,
-    node_version: RwLock<Option<semver::Version>>,
 }
 
 impl RpcClient {
@@ -157,7 +155,6 @@ impl RpcClient {
     ) -> Self {
         Self {
             sender: Box::new(sender),
-            node_version: RwLock::new(None),
             config,
         }
     }
@@ -509,30 +506,11 @@ impl RpcClient {
         self.sender.url()
     }
 
-    pub async fn set_node_version(&self, version: semver::Version) -> Result<(), ()> {
-        let mut w_node_version = self.node_version.write().await;
-        *w_node_version = Some(version);
+    #[deprecated(since = "2.0.2", note = "RpcClient::node_version is no longer used")]
+    pub async fn set_node_version(&self, _version: semver::Version) -> Result<(), ()> {
         Ok(())
     }
 
-    async fn get_node_version(&self) -> Result<semver::Version, RpcError> {
-        let r_node_version = self.node_version.read().await;
-        if let Some(version) = &*r_node_version {
-            Ok(version.clone())
-        } else {
-            drop(r_node_version);
-            let mut w_node_version = self.node_version.write().await;
-            let node_version = self.get_version().await.map_err(|e| {
-                RpcError::RpcRequestError(format!("cluster version query failed: {e}"))
-            })?;
-            let node_version = semver::Version::parse(&node_version.solana_core).map_err(|e| {
-                RpcError::RpcRequestError(format!("failed to parse cluster version: {e}"))
-            })?;
-            *w_node_version = Some(node_version.clone());
-            Ok(node_version)
-        }
-    }
-
     /// Get the configured default [commitment level][cl].
     ///
     /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment
@@ -550,37 +528,6 @@ impl RpcClient {
         self.config.commitment_config
     }
 
-    async fn use_deprecated_commitment(&self) -> Result<bool, RpcError> {
-        Ok(self.get_node_version().await? < semver::Version::new(1, 5, 5))
-    }
-
-    async fn maybe_map_commitment(
-        &self,
-        requested_commitment: CommitmentConfig,
-    ) -> Result<CommitmentConfig, RpcError> {
-        if matches!(
-            requested_commitment.commitment,
-            CommitmentLevel::Finalized | CommitmentLevel::Confirmed | CommitmentLevel::Processed
-        ) && self.use_deprecated_commitment().await?
-        {
-            return Ok(CommitmentConfig::use_deprecated_commitment(
-                requested_commitment,
-            ));
-        }
-        Ok(requested_commitment)
-    }
-
-    #[allow(deprecated)]
-    async fn maybe_map_filters(
-        &self,
-        mut filters: Vec<RpcFilterType>,
-    ) -> Result<Vec<RpcFilterType>, RpcError> {
-        let node_version = self.get_node_version().await?;
-        filter::maybe_map_filters(Some(node_version), &mut filters)
-            .map_err(RpcError::RpcRequestError)?;
-        Ok(filters)
-    }
-
     /// Submit a transaction and wait for confirmation.
     ///
     /// Once this function returns successfully, the given transaction is
@@ -822,11 +769,7 @@ impl RpcClient {
         self.send_transaction_with_config(
             transaction,
             RpcSendTransactionConfig {
-                preflight_commitment: Some(
-                    self.maybe_map_commitment(self.commitment())
-                        .await?
-                        .commitment,
-                ),
+                preflight_commitment: Some(self.commitment().commitment),
                 ..RpcSendTransactionConfig::default()
             },
         )
@@ -919,15 +862,10 @@ impl RpcClient {
         transaction: &impl SerializableTransaction,
         config: RpcSendTransactionConfig,
     ) -> ClientResult<Signature> {
-        let encoding = if let Some(encoding) = config.encoding {
-            encoding
-        } else {
-            self.default_cluster_transaction_encoding().await?
-        };
+        let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Base64);
         let preflight_commitment = CommitmentConfig {
             commitment: config.preflight_commitment.unwrap_or_default(),
         };
-        let preflight_commitment = self.maybe_map_commitment(preflight_commitment).await?;
         let config = RpcSendTransactionConfig {
             encoding: Some(encoding),
             preflight_commitment: Some(preflight_commitment.commitment),
@@ -1210,16 +1148,6 @@ impl RpcClient {
         }
     }
 
-    async fn default_cluster_transaction_encoding(
-        &self,
-    ) -> Result<UiTransactionEncoding, RpcError> {
-        if self.get_node_version().await? < semver::Version::new(1, 3, 16) {
-            Ok(UiTransactionEncoding::Base58)
-        } else {
-            Ok(UiTransactionEncoding::Base64)
-        }
-    }
-
     /// Simulates sending a transaction.
     ///
     /// If the transaction fails, then the [`err`] field of the returned
@@ -1369,13 +1297,8 @@ impl RpcClient {
         transaction: &impl SerializableTransaction,
         config: RpcSimulateTransactionConfig,
     ) -> RpcResult<RpcSimulateTransactionResult> {
-        let encoding = if let Some(encoding) = config.encoding {
-            encoding
-        } else {
-            self.default_cluster_transaction_encoding().await?
-        };
+        let encoding = config.encoding.unwrap_or(UiTransactionEncoding::Base64);
         let commitment = config.commitment.unwrap_or_default();
-        let commitment = self.maybe_map_commitment(commitment).await?;
         let config = RpcSimulateTransactionConfig {
             encoding: Some(encoding),
             commitment: Some(commitment),
@@ -1844,11 +1767,8 @@ impl RpcClient {
         &self,
         commitment_config: CommitmentConfig,
     ) -> ClientResult<Slot> {
-        self.send(
-            RpcRequest::GetSlot,
-            json!([self.maybe_map_commitment(commitment_config).await?]),
-        )
-        .await
+        self.send(RpcRequest::GetSlot, json!([commitment_config]))
+            .await
     }
 
     /// Returns the block height that has reached the configured [commitment level][cl].
@@ -1908,11 +1828,8 @@ impl RpcClient {
         &self,
         commitment_config: CommitmentConfig,
     ) -> ClientResult<u64> {
-        self.send(
-            RpcRequest::GetBlockHeight,
-            json!([self.maybe_map_commitment(commitment_config).await?]),
-        )
-        .await
+        self.send(RpcRequest::GetBlockHeight, json!([commitment_config]))
+            .await
     }
 
     /// Returns the slot leaders for a given slot range.
@@ -2034,103 +1951,6 @@ impl RpcClient {
             .await
     }
 
-    /// Returns epoch activation information for a stake account.
-    ///
-    /// This method uses the configured [commitment level][cl].
-    ///
-    /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment
-    ///
-    /// # RPC Reference
-    ///
-    /// This method corresponds directly to the [`getStakeActivation`] RPC method.
-    ///
-    /// [`getStakeActivation`]: https://solana.com/docs/rpc/http/getstakeactivation
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// # use solana_rpc_client_api::{
-    /// #     client_error::Error,
-    /// #     response::StakeActivationState,
-    /// # };
-    /// # use solana_rpc_client::nonblocking::rpc_client::RpcClient;
-    /// # use solana_sdk::{
-    /// #     signer::keypair::Keypair,
-    /// #     signature::Signer,
-    /// #     pubkey::Pubkey,
-    /// #     stake,
-    /// #     stake::state::{Authorized, Lockup},
-    /// #     transaction::Transaction
-    /// # };
-    /// # use std::str::FromStr;
-    /// # futures::executor::block_on(async {
-    /// #     let alice = Keypair::new();
-    /// #     let rpc_client = RpcClient::new_mock("succeeds".to_string());
-    /// // Find some vote account to delegate to
-    /// let vote_accounts = rpc_client.get_vote_accounts().await?;
-    /// let vote_account = vote_accounts.current.get(0).unwrap_or_else(|| &vote_accounts.delinquent[0]);
-    /// let vote_account_pubkey = &vote_account.vote_pubkey;
-    /// let vote_account_pubkey = Pubkey::from_str(vote_account_pubkey).expect("pubkey");
-    ///
-    /// // Create a stake account
-    /// let stake_account = Keypair::new();
-    /// let stake_account_pubkey = stake_account.pubkey();
-    ///
-    /// // Build the instructions to create new stake account,
-    /// // funded by alice, and delegate to a validator's vote account.
-    /// let instrs = stake::instruction::create_account_and_delegate_stake(
-    ///     &alice.pubkey(),
-    ///     &stake_account_pubkey,
-    ///     &vote_account_pubkey,
-    ///     &Authorized::auto(&stake_account_pubkey),
-    ///     &Lockup::default(),
-    ///     1_000_000,
-    /// );
-    ///
-    /// let latest_blockhash = rpc_client.get_latest_blockhash().await?;
-    /// let tx = Transaction::new_signed_with_payer(
-    ///     &instrs,
-    ///     Some(&alice.pubkey()),
-    ///     &[&alice, &stake_account],
-    ///     latest_blockhash,
-    /// );
-    ///
-    /// rpc_client.send_and_confirm_transaction(&tx).await?;
-    ///
-    /// let epoch_info = rpc_client.get_epoch_info().await?;
-    /// let activation = rpc_client.get_stake_activation(
-    ///     stake_account_pubkey,
-    ///     Some(epoch_info.epoch),
-    /// ).await?;
-    ///
-    /// assert_eq!(activation.state, StakeActivationState::Activating);
-    /// # Ok::<(), Error>(())
-    /// # })?;
-    /// # Ok::<(), Error>(())
-    /// ```
-    #[deprecated(
-        since = "1.18.18",
-        note = "Do not use; getStakeActivation is deprecated on the JSON-RPC server"
-    )]
-    pub async fn get_stake_activation(
-        &self,
-        stake_account: Pubkey,
-        epoch: Option<Epoch>,
-    ) -> ClientResult<RpcStakeActivation> {
-        self.send(
-            RpcRequest::GetStakeActivation,
-            json!([
                stake_account.to_string(),
-                RpcEpochConfig {
-                    epoch,
-                    commitment: Some(self.commitment()),
-                    min_context_slot: None,
-                }
-            ]),
-        )
-        .await
-    }
-
     /// Returns information about the current supply.
     ///
     /// This method uses the configured [commitment level][cl].
@@ -2187,11 +2007,8 @@ impl RpcClient {
         &self,
         commitment_config: CommitmentConfig,
     ) -> RpcResult<RpcSupply> {
-        self.send(
-            RpcRequest::GetSupply,
-            json!([self.maybe_map_commitment(commitment_config).await?]),
-        )
-        .await
+        self.send(RpcRequest::GetSupply, json!([commitment_config]))
+            .await
     }
 
     /// Returns the 20 largest accounts, by lamport balance.
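Note: the remaining hunks in this file repeat the same simplification. A
commitment that was previously remapped through
`self.maybe_map_commitment(...).await?` is now passed through unchanged, e.g.

    // before
    json!([self.maybe_map_commitment(commitment_config).await?])
    // after
    json!([commitment_config])

so no request path depends on the remote node's version any longer.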
@@ -2233,7 +2050,6 @@ impl RpcClient {
         config: RpcLargestAccountsConfig,
     ) -> RpcResult<Vec<RpcAccountBalance>> {
         let commitment = config.commitment.unwrap_or_default();
-        let commitment = self.maybe_map_commitment(commitment).await?;
         let config = RpcLargestAccountsConfig {
             commitment: Some(commitment),
             ..config
@@ -2303,7 +2119,7 @@ impl RpcClient {
         commitment_config: CommitmentConfig,
     ) -> ClientResult<RpcVoteAccountStatus> {
         self.get_vote_accounts_with_config(RpcGetVoteAccountsConfig {
-            commitment: Some(self.maybe_map_commitment(commitment_config).await?),
+            commitment: Some(commitment_config),
             ..RpcGetVoteAccountsConfig::default()
         })
        .await
@@ -2644,16 +2460,9 @@ impl RpcClient {
         commitment_config: CommitmentConfig,
     ) -> ClientResult<Vec<Slot>> {
         let json = if end_slot.is_some() {
-            json!([
-                start_slot,
-                end_slot,
-                self.maybe_map_commitment(commitment_config).await?
-            ])
+            json!([start_slot, end_slot, commitment_config])
         } else {
-            json!([
-                start_slot,
-                self.maybe_map_commitment(commitment_config).await?
-            ])
+            json!([start_slot, commitment_config])
         };
         self.send(RpcRequest::GetBlocks, json).await
     }
@@ -2748,11 +2557,7 @@ impl RpcClient {
     ) -> ClientResult<Vec<Slot>> {
         self.send(
             RpcRequest::GetBlocksWithLimit,
-            json!([
-                start_slot,
-                limit,
-                self.maybe_map_commitment(commitment_config).await?
-            ]),
+            json!([start_slot, limit, commitment_config]),
         )
         .await
     }
@@ -3093,11 +2898,8 @@ impl RpcClient {
         &self,
         commitment_config: CommitmentConfig,
     ) -> ClientResult<EpochInfo> {
-        self.send(
-            RpcRequest::GetEpochInfo,
-            json!([self.maybe_map_commitment(commitment_config).await?]),
-        )
-        .await
+        self.send(RpcRequest::GetEpochInfo, json!([commitment_config]))
+            .await
     }
 
     /// Returns the leader schedule for an epoch.
@@ -3170,7 +2972,7 @@ impl RpcClient {
         self.get_leader_schedule_with_config(
             slot,
             RpcLeaderScheduleConfig {
-                commitment: Some(self.maybe_map_commitment(commitment_config).await?),
+                commitment: Some(commitment_config),
                 ..RpcLeaderScheduleConfig::default()
             },
         )
@@ -3616,7 +3418,7 @@ impl RpcClient {
     ) -> RpcResult<Option<Account>> {
         let config = RpcAccountInfoConfig {
             encoding: Some(UiAccountEncoding::Base64Zstd),
-            commitment: Some(self.maybe_map_commitment(commitment_config).await?),
+            commitment: Some(commitment_config),
             data_slice: None,
             min_context_slot: None,
         };
@@ -3843,7 +3645,7 @@ impl RpcClient {
             pubkeys,
             RpcAccountInfoConfig {
                 encoding: Some(UiAccountEncoding::Base64Zstd),
-                commitment: Some(self.maybe_map_commitment(commitment_config).await?),
+                commitment: Some(commitment_config),
                 data_slice: None,
                 min_context_slot: None,
             },
@@ -4072,10 +3874,7 @@ impl RpcClient {
     ) -> RpcResult<u64> {
         self.send(
             RpcRequest::GetBalance,
-            json!([
-                pubkey.to_string(),
-                self.maybe_map_commitment(commitment_config).await?
-            ]),
+            json!([pubkey.to_string(), commitment_config]),
         )
         .await
     }
@@ -4195,11 +3994,7 @@ impl RpcClient {
             .account_config
             .commitment
             .unwrap_or_else(|| self.commitment());
-        let commitment = self.maybe_map_commitment(commitment).await?;
         config.account_config.commitment = Some(commitment);
-        if let Some(filters) = config.filters {
-            config.filters = Some(self.maybe_map_filters(filters).await?);
-        }
 
         let accounts = self
             .send::<OptionalContext<Vec<RpcKeyedAccount>>>(
@@ -4264,7 +4059,7 @@ impl RpcClient {
         Ok(self
             .send::<Response<u64>>(
                 RpcRequest::GetStakeMinimumDelegation,
-                json!([self.maybe_map_commitment(commitment_config).await?]),
+                json!([commitment_config]),
             )
            .await?
            .value)
    }
 
@@ -4280,11 +4075,8 @@ impl RpcClient {
         &self,
         commitment_config: CommitmentConfig,
     ) -> ClientResult<u64> {
-        self.send(
-            RpcRequest::GetTransactionCount,
-            json!([self.maybe_map_commitment(commitment_config).await?]),
-        )
-        .await
+        self.send(RpcRequest::GetTransactionCount, json!([commitment_config]))
+            .await
     }
 
     pub async fn get_first_available_block(&self) -> ClientResult<Slot> {
@@ -4323,7 +4115,7 @@ impl RpcClient {
     ) -> RpcResult<Option<UiTokenAccount>> {
         let config = RpcAccountInfoConfig {
             encoding: Some(UiAccountEncoding::JsonParsed),
-            commitment: Some(self.maybe_map_commitment(commitment_config).await?),
+            commitment: Some(commitment_config),
             data_slice: None,
             min_context_slot: None,
         };
@@ -4386,10 +4178,7 @@ impl RpcClient {
     ) -> RpcResult<UiTokenAmount> {
         self.send(
             RpcRequest::GetTokenAccountBalance,
-            json!([
-                pubkey.to_string(),
-                self.maybe_map_commitment(commitment_config).await?
-            ]),
+            json!([pubkey.to_string(), commitment_config]),
         )
         .await
     }
@@ -4424,7 +4213,7 @@ impl RpcClient {
 
         let config = RpcAccountInfoConfig {
             encoding: Some(UiAccountEncoding::JsonParsed),
-            commitment: Some(self.maybe_map_commitment(commitment_config).await?),
+            commitment: Some(commitment_config),
             data_slice: None,
             min_context_slot: None,
         };
@@ -4466,7 +4255,7 @@ impl RpcClient {
 
         let config = RpcAccountInfoConfig {
             encoding: Some(UiAccountEncoding::JsonParsed),
-            commitment: Some(self.maybe_map_commitment(commitment_config).await?),
+            commitment: Some(commitment_config),
             data_slice: None,
             min_context_slot: None,
         };
@@ -4495,10 +4284,7 @@ impl RpcClient {
     ) -> RpcResult<Vec<RpcTokenAccountBalance>> {
         self.send(
             RpcRequest::GetTokenLargestAccounts,
-            json!([
-                mint.to_string(),
-                self.maybe_map_commitment(commitment_config).await?
-            ]),
+            json!([mint.to_string(), commitment_config]),
         )
         .await
     }
@@ -4517,10 +4303,7 @@ impl RpcClient {
     ) -> RpcResult<UiTokenAmount> {
         self.send(
             RpcRequest::GetTokenSupply,
-            json!([
-                mint.to_string(),
-                self.maybe_map_commitment(commitment_config).await?
-            ]),
+            json!([mint.to_string(), commitment_config]),
         )
         .await
     }
@@ -4561,7 +4344,6 @@ impl RpcClient {
         config: RpcRequestAirdropConfig,
     ) -> ClientResult<Signature> {
         let commitment = config.commitment.unwrap_or_default();
-        let commitment = self.maybe_map_commitment(commitment).await?;
         let config = RpcRequestAirdropConfig {
             commitment: Some(commitment),
             ..config
@@ -4784,10 +4566,7 @@ impl RpcClient {
             blockhash,
             last_valid_block_height,
         } = self
-            .send::<Response<RpcBlockhash>>(
-                RpcRequest::GetLatestBlockhash,
-                json!([self.maybe_map_commitment(commitment).await?]),
-            )
+            .send::<Response<RpcBlockhash>>(RpcRequest::GetLatestBlockhash, json!([commitment]))
             .await?
             .value;
         let blockhash = blockhash.parse().map_err(|_| {
diff --git a/rpc-client/src/rpc_client.rs b/rpc-client/src/rpc_client.rs
index a95548bf0520d6..32bd08cef49f03 100644
--- a/rpc-client/src/rpc_client.rs
+++ b/rpc-client/src/rpc_client.rs
@@ -1703,90 +1703,6 @@ impl RpcClient {
         self.invoke((self.rpc_client.as_ref()).get_block_production_with_config(config))
     }
 
-    /// Returns epoch activation information for a stake account.
-    ///
-    /// This method uses the configured [commitment level][cl].
-    ///
-    /// [cl]: https://solana.com/docs/rpc#configuring-state-commitment
-    ///
-    /// # RPC Reference
-    ///
-    /// This method corresponds directly to the [`getStakeActivation`] RPC method.
-    ///
-    /// [`getStakeActivation`]: https://solana.com/docs/rpc/http/getstakeactivation
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// # use solana_rpc_client_api::{
-    /// #     client_error::Error,
-    /// #     response::StakeActivationState,
-    /// # };
-    /// # use solana_rpc_client::rpc_client::RpcClient;
-    /// # use solana_sdk::{
-    /// #     signer::keypair::Keypair,
-    /// #     signature::Signer,
-    /// #     pubkey::Pubkey,
-    /// #     stake,
-    /// #     stake::state::{Authorized, Lockup},
-    /// #     transaction::Transaction
-    /// # };
-    /// # use std::str::FromStr;
-    /// # let alice = Keypair::new();
-    /// # let rpc_client = RpcClient::new_mock("succeeds".to_string());
-    /// // Find some vote account to delegate to
-    /// let vote_accounts = rpc_client.get_vote_accounts()?;
-    /// let vote_account = vote_accounts.current.get(0).unwrap_or_else(|| &vote_accounts.delinquent[0]);
-    /// let vote_account_pubkey = &vote_account.vote_pubkey;
-    /// let vote_account_pubkey = Pubkey::from_str(vote_account_pubkey).expect("pubkey");
-    ///
-    /// // Create a stake account
-    /// let stake_account = Keypair::new();
-    /// let stake_account_pubkey = stake_account.pubkey();
-    ///
-    /// // Build the instructions to create new stake account,
-    /// // funded by alice, and delegate to a validator's vote account.
-    /// let instrs = stake::instruction::create_account_and_delegate_stake(
-    ///     &alice.pubkey(),
-    ///     &stake_account_pubkey,
-    ///     &vote_account_pubkey,
-    ///     &Authorized::auto(&stake_account_pubkey),
-    ///     &Lockup::default(),
-    ///     1_000_000,
-    /// );
-    ///
-    /// let latest_blockhash = rpc_client.get_latest_blockhash()?;
-    /// let tx = Transaction::new_signed_with_payer(
-    ///     &instrs,
-    ///     Some(&alice.pubkey()),
-    ///     &[&alice, &stake_account],
-    ///     latest_blockhash,
-    /// );
-    ///
-    /// rpc_client.send_and_confirm_transaction(&tx)?;
-    ///
-    /// let epoch_info = rpc_client.get_epoch_info()?;
-    /// let activation = rpc_client.get_stake_activation(
-    ///     stake_account_pubkey,
-    ///     Some(epoch_info.epoch),
-    /// )?;
-    ///
-    /// assert_eq!(activation.state, StakeActivationState::Activating);
-    /// # Ok::<(), Error>(())
-    /// ```
-    #[deprecated(
-        since = "1.18.18",
-        note = "Do not use; getStakeActivation is deprecated on the JSON-RPC server"
-    )]
-    #[allow(deprecated)]
-    pub fn get_stake_activation(
-        &self,
-        stake_account: Pubkey,
-        epoch: Option<Epoch>,
-    ) -> ClientResult<RpcStakeActivation> {
-        self.invoke((self.rpc_client.as_ref()).get_stake_activation(stake_account, epoch))
-    }
-
     /// Returns information about the current supply.
     ///
     /// This method uses the configured [commitment level][cl].
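With `get_stake_activation` gone from both clients, callers can compute the
same numbers locally from the stake account and the stake-history sysvar. A
minimal sketch, not the removed implementation verbatim: it reuses the same
SDK calls the deleted server-side handler uses (see the rpc.rs hunks further
below) but passes `None` for the warmup/cooldown rate-activation epoch, which
only a `Bank` can supply:

    use solana_rpc_client::rpc_client::RpcClient;
    use solana_sdk::{
        account_utils::StateMut,
        pubkey::Pubkey,
        stake::state::StakeStateV2,
        stake_history::StakeHistory,
        sysvar::stake_history,
    };

    fn stake_activating_lamports(
        rpc_client: &RpcClient,
        stake_pubkey: &Pubkey,
        epoch: u64,
    ) -> Result<u64, Box<dyn std::error::Error>> {
        // Fetch and decode the stake account state.
        let stake_account = rpc_client.get_account(stake_pubkey)?;
        let stake_state: StakeStateV2 = stake_account.state()?;
        let delegation = stake_state.delegation().ok_or("not delegated")?;

        // Fetch and decode the stake-history sysvar.
        let history_account = rpc_client.get_account(&stake_history::id())?;
        let stake_history: StakeHistory =
            solana_sdk::account::from_account(&history_account).ok_or("bad sysvar")?;

        // Same computation the removed RPC handler performed; `None` skips
        // the new warmup/cooldown rate epoch.
        let status = delegation.stake_activating_and_deactivating(epoch, &stake_history, None);
        Ok(status.activating)
    }

The caller would typically pass the current epoch from `get_epoch_info()`,
matching the removed server-side restriction to the current epoch.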
diff --git a/rpc-test/Cargo.toml b/rpc-test/Cargo.toml index eb69e4c95d8cdc..435edafdc5f780 100644 --- a/rpc-test/Cargo.toml +++ b/rpc-test/Cargo.toml @@ -33,6 +33,7 @@ solana-transaction-status = { workspace = true } tokio = { workspace = true, features = ["full"] } [dev-dependencies] +solana-connection-cache = { workspace = true } solana-logger = { workspace = true } [package.metadata.docs.rs] diff --git a/rpc-test/tests/nonblocking.rs b/rpc-test/tests/nonblocking.rs index 2a346c93e25a3a..160b01e1e4f41d 100644 --- a/rpc-test/tests/nonblocking.rs +++ b/rpc-test/tests/nonblocking.rs @@ -1,11 +1,9 @@ use { - solana_client::{ - connection_cache::Protocol, - nonblocking::tpu_client::{LeaderTpuService, TpuClient}, - tpu_client::TpuClientConfig, - }, + solana_client::nonblocking::tpu_client::{LeaderTpuService, TpuClient}, + solana_connection_cache::connection_cache::Protocol, solana_sdk::{clock::DEFAULT_MS_PER_SLOT, pubkey::Pubkey, system_transaction}, solana_test_validator::TestValidatorGenesis, + solana_tpu_client::tpu_client::TpuClientConfig, std::sync::{ atomic::{AtomicBool, Ordering}, Arc, diff --git a/rpc-test/tests/rpc.rs b/rpc-test/tests/rpc.rs index 463e2046867346..67d229d43a1823 100644 --- a/rpc-test/tests/rpc.rs +++ b/rpc-test/tests/rpc.rs @@ -6,10 +6,7 @@ use { reqwest::{self, header::CONTENT_TYPE}, serde_json::{json, Value}, solana_account_decoder::UiAccount, - solana_client::{ - connection_cache::ConnectionCache, - tpu_client::{TpuClient, TpuClientConfig}, - }, + solana_client::connection_cache::ConnectionCache, solana_pubsub_client::nonblocking::pubsub_client::PubsubClient, solana_rpc_client::rpc_client::RpcClient, solana_rpc_client_api::{ @@ -29,7 +26,7 @@ use { }, solana_streamer::socket::SocketAddrSpace, solana_test_validator::TestValidator, - solana_tpu_client::tpu_client::DEFAULT_TPU_CONNECTION_POOL_SIZE, + solana_tpu_client::tpu_client::{TpuClient, TpuClientConfig, DEFAULT_TPU_CONNECTION_POOL_SIZE}, solana_transaction_status::TransactionStatus, std::{ collections::HashSet, diff --git a/rpc/src/cluster_tpu_info.rs b/rpc/src/cluster_tpu_info.rs index c2e515ab5e2b9b..777dfd5b8ff868 100644 --- a/rpc/src/cluster_tpu_info.rs +++ b/rpc/src/cluster_tpu_info.rs @@ -8,6 +8,7 @@ use { solana_send_transaction_service::tpu_info::TpuInfo, std::{ collections::HashMap, + iter::once, net::SocketAddr, sync::{Arc, RwLock}, }, @@ -36,6 +37,7 @@ impl TpuInfo for ClusterTpuInfo { .cluster_info .tpu_peers() .into_iter() + .chain(once(self.cluster_info.my_contact_info())) .filter_map(|node| { Some(( *node.pubkey(), @@ -75,8 +77,8 @@ impl TpuInfo for ClusterTpuInfo { ) -> Vec<(&SocketAddr, Slot)> { let recorder = self.poh_recorder.read().unwrap(); let leaders: Vec<_> = (0..max_count) + .rev() .filter_map(|future_slot| { - let future_slot = max_count.wrapping_sub(future_slot); NUM_CONSECUTIVE_LEADER_SLOTS .checked_mul(future_slot) .and_then(|slots_in_the_future| { @@ -127,6 +129,74 @@ mod test { std::{net::Ipv4Addr, sync::atomic::AtomicBool}, }; + #[test] + fn test_refresh_recent_peers() { + let ledger_path = get_tmp_ledger_path_auto_delete!(); + let blockstore = Blockstore::open(ledger_path.path()).unwrap(); + + let validator_vote_keypairs0 = ValidatorVoteKeypairs::new_rand(); + let validator_vote_keypairs1 = ValidatorVoteKeypairs::new_rand(); + let validator_vote_keypairs2 = ValidatorVoteKeypairs::new_rand(); + let mut expected_validator_pubkeys = vec![ + validator_vote_keypairs0.node_keypair.pubkey(), + validator_vote_keypairs1.node_keypair.pubkey(), + 
validator_vote_keypairs2.node_keypair.pubkey(),
+        ];
+        expected_validator_pubkeys.sort();
+        let validator_keypairs = vec![
+            &validator_vote_keypairs0,
+            &validator_vote_keypairs1,
+            &validator_vote_keypairs2,
+        ];
+        let GenesisConfigInfo { genesis_config, .. } = create_genesis_config_with_vote_accounts(
+            1_000_000_000,
+            &validator_keypairs,
+            vec![10_000; 3],
+        );
+        let bank = Arc::new(Bank::new_for_tests(&genesis_config));
+
+        let (poh_recorder, _entry_receiver, _record_receiver) = PohRecorder::new(
+            0,
+            bank.last_blockhash(),
+            bank.clone(),
+            Some((2, 2)),
+            bank.ticks_per_slot(),
+            Arc::new(blockstore),
+            &Arc::new(LeaderScheduleCache::new_from_bank(&bank)),
+            &PohConfig::default(),
+            Arc::new(AtomicBool::default()),
+        );
+
+        let validator0_contact_info = ContactInfo::new_localhost(
+            &validator_vote_keypairs0.node_keypair.pubkey(),
+            timestamp(),
+        );
+        let validator1_contact_info = ContactInfo::new_localhost(
+            &validator_vote_keypairs1.node_keypair.pubkey(),
+            timestamp(),
+        );
+        let validator2_contact_info = ContactInfo::new_localhost(
+            &validator_vote_keypairs2.node_keypair.pubkey(),
+            timestamp(),
+        );
+        let cluster_info = Arc::new(ClusterInfo::new(
+            validator0_contact_info,
+            Arc::new(validator_vote_keypairs0.node_keypair),
+            SocketAddrSpace::Unspecified,
+        ));
+        cluster_info.insert_info(validator1_contact_info);
+        cluster_info.insert_info(validator2_contact_info);
+
+        let mut leader_info =
+            ClusterTpuInfo::new(cluster_info, Arc::new(RwLock::new(poh_recorder)));
+        leader_info.refresh_recent_peers();
+        let mut refreshed_recent_peers =
+            leader_info.recent_peers.keys().copied().collect::<Vec<_>>();
+        refreshed_recent_peers.sort();
+
+        assert_eq!(refreshed_recent_peers, expected_validator_pubkeys);
+    }
+
     #[test]
     fn test_get_leader_tpus() {
         let ledger_path = get_tmp_ledger_path_auto_delete!();
@@ -208,6 +278,10 @@ mod test {
             leader_info.get_leader_tpus(1, Protocol::UDP),
             vec![&recent_peers.get(&first_leader).unwrap().0]
         );
+        assert_eq!(
+            leader_info.get_leader_tpus_with_slots(1, Protocol::UDP),
+            vec![(&recent_peers.get(&first_leader).unwrap().0, 0)]
+        );
 
         let second_leader = solana_ledger::leader_schedule_utils::slot_leader_at(
             slot + NUM_CONSECUTIVE_LEADER_SLOTS,
@@ -223,6 +297,13 @@ mod test {
             leader_info.get_leader_tpus(2, Protocol::UDP),
             expected_leader_sockets
         );
+        assert_eq!(
+            leader_info.get_leader_tpus_with_slots(2, Protocol::UDP),
+            expected_leader_sockets
+                .into_iter()
+                .zip([0, 4])
+                .collect::<Vec<_>>()
+        );
 
         let third_leader = solana_ledger::leader_schedule_utils::slot_leader_at(
             slot + (2 * NUM_CONSECUTIVE_LEADER_SLOTS),
@@ -239,9 +320,24 @@ mod test {
             leader_info.get_leader_tpus(3, Protocol::UDP),
             expected_leader_sockets
        );
+        // Only two distinct leader TPUs show up in this schedule, so the
+        // expected slots are [0, 4] rather than [0, 4, 8]. This assumption is
+        // safe because leader schedule generation is deterministic.
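+        // (`get_leader_tpus_with_slots` pairs each TPU socket with the first
+        // slot of that leader's rotation, i.e. multiples of
+        // NUM_CONSECUTIVE_LEADER_SLOTS, hence the `.zip([0, 4])`.)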
+        assert_eq!(
+            leader_info.get_leader_tpus_with_slots(3, Protocol::UDP),
+            expected_leader_sockets
+                .into_iter()
+                .zip([0, 4])
+                .collect::<Vec<_>>()
+        );
 
         for x in 4..8 {
             assert!(leader_info.get_leader_tpus(x, Protocol::UDP).len() <= recent_peers.len());
+            assert!(
+                leader_info
+                    .get_leader_tpus_with_slots(x, Protocol::UDP)
+                    .len()
+                    <= recent_peers.len()
+            );
         }
     }
 }
diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs
index 51c651b481aa71..d62a61ec81fe00 100644
--- a/rpc/src/rpc.rs
+++ b/rpc/src/rpc.rs
@@ -39,7 +39,7 @@ use {
     solana_rpc_client_api::{
         config::*,
         custom_error::RpcCustomError,
-        filter::{Memcmp, MemcmpEncodedBytes, RpcFilterType},
+        filter::{Memcmp, RpcFilterType},
         request::{
             TokenAccountsFilter, DELINQUENT_VALIDATOR_SLOT_DISTANCE,
             MAX_GET_CONFIRMED_BLOCKS_RANGE, MAX_GET_CONFIRMED_SIGNATURES_FOR_ADDRESS2_LIMIT,
@@ -61,7 +61,6 @@ use {
     },
     solana_sdk::{
         account::{AccountSharedData, ReadableAccount},
-        account_utils::StateMut,
         clock::{Slot, UnixTimestamp, MAX_PROCESSING_AGE},
         commitment_config::{CommitmentConfig, CommitmentLevel},
         epoch_info::EpochInfo,
@@ -73,10 +72,7 @@ use {
         message::SanitizedMessage,
         pubkey::{Pubkey, PUBKEY_BYTES},
         signature::{Keypair, Signature, Signer},
-        stake::state::{StakeActivationStatus, StakeStateV2},
-        stake_history::StakeHistory,
         system_instruction,
-        sysvar::stake_history,
         transaction::{
             self, AddressLoader, MessageHash, SanitizedTransaction, TransactionError,
             VersionedTransaction, MAX_TX_ACCOUNT_LOCKS,
@@ -269,23 +265,13 @@ impl JsonRpcRequestProcessor {
             .slot_with_commitment(commitment.commitment);
 
         match commitment.commitment {
-            // Recent variant is deprecated
-            CommitmentLevel::Recent | CommitmentLevel::Processed => {
+            CommitmentLevel::Processed => {
                 debug!("RPC using the heaviest slot: {:?}", slot);
             }
-            // Root variant is deprecated
-            CommitmentLevel::Root => {
-                debug!("RPC using node root: {:?}", slot);
-            }
-            // Single variant is deprecated
-            CommitmentLevel::Single => {
-                debug!("RPC using confirmed slot: {:?}", slot);
-            }
-            // Max variant is deprecated
-            CommitmentLevel::Max | CommitmentLevel::Finalized => {
+            CommitmentLevel::Finalized => {
                 debug!("RPC using block: {:?}", slot);
             }
-            CommitmentLevel::SingleGossip | CommitmentLevel::Confirmed => unreachable!(), // SingleGossip variant is deprecated
+            CommitmentLevel::Confirmed => unreachable!(), // SingleGossip variant is deprecated
         };
 
         let r_bank_forks = self.bank_forks.read().unwrap();
@@ -1823,87 +1809,6 @@ impl JsonRpcRequestProcessor {
         slot
     }
 
-    pub fn get_stake_activation(
-        &self,
-        pubkey: &Pubkey,
-        config: Option<RpcEpochConfig>,
-    ) -> Result<RpcStakeActivation> {
-        let config = config.unwrap_or_default();
-        let bank = self.get_bank_with_config(RpcContextConfig {
-            commitment: config.commitment,
-            min_context_slot: config.min_context_slot,
-        })?;
-        let epoch = config.epoch.unwrap_or_else(|| bank.epoch());
-        if epoch != bank.epoch() {
-            return Err(Error::invalid_params(format!(
-                "Invalid param: epoch {epoch:?}. Only the current epoch ({:?}) is supported",
-                bank.epoch()
-            )));
-        }
-
-        let stake_account = bank
-            .get_account(pubkey)
-            .ok_or_else(|| Error::invalid_params("Invalid param: account not found".to_string()))?;
-        let stake_state: StakeStateV2 = stake_account
-            .state()
-            .map_err(|_| Error::invalid_params("Invalid param: not a stake account".to_string()))?;
-        let delegation = stake_state.delegation();
-
-        let rent_exempt_reserve = stake_state
-            .meta()
-            .ok_or_else(|| {
-                Error::invalid_params("Invalid param: stake account not initialized".to_string())
-            })?
-            .rent_exempt_reserve;
-
-        let delegation = match delegation {
-            None => {
-                return Ok(RpcStakeActivation {
-                    state: StakeActivationState::Inactive,
-                    active: 0,
-                    inactive: stake_account.lamports().saturating_sub(rent_exempt_reserve),
-                })
-            }
-            Some(delegation) => delegation,
-        };
-
-        let stake_history_account = bank
-            .get_account(&stake_history::id())
-            .ok_or_else(Error::internal_error)?;
-        let stake_history =
-            solana_sdk::account::from_account::<StakeHistory>(&stake_history_account)
-                .ok_or_else(Error::internal_error)?;
-        let new_rate_activation_epoch = bank.new_warmup_cooldown_rate_epoch();
-
-        let StakeActivationStatus {
-            effective,
-            activating,
-            deactivating,
-        } = delegation.stake_activating_and_deactivating(
-            epoch,
-            &stake_history,
-            new_rate_activation_epoch,
-        );
-        let stake_activation_state = if deactivating > 0 {
-            StakeActivationState::Deactivating
-        } else if activating > 0 {
-            StakeActivationState::Activating
-        } else if effective > 0 {
-            StakeActivationState::Active
-        } else {
-            StakeActivationState::Inactive
-        };
-        let inactive_stake = stake_account
-            .lamports()
-            .saturating_sub(effective)
-            .saturating_sub(rent_exempt_reserve);
-        Ok(RpcStakeActivation {
-            state: stake_activation_state,
-            active: effective,
-            inactive: inactive_stake,
-        })
-    }
-
     pub fn get_token_account_balance(
         &self,
         pubkey: &Pubkey,
@@ -2474,7 +2379,7 @@ fn encode_account(
 /// Analyze custom filters to determine if the result will be a subset of spl-token accounts by
 /// owner.
 /// NOTE: `optimize_filters()` should almost always be called before using this method because of
-/// the strict match on `MemcmpEncodedBytes::Bytes`.
+/// the requirement that `Memcmp::raw_bytes_as_ref().is_some()`.
 fn get_spl_token_owner_filter(program_id: &Pubkey, filters: &[RpcFilterType]) -> Option<Pubkey> {
     if !is_known_spl_token_id(program_id) {
         return None;
     }
@@ -2488,28 +2393,21 @@ fn get_spl_token_owner_filter(program_id: &Pubkey, filters: &[RpcFilterType]) ->
     for filter in filters {
         match filter {
             RpcFilterType::DataSize(size) => data_size_filter = Some(*size),
-            #[allow(deprecated)]
-            RpcFilterType::Memcmp(Memcmp {
-                offset,
-                bytes: MemcmpEncodedBytes::Bytes(bytes),
-                ..
-            }) if *offset == account_packed_len && *program_id == token_2022::id() => {
-                memcmp_filter = Some(bytes)
-            }
-            #[allow(deprecated)]
-            RpcFilterType::Memcmp(Memcmp {
-                offset,
-                bytes: MemcmpEncodedBytes::Bytes(bytes),
-                ..
-            }) if *offset == SPL_TOKEN_ACCOUNT_OWNER_OFFSET => {
-                if bytes.len() == PUBKEY_BYTES {
-                    owner_key = Pubkey::try_from(&bytes[..]).ok();
-                } else {
-                    incorrect_owner_len = Some(bytes.len());
+            RpcFilterType::Memcmp(memcmp) => {
+                let offset = memcmp.offset();
+                if let Some(bytes) = memcmp.raw_bytes_as_ref() {
+                    if offset == account_packed_len && *program_id == token_2022::id() {
+                        memcmp_filter = Some(bytes);
+                    } else if offset == SPL_TOKEN_ACCOUNT_OWNER_OFFSET {
+                        if bytes.len() == PUBKEY_BYTES {
+                            owner_key = Pubkey::try_from(bytes).ok();
+                        } else {
+                            incorrect_owner_len = Some(bytes.len());
+                        }
+                    }
                 }
             }
             RpcFilterType::TokenAccountState => token_account_state_filter = true,
-            _ => {}
         }
     }
     if data_size_filter == Some(account_packed_len as u64)
@@ -2532,7 +2430,7 @@ fn get_spl_token_owner_filter(program_id: &Pubkey, filters: &[RpcFilterType]) -> Option<Pubkey> {
 /// Analyze custom filters to determine if the result will be a subset of spl-token accounts by
 /// mint.
 /// NOTE: `optimize_filters()` should almost always be called before using this method because of
-/// the strict match on `MemcmpEncodedBytes::Bytes`.
+/// the requirement that `Memcmp::raw_bytes_as_ref().is_some()`.
 fn get_spl_token_mint_filter(program_id: &Pubkey, filters: &[RpcFilterType]) -> Option<Pubkey> {
     if !is_known_spl_token_id(program_id) {
         return None;
     }
@@ -2546,28 +2444,21 @@ fn get_spl_token_mint_filter(program_id: &Pubkey, filters: &[RpcFilterType]) ->
     for filter in filters {
         match filter {
             RpcFilterType::DataSize(size) => data_size_filter = Some(*size),
-            #[allow(deprecated)]
-            RpcFilterType::Memcmp(Memcmp {
-                offset,
-                bytes: MemcmpEncodedBytes::Bytes(bytes),
-                ..
-            }) if *offset == account_packed_len && *program_id == token_2022::id() => {
-                memcmp_filter = Some(bytes)
-            }
-            #[allow(deprecated)]
-            RpcFilterType::Memcmp(Memcmp {
-                offset,
-                bytes: MemcmpEncodedBytes::Bytes(bytes),
-                ..
-            }) if *offset == SPL_TOKEN_ACCOUNT_MINT_OFFSET => {
-                if bytes.len() == PUBKEY_BYTES {
-                    mint = Pubkey::try_from(&bytes[..]).ok();
-                } else {
-                    incorrect_mint_len = Some(bytes.len());
+            RpcFilterType::Memcmp(memcmp) => {
+                let offset = memcmp.offset();
+                if let Some(bytes) = memcmp.raw_bytes_as_ref() {
+                    if offset == account_packed_len && *program_id == token_2022::id() {
+                        memcmp_filter = Some(bytes);
+                    } else if offset == SPL_TOKEN_ACCOUNT_MINT_OFFSET {
+                        if bytes.len() == PUBKEY_BYTES {
+                            mint = Pubkey::try_from(bytes).ok();
+                        } else {
+                            incorrect_mint_len = Some(bytes.len());
+                        }
+                    }
                 }
             }
             RpcFilterType::TokenAccountState => token_account_state_filter = true,
-            _ => {}
         }
     }
     if data_size_filter == Some(account_packed_len as u64)
@@ -4439,7 +4330,7 @@ pub mod tests {
             JSON_RPC_SERVER_ERROR_TRANSACTION_HISTORY_NOT_AVAILABLE,
             JSON_RPC_SERVER_ERROR_UNSUPPORTED_TRANSACTION_VERSION,
         },
-        filter::{Memcmp, MemcmpEncodedBytes},
+        filter::MemcmpEncodedBytes,
     },
     solana_runtime::{
         accounts_background_service::AbsRequestSender, bank::BankTestConfig,
diff --git a/rpc/src/rpc_subscriptions.rs b/rpc/src/rpc_subscriptions.rs
index 00b14c0b3d9502..dfa82c87a00588 100644
--- a/rpc/src/rpc_subscriptions.rs
+++ b/rpc/src/rpc_subscriptions.rs
@@ -3000,7 +3000,7 @@ pub(crate) mod tests {
             .get(0)
             .unwrap()
             .process_transaction_with_metadata(tx.clone())
-            .was_executed());
+            .is_ok());
 
         subscriptions.notify_subscribers(CommitmentSlots::new_from_slot(0));
 
diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs
index f8356296e970a6..49a78f22db7752 100644
--- a/rpc/src/transaction_status_service.rs
+++ b/rpc/src/transaction_status_service.rs
@@ -6,7 +6,10 @@ use {
         blockstore::Blockstore,
         blockstore_processor::{TransactionStatusBatch, TransactionStatusMessage},
     },
-    solana_svm::transaction_results::TransactionExecutionDetails,
+    solana_svm::{
+        transaction_commit_result::CommittedTransaction,
+        transaction_execution_result::TransactionExecutionDetails,
+    },
     solana_transaction_status::{
         extract_and_fmt_memos, map_inner_instructions, Reward, TransactionStatusMeta,
     },
@@ -68,42 +71,44 @@ impl TransactionStatusService {
             TransactionStatusMessage::Batch(TransactionStatusBatch {
                 bank,
                 transactions,
-                execution_results,
+                commit_results,
                 balances,
                 token_balances,
-                rent_debits,
                 transaction_indexes,
             }) => {
                 let slot = bank.slot();
                 for (
                     transaction,
-                    execution_result,
+                    commit_result,
                     pre_balances,
                     post_balances,
                     pre_token_balances,
                     post_token_balances,
-                    rent_debits,
                     transaction_index,
                 ) in izip!(
                     transactions,
-                    execution_results,
+                    commit_results,
                     balances.pre_balances,
                     balances.post_balances,
                     token_balances.pre_token_balances,
                     token_balances.post_token_balances,
-                    rent_debits,
                     transaction_indexes,
                 ) {
-                    if let Some(details) = execution_result {
-
let TransactionExecutionDetails { - status, - log_messages, - inner_instructions, - return_data, - executed_units, + if let Ok(committed_tx) = commit_result { + let CommittedTransaction { + execution_details: + TransactionExecutionDetails { + status, + log_messages, + inner_instructions, + return_data, + executed_units, + .. + }, fee_details, + rent_debits, .. - } = details; + } = committed_tx; let tx_account_locks = transaction.get_account_locks_unchecked(); let fee = fee_details.total_fee(); @@ -220,6 +225,7 @@ pub(crate) mod tests { VersionedTransaction, }, }, + solana_svm::transaction_execution_result::TransactionLoadedAccountsStats, solana_transaction_status::{ token_balances::TransactionTokenBalancesSet, TransactionStatusMeta, TransactionTokenBalance, @@ -291,7 +297,7 @@ pub(crate) mod tests { #[test] fn test_notify_transaction() { let genesis_config = create_genesis_config(2).genesis_config; - let bank = Bank::new_no_wallclock_throttle_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_no_wallclock_throttle_for_tests(&genesis_config); let (transaction_status_sender, transaction_status_receiver) = unbounded(); let ledger_path = get_tmp_ledger_path_auto_delete!(); @@ -325,14 +331,18 @@ pub(crate) mod tests { let mut rent_debits = RentDebits::default(); rent_debits.insert(&pubkey, 123, 456); - let transaction_result = Some(TransactionExecutionDetails { - status: Ok(()), - log_messages: None, - inner_instructions: None, + let commit_result = Ok(CommittedTransaction { + loaded_account_stats: TransactionLoadedAccountsStats::default(), + execution_details: TransactionExecutionDetails { + status: Ok(()), + log_messages: None, + inner_instructions: None, + return_data: None, + executed_units: 0, + accounts_data_len_delta: 0, + }, fee_details: FeeDetails::default(), - return_data: None, - executed_units: 0, - accounts_data_len_delta: 0, + rent_debits, }); let balances = TransactionBalancesSet { @@ -375,10 +385,9 @@ pub(crate) mod tests { let transaction_status_batch = TransactionStatusBatch { bank, transactions: vec![transaction], - execution_results: vec![transaction_result], + commit_results: vec![commit_result], balances, token_balances, - rent_debits: vec![rent_debits], transaction_indexes: vec![transaction_index], }; diff --git a/compute-budget/src/compute_budget_processor.rs b/runtime-transaction/src/instructions_processor.rs similarity index 87% rename from compute-budget/src/compute_budget_processor.rs rename to runtime-transaction/src/instructions_processor.rs index edd56e382a6bf2..06c0f265712e7c 100644 --- a/compute-budget/src/compute_budget_processor.rs +++ b/runtime-transaction/src/instructions_processor.rs @@ -1,66 +1,15 @@ use { - crate::prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, + solana_compute_budget::compute_budget_limits::*, solana_sdk::{ borsh1::try_from_slice_unchecked, compute_budget::{self, ComputeBudgetInstruction}, - entrypoint::HEAP_LENGTH, - fee::FeeBudgetLimits, instruction::{CompiledInstruction, InstructionError}, pubkey::Pubkey, transaction::TransactionError, }, + std::num::NonZeroU32, }; -/// Roughly 0.5us/page, where page is 32K; given roughly 15CU/us, the -/// default heap page cost = 0.5 * 15 ~= 8CU/page -pub const DEFAULT_HEAP_COST: u64 = 8; -pub const DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT: u32 = 200_000; -pub const MAX_COMPUTE_UNIT_LIMIT: u32 = 1_400_000; -pub const MAX_HEAP_FRAME_BYTES: u32 = 256 * 1024; -pub const MIN_HEAP_FRAME_BYTES: u32 = HEAP_LENGTH as u32; - -/// The total accounts data a transaction can 
load is limited to 64MiB to not break -/// anyone in Mainnet-beta today. It can be set by the set_loaded_accounts_data_size_limit instruction pub const MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES: u32 = 64 * 1024 * 1024; - -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub struct ComputeBudgetLimits { - pub updated_heap_bytes: u32, - pub compute_unit_limit: u32, - pub compute_unit_price: u64, - pub loaded_accounts_bytes: u32, -} - -impl Default for ComputeBudgetLimits { - fn default() -> Self { - ComputeBudgetLimits { - updated_heap_bytes: MIN_HEAP_FRAME_BYTES, - compute_unit_limit: MAX_COMPUTE_UNIT_LIMIT, - compute_unit_price: 0, - loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, - } - } -} - -impl From<ComputeBudgetLimits> for FeeBudgetLimits { - fn from(val: ComputeBudgetLimits) -> Self { - let prioritization_fee_details = PrioritizationFeeDetails::new( - PrioritizationFeeType::ComputeUnitPrice(val.compute_unit_price), - u64::from(val.compute_unit_limit), - ); - let prioritization_fee = prioritization_fee_details.get_fee(); - - FeeBudgetLimits { - // NOTE - usize::from(u32).unwrap() may fail if target is 16-bit and - // `loaded_accounts_bytes` is greater than u16::MAX. In that case, panic is proper. - loaded_accounts_data_size_limit: usize::try_from(val.loaded_accounts_bytes).unwrap(), - heap_cost: DEFAULT_HEAP_COST, - compute_unit_limit: u64::from(val.compute_unit_limit), - prioritization_fee, - } - } -} - /// Processing compute_budget could be part of tx sanitizing; failing to process /// these instructions will drop the transaction eventually without execution, so we may as well fail it early. @@ -110,7 +59,10 @@ pub fn process_compute_budget_instructions<'a>( if updated_loaded_accounts_data_size_limit.is_some() { return Err(duplicate_instruction_error); } - updated_loaded_accounts_data_size_limit = Some(bytes); + updated_loaded_accounts_data_size_limit = Some( + NonZeroU32::new(bytes) + .ok_or(TransactionError::InvalidLoadedAccountsDataSizeLimit)?, + ); } _ => return Err(invalid_instruction_data_error), } @@ -407,7 +359,7 @@ mod tests { let data_size = 1; let expected_result = Ok(ComputeBudgetLimits { compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, - loaded_accounts_bytes: data_size, + loaded_accounts_bytes: NonZeroU32::new(data_size).unwrap(), ..ComputeBudgetLimits::default() }); @@ -421,7 +373,7 @@ // Assert that when set_loaded_accounts_data_size_limit is present with a greater-than-max value, // the budget is set to the max data size - let data_size = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES + 1; + let data_size = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES.get() + 1; let expected_result = Ok(ComputeBudgetLimits { compute_unit_limit: DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, loaded_accounts_bytes: MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES, ..ComputeBudgetLimits::default() }); @@ -455,7 +407,7 @@ mod tests { // Assert that when set_loaded_accounts_data_size_limit is present more than once, // DuplicateInstruction is returned - let data_size = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES; + let data_size = MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES.get(); let expected_result = Err(TransactionError::DuplicateInstruction(2)); test!( diff --git a/runtime-transaction/src/lib.rs b/runtime-transaction/src/lib.rs index 0fdeb7c5b6bd65..4b980ce5dd92bc 100644 --- a/runtime-transaction/src/lib.rs +++ b/runtime-transaction/src/lib.rs @@ -1,5 +1,6 @@ #![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] +pub mod instructions_processor; pub mod runtime_transaction; pub mod transaction_meta; diff --git 
a/runtime-transaction/src/runtime_transaction.rs b/runtime-transaction/src/runtime_transaction.rs index 625ec28fdb22d8..05bb4e6bbb45f7 100644 --- a/runtime-transaction/src/runtime_transaction.rs +++ b/runtime-transaction/src/runtime_transaction.rs @@ -10,10 +10,11 @@ //! ALT, RuntimeTransaction transits into Dynamically Loaded state, //! with its dynamic metadata loaded. use { - crate::transaction_meta::{DynamicMeta, StaticMeta, TransactionMeta}, - solana_compute_budget::compute_budget_processor::{ - process_compute_budget_instructions, ComputeBudgetLimits, + crate::{ + instructions_processor::process_compute_budget_instructions, + transaction_meta::{DynamicMeta, StaticMeta, TransactionMeta}, }, + solana_compute_budget::compute_budget_limits::ComputeBudgetLimits, solana_sdk::{ hash::Hash, message::{AddressLoader, SanitizedMessage, SanitizedVersionedMessage}, @@ -89,7 +90,7 @@ impl RuntimeTransaction { } = process_compute_budget_instructions(message.program_instructions_iter())?; meta.set_compute_unit_limit(compute_unit_limit); meta.set_compute_unit_price(compute_unit_price); - meta.set_loaded_accounts_bytes(loaded_accounts_bytes); + meta.set_loaded_accounts_bytes(loaded_accounts_bytes.get()); Ok(Self { signatures, diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 184914f9c2b782..37beded4870b0e 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -54,6 +54,7 @@ solana-compute-budget = { workspace = true } solana-compute-budget-program = { workspace = true } solana-config-program = { workspace = true } solana-cost-model = { workspace = true } +solana-fee = { workspace = true } solana-frozen-abi = { workspace = true, optional = true } solana-frozen-abi-macro = { workspace = true, optional = true } solana-inline-spl = { workspace = true } @@ -63,10 +64,12 @@ solana-metrics = { workspace = true } solana-perf = { workspace = true } solana-program-runtime = { workspace = true } solana-rayon-threadlimit = { workspace = true } +solana-runtime-transaction = { workspace = true } solana-sdk = { workspace = true } solana-stake-program = { workspace = true } solana-svm = { workspace = true } solana-system-program = { workspace = true } +solana-timings = { workspace = true } solana-transaction-status = { workspace = true } solana-version = { workspace = true } solana-vote = { workspace = true } diff --git a/runtime/src/accounts_background_service.rs b/runtime/src/accounts_background_service.rs index 3736f4a9709c5a..70da57c818f17d 100644 --- a/runtime/src/accounts_background_service.rs +++ b/runtime/src/accounts_background_service.rs @@ -147,7 +147,6 @@ impl SnapshotRequestHandler { &self, test_hash_calculation: bool, non_snapshot_time_us: u128, - last_full_snapshot_slot: &mut Option, exit: &AtomicBool, ) -> Option> { let ( @@ -155,7 +154,7 @@ impl SnapshotRequestHandler { accounts_package_kind, num_outstanding_requests, num_re_enqueued_requests, - ) = self.get_next_snapshot_request(*last_full_snapshot_slot)?; + ) = self.get_next_snapshot_request()?; datapoint_info!( "handle_snapshot_requests", @@ -171,7 +170,6 @@ impl SnapshotRequestHandler { Some(self.handle_snapshot_request( test_hash_calculation, non_snapshot_time_us, - last_full_snapshot_slot, snapshot_request, accounts_package_kind, exit, @@ -189,7 +187,6 @@ impl SnapshotRequestHandler { /// ones re-enqueued. 
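// The NonZeroU32 change in the relocated instructions_processor above makes a
// zero loaded-accounts-data-size limit unrepresentable: it is rejected once at
// instruction-processing time, and consumers such as RuntimeTransaction read
// the validated value back with .get(). A minimal sketch of that round trip
// (helper names are illustrative; the real path returns TransactionError):
use std::num::NonZeroU32;

fn validate_loaded_accounts_data_size_limit(
    requested_bytes: u32,
) -> Result<NonZeroU32, &'static str> {
    // Mirrors the InvalidLoadedAccountsDataSizeLimit rejection above.
    NonZeroU32::new(requested_bytes).ok_or("InvalidLoadedAccountsDataSizeLimit")
}

fn as_plain_u32(limit: NonZeroU32) -> u32 {
    // What set_loaded_accounts_bytes(loaded_accounts_bytes.get()) consumes.
    limit.get()
}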
fn get_next_snapshot_request( &self, - last_full_snapshot_slot: Option, ) -> Option<( SnapshotRequest, AccountsPackageKind, @@ -200,11 +197,8 @@ impl SnapshotRequestHandler { .snapshot_request_receiver .try_iter() .map(|request| { - let accounts_package_kind = new_accounts_package_kind( - &request, - &self.snapshot_config, - last_full_snapshot_slot, - ); + let accounts_package_kind = + new_accounts_package_kind(&request, &self.snapshot_config); (request, accounts_package_kind) }) .collect(); @@ -293,7 +287,6 @@ impl SnapshotRequestHandler { &self, test_hash_calculation: bool, non_snapshot_time_us: u128, - last_full_snapshot_slot: &mut Option, snapshot_request: SnapshotRequest, accounts_package_kind: AccountsPackageKind, exit: &AtomicBool, @@ -311,7 +304,14 @@ impl SnapshotRequestHandler { assert!(snapshot_root_bank.is_startup_verification_complete()); if accounts_package_kind == AccountsPackageKind::Snapshot(SnapshotKind::FullSnapshot) { - *last_full_snapshot_slot = Some(snapshot_root_bank.slot()); + // The latest full snapshot slot is what accounts-db uses to properly handle zero lamport + // accounts. We are handling a full snapshot request here, and since taking a snapshot + // is not allowed to fail, we can update accounts-db now. + snapshot_root_bank + .rc + .accounts + .accounts_db + .set_latest_full_snapshot_slot(snapshot_root_bank.slot()); } let previous_accounts_hash = test_hash_calculation.then(|| { @@ -365,7 +365,7 @@ impl SnapshotRequestHandler { }); let mut clean_time = Measure::start("clean_time"); - snapshot_root_bank.clean_accounts(*last_full_snapshot_slot); + snapshot_root_bank.clean_accounts(); clean_time.stop(); let (_, shrink_ancient_time_us) = measure_us!(snapshot_root_bank.shrink_ancient_slots()); @@ -551,13 +551,11 @@ impl AbsRequestHandlers { &self, test_hash_calculation: bool, non_snapshot_time_us: u128, - last_full_snapshot_slot: &mut Option, exit: &AtomicBool, ) -> Option> { self.snapshot_request_handler.handle_snapshot_requests( test_hash_calculation, non_snapshot_time_us, - last_full_snapshot_slot, exit, ) } @@ -573,7 +571,6 @@ impl AccountsBackgroundService { exit: Arc, request_handlers: AbsRequestHandlers, test_hash_calculation: bool, - mut last_full_snapshot_slot: Option, ) -> Self { let mut last_cleaned_block_height = 0; let mut removed_slots_count = 0; @@ -637,7 +634,6 @@ impl AccountsBackgroundService { request_handlers.handle_snapshot_requests( test_hash_calculation, non_snapshot_time, - &mut last_full_snapshot_slot, &exit, ) }) @@ -675,7 +671,7 @@ impl AccountsBackgroundService { // as any later snapshots that are taken are of // slots >= bank.slot() bank.force_flush_accounts_cache(); - bank.clean_accounts(last_full_snapshot_slot); + bank.clean_accounts(); last_cleaned_block_height = bank.block_height(); // See justification below for why we skip 'shrink' here. 
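// A hedged restatement of the ownership change in this hunk: the latest full
// snapshot slot now lives in accounts-db rather than being threaded through
// every handler as last_full_snapshot_slot. The handler records it when a full
// snapshot request is processed, and new_accounts_package_kind() (next hunk)
// queries it on demand; until the first full snapshot is taken the getter
// returns None, so incremental snapshot requests degrade to
// AccountsHashVerifier packages. The accessors below mirror the diff's own code.
fn record_full_snapshot_slot(snapshot_root_bank: &Bank) {
    snapshot_root_bank
        .rc
        .accounts
        .accounts_db
        .set_latest_full_snapshot_slot(snapshot_root_bank.slot());
}

fn query_latest_full_snapshot_slot(bank: &Bank) -> Option<Slot> {
    bank.rc.accounts.accounts_db.latest_full_snapshot_slot()
}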
if bank.is_startup_verification_complete() { @@ -736,9 +732,14 @@ impl AccountsBackgroundService { fn new_accounts_package_kind( snapshot_request: &SnapshotRequest, snapshot_config: &SnapshotConfig, - last_full_snapshot_slot: Option<Slot>, ) -> AccountsPackageKind { let block_height = snapshot_request.snapshot_root_bank.block_height(); + let latest_full_snapshot_slot = snapshot_request + .snapshot_root_bank + .rc + .accounts + .accounts_db + .latest_full_snapshot_slot(); match snapshot_request.request_kind { SnapshotRequestKind::EpochAccountsHash => AccountsPackageKind::EpochAccountsHash, SnapshotRequestKind::Snapshot => { @@ -750,10 +751,10 @@ fn new_accounts_package_kind( } else if snapshot_utils::should_take_incremental_snapshot( block_height, snapshot_config.incremental_snapshot_archive_interval_slots, - last_full_snapshot_slot, + latest_full_snapshot_slot, ) { AccountsPackageKind::Snapshot(SnapshotKind::IncrementalSnapshot( - last_full_snapshot_slot.unwrap(), + latest_full_snapshot_slot.unwrap(), )) } else { AccountsPackageKind::AccountsHashVerifier @@ -930,6 +931,20 @@ mod test { .epoch_accounts_hash_manager .set_valid(EpochAccountsHash::new(Hash::new_unique()), 0); + // We need to get and set accounts-db's latest full snapshot slot to test + // get_next_snapshot_request(). To work around potential borrowing issues + // caused by make_banks() below, Arc::clone bank0 and add helper functions. + let bank0 = bank.clone(); + fn latest_full_snapshot_slot(bank: &Bank) -> Option<Slot> { + bank.rc.accounts.accounts_db.latest_full_snapshot_slot() + } + fn set_latest_full_snapshot_slot(bank: &Bank, slot: Slot) { + bank.rc + .accounts + .accounts_db + .set_latest_full_snapshot_slot(slot); + } + // Create new banks and send snapshot requests so that the following requests will be in // the channel before handling the requests: // @@ -950,7 +965,7 @@ // // (slots not called out will all be AHV) // Also, incremental snapshots before slot 240 (the first full snapshot handled), will - // actually be AHV since the last full snapshot slot will be `None`. This is expected and + // actually be AHV since the latest full snapshot slot will be `None`. This is expected and // fine; but maybe unexpected for a reader/debugger without this additional context. let mut make_banks = |num_banks| { for _ in 0..num_banks { @@ -976,8 +991,9 @@ make_banks(303); // Ensure the EAH is handled 1st + assert_eq!(latest_full_snapshot_slot(&bank0), None,); let (snapshot_request, accounts_package_kind, ..) = snapshot_request_handler - .get_next_snapshot_request(None) + .get_next_snapshot_request() .unwrap(); assert_eq!( accounts_package_kind, @@ -987,19 +1003,22 @@ // Ensure the full snapshot from slot 240 is handled 2nd // (the older full snapshots are skipped and dropped) + assert_eq!(latest_full_snapshot_slot(&bank0), None,); let (snapshot_request, accounts_package_kind, ..) = snapshot_request_handler - .get_next_snapshot_request(None) + .get_next_snapshot_request() .unwrap(); assert_eq!( accounts_package_kind, AccountsPackageKind::Snapshot(SnapshotKind::FullSnapshot) ); assert_eq!(snapshot_request.snapshot_root_bank.slot(), 240); + set_latest_full_snapshot_slot(&bank0, 240); // Ensure the incremental snapshot from slot 300 is handled 3rd // (the older incremental snapshots are skipped and dropped) + assert_eq!(latest_full_snapshot_slot(&bank0), Some(240),); let (snapshot_request, accounts_package_kind, ..) 
= snapshot_request_handler - .get_next_snapshot_request(Some(240)) + .get_next_snapshot_request() .unwrap(); assert_eq!( accounts_package_kind, @@ -1009,8 +1028,9 @@ mod test { // Ensure the accounts hash verifier from slot 303 is handled 4th // (the older accounts hash verifiers are skipped and dropped) + assert_eq!(latest_full_snapshot_slot(&bank0), Some(240),); let (snapshot_request, accounts_package_kind, ..) = snapshot_request_handler - .get_next_snapshot_request(Some(240)) + .get_next_snapshot_request() .unwrap(); assert_eq!( accounts_package_kind, @@ -1019,8 +1039,9 @@ mod test { assert_eq!(snapshot_request.snapshot_root_bank.slot(), 303); // And now ensure the snapshot request channel is empty! + assert_eq!(latest_full_snapshot_slot(&bank0), Some(240),); assert!(snapshot_request_handler - .get_next_snapshot_request(Some(240)) + .get_next_snapshot_request() .is_none()); // Create more banks and send snapshot requests so that the following requests will be in @@ -1039,18 +1060,21 @@ mod test { make_banks(240); // Ensure the full snapshot is handled 1st + assert_eq!(latest_full_snapshot_slot(&bank0), Some(240),); let (snapshot_request, accounts_package_kind, ..) = snapshot_request_handler - .get_next_snapshot_request(None) + .get_next_snapshot_request() .unwrap(); assert_eq!( accounts_package_kind, AccountsPackageKind::Snapshot(SnapshotKind::FullSnapshot) ); assert_eq!(snapshot_request.snapshot_root_bank.slot(), 480); + set_latest_full_snapshot_slot(&bank0, 480); // Ensure the EAH is handled 2nd + assert_eq!(latest_full_snapshot_slot(&bank0), Some(480),); let (snapshot_request, accounts_package_kind, ..) = snapshot_request_handler - .get_next_snapshot_request(Some(480)) + .get_next_snapshot_request() .unwrap(); assert_eq!( accounts_package_kind, @@ -1059,8 +1083,9 @@ mod test { assert_eq!(snapshot_request.snapshot_root_bank.slot(), 500); // Ensure the incremental snapshot is handled 3rd + assert_eq!(latest_full_snapshot_slot(&bank0), Some(480),); let (snapshot_request, accounts_package_kind, ..) = snapshot_request_handler - .get_next_snapshot_request(Some(480)) + .get_next_snapshot_request() .unwrap(); assert_eq!( accounts_package_kind, @@ -1069,8 +1094,9 @@ mod test { assert_eq!(snapshot_request.snapshot_root_bank.slot(), 540); // Ensure the accounts hash verifier is handled 4th + assert_eq!(latest_full_snapshot_slot(&bank0), Some(480),); let (snapshot_request, accounts_package_kind, ..) = snapshot_request_handler - .get_next_snapshot_request(Some(480)) + .get_next_snapshot_request() .unwrap(); assert_eq!( accounts_package_kind, @@ -1079,8 +1105,9 @@ mod test { assert_eq!(snapshot_request.snapshot_root_bank.slot(), 543); // And now ensure the snapshot request channel is empty! + assert_eq!(latest_full_snapshot_slot(&bank0), Some(480),); assert!(snapshot_request_handler - .get_next_snapshot_request(Some(480)) + .get_next_snapshot_request() .is_none()); } diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 6a5dfd85d83c19..2eeb6f924b13a3 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -33,8 +33,6 @@ //! It offers a high-level API that signs transactions //! on behalf of the caller, and a low-level API for when they have //! already been signed and verified. 
-#[allow(deprecated)] -use solana_sdk::recent_blockhashes_account; use { crate::{ bank::{ @@ -43,7 +41,7 @@ use { partitioned_epoch_rewards::{EpochRewardStatus, StakeRewards, VoteRewardsAccounts}, }, bank_forks::BankForks, - epoch_stakes::{EpochStakes, NodeVoteAccounts}, + epoch_stakes::{split_epoch_stakes, EpochStakes, NodeVoteAccounts, VersionedEpochStakes}, installed_scheduler_pool::{BankWithScheduler, InstalledSchedulerRwLock}, runtime_config::RuntimeConfig, serde_snapshot::BankIncrementalSnapshotPersistence, @@ -60,7 +58,6 @@ use { }, byteorder::{ByteOrder, LittleEndian}, dashmap::{DashMap, DashSet}, - itertools::izip, log::*, rayon::{ iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}, @@ -88,19 +85,15 @@ use { storable_accounts::StorableAccounts, }, solana_bpf_loader_program::syscalls::create_program_runtime_environment_v1, - solana_compute_budget::{ - compute_budget::ComputeBudget, - compute_budget_processor::process_compute_budget_instructions, - }, + solana_compute_budget::compute_budget::ComputeBudget, solana_cost_model::cost_tracker::CostTracker, solana_loader_v4_program::create_program_runtime_environment_v2, - solana_measure::{measure, measure::Measure, measure_us}, + solana_measure::{measure::Measure, measure_time, measure_us}, solana_perf::perf_libs, solana_program_runtime::{ - invoke_context::BuiltinFunctionWithContext, - loaded_programs::ProgramCacheEntry, - timings::{ExecuteTimingType, ExecuteTimings}, + invoke_context::BuiltinFunctionWithContext, loaded_programs::ProgramCacheEntry, }, + solana_runtime_transaction::instructions_processor::process_compute_budget_instructions, solana_sdk::{ account::{ create_account_shared_data_with_fields as create_account, from_account, Account, @@ -118,10 +111,9 @@ use { epoch_schedule::EpochSchedule, feature, feature_set::{ - self, include_loaded_accounts_data_size_in_fee_calculation, - remove_rounding_in_fee_calculation, reward_full_priority_fee, FeatureSet, + self, remove_rounding_in_fee_calculation, reward_full_priority_fee, FeatureSet, }, - fee::{FeeDetails, FeeStructure}, + fee::{FeeBudgetLimits, FeeDetails, FeeStructure}, fee_calculator::FeeRateGovernor, genesis_config::{ClusterType, GenesisConfig}, hard_forks::HardForks, @@ -141,7 +133,6 @@ use { rent_debits::RentDebits, reserved_account_keys::ReservedAccountKeys, reward_info::RewardInfo, - saturating_add_assign, signature::{Keypair, Signature}, slot_hashes::SlotHashes, slot_history::{Check, SlotHistory}, @@ -162,28 +153,29 @@ use { solana_svm::{ account_loader::{ collect_rent_from_account, CheckedTransactionDetails, TransactionCheckResult, - TransactionLoadResult, }, account_overrides::AccountOverrides, - nonce_info::NoncePartial, + account_saver::collect_accounts_to_store, + nonce_info::NonceInfo, + transaction_commit_result::{CommittedTransaction, TransactionCommitResult}, transaction_error_metrics::TransactionErrorMetrics, + transaction_execution_result::{ + TransactionExecutionDetails, TransactionExecutionResult, TransactionLoadedAccountsStats, + }, transaction_processing_callback::TransactionProcessingCallback, transaction_processor::{ ExecutionRecordingConfig, TransactionBatchProcessor, TransactionLogMessages, TransactionProcessingConfig, TransactionProcessingEnvironment, }, - transaction_results::{ - TransactionExecutionDetails, TransactionExecutionResult, - TransactionLoadedAccountsStats, TransactionResults, - }, }, + solana_timings::{ExecuteTimingType, ExecuteTimings}, solana_vote::vote_account::{VoteAccount, VoteAccountsHashMap}, 
solana_vote_program::vote_state::VoteState, std::{ borrow::Cow, collections::{HashMap, HashSet}, convert::TryFrom, - fmt, mem, + fmt, ops::{AddAssign, RangeFull, RangeInclusive}, path::PathBuf, slice, @@ -192,7 +184,7 @@ use { AtomicBool, AtomicI64, AtomicU64, AtomicUsize, Ordering::{AcqRel, Acquire, Relaxed}, }, - Arc, LockResult, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard, + Arc, LockResult, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard, Weak, }, thread::Builder, time::{Duration, Instant}, @@ -228,6 +220,7 @@ pub mod epoch_accounts_hash_utils; mod fee_distribution; mod metrics; pub(crate) mod partitioned_epoch_rewards; +mod recent_blockhashes_account; mod serde_snapshot; mod sysvar_cache; pub(crate) mod tests; @@ -273,7 +266,6 @@ impl AddAssign for SquashTiming { } } -#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[derive(Debug, Default, PartialEq)] pub(crate) struct CollectorFeeDetails { transaction_fee: u64, @@ -315,22 +307,6 @@ pub struct BankRc { pub(crate) bank_id_generator: Arc<AtomicU64>, } -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] -use solana_frozen_abi::abi_example::AbiExample; - -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] -impl AbiExample for BankRc { - fn example() -> Self { - BankRc { - // Set parent to None to cut the recursion into another Bank - parent: RwLock::new(None), - // AbiExample for Accounts is specially implemented to contain a storage example - accounts: AbiExample::example(), - bank_id_generator: Arc::new(AtomicU64::new(0)), - } - } -} - impl BankRc { pub(crate) fn new(accounts: Accounts) -> Self { Self { @@ -342,11 +318,9 @@ impl BankRc { } pub struct LoadAndExecuteTransactionsOutput { - pub loaded_transactions: Vec<TransactionLoadResult>, // Vector of results indicating whether a transaction was executed or could not // be executed. Note executed transactions can still have failed! pub execution_results: Vec<TransactionExecutionResult>, - pub retryable_transaction_indexes: Vec<usize>, // Total number of transactions that were executed pub executed_transactions_count: usize, // Number of non-vote transactions that were executed @@ -355,7 +329,6 @@ pub struct LoadAndExecuteTransactionsOutput { // an error. pub executed_with_successful_result_count: usize, pub signature_count: u64, - pub error_counters: TransactionErrorMetrics, } pub struct TransactionSimulationResult { @@ -382,7 +355,6 @@ impl TransactionBalancesSet { } pub type TransactionBalances = Vec<Vec<u64>>; -#[cfg_attr(feature = "frozen-abi", derive(AbiExample, AbiEnumVisitor))] #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub enum TransactionLogCollectorFilter { All, @@ -397,14 +369,12 @@ impl Default for TransactionLogCollectorFilter { } } -#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[derive(Debug, Default)] pub struct TransactionLogCollectorConfig { pub mentioned_addresses: HashSet<Pubkey>, pub filter: TransactionLogCollectorFilter, } -#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[derive(Clone, Debug, PartialEq, Eq)] pub struct TransactionLogInfo { pub signature: Signature, @@ -413,7 +383,6 @@ pub struct TransactionLogInfo { pub log_messages: TransactionLogMessages, } -#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[derive(Default, Debug)] pub struct TransactionLogCollector { // All the logs collected from this Bank. 
Exact contents depend on the @@ -526,6 +495,7 @@ pub struct BankFieldsToSerialize { pub epoch_stakes: HashMap, pub is_delta: bool, pub accounts_data_len: u64, + pub versioned_epoch_stakes: HashMap, } // Can't derive PartialEq because RwLock doesn't implement PartialEq @@ -590,7 +560,6 @@ impl PartialEq for Bank { accounts_data_size_initial: _, accounts_data_size_delta_on_chain: _, accounts_data_size_delta_off_chain: _, - incremental_snapshot_persistence: _, epoch_reward_status: _, transaction_processor: _, check_program_modification_slot: _, @@ -670,6 +639,7 @@ impl BankFieldsToSerialize { epoch_stakes: HashMap::default(), is_delta: bool::default(), accounts_data_len: u64::default(), + versioned_epoch_stakes: HashMap::default(), } } } @@ -698,17 +668,7 @@ pub trait DropCallback: fmt::Debug { #[derive(Debug, Default)] pub struct OptionalDropCallback(Option>); -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] -impl AbiExample for OptionalDropCallback { - fn example() -> Self { - Self(None) - } -} - /// Manager for the state of all accounts and programs after processing its entries. -/// AbiExample is needed even without Serialize/Deserialize; actual (de-)serialization -/// are implemented elsewhere for versioning -#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[derive(Debug)] pub struct Bank { /// References to accounts, parent and signature status @@ -867,8 +827,6 @@ pub struct Bank { /// the account hash of the accounts that would have been rewritten as bank hash expects. skipped_rewrites: Mutex>, - pub incremental_snapshot_persistence: Option, - epoch_reward_status: EpochRewardStatus, transaction_processor: TransactionBatchProcessor, @@ -943,7 +901,6 @@ impl Bank { fn default_with_accounts(accounts: Accounts) -> Self { let mut bank = Self { skipped_rewrites: Mutex::default(), - incremental_snapshot_persistence: None, rc: BankRc::new(accounts), status_cache: Arc::>::default(), blockhash_queue: RwLock::::default(), @@ -1178,7 +1135,6 @@ impl Bank { let accounts_data_size_initial = parent.load_accounts_data_size(); let mut new = Self { skipped_rewrites: Mutex::default(), - incremental_snapshot_persistence: None, rc, status_cache, slot, @@ -1299,7 +1255,6 @@ impl Bank { new.update_slot_hashes(); new.update_stake_history(Some(parent.epoch())); new.update_clock(Some(parent.epoch())); - new.update_fees(); new.update_last_restart_slot() }); @@ -1353,7 +1308,7 @@ impl Bank { new } - pub fn set_fork_graph_in_program_cache(&self, fork_graph: Arc>) { + pub fn set_fork_graph_in_program_cache(&self, fork_graph: Weak>) { self.transaction_processor .program_cache .write() @@ -1393,74 +1348,60 @@ impl Bank { ) { let epoch = self.epoch(); let slot = self.slot(); - let (thread_pool, thread_pool_time) = measure!( - ThreadPoolBuilder::new() - .thread_name(|i| format!("solBnkNewEpch{i:02}")) - .build() - .expect("new rayon threadpool"), - "thread_pool_creation", - ); + let (thread_pool, thread_pool_time_us) = measure_us!(ThreadPoolBuilder::new() + .thread_name(|i| format!("solBnkNewEpch{i:02}")) + .build() + .expect("new rayon threadpool")); - let (_, apply_feature_activations_time) = measure!( - self.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false), - "apply_feature_activation", + let (_, apply_feature_activations_time_us) = measure_us!( + self.apply_feature_activations(ApplyFeatureActivationsCaller::NewFromParent, false) ); // Add new entry to stakes.stake_history, set appropriate epoch and // update vote accounts with warmed up stakes before saving a // 
snapshot of stakes in epoch stakes - let (_, activate_epoch_time) = measure!( - self.stakes_cache.activate_epoch( - epoch, - &thread_pool, - self.new_warmup_cooldown_rate_epoch() - ), - "activate_epoch", - ); + let (_, activate_epoch_time_us) = measure_us!(self.stakes_cache.activate_epoch( + epoch, + &thread_pool, + self.new_warmup_cooldown_rate_epoch() + )); // Save a snapshot of stakes for use in consensus and stake weighted networking let leader_schedule_epoch = self.epoch_schedule.get_leader_schedule_epoch(slot); - let (_, update_epoch_stakes_time) = measure!( - self.update_epoch_stakes(leader_schedule_epoch), - "update_epoch_stakes", - ); + let (_, update_epoch_stakes_time_us) = + measure_us!(self.update_epoch_stakes(leader_schedule_epoch)); let mut rewards_metrics = RewardsMetrics::default(); // After saving a snapshot of stakes, apply stake rewards and commission - let (_, update_rewards_with_thread_pool_time) = measure!( - { - if self.is_partitioned_rewards_code_enabled() { - self.begin_partitioned_rewards( - reward_calc_tracer, - &thread_pool, - parent_epoch, - parent_slot, - parent_height, - &mut rewards_metrics, - ); - } else { - self.update_rewards_with_thread_pool( - parent_epoch, - reward_calc_tracer, - &thread_pool, - &mut rewards_metrics, - ) - } - }, - "update_rewards_with_thread_pool", - ); + let (_, update_rewards_with_thread_pool_time_us) = + measure_us!(if self.is_partitioned_rewards_code_enabled() { + self.begin_partitioned_rewards( + reward_calc_tracer, + &thread_pool, + parent_epoch, + parent_slot, + parent_height, + &mut rewards_metrics, + ); + } else { + self.update_rewards_with_thread_pool( + parent_epoch, + reward_calc_tracer, + &thread_pool, + &mut rewards_metrics, + ) + }); report_new_epoch_metrics( epoch, slot, parent_slot, NewEpochTimings { - thread_pool_time_us: thread_pool_time.as_us(), - apply_feature_activations_time_us: apply_feature_activations_time.as_us(), - activate_epoch_time_us: activate_epoch_time.as_us(), - update_epoch_stakes_time_us: update_epoch_stakes_time.as_us(), - update_rewards_with_thread_pool_time_us: update_rewards_with_thread_pool_time - .as_us(), + thread_pool_time_us, + apply_feature_activations_time_us, + activate_epoch_time_us, + update_epoch_stakes_time_us, + update_rewards_with_thread_pool_time_us, }, rewards_metrics, ); @@ -1566,7 +1507,7 @@ impl Bank { // Note that we are disabling the read cache while we populate the stakes cache. // The stakes accounts will not be expected to be loaded again. // If we populate the read cache with these loads, then we'll just soon have to evict these. 
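// The measure! → measure_us! conversions in the hunks above all follow the
// same pattern; a small self-contained sketch of the two styles, using the
// solana-measure macros imported elsewhere in this diff:
use solana_measure::{measure_time, measure_us};

fn timing_styles() -> u64 {
    // Older style: measure_time! (the renamed measure!) yields a Measure that
    // must be converted explicitly.
    let (value_a, measure) = measure_time!((0u64..1_000).sum::<u64>());
    let us_a = measure.as_us();

    // Newer style: measure_us! yields elapsed microseconds directly as a u64,
    // which is what the datapoint and timings fields consume.
    let (value_b, us_b) = measure_us!((0u64..1_000).sum::<u64>());
    assert_eq!(value_a, value_b);
    us_a + us_b
}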
- let (stakes, stakes_time) = measure!(Stakes::new(&fields.stakes, |pubkey| { + let (stakes, stakes_time) = measure_time!(Stakes::new(&fields.stakes, |pubkey| { let (account, _slot) = bank_rc .accounts .load_with_fixed_root_do_not_populate_read_cache(&ancestors, pubkey)?; @@ -1580,7 +1521,6 @@ impl Bank { let stakes_accounts_load_duration = now.elapsed(); let mut bank = Self { skipped_rewrites: Mutex::default(), - incremental_snapshot_persistence: fields.incremental_snapshot_persistence, rc: bank_rc, status_cache: Arc::>::default(), blockhash_queue: RwLock::new(fields.blockhash_queue), @@ -1713,6 +1653,7 @@ impl Bank { /// Return subset of bank fields representing serializable state pub(crate) fn get_fields_to_serialize(&self) -> BankFieldsToSerialize { + let (epoch_stakes, versioned_epoch_stakes) = split_epoch_stakes(self.epoch_stakes.clone()); BankFieldsToSerialize { blockhash_queue: self.blockhash_queue.read().unwrap().clone(), ancestors: AncestorsForSerialization::from(&self.ancestors), @@ -1741,9 +1682,10 @@ impl Bank { epoch_schedule: self.epoch_schedule.clone(), inflation: *self.inflation.read().unwrap(), stakes: StakesEnum::from(self.stakes_cache.stakes().clone()), - epoch_stakes: self.epoch_stakes.clone(), + epoch_stakes, is_delta: self.is_delta.load(Relaxed), accounts_data_len: self.load_accounts_data_size(), + versioned_epoch_stakes, } } @@ -2034,21 +1976,6 @@ impl Bank { } } - #[allow(deprecated)] - fn update_fees(&self) { - if !self - .feature_set - .is_active(&feature_set::disable_fees_sysvar::id()) - { - self.update_sysvar_account(&sysvar::fees::id(), |account| { - create_account( - &sysvar::fees::Fees::new(&self.fee_rate_governor.create_fee_calculator()), - self.inherit_specially_retained_account_fields(account), - ) - }); - } - } - fn update_rent(&self) { self.update_sysvar_account(&sysvar::rent::id(), |account| { create_account( @@ -2272,7 +2199,7 @@ impl Bank { solana_stake_program::get_minimum_delegation(&self.feature_set) .max(LAMPORTS_PER_SOL); - let (stake_delegations, filter_timer) = measure!(stakes + let (stake_delegations, filter_time_us) = measure_us!(stakes .stake_delegations() .iter() .filter(|(_stake_pubkey, cached_stake_account)| { @@ -2282,7 +2209,7 @@ impl Bank { datapoint_info!( "stake_account_filter_time", - ("filter_time_us", filter_timer.as_us(), i64), + ("filter_time_us", filter_time_us, i64), ("num_stake_delegations_before", num_stake_delegations, i64), ("num_stake_delegations_after", stake_delegations.len(), i64) ); @@ -2463,13 +2390,13 @@ impl Bank { invalid_vote_keys, vote_accounts_cache_miss_count, }, - measure, - ) = measure!({ + load_vote_and_stake_accounts_us, + ) = measure_us!({ self._load_vote_and_stake_accounts(thread_pool, reward_calc_tracer.as_ref()) }); metrics .load_vote_and_stake_accounts_us - .fetch_add(measure.as_us(), Relaxed); + .fetch_add(load_vote_and_stake_accounts_us, Relaxed); metrics.vote_accounts_cache_miss_count += vote_accounts_cache_miss_count; self.stakes_cache .handle_invalid_keys(invalid_vote_keys, self.slot()); @@ -2485,7 +2412,7 @@ impl Bank { metrics: &RewardsMetrics, ) -> Option { let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch(); - let (points, measure) = measure!(thread_pool.install(|| { + let (points, calculate_points_us) = measure_us!(thread_pool.install(|| { vote_with_stake_delegations_map .par_iter() .map(|entry| { @@ -2512,7 +2439,7 @@ impl Bank { })); metrics .calculate_points_us - .fetch_add(measure.as_us(), Relaxed); + .fetch_add(calculate_points_us, Relaxed); (points > 
0).then_some(PointValue { rewards, points }) } @@ -2554,7 +2481,7 @@ impl Bank { }, ); - let (stake_rewards, measure) = measure!(thread_pool.install(|| { + let (stake_rewards, redeem_rewards_us) = measure_us!(thread_pool.install(|| { stake_delegation_iterator .filter_map(|(vote_pubkey, vote_state, (stake_pubkey, stake_account))| { // curry closure to add the contextual stake_pubkey @@ -2610,7 +2537,7 @@ impl Bank { }) .collect() })); - metrics.redeem_rewards_us += measure.as_us(); + metrics.redeem_rewards_us += redeem_rewards_us; (vote_account_rewards, stake_rewards) } @@ -2645,7 +2572,7 @@ impl Bank { vote_account_rewards: VoteRewards, metrics: &RewardsMetrics, ) -> Vec<(Pubkey, RewardInfo)> { - let (vote_rewards, measure) = measure!(vote_account_rewards + let (vote_rewards, store_vote_accounts_us) = measure_us!(vote_account_rewards .into_iter() .filter_map( |( @@ -2681,7 +2608,7 @@ impl Bank { metrics .store_vote_accounts_us - .fetch_add(measure.as_us(), Relaxed); + .fetch_add(store_vote_accounts_us, Relaxed); vote_rewards } @@ -2936,9 +2863,6 @@ impl Bank { self.capitalization.fetch_add(account.lamports(), Relaxed); self.accounts_data_size_initial += account.data().len() as u64; } - // updating sysvars (the fees sysvar in this case) now depends on feature activations in - // genesis_config.accounts above - self.update_fees(); for (pubkey, account) in genesis_config.rewards_pools.iter() { assert!( @@ -3079,11 +3003,6 @@ impl Bank { blockhash_queue.get_lamports_per_signature(hash) } - #[deprecated(since = "1.9.0", note = "Please use `get_fee_for_message` instead")] - pub fn get_fee_rate_governor(&self) -> &FeeRateGovernor { - &self.fee_rate_governor - } - pub fn get_fee_for_message(&self, message: &SanitizedMessage) -> Option { let lamports_per_signature = { let blockhash_queue = self.blockhash_queue.read().unwrap(); @@ -3124,32 +3043,20 @@ impl Bank { message: &SanitizedMessage, lamports_per_signature: u64, ) -> u64 { - self.fee_structure().calculate_fee( + let fee_budget_limits = FeeBudgetLimits::from( + process_compute_budget_instructions(message.program_instructions_iter()) + .unwrap_or_default(), + ); + solana_fee::calculate_fee( message, - lamports_per_signature, - &process_compute_budget_instructions(message.program_instructions_iter()) - .unwrap_or_default() - .into(), - self.feature_set - .is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), + lamports_per_signature == 0, + self.fee_structure().lamports_per_signature, + fee_budget_limits.prioritization_fee, self.feature_set .is_active(&remove_rounding_in_fee_calculation::id()), ) } - #[deprecated( - since = "1.6.11", - note = "Please use `get_blockhash_last_valid_block_height`" - )] - pub fn get_blockhash_last_valid_slot(&self, blockhash: &Hash) -> Option { - let blockhash_queue = self.blockhash_queue.read().unwrap(); - // This calculation will need to be updated to consider epoch boundaries if BlockhashQueue - // length is made variable by epoch - blockhash_queue - .get_hash_age(blockhash) - .map(|age| self.slot + MAX_PROCESSING_AGE as u64 - age) - } - pub fn get_blockhash_last_valid_block_height(&self, blockhash: &Hash) -> Option { let blockhash_queue = self.blockhash_queue.read().unwrap(); // This calculation will need to be updated to consider epoch boundaries if BlockhashQueue @@ -3236,8 +3143,22 @@ impl Bank { } #[cfg(feature = "dev-context-only-utils")] - pub fn register_recent_blockhash_for_test(&self, hash: &Hash) { - self.register_recent_blockhash(hash, &BankWithScheduler::no_scheduler_available()); + 
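// The new fee path above derives FeeBudgetLimits from the compute-budget
// instructions and passes its prioritization_fee to solana_fee::calculate_fee.
// A worked sketch of that prioritization fee, following the relocated
// From<ComputeBudgetLimits> impl shown earlier in this diff (the helper name is
// illustrative, and the real code widens to u128 before multiplying):
fn prioritization_fee_lamports(
    compute_unit_price_micro_lamports: u64,
    compute_unit_limit: u64,
) -> u64 {
    const MICRO_LAMPORTS_PER_LAMPORT: u64 = 1_000_000;
    // e.g. a price of 5_000 micro-lamports/CU at a 200_000 CU limit:
    // 5_000 * 200_000 = 1_000_000_000 micro-lamports => 1_000 lamports.
    compute_unit_price_micro_lamports
        .saturating_mul(compute_unit_limit)
        .div_ceil(MICRO_LAMPORTS_PER_LAMPORT)
}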
pub fn register_recent_blockhash_for_test( + &self, + blockhash: &Hash, + lamports_per_signature: Option, + ) { + // Only acquire the write lock for the blockhash queue on block boundaries because + // readers can starve this write lock acquisition and ticks would be slowed down too + // much if the write lock is acquired for each tick. + let mut w_blockhash_queue = self.blockhash_queue.write().unwrap(); + if let Some(lamports_per_signature) = lamports_per_signature { + w_blockhash_queue.register_hash(blockhash, lamports_per_signature); + } else { + w_blockhash_queue + .register_hash(blockhash, self.fee_rate_governor.lamports_per_signature); + } + self.update_recent_blockhashes_locked(&w_blockhash_queue); } /// Tell the bank which Entry IDs exist on the ledger. This function assumes subsequent calls @@ -3402,7 +3323,6 @@ impl Bank { let mut timings = ExecuteTimings::default(); let LoadAndExecuteTransactionsOutput { - loaded_transactions, mut execution_results, .. } = self.load_and_execute_transactions( @@ -3412,6 +3332,7 @@ impl Bank { // delay is not accounted for. MAX_PROCESSING_AGE - MAX_TRANSACTION_FORWARDING_DELAY, &mut timings, + &mut TransactionErrorMetrics::default(), TransactionProcessingConfig { account_overrides: Some(&account_overrides), check_program_modification_slot: self.check_program_modification_slot, @@ -3427,19 +3348,6 @@ impl Bank { }, ); - let post_simulation_accounts = loaded_transactions - .into_iter() - .next() - .and_then(|loaded_transactions_res| loaded_transactions_res.ok()) - .map(|loaded_transaction| { - loaded_transaction - .accounts - .into_iter() - .take(number_of_accounts) - .collect::>() - }) - .unwrap_or_default(); - let units_consumed = timings .details @@ -3459,14 +3367,25 @@ impl Bank { TransactionError::InvalidProgramForExecution, )); let flattened_result = execution_result.flattened_result(); - let (logs, return_data, inner_instructions) = match execution_result { - TransactionExecutionResult::Executed { details, .. 
} => ( - details.log_messages, - details.return_data, - details.inner_instructions, - ), - TransactionExecutionResult::NotExecuted(_) => (None, None, None), - }; + let (post_simulation_accounts, logs, return_data, inner_instructions) = + match execution_result { + TransactionExecutionResult::Executed(executed_tx) => { + let details = executed_tx.execution_details; + let post_simulation_accounts = executed_tx + .loaded_transaction + .accounts + .into_iter() + .take(number_of_accounts) + .collect::>(); + ( + post_simulation_accounts, + details.log_messages, + details.return_data, + details.inner_instructions, + ) + } + TransactionExecutionResult::NotExecuted(_) => (vec![], None, None, None), + }; let logs = logs.unwrap_or_default(); TransactionSimulationResult { @@ -3502,7 +3421,7 @@ impl Bank { pub fn unlock_accounts<'a>( &self, - txs_and_results: impl Iterator)>, + txs_and_results: impl Iterator)> + Clone, ) { self.rc.accounts.unlock_accounts(txs_and_results) } @@ -3615,7 +3534,7 @@ impl Bank { fn load_message_nonce_account( &self, message: &SanitizedMessage, - ) -> Option<(NoncePartial, nonce::state::Data)> { + ) -> Option<(NonceInfo, nonce::state::Data)> { let nonce_address = message.get_durable_nonce()?; let nonce_account = self.get_account_with_fixed_root(nonce_address)?; let nonce_data = @@ -3628,14 +3547,14 @@ impl Bank { return None; } - Some((NoncePartial::new(*nonce_address, nonce_account), nonce_data)) + Some((NonceInfo::new(*nonce_address, nonce_account), nonce_data)) } fn check_and_load_message_nonce_account( &self, message: &SanitizedMessage, next_durable_nonce: &DurableNonce, - ) -> Option<(NoncePartial, nonce::state::Data)> { + ) -> Option<(NonceInfo, nonce::state::Data)> { let nonce_is_advanceable = message.recent_blockhash() != next_durable_nonce.as_hash(); if nonce_is_advanceable { self.load_message_nonce_account(message) @@ -3672,58 +3591,18 @@ impl Bank { batch: &TransactionBatch, max_age: usize, timings: &mut ExecuteTimings, + error_counters: &mut TransactionErrorMetrics, processing_config: TransactionProcessingConfig, ) -> LoadAndExecuteTransactionsOutput { let sanitized_txs = batch.sanitized_transactions(); - debug!("processing transactions: {}", sanitized_txs.len()); - let mut error_counters = TransactionErrorMetrics::default(); - - let retryable_transaction_indexes: Vec<_> = batch - .lock_results() - .iter() - .enumerate() - .filter_map(|(index, res)| match res { - // following are retryable errors - Err(TransactionError::AccountInUse) => { - error_counters.account_in_use += 1; - Some(index) - } - Err(TransactionError::WouldExceedMaxBlockCostLimit) => { - error_counters.would_exceed_max_block_cost_limit += 1; - Some(index) - } - Err(TransactionError::WouldExceedMaxVoteCostLimit) => { - error_counters.would_exceed_max_vote_cost_limit += 1; - Some(index) - } - Err(TransactionError::WouldExceedMaxAccountCostLimit) => { - error_counters.would_exceed_max_account_cost_limit += 1; - Some(index) - } - Err(TransactionError::WouldExceedAccountDataBlockLimit) => { - error_counters.would_exceed_account_data_block_limit += 1; - Some(index) - } - // following are non-retryable errors - Err(TransactionError::TooManyAccountLocks) => { - error_counters.too_many_account_locks += 1; - None - } - Err(_) => None, - Ok(_) => None, - }) - .collect(); - let mut check_time = Measure::start("check_transactions"); - let check_results = self.check_transactions( + let (check_results, check_us) = measure_us!(self.check_transactions( sanitized_txs, batch.lock_results(), max_age, - &mut 
error_counters, - ); - check_time.stop(); - debug!("check: {}us", check_time.as_us()); - timings.saturating_add_in_place(ExecuteTimingType::CheckUs, check_time.as_us()); + error_counters, + )); + timings.saturating_add_in_place(ExecuteTimingType::CheckUs, check_us); let (blockhash, lamports_per_signature) = self.last_blockhash_and_lamports_per_signature(); let processing_environment = TransactionProcessingEnvironment { @@ -3752,16 +3631,16 @@ impl Bank { // Accumulate the transaction batch execution timings. timings.accumulate(&sanitized_output.execute_timings); - let mut signature_count = 0; + let ((), collect_logs_us) = + measure_us!(self.collect_logs(sanitized_txs, &sanitized_output.execution_results)); + timings.saturating_add_in_place(ExecuteTimingType::CollectLogsUs, collect_logs_us); + let mut signature_count = 0; let mut executed_transactions_count: usize = 0; let mut executed_non_vote_transactions_count: usize = 0; let mut executed_with_successful_result_count: usize = 0; let err_count = &mut error_counters.total; - let transaction_log_collector_config = - self.transaction_log_collector_config.read().unwrap(); - let mut collect_logs_time = Measure::start("collect_logs_time"); for (execution_result, tx) in sanitized_output.execution_results.iter().zip(sanitized_txs) { if let Some(debug_keys) = &self.transaction_debug_keys { for key in tx.message().account_keys().iter() { @@ -3773,65 +3652,6 @@ impl Bank { } } - let is_vote = tx.is_simple_vote_transaction(); - - if execution_result.was_executed() // Skip log collection for unprocessed transactions - && transaction_log_collector_config.filter != TransactionLogCollectorFilter::None - { - let mut filtered_mentioned_addresses = Vec::new(); - if !transaction_log_collector_config - .mentioned_addresses - .is_empty() - { - for key in tx.message().account_keys().iter() { - if transaction_log_collector_config - .mentioned_addresses - .contains(key) - { - filtered_mentioned_addresses.push(*key); - } - } - } - - let store = match transaction_log_collector_config.filter { - TransactionLogCollectorFilter::All => { - !is_vote || !filtered_mentioned_addresses.is_empty() - } - TransactionLogCollectorFilter::AllWithVotes => true, - TransactionLogCollectorFilter::None => false, - TransactionLogCollectorFilter::OnlyMentionedAddresses => { - !filtered_mentioned_addresses.is_empty() - } - }; - - if store { - if let Some(TransactionExecutionDetails { - status, - log_messages: Some(log_messages), - .. 
- }) = execution_result.details() - { - let mut transaction_log_collector = - self.transaction_log_collector.write().unwrap(); - let transaction_log_index = transaction_log_collector.logs.len(); - - transaction_log_collector.logs.push(TransactionLogInfo { - signature: *tx.signature(), - result: status.clone(), - is_vote, - log_messages: log_messages.clone(), - }); - for key in filtered_mentioned_addresses.into_iter() { - transaction_log_collector - .mentioned_address_map - .entry(key) - .or_default() - .push(transaction_log_index); - } - } - } - } - if execution_result.was_executed() { // Signature count must be accumulated only if the transaction // is executed, otherwise a mismatched count between banking and @@ -3839,7 +3659,7 @@ impl Bank { signature_count += u64::from(tx.message().header().num_required_signatures); executed_transactions_count += 1; - if !is_vote { + if !tx.is_simple_vote_transaction() { executed_non_vote_transactions_count += 1; } } @@ -3856,27 +3676,104 @@ impl Bank { } } } - collect_logs_time.stop(); - timings - .saturating_add_in_place(ExecuteTimingType::CollectLogsUs, collect_logs_time.as_us()); - - if *err_count > 0 { - debug!( - "{} errors of {} txs", - *err_count, - *err_count + executed_with_successful_result_count - ); - } LoadAndExecuteTransactionsOutput { - loaded_transactions: sanitized_output.loaded_transactions, execution_results: sanitized_output.execution_results, - retryable_transaction_indexes, executed_transactions_count, executed_non_vote_transactions_count, executed_with_successful_result_count, signature_count, - error_counters, + } + } + + fn collect_logs( + &self, + transactions: &[SanitizedTransaction], + execution_results: &[TransactionExecutionResult], + ) { + let transaction_log_collector_config = + self.transaction_log_collector_config.read().unwrap(); + if transaction_log_collector_config.filter == TransactionLogCollectorFilter::None { + return; + } + + let collected_logs: Vec<_> = execution_results + .iter() + .zip(transactions) + .filter_map(|(execution_result, transaction)| { + // Skip log collection for unprocessed transactions + let execution_details = execution_result.details()?; + Self::collect_transaction_logs( + &transaction_log_collector_config, + transaction, + execution_details, + ) + }) + .collect(); + + if !collected_logs.is_empty() { + let mut transaction_log_collector = self.transaction_log_collector.write().unwrap(); + for (log, filtered_mentioned_addresses) in collected_logs { + let transaction_log_index = transaction_log_collector.logs.len(); + transaction_log_collector.logs.push(log); + for key in filtered_mentioned_addresses.into_iter() { + transaction_log_collector + .mentioned_address_map + .entry(key) + .or_default() + .push(transaction_log_index); + } + } + } + } + + fn collect_transaction_logs( + transaction_log_collector_config: &TransactionLogCollectorConfig, + transaction: &SanitizedTransaction, + execution_details: &TransactionExecutionDetails, + ) -> Option<(TransactionLogInfo, Vec)> { + // Skip log collection if no log messages were recorded + let log_messages = execution_details.log_messages.as_ref()?; + + let mut filtered_mentioned_addresses = Vec::new(); + if !transaction_log_collector_config + .mentioned_addresses + .is_empty() + { + for key in transaction.message().account_keys().iter() { + if transaction_log_collector_config + .mentioned_addresses + .contains(key) + { + filtered_mentioned_addresses.push(*key); + } + } + } + + let is_vote = transaction.is_simple_vote_transaction(); + let store = match 
transaction_log_collector_config.filter { + TransactionLogCollectorFilter::All => { + !is_vote || !filtered_mentioned_addresses.is_empty() + } + TransactionLogCollectorFilter::AllWithVotes => true, + TransactionLogCollectorFilter::None => false, + TransactionLogCollectorFilter::OnlyMentionedAddresses => { + !filtered_mentioned_addresses.is_empty() + } + }; + + if store { + Some(( + TransactionLogInfo { + signature: *transaction.signature(), + result: execution_details.status.clone(), + is_vote, + log_messages: log_messages.clone(), + }, + filtered_mentioned_addresses, + )) + } else { + None } } @@ -3948,59 +3845,52 @@ impl Bank { fn filter_program_errors_and_collect_fee( &self, execution_results: &[TransactionExecutionResult], - ) -> Vec> { + ) { let mut fees = 0; - let results = execution_results + execution_results .iter() - .map(|execution_result| match execution_result { - TransactionExecutionResult::Executed { details, .. } => { - fees += details.fee_details.total_fee(); - Ok(()) + .for_each(|execution_result| match execution_result { + TransactionExecutionResult::Executed(executed_tx) => { + fees += executed_tx.loaded_transaction.fee_details.total_fee(); } - TransactionExecutionResult::NotExecuted(err) => Err(err.clone()), - }) - .collect(); + TransactionExecutionResult::NotExecuted(_) => {} + }); self.collector_fees.fetch_add(fees, Relaxed); - results } // Note: this function is not yet used; next PR will call it behind a feature gate fn filter_program_errors_and_collect_fee_details( &self, execution_results: &[TransactionExecutionResult], - ) -> Vec> { + ) { let mut accumulated_fee_details = FeeDetails::default(); - let results = execution_results + execution_results .iter() - .map(|execution_result| match execution_result { - TransactionExecutionResult::Executed { details, .. } => { - accumulated_fee_details.accumulate(&details.fee_details); - Ok(()) + .for_each(|execution_result| match execution_result { + TransactionExecutionResult::Executed(executed_tx) => { + accumulated_fee_details.accumulate(&executed_tx.loaded_transaction.fee_details); } - TransactionExecutionResult::NotExecuted(err) => Err(err.clone()), - }) - .collect(); + TransactionExecutionResult::NotExecuted(_) => {} + }); self.collector_fee_details .write() .unwrap() .accumulate(&accumulated_fee_details); - results } pub fn commit_transactions( &self, sanitized_txs: &[SanitizedTransaction], - loaded_txs: &mut [TransactionLoadResult], - execution_results: Vec, + mut execution_results: Vec, last_blockhash: Hash, lamports_per_signature: u64, counts: ExecutedTransactionCounts, timings: &mut ExecuteTimings, - ) -> TransactionResults { + ) -> Vec { assert!( !self.freeze_started(), "commit_transactions() working on a bank that is already frozen or is undergoing freezing!" 
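// collect_transaction_logs() above reduces log retention to a single decision;
// restated compactly against the TransactionLogCollectorFilter variants defined
// in this file (the helper name is illustrative):
fn should_store_log(
    filter: &TransactionLogCollectorFilter,
    is_vote: bool,
    mentions_watched_address: bool,
) -> bool {
    match filter {
        // Non-vote logs always; vote logs only when they touch a watched address.
        TransactionLogCollectorFilter::All => !is_vote || mentions_watched_address,
        TransactionLogCollectorFilter::AllWithVotes => true,
        TransactionLogCollectorFilter::None => false,
        TransactionLogCollectorFilter::OnlyMentionedAddresses => mentions_watched_address,
    }
}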
@@ -4032,56 +3922,41 @@ impl Bank { .fetch_max(executed_transactions_count, Relaxed); } - let mut write_time = Measure::start("write_time"); - let durable_nonce = DurableNonce::from_blockhash(&last_blockhash); - self.rc.accounts.store_cached( - self.slot(), - sanitized_txs, - &execution_results, - loaded_txs, - &durable_nonce, - lamports_per_signature, - ); - let rent_debits = self.collect_rent(&execution_results, loaded_txs); + let ((), store_accounts_us) = measure_us!({ + let durable_nonce = DurableNonce::from_blockhash(&last_blockhash); + let (accounts_to_store, transactions) = collect_accounts_to_store( + sanitized_txs, + &mut execution_results, + &durable_nonce, + lamports_per_signature, + ); + self.rc + .accounts + .store_cached((self.slot(), accounts_to_store.as_slice()), &transactions); + }); + + self.collect_rent(&execution_results); // Cached vote and stake accounts are synchronized with accounts-db // after each transaction. - let mut update_stakes_cache_time = Measure::start("update_stakes_cache_time"); - self.update_stakes_cache(sanitized_txs, &execution_results, loaded_txs); - update_stakes_cache_time.stop(); - - // once committed there is no way to unroll - write_time.stop(); - debug!( - "store: {}us txs_len={}", - write_time.as_us(), - sanitized_txs.len() - ); - - let mut store_executors_which_were_deployed_time = - Measure::start("store_executors_which_were_deployed_time"); - let mut cache = None; - for execution_result in &execution_results { - if let TransactionExecutionResult::Executed { - details, - programs_modified_by_tx, - } = execution_result - { - if details.status.is_ok() && !programs_modified_by_tx.is_empty() { - cache - .get_or_insert_with(|| { - self.transaction_processor.program_cache.write().unwrap() - }) - .merge(programs_modified_by_tx); + let ((), update_stakes_cache_us) = + measure_us!(self.update_stakes_cache(sanitized_txs, &execution_results)); + + let ((), update_executors_us) = measure_us!({ + let mut cache = None; + for execution_result in &execution_results { + if let TransactionExecutionResult::Executed(executed_tx) = execution_result { + let programs_modified_by_tx = &executed_tx.programs_modified_by_tx; + if executed_tx.was_successful() && !programs_modified_by_tx.is_empty() { + cache + .get_or_insert_with(|| { + self.transaction_processor.program_cache.write().unwrap() + }) + .merge(programs_modified_by_tx); + } } } - } - drop(cache); - store_executors_which_were_deployed_time.stop(); - saturating_add_assign!( - timings.execute_accessories.update_executors_us, - store_executors_which_were_deployed_time.as_us() - ); + }); let accounts_data_len_delta = execution_results .iter() @@ -4095,78 +3970,69 @@ impl Bank { .sum(); self.update_accounts_data_size_delta_on_chain(accounts_data_len_delta); - timings.saturating_add_in_place(ExecuteTimingType::StoreUs, write_time.as_us()); - timings.saturating_add_in_place( - ExecuteTimingType::UpdateStakesCacheUs, - update_stakes_cache_time.as_us(), - ); + let ((), update_transaction_statuses_us) = + measure_us!(self.update_transaction_statuses(sanitized_txs, &execution_results)); - let mut update_transaction_statuses_time = Measure::start("update_transaction_statuses"); - self.update_transaction_statuses(sanitized_txs, &execution_results); - let fee_collection_results = if self.feature_set.is_active(&reward_full_priority_fee::id()) - { + if self.feature_set.is_active(&reward_full_priority_fee::id()) { self.filter_program_errors_and_collect_fee_details(&execution_results) } else { 
self.filter_program_errors_and_collect_fee(&execution_results) }; - update_transaction_statuses_time.stop(); + + timings.saturating_add_in_place(ExecuteTimingType::StoreUs, store_accounts_us); timings.saturating_add_in_place( - ExecuteTimingType::UpdateTransactionStatuses, - update_transaction_statuses_time.as_us(), + ExecuteTimingType::UpdateStakesCacheUs, + update_stakes_cache_us, ); - - let loaded_accounts_stats = Self::collect_loaded_accounts_stats(loaded_txs); - assert_eq!( - loaded_accounts_stats.len(), - execution_results.len(), - "loaded_account_stats and execution_results are not the same size" + timings.saturating_add_in_place(ExecuteTimingType::UpdateExecutorsUs, update_executors_us); + timings.saturating_add_in_place( + ExecuteTimingType::UpdateTransactionStatuses, + update_transaction_statuses_us, ); - TransactionResults { - fee_collection_results, - loaded_accounts_stats, - execution_results, - rent_debits, - } + Self::create_commit_results(execution_results) } - fn collect_loaded_accounts_stats( - loaded_txs: &[TransactionLoadResult], - ) -> Vec> { - loaded_txs - .iter() - .map(|load_result| match load_result { - Ok(loaded_tx) => Ok(TransactionLoadedAccountsStats { - loaded_accounts_data_size: loaded_tx.loaded_accounts_data_size, - loaded_accounts_count: loaded_tx.accounts.len(), - }), - Err(err) => Err(err.clone()), + fn create_commit_results( + execution_results: Vec, + ) -> Vec { + execution_results + .into_iter() + .map(|execution_result| match execution_result { + TransactionExecutionResult::Executed(executed_tx) => { + let loaded_tx = &executed_tx.loaded_transaction; + let loaded_account_stats = TransactionLoadedAccountsStats { + loaded_accounts_data_size: loaded_tx.loaded_accounts_data_size, + loaded_accounts_count: loaded_tx.accounts.len(), + }; + + // Rent is only collected for successfully executed transactions + let rent_debits = if executed_tx.was_successful() { + executed_tx.loaded_transaction.rent_debits + } else { + RentDebits::default() + }; + + Ok(CommittedTransaction { + loaded_account_stats, + execution_details: executed_tx.execution_details, + fee_details: executed_tx.loaded_transaction.fee_details, + rent_debits, + }) + } + TransactionExecutionResult::NotExecuted(err) => Err(err), }) .collect() } - fn collect_rent( - &self, - execution_results: &[TransactionExecutionResult], - loaded_txs: &mut [TransactionLoadResult], - ) -> Vec { - let mut collected_rent: u64 = 0; - let rent_debits: Vec<_> = loaded_txs - .iter_mut() - .zip(execution_results) - .map(|(load_result, execution_result)| { - if let (Ok(loaded_transaction), true) = - (load_result, execution_result.was_executed_successfully()) - { - collected_rent += loaded_transaction.rent; - mem::take(&mut loaded_transaction.rent_debits) - } else { - RentDebits::default() - } - }) - .collect(); + fn collect_rent(&self, execution_results: &[TransactionExecutionResult]) { + let collected_rent = execution_results + .iter() + .filter_map(|executed_result| executed_result.executed_transaction()) + .filter(|executed_tx| executed_tx.was_successful()) + .map(|executed_tx| executed_tx.loaded_transaction.rent) + .sum(); self.collected_rent.fetch_add(collected_rent, Relaxed); - rent_debits } fn run_incinerator(&self) { @@ -4215,7 +4081,7 @@ impl Bank { } let (skipped_rewrites, measure_skipped_rewrites) = - measure!(self.calculate_skipped_rewrites()); + measure_time!(self.calculate_skipped_rewrites()); info!( "Rebuilding skipped rewrites of {} accounts{measure_skipped_rewrites}", skipped_rewrites.len() @@ -4446,13 
+4312,13 @@ impl Bank { .test_skip_rewrites_but_include_in_bank_hash; let mut skipped_rewrites = Vec::default(); for (pubkey, account, _loaded_slot) in accounts.iter_mut() { - let (rent_collected_info, measure) = measure!(collect_rent_from_account( + let (rent_collected_info, collect_rent_us) = measure_us!(collect_rent_from_account( &self.feature_set, &self.rent_collector, pubkey, account )); - time_collecting_rent_us += measure.as_us(); + time_collecting_rent_us += collect_rent_us; // only store accounts where we collected rent // but get the hash for all these accounts even if collected rent is 0 (= not updated). @@ -4505,9 +4371,9 @@ impl Bank { if !accounts_to_store.is_empty() { // TODO: Maybe do not call `store_accounts()` here. Instead return `accounts_to_store` // and have `collect_rent_in_partition()` perform all the stores. - let (_, measure) = - measure!(self.store_accounts((self.slot(), &accounts_to_store[..],))); - time_storing_accounts_us += measure.as_us(); + let (_, store_accounts_us) = + measure_us!(self.store_accounts((self.slot(), &accounts_to_store[..]))); + time_storing_accounts_us += store_accounts_us; } CollectRentFromAccountsInfo { @@ -4589,7 +4455,7 @@ impl Bank { bound }; let start = merge_prefix(start, *subrange_full.start()); - let (accounts, measure_load_accounts) = measure!(if last { + let (accounts, measure_load_accounts) = measure_time!(if last { let end = *subrange_full.end(); let subrange = start..=end; // IN-clusive self.rc @@ -4827,7 +4693,7 @@ impl Bank { recording_config: ExecutionRecordingConfig, timings: &mut ExecuteTimings, log_messages_bytes_limit: Option, - ) -> (TransactionResults, TransactionBalancesSet) { + ) -> (Vec, TransactionBalancesSet) { let pre_balances = if collect_balances { self.collect_balances(batch) } else { @@ -4835,17 +4701,16 @@ impl Bank { }; let LoadAndExecuteTransactionsOutput { - mut loaded_transactions, execution_results, executed_transactions_count, executed_non_vote_transactions_count, executed_with_successful_result_count, signature_count, - .. } = self.load_and_execute_transactions( batch, max_age, timings, + &mut TransactionErrorMetrics::default(), TransactionProcessingConfig { account_overrides: None, check_program_modification_slot: self.check_program_modification_slot, @@ -4859,9 +4724,8 @@ impl Bank { let (last_blockhash, lamports_per_signature) = self.last_blockhash_and_lamports_per_signature(); - let results = self.commit_transactions( + let commit_results = self.commit_transactions( batch.sanitized_transactions(), - &mut loaded_transactions, execution_results, last_blockhash, lamports_per_signature, @@ -4881,7 +4745,7 @@ impl Bank { vec![] }; ( - results, + commit_results, TransactionBalancesSet::new(pre_balances, post_balances), ) } @@ -4897,24 +4761,14 @@ impl Bank { /// Process a Transaction and store metadata. This is used for tests and the banks services. It /// replicates the vector Bank::process_transaction method with metadata recording enabled. - #[must_use] pub fn process_transaction_with_metadata( &self, tx: impl Into, - ) -> TransactionExecutionResult { + ) -> Result { let txs = vec![tx.into()]; - let batch = match self.prepare_entry_batch(txs) { - Ok(batch) => batch, - Err(err) => return TransactionExecutionResult::NotExecuted(err), - }; + let batch = self.prepare_entry_batch(txs)?; - let ( - TransactionResults { - mut execution_results, - .. - }, - .., - ) = self.load_execute_and_commit_transactions( + let (mut commit_results, ..) 
= self.load_execute_and_commit_transactions( &batch, MAX_PROCESSING_AGE, false, // collect_balances @@ -4927,7 +4781,8 @@ impl Bank { Some(1000 * 1000), ); - execution_results.remove(0) + let committed_tx = commit_results.remove(0)?; + Ok(committed_tx.execution_details) } /// Process multiple transaction in a single batch. This is used for benches and unit tests. @@ -4963,7 +4818,9 @@ impl Bank { None, ) .0 - .fee_collection_results + .into_iter() + .map(|commit_result| commit_result.map(|_| ())) + .collect() } /// Create, sign, and process a Transaction from `keypair` to `to` of @@ -5547,7 +5404,7 @@ impl Bank { /// If the epoch accounts hash should be included in this Bank, then fetch it. If the EAH /// calculation has not completed yet, this fn will block until it does complete. fn wait_get_epoch_accounts_hash(&self) -> EpochAccountsHash { - let (epoch_accounts_hash, measure) = measure!(self + let (epoch_accounts_hash, waiting_time_us) = measure_us!(self .rc .accounts .accounts_db @@ -5556,8 +5413,8 @@ impl Bank { datapoint_info!( "bank-wait_get_epoch_accounts_hash", - ("slot", self.slot() as i64, i64), - ("waiting-time-us", measure.as_us() as i64, i64), + ("slot", self.slot(), i64), + ("waiting-time-us", waiting_time_us, i64), ); epoch_accounts_hash } @@ -5979,7 +5836,7 @@ impl Bank { test_hash_calculation: bool, skip_shrink: bool, force_clean: bool, - last_full_snapshot_slot: Slot, + latest_full_snapshot_slot: Slot, base: Option<(Slot, /*capitalization*/ u64)>, ) -> bool { let (_, clean_time_us) = measure_us!({ @@ -5991,9 +5848,8 @@ impl Bank { // that slot, then accounts could be removed from older storages, which would // change the accounts hash. self.rc.accounts.accounts_db.clean_accounts( - Some(last_full_snapshot_slot), + Some(latest_full_snapshot_slot), true, - Some(last_full_snapshot_slot), self.epoch_schedule(), ); info!("Cleaning... Done."); @@ -6008,8 +5864,9 @@ impl Bank { info!("Shrinking..."); self.rc.accounts.accounts_db.shrink_all_slots( true, - Some(last_full_snapshot_slot), self.epoch_schedule(), + // we cannot allow the snapshot slot to be shrunk + Some(self.slot()), ); info!("Shrinking... Done."); } else { @@ -6120,18 +5977,21 @@ impl Bank { &self, txs: &[SanitizedTransaction], execution_results: &[TransactionExecutionResult], - loaded_txs: &[TransactionLoadResult], ) { debug_assert_eq!(txs.len(), execution_results.len()); - debug_assert_eq!(txs.len(), loaded_txs.len()); let new_warmup_cooldown_rate_epoch = self.new_warmup_cooldown_rate_epoch(); - izip!(txs, execution_results, loaded_txs) - .filter(|(_, execution_result, _)| execution_result.was_executed_successfully()) - .flat_map(|(tx, _, load_result)| { - load_result.iter().flat_map(|loaded_transaction| { - let num_account_keys = tx.message().account_keys().len(); - loaded_transaction.accounts.iter().take(num_account_keys) - }) + txs.iter() + .zip(execution_results) + .filter_map(|(tx, execution_result)| { + execution_result + .executed_transaction() + .map(|executed_tx| (tx, executed_tx)) + }) + .filter(|(_, executed_tx)| executed_tx.was_successful()) + .flat_map(|(tx, executed_tx)| { + let num_account_keys = tx.message().account_keys().len(); + let loaded_tx = &executed_tx.loaded_transaction; + loaded_tx.accounts.iter().take(num_account_keys) }) .for_each(|(pubkey, account)| { // note that this could get timed to: self.rc.accounts.accounts_db.stats.stakes_cache_check_and_store_us, @@ -6276,7 +6136,7 @@ impl Bank { // // This fn is meant to be called by the snapshot handler in Accounts Background Service. 
If // calling from elsewhere, ensure the same invariants hold/expectations are met. - pub(crate) fn clean_accounts(&self, last_full_snapshot_slot: Option) { + pub(crate) fn clean_accounts(&self) { // Don't clean the slot we're snapshotting because it may have zero-lamport // accounts that were included in the bank delta hash when the bank was frozen, // and if we clean them here, any newly created snapshot's hash for this bank @@ -6288,7 +6148,6 @@ impl Bank { self.rc.accounts.accounts_db.clean_accounts( Some(highest_slot_to_clean), false, - last_full_snapshot_slot, self.epoch_schedule(), ); } @@ -6679,7 +6538,7 @@ impl Bank { return None; } - let (epoch_accounts_hash, measure) = measure!(self + let (epoch_accounts_hash, waiting_time_us) = measure_us!(self .rc .accounts .accounts_db @@ -6689,7 +6548,7 @@ impl Bank { datapoint_info!( "bank-get_epoch_accounts_hash_to_serialize", ("slot", self.slot(), i64), - ("waiting-time-us", measure.as_us(), i64), + ("waiting-time-us", waiting_time_us, i64), ); Some(epoch_accounts_hash) } diff --git a/runtime/src/bank/bank_hash_details.rs b/runtime/src/bank/bank_hash_details.rs index 9f82386a4e15cd..a6c449a50c72b8 100644 --- a/runtime/src/bank/bank_hash_details.rs +++ b/runtime/src/bank/bank_hash_details.rs @@ -18,7 +18,7 @@ use { hash::Hash, pubkey::Pubkey, }, - solana_svm::transaction_results::TransactionExecutionDetails, + solana_svm::transaction_execution_result::TransactionExecutionDetails, solana_transaction_status::UiInstruction, std::str::FromStr, }; @@ -69,6 +69,7 @@ impl BankHashDetails { #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize, Default)] pub struct TransactionDetails { + pub signature: String, pub index: usize, pub accounts: Vec, pub instructions: Vec, diff --git a/runtime/src/bank/builtins/prototypes.rs b/runtime/src/bank/builtins/prototypes.rs index 813d948a3bd1e5..403544d4469f97 100644 --- a/runtime/src/bank/builtins/prototypes.rs +++ b/runtime/src/bank/builtins/prototypes.rs @@ -23,24 +23,6 @@ impl std::fmt::Debug for BuiltinPrototype { } } -#[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] -impl solana_frozen_abi::abi_example::AbiExample for BuiltinPrototype { - fn example() -> Self { - // BuiltinPrototype isn't serializable by definition. - solana_program_runtime::declare_process_instruction!(MockBuiltin, 0, |_invoke_context| { - // Do nothing - Ok(()) - }); - Self { - core_bpf_migration_config: None, - enable_feature_id: None, - program_id: Pubkey::default(), - name: "", - entrypoint: MockBuiltin::vm, - } - } -} - /// Transitions of stateless built-in programs at epoch boundaries when /// features are activated. 
/// These are built-in programs that don't actually exist, but their address diff --git a/runtime/src/bank/fee_distribution.rs b/runtime/src/bank/fee_distribution.rs index 671dba8ac2e9cb..8ac0f8e7338b31 100644 --- a/runtime/src/bank/fee_distribution.rs +++ b/runtime/src/bank/fee_distribution.rs @@ -4,10 +4,7 @@ use { log::{debug, warn}, solana_sdk::{ account::{ReadableAccount, WritableAccount}, - feature_set::{ - include_loaded_accounts_data_size_in_fee_calculation, - remove_rounding_in_fee_calculation, reward_full_priority_fee, - }, + feature_set::{remove_rounding_in_fee_calculation, reward_full_priority_fee}, fee::FeeBudgetLimits, pubkey::Pubkey, reward_info::RewardInfo, @@ -85,12 +82,11 @@ impl Bank { transaction: &SanitizedTransaction, fee_budget_limits: &FeeBudgetLimits, ) -> u64 { - let fee_details = self.fee_structure().calculate_fee_details( - transaction.message(), - self.get_lamports_per_signature(), - fee_budget_limits, - self.feature_set - .is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), + let fee_details = solana_fee::calculate_fee_details( + transaction, + self.get_lamports_per_signature() == 0, + self.fee_structure().lamports_per_signature, + fee_budget_limits.prioritization_fee, self.feature_set .is_active(&remove_rounding_in_fee_calculation::id()), ); @@ -227,7 +223,7 @@ impl Bank { None } else { total_staked += *staked; - Some((account.node_pubkey()?, *staked)) + Some((*account.node_pubkey()?, *staked)) } }) .collect::>(); diff --git a/runtime/src/bank/partitioned_epoch_rewards/mod.rs b/runtime/src/bank/partitioned_epoch_rewards/mod.rs index ea405dce8e1316..49622ba183a396 100644 --- a/runtime/src/bank/partitioned_epoch_rewards/mod.rs +++ b/runtime/src/bank/partitioned_epoch_rewards/mod.rs @@ -26,7 +26,6 @@ use { /// Distributing rewards to stake accounts begins AFTER this many blocks. const REWARD_CALCULATION_NUM_BLOCKS: u64 = 1; -#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub(crate) struct PartitionedStakeReward { /// Stake account address @@ -55,7 +54,6 @@ impl PartitionedStakeReward { type PartitionedStakeRewards = Vec; -#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub(crate) struct StartBlockHeightAndRewards { /// the block height of the slot at which rewards distribution began @@ -65,7 +63,6 @@ pub(crate) struct StartBlockHeightAndRewards { } /// Represent whether bank is in the reward phase or not. -#[cfg_attr(feature = "frozen-abi", derive(AbiExample, AbiEnumVisitor))] #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Default)] pub(crate) enum EpochRewardStatus { /// this bank is in the reward phase. diff --git a/sdk/src/recent_blockhashes_account.rs b/runtime/src/bank/recent_blockhashes_account.rs similarity index 72% rename from sdk/src/recent_blockhashes_account.rs rename to runtime/src/bank/recent_blockhashes_account.rs index 4235fc798a0b87..71815ef00f4e02 100644 --- a/sdk/src/recent_blockhashes_account.rs +++ b/runtime/src/bank/recent_blockhashes_account.rs @@ -1,29 +1,19 @@ //! Helpers for the recent blockhashes sysvar. 
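The rename above moves the recent-blockhashes helpers from the SDK into `runtime/src/bank/` and narrows them to crate-private use. Judging from the imports, the helpers appear to select the newest `MAX_ENTRIES` blockhash entries with a `BinaryHeap` before serializing them into the sysvar account. A rough, self-contained model of that selection step (the `Entry` layout and the `MAX_ENTRIES` value here are simplified assumptions; the real helper writes into an `AccountSharedData` rather than returning a `Vec`):

```rust
use std::collections::BinaryHeap;

// Assumed capacity; the real constant is imported from the sysvar module.
const MAX_ENTRIES: usize = 150;

// Simplified stand-in for the sysvar's entry type, ordered so that
// "larger" means "more recent".
#[derive(Eq, PartialEq, Ord, PartialOrd)]
struct Entry {
    recency: u64,
    blockhash: [u8; 32],
}

// Keep only the newest MAX_ENTRIES entries, newest first, mirroring how
// update_account() trims its input iterator before serializing.
fn select_recent(entries: impl IntoIterator<Item = Entry>) -> Vec<Entry> {
    let heap: BinaryHeap<Entry> = entries.into_iter().collect();
    heap.into_sorted_vec() // ascending order
        .into_iter()
        .rev() // newest first
        .take(MAX_ENTRIES)
        .collect()
}
```

A heap keeps the trim at O(n log n) without requiring the caller to pre-sort its iterator.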
#[allow(deprecated)] -use solana_program::sysvar::recent_blockhashes::{ +use solana_sdk::sysvar::recent_blockhashes::{ IntoIterSorted, IterItem, RecentBlockhashes, MAX_ENTRIES, }; use { - crate::{ - account::{ - create_account_shared_data_with_fields, to_account, AccountSharedData, - InheritableAccountFields, DUMMY_INHERITABLE_ACCOUNT_FIELDS, - }, - clock::INITIAL_RENT_EPOCH, + solana_sdk::account::{ + create_account_shared_data_with_fields, to_account, AccountSharedData, + InheritableAccountFields, }, std::{collections::BinaryHeap, iter::FromIterator}, }; -#[deprecated( - since = "1.9.0", - note = "Please do not use, will no longer be available in the future" -)] #[allow(deprecated)] -pub fn update_account<'a, I>( - account: &mut AccountSharedData, - recent_blockhash_iter: I, -) -> Option<()> +fn update_account<'a, I>(account: &mut AccountSharedData, recent_blockhash_iter: I) -> Option<()> where I: IntoIterator>, { @@ -37,25 +27,8 @@ where to_account(&recent_blockhashes, account) } -#[deprecated( - since = "1.5.17", - note = "Please use `create_account_with_data_for_test` instead" -)] -#[allow(deprecated)] -pub fn create_account_with_data<'a, I>(lamports: u64, recent_blockhash_iter: I) -> AccountSharedData -where - I: IntoIterator>, -{ - #[allow(deprecated)] - create_account_with_data_and_fields(recent_blockhash_iter, (lamports, INITIAL_RENT_EPOCH)) -} - -#[deprecated( - since = "1.9.0", - note = "Please do not use, will no longer be available in the future" -)] #[allow(deprecated)] -pub fn create_account_with_data_and_fields<'a, I>( +pub(in crate::bank) fn create_account_with_data_and_fields<'a, I>( recent_blockhash_iter: I, fields: InheritableAccountFields, ) -> AccountSharedData @@ -70,31 +43,26 @@ where account } -#[deprecated( - since = "1.9.0", - note = "Please do not use, will no longer be available in the future" -)] -#[allow(deprecated)] -pub fn create_account_with_data_for_test<'a, I>(recent_blockhash_iter: I) -> AccountSharedData -where - I: IntoIterator>, -{ - create_account_with_data_and_fields(recent_blockhash_iter, DUMMY_INHERITABLE_ACCOUNT_FIELDS) -} - #[cfg(test)] mod tests { #![allow(deprecated)] use { super::*, - crate::account::from_account, rand::{seq::SliceRandom, thread_rng}, - solana_program::{ + solana_sdk::{ + account::{from_account, DUMMY_INHERITABLE_ACCOUNT_FIELDS}, hash::{Hash, HASH_BYTES}, sysvar::recent_blockhashes::Entry, }, }; + fn create_account_with_data_for_test<'a, I>(recent_blockhash_iter: I) -> AccountSharedData + where + I: IntoIterator>, + { + create_account_with_data_and_fields(recent_blockhash_iter, DUMMY_INHERITABLE_ACCOUNT_FIELDS) + } + #[test] fn test_create_account_empty() { let account = create_account_with_data_for_test(vec![]); diff --git a/runtime/src/bank/serde_snapshot.rs b/runtime/src/bank/serde_snapshot.rs index c4f716071064f1..2f633c0910b2d8 100644 --- a/runtime/src/bank/serde_snapshot.rs +++ b/runtime/src/bank/serde_snapshot.rs @@ -11,15 +11,15 @@ mod tests { genesis_utils::activate_all_features, runtime_config::RuntimeConfig, serde_snapshot::{ - self, BankIncrementalSnapshotPersistence, SerdeAccountsHash, - SerdeIncrementalAccountsHash, SnapshotStreams, + self, BankIncrementalSnapshotPersistence, ExtraFieldsToSerialize, + SerdeAccountsHash, SerdeIncrementalAccountsHash, SnapshotStreams, }, snapshot_bank_utils, snapshot_utils::{ create_tmp_accounts_dir_for_tests, get_storages_to_serialize, ArchiveFormat, StorageAndNextAccountsFileId, }, - stakes::Stakes, + stakes::{Stakes, StakesEnum}, }, solana_accounts_db::{ 
account_storage::{AccountStorageMap, AccountStorageReference}, @@ -37,8 +37,8 @@ mod tests { pubkey::Pubkey, stake::state::Stake, }, std::{ - collections::HashMap, io::{BufReader, BufWriter, Cursor}, + mem, ops::RangeFull, path::Path, sync::{atomic::Ordering, Arc}, @@ -59,8 +59,7 @@ mod tests { for storage_entry in storage_entries.into_iter() { // Copy file to new directory let storage_path = storage_entry.path(); - let file_name = - AccountsFile::file_name(storage_entry.slot(), storage_entry.append_vec_id()); + let file_name = AccountsFile::file_name(storage_entry.slot(), storage_entry.id()); let output_path = output_dir.as_ref().join(file_name); std::fs::copy(storage_path, &output_path)?; @@ -72,15 +71,15 @@ mod tests { )?; let new_storage_entry = AccountStorageEntry::new_existing( storage_entry.slot(), - storage_entry.append_vec_id(), + storage_entry.id(), accounts_file, num_accounts, ); - next_append_vec_id = next_append_vec_id.max(new_storage_entry.append_vec_id()); + next_append_vec_id = next_append_vec_id.max(new_storage_entry.id()); storage.insert( new_storage_entry.slot(), AccountStorageReference { - id: new_storage_entry.append_vec_id(), + id: new_storage_entry.id(), storage: Arc::new(new_storage_entry), }, ); @@ -125,7 +124,7 @@ mod tests { } else { 0 } + 2; - let bank2 = Bank::new_from_parent(bank0, &Pubkey::default(), bank2_slot); + let mut bank2 = Bank::new_from_parent(bank0, &Pubkey::default(), bank2_slot); // Test new account let key2 = Pubkey::new_unique(); @@ -159,21 +158,59 @@ mod tests { epoch_accounts_hash }); + // Only if a bank was recently recreated from a snapshot will it have an epoch stakes entry + // of type "delegations" which cannot be serialized into the versioned epoch stakes map. Simulate + // this condition by replacing the epoch 0 stakes map of stake accounts with an epoch stakes map + // of delegations. 
+ { + assert_eq!(bank2.epoch_stakes.len(), 2); + assert!(bank2 + .epoch_stakes + .values() + .all(|epoch_stakes| matches!(epoch_stakes.stakes(), &StakesEnum::Accounts(_)))); + + let StakesEnum::Accounts(stake_accounts) = + bank2.epoch_stakes.remove(&0).unwrap().stakes().clone() + else { + panic!("expected the epoch 0 stakes entry to have stake accounts"); + }; + + bank2.epoch_stakes.insert( + 0, + EpochStakes::new(Arc::new(StakesEnum::Delegations(stake_accounts.into())), 0), + ); + } + let mut buf = Vec::new(); let cursor = Cursor::new(&mut buf); let mut writer = BufWriter::new(cursor); - serde_snapshot::serialize_bank_snapshot_into( - &mut writer, - bank2.get_fields_to_serialize(), - accounts_db.get_bank_hash_stats(bank2_slot).unwrap(), - accounts_db.get_accounts_delta_hash(bank2_slot).unwrap(), - expected_accounts_hash, - &get_storages_to_serialize(&bank2.get_snapshot_storages(None)), - expected_incremental_snapshot_persistence.as_ref(), - expected_epoch_accounts_hash, - accounts_db.write_version.load(Ordering::Acquire), - ) - .unwrap(); + { + let mut bank_fields = bank2.get_fields_to_serialize(); + // Ensure that epoch_stakes and versioned_epoch_stakes are each + // serialized with at least one entry to verify that epoch stakes + // entries are combined correctly during deserialization + assert!(!bank_fields.epoch_stakes.is_empty()); + assert!(!bank_fields.versioned_epoch_stakes.is_empty()); + + let versioned_epoch_stakes = mem::take(&mut bank_fields.versioned_epoch_stakes); + serde_snapshot::serialize_bank_snapshot_into( + &mut writer, + bank_fields, + accounts_db.get_bank_hash_stats(bank2_slot).unwrap(), + accounts_db.get_accounts_delta_hash(bank2_slot).unwrap(), + expected_accounts_hash, + &get_storages_to_serialize(&bank2.get_snapshot_storages(None)), + ExtraFieldsToSerialize { + lamports_per_signature: bank2.fee_rate_governor.lamports_per_signature, + incremental_snapshot_persistence: expected_incremental_snapshot_persistence + .as_ref(), + epoch_accounts_hash: expected_epoch_accounts_hash, + versioned_epoch_stakes, + }, + accounts_db.write_version.load(Ordering::Acquire), + ) + .unwrap(); + } drop(writer); // Now deserialize the serialized bank and ensure it matches the original bank @@ -228,14 +265,11 @@ mod tests { assert_eq!(dbank.get_accounts_hash(), Some(expected_accounts_hash)); assert_eq!(dbank.get_incremental_accounts_hash(), None); } - assert_eq!( - dbank.incremental_snapshot_persistence, - expected_incremental_snapshot_persistence, - ); assert_eq!( dbank.get_epoch_accounts_hash_to_serialize(), expected_epoch_accounts_hash, ); + assert_eq!(dbank, bank2); } @@ -266,6 +300,19 @@ mod tests { // Set extra fields bank.fee_rate_governor.lamports_per_signature = 7000; + // Note that epoch_stakes already has two epoch stakes entries for epochs 0 and 1 + // which will also be serialized to the versioned epoch stakes extra field. Those + // entries are of type Stakes so add a new entry for Stakes. 
+        bank.epoch_stakes.insert(
+            42,
+            EpochStakes::from(VersionedEpochStakes::Current {
+                stakes: Stakes::<Stake>::default(),
+                total_stake: 42,
+                node_id_to_vote_accounts: Arc::<NodeIdToVoteAccounts>::default(),
+                epoch_authorized_voters: Arc::<EpochAuthorizedVoters>::default(),
+            }),
+        );
+        assert_eq!(bank.epoch_stakes.len(), 3);

         // Serialize
         let snapshot_storages = bank.get_snapshot_storages(None);
@@ -279,18 +326,6 @@ mod tests {
         )
         .unwrap();

-        let mut new_epoch_stakes: HashMap<u64, VersionedEpochStakes> = HashMap::new();
-        new_epoch_stakes.insert(
-            42,
-            VersionedEpochStakes::Current {
-                stakes: Stakes::<Stake>::default(),
-                total_stake: 42,
-                node_id_to_vote_accounts: Arc::<NodeIdToVoteAccounts>::default(),
-                epoch_authorized_voters: Arc::<EpochAuthorizedVoters>::default(),
-            },
-        );
-        bincode::serialize_into(&mut writer, &new_epoch_stakes).unwrap();
-
         // Deserialize
         let rdr = Cursor::new(&buf[..]);
         let mut reader = std::io::BufReader::new(&buf[rdr.position() as usize..]);
@@ -324,13 +359,7 @@ mod tests {
         )
         .unwrap();

-        assert_eq!(
-            dbank.epoch_stakes(42),
-            Some(&EpochStakes::from(
-                new_epoch_stakes.get(&42).unwrap().clone()
-            ))
-        );
-
+        assert_eq!(bank.epoch_stakes, dbank.epoch_stakes);
         assert_eq!(
             bank.fee_rate_governor.lamports_per_signature,
             dbank.fee_rate_governor.lamports_per_signature
@@ -473,34 +502,53 @@ mod tests {
         assert_eq!(dbank.epoch_reward_status, EpochRewardStatus::Inactive);
     }

-    #[cfg(RUSTC_WITH_SPECIALIZATION)]
+    #[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))]
     mod test_bank_serialize {
         use {
             super::*,
             solana_accounts_db::{
                 account_storage::meta::StoredMetaWriteVersion, accounts_db::BankHashStats,
             },
+            solana_frozen_abi::abi_example::AbiExample,
             solana_sdk::clock::Slot,
+            std::marker::PhantomData,
         };

-        // This some what long test harness is required to freeze the ABI of
-        // Bank's serialization due to versioned nature
+        // This somewhat long test harness is required to freeze the ABI of Bank's serialization,
+        // which is implemented manually by calling serialize_bank_snapshot_with(), based mainly
+        // on get_fields_to_serialize(). Note, however, that Bank's serialization is coupled with
+        // snapshot storages as well.
+        //
+        // Implementing AbiExample for Bank directly was avoided by instead wrapping a PhantomData
+        // inside the special wrapper BankAbiTestWrapper, which internally creates an actual bank
+        // from Bank::default_for_tests().
+        //
+        // This way, frozen-abi covers as much of the serialization code path as possible.
+        // Alternatively, we could derive AbiExample for the minimal set of actually serialized
+        // bank fields as an ad-hoc tuple, but that was rejected to avoid the maintenance burden.
+        //
+        // Involving the Bank here is preferred conceptually because the snapshot ABI is
+        // important and a snapshot is just a (rooted) serialized bank at a high level. Only
+        // abi-freezing bank.get_fields_to_serialize() somewhat relies on the implementation
+        // detail.
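The PhantomData trick described in the comment above can be shown in miniature: a wrapper holds only a `PhantomData<T>` field, and a custom `serialize_with` function constructs the real value on demand, so the heavy type never needs to be buildable by the ABI machinery itself. A hedged sketch with generic names, not the actual harness:

```rust
use serde::{Serialize, Serializer};
use std::marker::PhantomData;

#[derive(Default)]
struct Heavy {
    payload: u64,
}

#[derive(Serialize)]
struct HeavyAbiWrapper {
    // The wrapper never owns a Heavy; the serializer builds one on demand.
    #[serde(serialize_with = "serialize_fresh_heavy")]
    heavy: PhantomData<Heavy>,
}

fn serialize_fresh_heavy<S>(_: &PhantomData<Heavy>, serializer: S) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    // Construct the example value here instead of storing it in the struct.
    Heavy::default().payload.serialize(serializer)
}
```

The harness below does the same thing with `bank: PhantomData<Bank>` and a `wrapper` function that calls `Bank::default_for_tests()`.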
#[cfg_attr( feature = "frozen-abi", derive(AbiExample), - frozen_abi(digest = "6riNuebfnAUpS2e3GYb5G8udH5PoEtep48ULchLjRDCB") + frozen_abi(digest = "CeNFPePrUfgJT2GNr7zYfMQVuJwGyU46bz1Skq7hAPht") )] #[derive(Serialize)] pub struct BankAbiTestWrapper { #[serde(serialize_with = "wrapper")] - bank: Bank, + bank: PhantomData, } - pub fn wrapper(bank: &Bank, serializer: S) -> Result + pub fn wrapper(_bank: &PhantomData, serializer: S) -> Result where S: serde::Serializer, { - let snapshot_storages = bank.get_snapshot_storages(None); + let bank = Bank::default_for_tests(); + let snapshot_storages = AccountsDb::example().get_snapshot_storages(0..1).0; // ensure there is at least one snapshot storage example for ABI digesting assert!(!snapshot_storages.is_empty()); @@ -512,15 +560,21 @@ mod tests { incremental_capitalization: u64::default(), }; + let mut bank_fields = bank.get_fields_to_serialize(); + let versioned_epoch_stakes = std::mem::take(&mut bank_fields.versioned_epoch_stakes); serde_snapshot::serialize_bank_snapshot_with( serializer, - bank.get_fields_to_serialize(), + bank_fields, BankHashStats::default(), AccountsDeltaHash(Hash::new_unique()), AccountsHash(Hash::new_unique()), &get_storages_to_serialize(&snapshot_storages), - Some(&incremental_snapshot_persistence), - Some(EpochAccountsHash::new(Hash::new_unique())), + ExtraFieldsToSerialize { + lamports_per_signature: bank.fee_rate_governor.lamports_per_signature, + incremental_snapshot_persistence: Some(&incremental_snapshot_persistence), + epoch_accounts_hash: Some(EpochAccountsHash::new(Hash::new_unique())), + versioned_epoch_stakes, + }, StoredMetaWriteVersion::default(), ) } diff --git a/runtime/src/bank/sysvar_cache.rs b/runtime/src/bank/sysvar_cache.rs index e57fc50850df89..b350b6f37c018f 100644 --- a/runtime/src/bank/sysvar_cache.rs +++ b/runtime/src/bank/sysvar_cache.rs @@ -21,12 +21,10 @@ mod tests { let bank0_sysvar_cache = bank0.transaction_processor.sysvar_cache(); let bank0_cached_clock = bank0_sysvar_cache.get_clock(); let bank0_cached_epoch_schedule = bank0_sysvar_cache.get_epoch_schedule(); - let bank0_cached_fees = bank0_sysvar_cache.get_fees(); let bank0_cached_rent = bank0_sysvar_cache.get_rent(); assert!(bank0_cached_clock.is_ok()); assert!(bank0_cached_epoch_schedule.is_ok()); - assert!(bank0_cached_fees.is_ok()); assert!(bank0_cached_rent.is_ok()); assert!(bank0_sysvar_cache.get_slot_hashes().is_err()); assert!(bank0_sysvar_cache.get_epoch_rewards().is_err()); // partitioned epoch reward feature is not enabled @@ -41,19 +39,16 @@ mod tests { let bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache(); let bank1_cached_clock = bank1_sysvar_cache.get_clock(); let bank1_cached_epoch_schedule = bank1_sysvar_cache.get_epoch_schedule(); - let bank1_cached_fees = bank1_sysvar_cache.get_fees(); let bank1_cached_rent = bank1_sysvar_cache.get_rent(); assert!(bank1_cached_clock.is_ok()); assert!(bank1_cached_epoch_schedule.is_ok()); - assert!(bank1_cached_fees.is_ok()); assert!(bank1_cached_rent.is_ok()); assert!(bank1_sysvar_cache.get_slot_hashes().is_ok()); assert!(bank1_sysvar_cache.get_epoch_rewards().is_err()); assert_ne!(bank0_cached_clock, bank1_cached_clock); assert_eq!(bank0_cached_epoch_schedule, bank1_cached_epoch_schedule); - assert_ne!(bank0_cached_fees, bank1_cached_fees); assert_eq!(bank0_cached_rent, bank1_cached_rent); let bank2_slot = bank1.slot() + 1; @@ -62,19 +57,16 @@ mod tests { let bank2_sysvar_cache = bank2.transaction_processor.sysvar_cache(); let bank2_cached_clock = 
bank2_sysvar_cache.get_clock(); let bank2_cached_epoch_schedule = bank2_sysvar_cache.get_epoch_schedule(); - let bank2_cached_fees = bank2_sysvar_cache.get_fees(); let bank2_cached_rent = bank2_sysvar_cache.get_rent(); assert!(bank2_cached_clock.is_ok()); assert!(bank2_cached_epoch_schedule.is_ok()); - assert!(bank2_cached_fees.is_ok()); assert!(bank2_cached_rent.is_ok()); assert!(bank2_sysvar_cache.get_slot_hashes().is_ok()); assert!(bank2_sysvar_cache.get_epoch_rewards().is_err()); // partitioned epoch reward feature is not enabled assert_ne!(bank1_cached_clock, bank2_cached_clock); assert_eq!(bank1_cached_epoch_schedule, bank2_cached_epoch_schedule); - assert_eq!(bank1_cached_fees, bank2_cached_fees); assert_eq!(bank1_cached_rent, bank2_cached_rent); assert_ne!( bank1_sysvar_cache.get_slot_hashes(), @@ -100,7 +92,6 @@ mod tests { assert!(bank1_cached_clock.is_ok()); assert!(bank1_cached_epoch_schedule.is_ok()); - assert!(bank1_cached_fees.is_ok()); assert!(bank1_cached_rent.is_ok()); assert!(bank1_cached_slot_hashes.is_ok()); assert!(bank1_cached_epoch_rewards.is_err()); @@ -111,7 +102,6 @@ mod tests { let bank1_sysvar_cache = bank1.transaction_processor.sysvar_cache(); assert!(bank1_sysvar_cache.get_clock().is_err()); assert!(bank1_sysvar_cache.get_epoch_schedule().is_err()); - assert!(bank1_sysvar_cache.get_fees().is_err()); assert!(bank1_sysvar_cache.get_rent().is_err()); assert!(bank1_sysvar_cache.get_slot_hashes().is_err()); assert!(bank1_sysvar_cache.get_epoch_rewards().is_err()); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index 6cf2bded8de9ee..c1d03382ee45d0 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -1,6 +1,4 @@ #![cfg(test)] -#[allow(deprecated)] -use solana_sdk::sysvar::fees::Fees; use { super::{ test_utils::{goto_end_of_slot, update_vote_account_timestamp}, @@ -36,7 +34,7 @@ use { }, solana_compute_budget::{ compute_budget::ComputeBudget, - compute_budget_processor::{self, MAX_COMPUTE_UNIT_LIMIT}, + compute_budget_limits::{self, MAX_COMPUTE_UNIT_LIMIT}, prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, }, solana_inline_spl::token, @@ -44,7 +42,6 @@ use { solana_program_runtime::{ declare_process_instruction, loaded_programs::{ProgramCacheEntry, ProgramCacheEntryType}, - timings::ExecuteTimings, }, solana_sdk::{ account::{ @@ -103,7 +100,12 @@ use { transaction_context::TransactionAccount, }, solana_stake_program::stake_state::{self, StakeStateV2}, - solana_svm::nonce_info::NoncePartial, + solana_svm::{ + account_loader::LoadedTransaction, nonce_info::NonceInfo, + transaction_commit_result::TransactionCommitResultExtensions, + transaction_execution_result::ExecutedTransaction, + }, + solana_timings::ExecuteTimings, solana_vote_program::{ vote_instruction, vote_state::{ @@ -231,18 +233,21 @@ fn test_race_register_tick_freeze() { } fn new_execution_result(status: Result<()>, fee_details: FeeDetails) -> TransactionExecutionResult { - TransactionExecutionResult::Executed { - details: TransactionExecutionDetails { + TransactionExecutionResult::Executed(Box::new(ExecutedTransaction { + loaded_transaction: LoadedTransaction { + fee_details, + ..LoadedTransaction::default() + }, + execution_details: TransactionExecutionDetails { status, log_messages: None, inner_instructions: None, - fee_details, return_data: None, executed_units: 0, accounts_data_len_delta: 0, }, programs_modified_by_tx: HashMap::new(), - } + })) } impl Bank { @@ -320,7 +325,7 @@ fn create_simple_test_arc_bank(lamports: u64) -> (Arc, 
Arc(&fees_account).unwrap(); - assert_eq!( - bank.fee_rate_governor.lamports_per_signature, - fees.fee_calculator.lamports_per_signature - ); - assert_eq!(fees.fee_calculator.lamports_per_signature, 12345); -} - #[test] fn test_is_delta_with_no_committables() { let (genesis_config, mint_keypair) = create_genesis_config(8000); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); bank.is_delta.store(false, Relaxed); let keypair1 = Keypair::new(); @@ -4488,7 +4472,7 @@ fn test_get_filtered_indexed_accounts() { #[test] fn test_status_cache_ancestors() { solana_logger::setup(); - let parent = create_simple_test_arc_bank(500).0; + let (parent, _bank_forks) = create_simple_test_arc_bank(500); let bank1 = Arc::new(new_from_parent(parent)); let mut bank = bank1; for _ in 0..MAX_CACHE_ENTRIES * 2 { @@ -4549,7 +4533,7 @@ fn test_add_builtin() { bank.last_blockhash(), ); - let bank = bank.wrap_with_bank_forks_for_tests().0; + let (bank, _bank_forks) = bank.wrap_with_bank_forks_for_tests(); assert_eq!( bank.process_transaction(&transaction), Err(TransactionError::InstructionError( @@ -4715,7 +4699,7 @@ fn test_add_instruction_processor_for_existing_unrelated_accounts() { #[allow(deprecated)] #[test] fn test_recent_blockhashes_sysvar() { - let mut bank = create_simple_test_arc_bank(500).0; + let (mut bank, _bank_forks) = create_simple_test_arc_bank(500); for i in 1..5 { let bhq_account = bank.get_account(&sysvar::recent_blockhashes::id()).unwrap(); let recent_blockhashes = @@ -4733,7 +4717,7 @@ fn test_recent_blockhashes_sysvar() { #[allow(deprecated)] #[test] fn test_blockhash_queue_sysvar_consistency() { - let bank = create_simple_test_arc_bank(100_000).0; + let (bank, _bank_forks) = create_simple_test_arc_bank(100_000); goto_end_of_slot(bank.clone()); let bhq_account = bank.get_account(&sysvar::recent_blockhashes::id()).unwrap(); @@ -4962,7 +4946,7 @@ fn test_check_and_load_message_nonce_account_ok() { let nonce_data = get_nonce_data_from_account(&nonce_account).unwrap(); assert_eq!( bank.check_and_load_message_nonce_account(&message, &bank.next_durable_nonce()), - Some((NoncePartial::new(nonce_pubkey, nonce_account), nonce_data)) + Some((NonceInfo::new(nonce_pubkey, nonce_account), nonce_data)) ); } @@ -5085,7 +5069,7 @@ fn test_check_and_load_message_nonce_account_bad_tx_hash_fail() { #[test] fn test_assign_from_nonce_account_fail() { - let bank = create_simple_test_arc_bank(100_000_000).0; + let (bank, _bank_forks) = create_simple_test_arc_bank(100_000_000); let nonce = Keypair::new(); let nonce_account = AccountSharedData::new_data( 42_424_242, @@ -5111,7 +5095,7 @@ fn test_assign_from_nonce_account_fail() { fn test_nonce_must_be_advanceable() { let mut bank = create_simple_test_bank(100_000_000); bank.feature_set = Arc::new(FeatureSet::all_enabled()); - let bank = bank.wrap_with_bank_forks_for_tests().0; + let (bank, _bank_forks) = bank.wrap_with_bank_forks_for_tests(); let nonce_keypair = Keypair::new(); let nonce_authority = nonce_keypair.pubkey(); let durable_nonce = DurableNonce::from_blockhash(&bank.last_blockhash()); @@ -5787,7 +5771,7 @@ fn test_check_ro_durable_nonce_fails() { #[test] fn test_collect_balances() { - let parent = create_simple_test_arc_bank(500).0; + let (parent, _bank_forks) = create_simple_test_arc_bank(500); let bank0 = Arc::new(new_from_parent(parent)); let keypair = Keypair::new(); @@ -5860,20 +5844,19 @@ fn test_pre_post_transaction_balances() { let txs = vec![tx0, tx1, 
tx2]; let lock_result = bank0.prepare_batch_for_tests(txs); - let (transaction_results, transaction_balances_set) = bank0 - .load_execute_and_commit_transactions( - &lock_result, - MAX_PROCESSING_AGE, - true, - ExecutionRecordingConfig::new_single_setting(false), - &mut ExecuteTimings::default(), - None, - ); + let (commit_results, transaction_balances_set) = bank0.load_execute_and_commit_transactions( + &lock_result, + MAX_PROCESSING_AGE, + true, + ExecutionRecordingConfig::new_single_setting(false), + &mut ExecuteTimings::default(), + None, + ); assert_eq!(transaction_balances_set.pre_balances.len(), 3); assert_eq!(transaction_balances_set.post_balances.len(), 3); - assert!(transaction_results.execution_results[0].was_executed_successfully()); + assert!(commit_results[0].was_executed_successfully()); assert_eq!( transaction_balances_set.pre_balances[0], vec![908_000, 911_000, 1] @@ -5885,27 +5868,18 @@ fn test_pre_post_transaction_balances() { // Failed transactions still produce balance sets // This is a TransactionError - not possible to charge fees - assert_matches!( - transaction_results.execution_results[1], - TransactionExecutionResult::NotExecuted(TransactionError::AccountNotFound) - ); + assert_matches!(commit_results[1], Err(TransactionError::AccountNotFound)); assert_eq!(transaction_balances_set.pre_balances[1], vec![0, 0, 1]); assert_eq!(transaction_balances_set.post_balances[1], vec![0, 0, 1]); // Failed transactions still produce balance sets // This is an InstructionError - fees charged - assert_matches!( - transaction_results.execution_results[2], - TransactionExecutionResult::Executed { - details: TransactionExecutionDetails { - status: Err(TransactionError::InstructionError( - 0, - InstructionError::Custom(1), - )), - .. - }, - .. 
- } + assert_eq!( + commit_results[2].transaction_result(), + Err(TransactionError::InstructionError( + 0, + InstructionError::Custom(1), + )), ); assert_eq!( transaction_balances_set.pre_balances[2], @@ -5922,9 +5896,8 @@ fn test_transaction_with_duplicate_accounts_in_instruction() { let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee_no_rent(500); let mock_program_id = Pubkey::from([2u8; 32]); - let bank = - Bank::new_with_mockup_builtin_for_tests(&genesis_config, mock_program_id, MockBuiltin::vm) - .0; + let (bank, _bank_forks) = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, mock_program_id, MockBuiltin::vm); declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let transaction_context = &invoke_context.transaction_context; @@ -5979,9 +5952,8 @@ fn test_transaction_with_program_ids_passed_to_programs() { let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee_no_rent(500); let mock_program_id = Pubkey::from([2u8; 32]); - let bank = - Bank::new_with_mockup_builtin_for_tests(&genesis_config, mock_program_id, MockBuiltin::vm) - .0; + let (bank, _bank_forks) = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, mock_program_id, MockBuiltin::vm); let from_pubkey = solana_sdk::pubkey::new_rand(); let to_pubkey = solana_sdk::pubkey::new_rand(); @@ -6082,12 +6054,11 @@ fn test_incinerator() { fn test_duplicate_account_key() { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(500); - let bank = Bank::new_with_mockup_builtin_for_tests( + let (bank, _bank_forks) = Bank::new_with_mockup_builtin_for_tests( &genesis_config, solana_vote_program::id(), MockBuiltin::vm, - ) - .0; + ); let from_pubkey = solana_sdk::pubkey::new_rand(); let to_pubkey = solana_sdk::pubkey::new_rand(); @@ -6114,12 +6085,11 @@ fn test_duplicate_account_key() { fn test_process_transaction_with_too_many_account_locks() { solana_logger::setup(); let (genesis_config, mint_keypair) = create_genesis_config(500); - let bank = Bank::new_with_mockup_builtin_for_tests( + let (bank, _bank_forks) = Bank::new_with_mockup_builtin_for_tests( &genesis_config, solana_vote_program::id(), MockBuiltin::vm, - ) - .0; + ); let from_pubkey = solana_sdk::pubkey::new_rand(); let to_pubkey = solana_sdk::pubkey::new_rand(); @@ -6250,7 +6220,7 @@ fn test_fuzz_instructions() { (key, name.as_bytes().to_vec()) }) .collect(); - let bank = bank.wrap_with_bank_forks_for_tests().0; + let (bank, _bank_forks) = bank.wrap_with_bank_forks_for_tests(); let max_keys = 100; let keys: Vec<_> = (0..max_keys) .enumerate() @@ -6413,26 +6383,26 @@ fn test_bank_hash_consistency() { if bank.slot == 0 { assert_eq!( bank.hash().to_string(), - "i5hGiQ3WtEehNrvhbfPFkUdm267t18fSpujcYtkBioW", + "Hn2FoJuoFWXVFVnwcQ6peuT24mUPmhDtXHXVjKD7M4yP", ); } if bank.slot == 32 { assert_eq!( bank.hash().to_string(), - "7NmBtNvbhoqzatJv8NgBs84qWrm4ZhpuC75DCpbqwiS" + "7FPfwBut4b7bXtKPsobQS1cuFgF47SZHDb4teQcJRomv" ); } if bank.slot == 64 { assert_eq!( bank.hash().to_string(), - "A1jjuUaENeDcsSvwejFGaZ5zWmnJ77doSzqdKtfzpoFk" + "28CWiEuA3izdt5xe4LyS4Q1DTALmYgrVctSTazFiPVcW" ); } if bank.slot == 128 { assert_eq!( bank.hash().to_string(), - "ApnMkFt5Bs4yDJ8S2CCPsQRL1He6vWXw6vMzAyc5i811" + "AdCmEvRXWKpvXb9fG6AFQhzGgB5ciAXnDajvaNK7YUg8" ); break; } @@ -6453,9 +6423,8 @@ fn test_same_program_id_uses_unique_executable_accounts() { let (genesis_config, mint_keypair) = create_genesis_config(50000); let program1_pubkey = solana_sdk::pubkey::new_rand(); - let bank = - 
Bank::new_with_mockup_builtin_for_tests(&genesis_config, program1_pubkey, MockBuiltin::vm) - .0; + let (bank, _bank_forks) = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, program1_pubkey, MockBuiltin::vm); // Add a new program owned by the first let program2_pubkey = solana_sdk::pubkey::new_rand(); @@ -6655,7 +6624,7 @@ fn test_shrink_candidate_slots_cached() { // No more slots should be shrunk assert_eq!(bank2.shrink_candidate_slots(), 0); // alive_counts represents the count of alive accounts in the three slots 0,1,2 - assert_eq!(alive_counts, vec![15, 1, 7]); + assert_eq!(alive_counts, vec![15, 1, 6]); } #[test] @@ -6663,11 +6632,8 @@ fn test_add_builtin_no_overwrite() { let slot = 123; let program_id = solana_sdk::pubkey::new_rand(); - let mut bank = Arc::new(Bank::new_from_parent( - create_simple_test_arc_bank(100_000).0, - &Pubkey::default(), - slot, - )); + let (parent_bank, _bank_forks) = create_simple_test_arc_bank(100_000); + let mut bank = Arc::new(Bank::new_from_parent(parent_bank, &Pubkey::default(), slot)); assert_eq!(bank.get_account_modified_slot(&program_id), None); Arc::get_mut(&mut bank) @@ -6687,11 +6653,8 @@ fn test_add_builtin_loader_no_overwrite() { let slot = 123; let loader_id = solana_sdk::pubkey::new_rand(); - let mut bank = Arc::new(Bank::new_from_parent( - create_simple_test_arc_bank(100_000).0, - &Pubkey::default(), - slot, - )); + let (parent_bank, _bank_forks) = create_simple_test_arc_bank(100_000); + let mut bank = Arc::new(Bank::new_from_parent(parent_bank, &Pubkey::default(), slot)); assert_eq!(bank.get_account_modified_slot(&loader_id), None); Arc::get_mut(&mut bank) @@ -6857,11 +6820,8 @@ fn test_add_builtin_account_after_frozen() { let slot = 123; let program_id = Pubkey::from_str("CiXgo2KHKSDmDnV1F6B69eWFgNAPiSBjjYvfB4cvRNre").unwrap(); - let bank = Bank::new_from_parent( - create_simple_test_arc_bank(100_000).0, - &Pubkey::default(), - slot, - ); + let (parent_bank, _bank_forks) = create_simple_test_arc_bank(100_000); + let bank = Bank::new_from_parent(parent_bank, &Pubkey::default(), slot); bank.freeze(); bank.add_builtin_account("mock_program", &program_id); @@ -6988,11 +6948,8 @@ fn test_add_precompiled_account_after_frozen() { let slot = 123; let program_id = Pubkey::from_str("CiXgo2KHKSDmDnV1F6B69eWFgNAPiSBjjYvfB4cvRNre").unwrap(); - let bank = Bank::new_from_parent( - create_simple_test_arc_bank(100_000).0, - &Pubkey::default(), - slot, - ); + let (parent_bank, _bank_forks) = create_simple_test_arc_bank(100_000); + let bank = Bank::new_from_parent(parent_bank, &Pubkey::default(), slot); bank.freeze(); bank.add_precompiled_account(&program_id); @@ -7873,7 +7830,7 @@ fn test_bpf_loader_upgradeable_deploy_with_max_len() { #[test] fn test_compute_active_feature_set() { - let bank0 = create_simple_test_arc_bank(100_000).0; + let (bank0, _bank_forks) = create_simple_test_arc_bank(100_000); let mut bank = Bank::new_from_parent(bank0, &Pubkey::default(), 1); let test_feature = "TestFeature11111111111111111111111111111111" @@ -7924,7 +7881,7 @@ fn test_compute_active_feature_set() { #[test] fn test_reserved_account_keys() { - let bank0 = create_simple_test_arc_bank(100_000).0; + let (bank0, _bank_forks) = create_simple_test_arc_bank(100_000); let mut bank = Bank::new_from_parent(bank0, &Pubkey::default(), 1); bank.feature_set = Arc::new(FeatureSet::default()); @@ -8206,7 +8163,7 @@ fn test_timestamp_fast() { #[test] fn test_program_is_native_loader() { let (genesis_config, mint_keypair) = create_genesis_config(50000); - let bank = 
Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let tx = Transaction::new_signed_with_payer( &[Instruction::new_with_bincode( @@ -8543,7 +8500,7 @@ fn test_store_scan_consistency_root() { current_bank.squash(); if current_bank.slot() % 2 == 0 { current_bank.force_flush_accounts_cache(); - current_bank.clean_accounts(None); + current_bank.clean_accounts(); } prev_bank = current_bank.clone(); let slot = current_bank.slot() + 1; @@ -9231,7 +9188,7 @@ fn test_tx_log_order() { &Pubkey::new_unique(), bootstrap_validator_stake_lamports(), ); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); *bank.transaction_log_collector_config.write().unwrap() = TransactionLogCollectorConfig { mentioned_addresses: HashSet::new(), filter: TransactionLogCollectorFilter::All, @@ -9255,7 +9212,7 @@ fn test_tx_log_order() { let txs = vec![tx0, tx1, tx2]; let batch = bank.prepare_batch_for_tests(txs); - let execution_results = bank + let commit_results = bank .load_execute_and_commit_transactions( &batch, MAX_PROCESSING_AGE, @@ -9268,28 +9225,29 @@ fn test_tx_log_order() { &mut ExecuteTimings::default(), None, ) - .0 - .execution_results; + .0; - assert_eq!(execution_results.len(), 3); + assert_eq!(commit_results.len(), 3); - assert!(execution_results[0].details().is_some()); - assert!(execution_results[0] - .details() + assert!(commit_results[0].is_ok()); + assert!(commit_results[0] + .as_ref() .unwrap() + .execution_details .log_messages .as_ref() .unwrap()[1] .contains(&"success".to_string())); - assert!(execution_results[1].details().is_some()); - assert!(execution_results[1] - .details() + assert!(commit_results[1].is_ok()); + assert!(commit_results[1] + .as_ref() .unwrap() + .execution_details .log_messages .as_ref() .unwrap()[2] .contains(&"failed".to_string())); - assert!(!execution_results[2].was_executed()); + assert!(commit_results[2].is_err()); let stored_logs = &bank.transaction_log_collector.read().unwrap().logs; let success_log_info = stored_logs @@ -9321,9 +9279,8 @@ fn test_tx_return_data() { bootstrap_validator_stake_lamports(), ); let mock_program_id = Pubkey::from([2u8; 32]); - let bank = - Bank::new_with_mockup_builtin_for_tests(&genesis_config, mock_program_id, MockBuiltin::vm) - .0; + let (bank, _bank_forks) = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, mock_program_id, MockBuiltin::vm); declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let mock_program_id = Pubkey::from([2u8; 32]); @@ -9365,7 +9322,7 @@ fn test_tx_return_data() { blockhash, )]; let batch = bank.prepare_batch_for_tests(txs); - let return_data = bank + let commit_results = bank .load_execute_and_commit_transactions( &batch, MAX_PROCESSING_AGE, @@ -9378,10 +9335,11 @@ fn test_tx_return_data() { &mut ExecuteTimings::default(), None, ) - .0 - .execution_results[0] - .details() + .0; + let return_data = commit_results[0] + .as_ref() .unwrap() + .execution_details .return_data .clone(); if let Some(index) = index { @@ -9397,6 +9355,71 @@ fn test_tx_return_data() { } } +#[test] +fn test_load_and_execute_commit_transactions_rent_debits() { + let (mut genesis_config, mint_keypair) = create_genesis_config(sol_to_lamports(1.)); + genesis_config.rent = Rent::default(); + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); + let bank = Bank::new_from_parent( + bank, + &Pubkey::new_unique(), + 
genesis_config.epoch_schedule.get_first_slot_in_epoch(1), + ); + let amount = genesis_config.rent.minimum_balance(0); + + // Make sure that rent debits are tracked for successful transactions + { + let alice = Keypair::new(); + test_utils::deposit(&bank, &alice.pubkey(), amount - 1).unwrap(); + let tx = system_transaction::transfer( + &mint_keypair, + &alice.pubkey(), + amount, + genesis_config.hash(), + ); + + let batch = bank.prepare_batch_for_tests(vec![tx]); + let commit_result = bank + .load_execute_and_commit_transactions( + &batch, + MAX_PROCESSING_AGE, + false, + ExecutionRecordingConfig::new_single_setting(false), + &mut ExecuteTimings::default(), + None, + ) + .0 + .remove(0); + assert!(commit_result.is_ok()); + assert!(commit_result.was_executed_successfully()); + assert!(!commit_result.ok().unwrap().rent_debits.is_empty()); + } + + // Make sure that rent debits are ignored for failed transactions + { + let bob = Keypair::new(); + test_utils::deposit(&bank, &bob.pubkey(), amount - 1).unwrap(); + let tx = + system_transaction::transfer(&mint_keypair, &bob.pubkey(), 1, genesis_config.hash()); + + let batch = bank.prepare_batch_for_tests(vec![tx]); + let commit_result = bank + .load_execute_and_commit_transactions( + &batch, + MAX_PROCESSING_AGE, + false, + ExecutionRecordingConfig::new_single_setting(false), + &mut ExecuteTimings::default(), + None, + ) + .0 + .remove(0); + assert!(commit_result.is_ok()); + assert!(!commit_result.was_executed_successfully()); + assert!(commit_result.ok().unwrap().rent_debits.is_empty()); + } +} + #[test] fn test_get_largest_accounts() { let GenesisConfigInfo { genesis_config, .. } = @@ -9523,8 +9546,8 @@ fn test_transfer_sysvar() { ); let program_id = solana_sdk::pubkey::new_rand(); - let bank = - Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm).0; + let (bank, _bank_forks) = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm); declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let transaction_context = &invoke_context.transaction_context; @@ -9733,8 +9756,8 @@ fn test_compute_budget_program_noop() { bootstrap_validator_stake_lamports(), ); let program_id = solana_sdk::pubkey::new_rand(); - let bank = - Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm).0; + let (bank, _bank_forks) = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm); declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let compute_budget = invoke_context.get_compute_budget(); @@ -9742,7 +9765,7 @@ fn test_compute_budget_program_noop() { *compute_budget, ComputeBudget { compute_unit_limit: u64::from( - compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + compute_budget_limits::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT ), heap_size: 48 * 1024, ..ComputeBudget::default() @@ -9754,7 +9777,7 @@ fn test_compute_budget_program_noop() { let message = Message::new( &[ ComputeBudgetInstruction::set_compute_unit_limit( - compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + compute_budget_limits::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, ), ComputeBudgetInstruction::request_heap_frame(48 * 1024), Instruction::new_with_bincode(program_id, &0, vec![]), @@ -9778,8 +9801,8 @@ fn test_compute_request_instruction() { bootstrap_validator_stake_lamports(), ); let program_id = solana_sdk::pubkey::new_rand(); - let bank = - Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm).0; 
+ let (bank, _bank_forks) = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm); declare_process_instruction!(MockBuiltin, 1, |invoke_context| { let compute_budget = invoke_context.get_compute_budget(); @@ -9787,7 +9810,7 @@ fn test_compute_request_instruction() { *compute_budget, ComputeBudget { compute_unit_limit: u64::from( - compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + compute_budget_limits::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT ), heap_size: 48 * 1024, ..ComputeBudget::default() @@ -9799,7 +9822,7 @@ fn test_compute_request_instruction() { let message = Message::new( &[ ComputeBudgetInstruction::set_compute_unit_limit( - compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, + compute_budget_limits::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT, ), ComputeBudgetInstruction::request_heap_frame(48 * 1024), Instruction::new_with_bincode(program_id, &0, vec![]), @@ -9824,8 +9847,8 @@ fn test_failed_compute_request_instruction() { ); let program_id = solana_sdk::pubkey::new_rand(); - let bank = - Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm).0; + let (bank, _bank_forks) = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm); let payer0_keypair = Keypair::new(); let payer1_keypair = Keypair::new(); @@ -9840,7 +9863,7 @@ fn test_failed_compute_request_instruction() { *compute_budget, ComputeBudget { compute_unit_limit: u64::from( - compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT + compute_budget_limits::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT ), heap_size: 48 * 1024, ..ComputeBudget::default() @@ -9997,7 +10020,7 @@ fn test_call_precomiled_program() { .. } = create_genesis_config_with_leader(42, &Pubkey::new_unique(), 42); activate_all_features(&mut genesis_config); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); // libsecp256k1 // Since libsecp256k1 is still using the old version of rand, this test @@ -10061,11 +10084,17 @@ fn calculate_test_fee( lamports_per_signature: u64, fee_structure: &FeeStructure, ) -> u64 { - let budget_limits = process_compute_budget_instructions(message.program_instructions_iter()) - .unwrap_or_default() - .into(); - - fee_structure.calculate_fee(message, lamports_per_signature, &budget_limits, false, true) + let fee_budget_limits = FeeBudgetLimits::from( + process_compute_budget_instructions(message.program_instructions_iter()) + .unwrap_or_default(), + ); + solana_fee::calculate_fee( + message, + lamports_per_signature == 0, + fee_structure.lamports_per_signature, + fee_budget_limits.prioritization_fee, + true, + ) } #[test] @@ -10262,7 +10291,7 @@ fn test_an_empty_instruction_without_program() { let message = Message::new(&[ix], Some(&mint_keypair.pubkey())); let tx = Transaction::new(&[&mint_keypair], message, genesis_config.hash()); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); assert_eq!( bank.process_transaction(&tx).unwrap_err(), TransactionError::InstructionError(0, InstructionError::UnsupportedProgramId), @@ -10290,7 +10319,7 @@ fn test_accounts_data_size_with_good_transaction() { const ACCOUNT_SIZE: u64 = MAX_PERMITTED_DATA_LENGTH; let (genesis_config, mint_keypair) = create_genesis_config(1_000 * LAMPORTS_PER_SOL); let bank = Bank::new_for_tests(&genesis_config); - let bank = 
bank.wrap_with_bank_forks_for_tests().0; + let (bank, _bank_forks) = bank.wrap_with_bank_forks_for_tests(); let transaction = system_transaction::create_account( &mint_keypair, &Keypair::new(), @@ -10331,7 +10360,7 @@ fn test_accounts_data_size_with_bad_transaction() { const ACCOUNT_SIZE: u64 = MAX_PERMITTED_DATA_LENGTH; let (genesis_config, _mint_keypair) = create_genesis_config(1_000 * LAMPORTS_PER_SOL); let bank = Bank::new_for_tests(&genesis_config); - let bank = bank.wrap_with_bank_forks_for_tests().0; + let (bank, _bank_forks) = bank.wrap_with_bank_forks_for_tests(); let transaction = system_transaction::create_account( &Keypair::new(), &Keypair::new(), @@ -10448,12 +10477,11 @@ fn test_invalid_rent_state_changes_existing_accounts() { ), ); - let bank = Bank::new_with_mockup_builtin_for_tests( + let (bank, _bank_forks) = Bank::new_with_mockup_builtin_for_tests( &genesis_config, mock_program_id, MockTransferBuiltin::vm, - ) - .0; + ); let recent_blockhash = bank.last_blockhash(); let check_account_is_rent_exempt = |pubkey: &Pubkey| -> bool { @@ -10535,12 +10563,11 @@ fn test_invalid_rent_state_changes_new_accounts() { let account_data_size = 100; let rent_exempt_minimum = genesis_config.rent.minimum_balance(account_data_size); - let bank = Bank::new_with_mockup_builtin_for_tests( + let (bank, _bank_forks) = Bank::new_with_mockup_builtin_for_tests( &genesis_config, mock_program_id, MockTransferBuiltin::vm, - ) - .0; + ); let recent_blockhash = bank.last_blockhash(); let check_account_is_rent_exempt = |pubkey: &Pubkey| -> bool { @@ -10598,12 +10625,11 @@ fn test_drained_created_account() { // Create legacy accounts of various kinds let created_keypair = Keypair::new(); - let bank = Bank::new_with_mockup_builtin_for_tests( + let (bank, _bank_forks) = Bank::new_with_mockup_builtin_for_tests( &genesis_config, mock_program_id, MockTransferBuiltin::vm, - ) - .0; + ); let recent_blockhash = bank.last_blockhash(); // Create and drain a small data size account @@ -10714,7 +10740,7 @@ fn test_rent_state_changes_sysvars() { Account::from(validator_vote_account), ); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); // Ensure transactions with sysvars succeed, even though sysvars appear RentPaying by balance let tx = Transaction::new_signed_with_payer( @@ -10757,7 +10783,7 @@ fn test_invalid_rent_state_changes_fee_payer() { Account::new(rent_exempt_minimum, 0, &system_program::id()), ); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let recent_blockhash = bank.last_blockhash(); let check_account_is_rent_exempt = |pubkey: &Pubkey| -> bool { @@ -10986,7 +11012,7 @@ fn test_rent_state_incinerator() { genesis_config.rent = Rent::default(); let rent_exempt_minimum = genesis_config.rent.minimum_balance(0); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); for amount in [rent_exempt_minimum - 1, rent_exempt_minimum] { bank.transfer(amount, &mint_keypair, &solana_sdk::incinerator::id()) @@ -11156,12 +11182,11 @@ fn test_resize_and_rent() { activate_all_features(&mut genesis_config); let mock_program_id = Pubkey::new_unique(); - let bank = Bank::new_with_mockup_builtin_for_tests( + let (bank, _bank_forks) = Bank::new_with_mockup_builtin_for_tests( &genesis_config, mock_program_id, MockReallocBuiltin::vm, - 
) - .0; + ); let recent_blockhash = bank.last_blockhash(); @@ -11432,12 +11457,11 @@ fn test_accounts_data_size_and_resize_transactions() { .. } = genesis_utils::create_genesis_config(100 * LAMPORTS_PER_SOL); let mock_program_id = Pubkey::new_unique(); - let bank = Bank::new_with_mockup_builtin_for_tests( + let (bank, _bank_forks) = Bank::new_with_mockup_builtin_for_tests( &genesis_config, mock_program_id, MockReallocBuiltin::vm, - ) - .0; + ); let recent_blockhash = bank.last_blockhash(); @@ -11687,7 +11711,7 @@ fn test_cap_accounts_data_allocations_per_transaction() { / MAX_PERMITTED_DATA_LENGTH as usize; let (genesis_config, mint_keypair) = create_genesis_config(1_000_000 * LAMPORTS_PER_SOL); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let mut instructions = Vec::new(); let mut keypairs = vec![mint_keypair.insecure_clone()]; @@ -11894,7 +11918,7 @@ fn test_calculate_fee_with_request_heap_frame_flag() { fn test_is_in_slot_hashes_history() { use solana_sdk::slot_hashes::MAX_ENTRIES; - let bank0 = create_simple_test_arc_bank(1).0; + let (bank0, _bank_forks) = create_simple_test_arc_bank(1); assert!(!bank0.is_in_slot_hashes_history(&0)); assert!(!bank0.is_in_slot_hashes_history(&1)); let mut last_bank = bank0; @@ -12201,7 +12225,7 @@ fn test_squash_timing_add_assign() { #[test] fn test_system_instruction_allocate() { let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.0)); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let bank_client = BankClient::new_shared(bank); let data_len = 2; let amount = genesis_config.rent.minimum_balance(data_len); @@ -12283,7 +12307,7 @@ where ) .unwrap(); - // super fun time; callback chooses to .clean_accounts(None) or not + // super fun time; callback chooses to .clean_accounts() or not let slot = bank.slot() + 1; let bank = new_bank_from_parent_with_bank_forks(bank_forks.as_ref(), bank, &collector, slot); callback(&bank); @@ -12313,7 +12337,7 @@ fn test_create_zero_lamport_with_clean() { bank.force_flush_accounts_cache(); // do clean and assert that it actually did its job assert_eq!(4, bank.get_snapshot_storages(None).len()); - bank.clean_accounts(None); + bank.clean_accounts(); assert_eq!(3, bank.get_snapshot_storages(None).len()); }); } @@ -12328,7 +12352,7 @@ fn test_create_zero_lamport_without_clean() { #[test] fn test_system_instruction_assign_with_seed() { let (genesis_config, mint_keypair) = create_genesis_config_no_tx_fee(sol_to_lamports(1.0)); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let bank_client = BankClient::new_shared(bank); let alice_keypair = Keypair::new(); @@ -12369,7 +12393,7 @@ fn test_system_instruction_unsigned_transaction() { let amount = genesis_config.rent.minimum_balance(0); // Fund to account to bypass AccountNotFound error - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let bank_client = BankClient::new_shared(bank); bank_client .transfer_and_confirm(amount, &alice_keypair, &mallory_pubkey) @@ -12873,8 +12897,8 @@ fn test_get_accounts_for_bank_hash_details(skip_rewrites: bool) { fn test_failed_simulation_compute_units() { let (genesis_config, mint_keypair) = 
create_genesis_config(LAMPORTS_PER_SOL); let program_id = Pubkey::new_unique(); - let bank = - Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm).0; + let (bank, _bank_forks) = + Bank::new_with_mockup_builtin_for_tests(&genesis_config, program_id, MockBuiltin::vm); const TEST_UNITS: u64 = 10_000; const MOCK_BUILTIN_UNITS: u64 = 1; @@ -12908,14 +12932,12 @@ fn test_filter_program_errors_and_collect_fee_details() { let initial_payer_balance = 7_000; let tx_fee = 5000; let priority_fee = 1000; - let tx_fee_details = FeeDetails::new_for_tests(tx_fee, priority_fee, false); + let tx_fee_details = FeeDetails::new(tx_fee, priority_fee, false); let expected_collected_fee_details = CollectorFeeDetails { transaction_fee: 2 * tx_fee, priority_fee: 2 * priority_fee, }; - let expected_collect_results = vec![Err(TransactionError::AccountNotFound), Ok(()), Ok(())]; - let GenesisConfigInfo { genesis_config, mint_keypair, @@ -12935,7 +12957,7 @@ fn test_filter_program_errors_and_collect_fee_details() { ), ]; - let results = bank.filter_program_errors_and_collect_fee_details(&results); + bank.filter_program_errors_and_collect_fee_details(&results); assert_eq!( expected_collected_fee_details, @@ -12945,7 +12967,6 @@ fn test_filter_program_errors_and_collect_fee_details() { initial_payer_balance, bank.get_balance(&mint_keypair.pubkey()) ); - assert_eq!(expected_collect_results, results); } #[test] diff --git a/runtime/src/bank_client.rs b/runtime/src/bank_client.rs index e8c8f7774ec66e..9cac83470bc964 100644 --- a/runtime/src/bank_client.rs +++ b/runtime/src/bank_client.rs @@ -321,7 +321,7 @@ mod tests { let jane_doe_keypair = Keypair::new(); let jane_pubkey = jane_doe_keypair.pubkey(); let doe_keypairs = vec![&john_doe_keypair, &jane_doe_keypair]; - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let bank_client = BankClient::new_shared(bank); let amount = genesis_config.rent.minimum_balance(0); diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index bbdd2d270a189d..3dd82c1fe85c98 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -134,7 +134,7 @@ impl BankForks { scheduler_pool: None, })); - root_bank.set_fork_graph_in_program_cache(bank_forks.clone()); + root_bank.set_fork_graph_in_program_cache(Arc::downgrade(&bank_forks)); bank_forks } @@ -751,6 +751,14 @@ mod tests { std::{sync::atomic::Ordering::Relaxed, time::Duration}, }; + #[test] + fn test_bank_forks_new_rw_arc_memory_leak() { + for _ in 0..1000 { + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); + BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config)); + } + } + #[test] fn test_bank_forks_new() { let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000);
diff --git a/runtime/src/bank_utils.rs b/runtime/src/bank_utils.rs index 0bafef1216193c..0395511008d87a 100644 --- a/runtime/src/bank_utils.rs +++ b/runtime/src/bank_utils.rs @@ -1,6 +1,10 @@ use { - crate::vote_sender_types::ReplayVoteSender, solana_sdk::transaction::SanitizedTransaction, - solana_svm::transaction_results::TransactionResults, solana_vote::vote_parser, + crate::vote_sender_types::ReplayVoteSender, + solana_sdk::transaction::SanitizedTransaction, + solana_svm::transaction_commit_result::{ + TransactionCommitResult, TransactionCommitResultExtensions, + }, + solana_vote::vote_parser, }; #[cfg(feature = "dev-context-only-utils")] use { @@ -37,18 +41,15 @@ pub fn setup_bank_and_vote_pubkeys_for_tests( pub fn find_and_send_votes( sanitized_txs: &[SanitizedTransaction], - tx_results: &TransactionResults, + commit_results: &[TransactionCommitResult], vote_sender: Option<&ReplayVoteSender>, ) { - let TransactionResults { - execution_results, .. - } = tx_results; if let Some(vote_sender) = vote_sender { sanitized_txs .iter() - .zip(execution_results.iter()) - .for_each(|(tx, result)| { - if tx.is_simple_vote_transaction() && result.was_executed_successfully() { + .zip(commit_results.iter()) + .for_each(|(tx, commit_result)| { + if tx.is_simple_vote_transaction() && commit_result.was_executed_successfully() { if let Some(parsed_vote) = vote_parser::parse_sanitized_vote_transaction(tx) { if parsed_vote.1.last_voted_slot().is_some() { let _ = vote_sender.send(parsed_vote);
diff --git a/runtime/src/commitment.rs b/runtime/src/commitment.rs index 3f600a9401ae9b..632e4bda3d8505 100644 --- a/runtime/src/commitment.rs +++ b/runtime/src/commitment.rs @@ -111,16 +111,11 @@ impl BlockCommitmentCache { self.highest_confirmed_slot() } - #[allow(deprecated)] pub fn slot_with_commitment(&self, commitment_level: CommitmentLevel) -> Slot { match commitment_level { - CommitmentLevel::Recent | CommitmentLevel::Processed => self.slot(), - CommitmentLevel::Root => self.root(), - CommitmentLevel::Single => self.highest_confirmed_slot(), - CommitmentLevel::SingleGossip | CommitmentLevel::Confirmed => { - self.highest_gossip_confirmed_slot() - } - CommitmentLevel::Max | CommitmentLevel::Finalized => self.highest_super_majority_root(), + CommitmentLevel::Processed => self.slot(), + CommitmentLevel::Confirmed => self.highest_gossip_confirmed_slot(), + CommitmentLevel::Finalized => self.highest_super_majority_root(), } }
diff --git a/runtime/src/compute_budget_details.rs b/runtime/src/compute_budget_details.rs deleted file mode 100644 index c14aa24c063538..00000000000000 --- a/runtime/src/compute_budget_details.rs +++ /dev/null @@ -1,183 +0,0 @@ -use { - solana_compute_budget::compute_budget_processor::process_compute_budget_instructions, - solana_sdk::{ - instruction::CompiledInstruction, - pubkey::Pubkey, - transaction::{SanitizedTransaction, SanitizedVersionedTransaction}, - }, -}; - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct ComputeBudgetDetails { - pub compute_unit_price: u64, - pub compute_unit_limit: u64, -} - -pub trait GetComputeBudgetDetails { - fn get_compute_budget_details( - &self, - round_compute_unit_price_enabled: bool, - ) -> Option<ComputeBudgetDetails>; - - fn process_compute_budget_instruction<'a>( - instructions: impl Iterator<Item = (&'a Pubkey, &'a CompiledInstruction)>, - _round_compute_unit_price_enabled: bool, - ) -> Option<ComputeBudgetDetails> { - let compute_budget_limits = process_compute_budget_instructions(instructions).ok()?; - Some(ComputeBudgetDetails { - compute_unit_price: compute_budget_limits.compute_unit_price,
compute_unit_limit: u64::from(compute_budget_limits.compute_unit_limit), - }) - } -} - -impl GetComputeBudgetDetails for SanitizedVersionedTransaction { - fn get_compute_budget_details( - &self, - round_compute_unit_price_enabled: bool, - ) -> Option<ComputeBudgetDetails> { - Self::process_compute_budget_instruction( - self.get_message().program_instructions_iter(), - round_compute_unit_price_enabled, - ) - } -} - -impl GetComputeBudgetDetails for SanitizedTransaction { - fn get_compute_budget_details( - &self, - round_compute_unit_price_enabled: bool, - ) -> Option<ComputeBudgetDetails> { - Self::process_compute_budget_instruction( - self.message().program_instructions_iter(), - round_compute_unit_price_enabled, - ) - } -} - -#[cfg(test)] -mod tests { - use { - super::*, - solana_sdk::{ - compute_budget::ComputeBudgetInstruction, - message::Message, - pubkey::Pubkey, - signature::{Keypair, Signer}, - system_instruction, - transaction::{Transaction, VersionedTransaction}, - }, - }; - - #[test] - fn test_get_compute_budget_details_with_valid_request_heap_frame_tx() { - let keypair = Keypair::new(); - let transaction = Transaction::new_unsigned(Message::new( - &[ - system_instruction::transfer(&keypair.pubkey(), &Pubkey::new_unique(), 1), - ComputeBudgetInstruction::request_heap_frame(32 * 1024), - ], - Some(&keypair.pubkey()), - )); - - // assert for SanitizedVersionedTransaction - let versioned_transaction = VersionedTransaction::from(transaction.clone()); - let sanitized_versioned_transaction = - SanitizedVersionedTransaction::try_new(versioned_transaction).unwrap(); - assert_eq!( - sanitized_versioned_transaction.get_compute_budget_details(false), - Some(ComputeBudgetDetails { - compute_unit_price: 0, - compute_unit_limit: - solana_compute_budget::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - as u64, - }) - ); - - // assert for SanitizedTransaction - let sanitized_transaction = SanitizedTransaction::from_transaction_for_tests(transaction); - assert_eq!( - sanitized_transaction.get_compute_budget_details(false), - Some(ComputeBudgetDetails { - compute_unit_price: 0, - compute_unit_limit: - solana_compute_budget::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - as u64, - }) - ); - } - - #[test] - fn test_get_compute_budget_details_with_valid_set_compute_units_limit() { - let requested_cu = 101u32; - let keypair = Keypair::new(); - let transaction = Transaction::new_unsigned(Message::new( - &[ - system_instruction::transfer(&keypair.pubkey(), &Pubkey::new_unique(), 1), - ComputeBudgetInstruction::set_compute_unit_limit(requested_cu), - ], - Some(&keypair.pubkey()), - )); - - // assert for SanitizedVersionedTransaction - let versioned_transaction = VersionedTransaction::from(transaction.clone()); - let sanitized_versioned_transaction = - SanitizedVersionedTransaction::try_new(versioned_transaction).unwrap(); - assert_eq!( - sanitized_versioned_transaction.get_compute_budget_details(false), - Some(ComputeBudgetDetails { - compute_unit_price: 0, - compute_unit_limit: requested_cu as u64, - }) - ); - - // assert for SanitizedTransaction - let sanitized_transaction = SanitizedTransaction::from_transaction_for_tests(transaction); - assert_eq!( - sanitized_transaction.get_compute_budget_details(false), - Some(ComputeBudgetDetails { - compute_unit_price: 0, - compute_unit_limit: requested_cu as u64, - }) - ); - } - - #[test] - fn test_get_compute_budget_details_with_valid_set_compute_unit_price() { - let requested_price = 1_000; - let keypair = Keypair::new(); - let transaction =
Transaction::new_unsigned(Message::new( - &[ - system_instruction::transfer(&keypair.pubkey(), &Pubkey::new_unique(), 1), - ComputeBudgetInstruction::set_compute_unit_price(requested_price), - ], - Some(&keypair.pubkey()), - )); - - // assert for SanitizedVersionedTransaction - let versioned_transaction = VersionedTransaction::from(transaction.clone()); - let sanitized_versioned_transaction = - SanitizedVersionedTransaction::try_new(versioned_transaction).unwrap(); - assert_eq!( - sanitized_versioned_transaction.get_compute_budget_details(false), - Some(ComputeBudgetDetails { - compute_unit_price: requested_price, - compute_unit_limit: - solana_compute_budget::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - as u64, - }) - ); - - // assert for SanitizedTransaction - let sanitized_transaction = SanitizedTransaction::from_transaction_for_tests(transaction); - assert_eq!( - sanitized_transaction.get_compute_budget_details(false), - Some(ComputeBudgetDetails { - compute_unit_price: requested_price, - compute_unit_limit: - solana_compute_budget::compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT - as u64, - }) - ); - } -}
diff --git a/runtime/src/epoch_stakes.rs b/runtime/src/epoch_stakes.rs index 27fb9971531be8..1cfcf76f6146cf 100644 --- a/runtime/src/epoch_stakes.rs +++ b/runtime/src/epoch_stakes.rs @@ -130,8 +130,9 @@ impl EpochStakes { } } +#[cfg_attr(feature = "frozen-abi", derive(AbiExample, AbiEnumVisitor))] #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub(crate) enum VersionedEpochStakes { +pub enum VersionedEpochStakes { Current { stakes: Stakes<Stake>, total_stake: u64, @@ -158,14 +159,76 @@ impl From<VersionedEpochStakes> for EpochStakes { } } +/// Only the `StakesEnum::Delegations` variant is unable to be serialized as a +/// `StakesEnum::Stakes` variant, so leave those entries and split off the other +/// epoch stakes enum variants into a new map which will be serialized into the +/// new `versioned_epoch_stakes` snapshot field. After a cluster transitions to +/// serializing epoch stakes in the new format, `StakesEnum::Delegations` +/// variants for recent epochs will no longer be created and can be deprecated.
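The doc comment above describes a plain partition by enum variant; a simplified sketch with toy types (not the real `Stakes`/`StakesEnum`) may help before reading `split_epoch_stakes` itself, which follows and additionally re-wraps the `Accounts` and `Stakes` variants into `VersionedEpochStakes::Current`:

```rust
use std::collections::HashMap;

// Toy stand-in for StakesEnum; the payloads are simplified to u64.
enum StakesKind {
    Delegations(u64),
    Accounts(u64),
    Stakes(u64),
}

/// Legacy `Delegations` entries stay in the old map; everything else is
/// split off into the map destined for the new snapshot field.
fn split(input: HashMap<u64, StakesKind>) -> (HashMap<u64, StakesKind>, HashMap<u64, u64>) {
    let mut old = HashMap::new();
    let mut versioned = HashMap::new();
    for (epoch, stakes) in input {
        match stakes {
            StakesKind::Delegations(_) => {
                old.insert(epoch, stakes);
            }
            StakesKind::Accounts(v) | StakesKind::Stakes(v) => {
                versioned.insert(epoch, v);
            }
        }
    }
    (old, versioned)
}
```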
+pub(crate) fn split_epoch_stakes( + bank_epoch_stakes: HashMap<Epoch, EpochStakes>, +) -> ( + HashMap<Epoch, EpochStakes>, + HashMap<Epoch, VersionedEpochStakes>, +) { + let mut old_epoch_stakes = HashMap::new(); + let mut versioned_epoch_stakes = HashMap::new(); + for (epoch, epoch_stakes) in bank_epoch_stakes.into_iter() { + let EpochStakes { + stakes, + total_stake, + node_id_to_vote_accounts, + epoch_authorized_voters, + } = epoch_stakes; + match stakes.as_ref() { + StakesEnum::Delegations(_) => { + old_epoch_stakes.insert( + epoch, + EpochStakes { + stakes: stakes.clone(), + total_stake, + node_id_to_vote_accounts, + epoch_authorized_voters, + }, + ); + } + StakesEnum::Accounts(stakes) => { + versioned_epoch_stakes.insert( + epoch, + VersionedEpochStakes::Current { + stakes: Stakes::<Stake>::from(stakes.clone()), + total_stake, + node_id_to_vote_accounts, + epoch_authorized_voters, + }, + ); + } + StakesEnum::Stakes(stakes) => { + versioned_epoch_stakes.insert( + epoch, + VersionedEpochStakes::Current { + stakes: stakes.clone(), + total_stake, + node_id_to_vote_accounts, + epoch_authorized_voters, + }, + ); + } + } + } + (old_epoch_stakes, versioned_epoch_stakes) +} + #[cfg(test)] pub(crate) mod tests { use { super::*, + crate::{stake_account::StakeAccount, stakes::StakesCache}, im::HashMap as ImHashMap, - solana_sdk::account::AccountSharedData, + solana_sdk::{account::AccountSharedData, rent::Rent}, + solana_stake_program::stake_state::{self, Delegation}, solana_vote::vote_account::{VoteAccount, VoteAccounts}, - solana_vote_program::vote_state::create_account_with_authorized, + solana_vote_program::vote_state::{self, create_account_with_authorized}, std::iter, }; @@ -284,6 +347,169 @@ pub(crate) mod tests { ); } + fn create_test_stakes() -> Stakes<StakeAccount<Delegation>> { + let stakes_cache = StakesCache::new(Stakes::default()); + + let vote_pubkey = Pubkey::new_unique(); + let vote_account = vote_state::create_account_with_authorized( + &Pubkey::new_unique(), + &Pubkey::new_unique(), + &Pubkey::new_unique(), + 0, + 1, + ); + + let stake = 1_000_000_000; + let stake_pubkey = Pubkey::new_unique(); + let stake_account = stake_state::create_account( + &Pubkey::new_unique(), + &vote_pubkey, + &vote_account, + &Rent::default(), + stake, + ); + + stakes_cache.check_and_store(&vote_pubkey, &vote_account, None); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, None); + + let stakes = Stakes::clone(&stakes_cache.stakes()); + + stakes + } + + #[test] + fn test_split_epoch_stakes_empty() { + let bank_epoch_stakes = HashMap::new(); + let (old, versioned) = split_epoch_stakes(bank_epoch_stakes); + assert!(old.is_empty()); + assert!(versioned.is_empty()); + } + + #[test] + fn test_split_epoch_stakes_delegations() { + let mut bank_epoch_stakes = HashMap::new(); + let epoch = 0; + let stakes = Arc::new(StakesEnum::Delegations(create_test_stakes().into())); + let epoch_stakes = EpochStakes { + stakes, + total_stake: 100, + node_id_to_vote_accounts: Arc::new(HashMap::new()), + epoch_authorized_voters: Arc::new(HashMap::new()), + }; + bank_epoch_stakes.insert(epoch, epoch_stakes.clone()); + + let (old, versioned) = split_epoch_stakes(bank_epoch_stakes); + + assert_eq!(old.len(), 1); + assert_eq!(old.get(&epoch), Some(&epoch_stakes)); + assert!(versioned.is_empty()); + } + + #[test] + fn test_split_epoch_stakes_accounts() { + let mut bank_epoch_stakes = HashMap::new(); + let epoch = 0; + let test_stakes = create_test_stakes(); + let stakes = Arc::new(StakesEnum::Accounts(test_stakes.clone())); + let epoch_stakes = EpochStakes { + stakes, + total_stake: 100, + node_id_to_vote_accounts:
Arc::new(HashMap::new()), + epoch_authorized_voters: Arc::new(HashMap::new()), + }; + bank_epoch_stakes.insert(epoch, epoch_stakes.clone()); + + let (old, versioned) = split_epoch_stakes(bank_epoch_stakes); + + assert!(old.is_empty()); + assert_eq!(versioned.len(), 1); + assert_eq!( + versioned.get(&epoch), + Some(&VersionedEpochStakes::Current { + stakes: Stakes::<Stake>::from(test_stakes), + total_stake: epoch_stakes.total_stake, + node_id_to_vote_accounts: epoch_stakes.node_id_to_vote_accounts, + epoch_authorized_voters: epoch_stakes.epoch_authorized_voters, + }) + ); + } + + #[test] + fn test_split_epoch_stakes_stakes() { + let mut bank_epoch_stakes = HashMap::new(); + let epoch = 0; + let test_stakes: Stakes<Stake> = create_test_stakes().into(); + let stakes = Arc::new(StakesEnum::Stakes(test_stakes.clone())); + let epoch_stakes = EpochStakes { + stakes, + total_stake: 100, + node_id_to_vote_accounts: Arc::new(HashMap::new()), + epoch_authorized_voters: Arc::new(HashMap::new()), + }; + bank_epoch_stakes.insert(epoch, epoch_stakes.clone()); + + let (old, versioned) = split_epoch_stakes(bank_epoch_stakes); + + assert!(old.is_empty()); + assert_eq!(versioned.len(), 1); + assert_eq!( + versioned.get(&epoch), + Some(&VersionedEpochStakes::Current { + stakes: test_stakes, + total_stake: epoch_stakes.total_stake, + node_id_to_vote_accounts: epoch_stakes.node_id_to_vote_accounts, + epoch_authorized_voters: epoch_stakes.epoch_authorized_voters, + }) + ); + } + + #[test] + fn test_split_epoch_stakes_mixed() { + let mut bank_epoch_stakes = HashMap::new(); + + // Delegations + let epoch1 = 0; + let stakes1 = Arc::new(StakesEnum::Delegations(Stakes::default())); + let epoch_stakes1 = EpochStakes { + stakes: stakes1, + total_stake: 100, + node_id_to_vote_accounts: Arc::new(HashMap::new()), + epoch_authorized_voters: Arc::new(HashMap::new()), + }; + bank_epoch_stakes.insert(epoch1, epoch_stakes1); + + // Accounts + let epoch2 = 1; + let stakes2 = Arc::new(StakesEnum::Accounts(Stakes::default())); + let epoch_stakes2 = EpochStakes { + stakes: stakes2, + total_stake: 200, + node_id_to_vote_accounts: Arc::new(HashMap::new()), + epoch_authorized_voters: Arc::new(HashMap::new()), + }; + bank_epoch_stakes.insert(epoch2, epoch_stakes2); + + // Stakes + let epoch3 = 2; + let stakes3 = Arc::new(StakesEnum::Stakes(Stakes::default())); + let epoch_stakes3 = EpochStakes { + stakes: stakes3, + total_stake: 300, + node_id_to_vote_accounts: Arc::new(HashMap::new()), + epoch_authorized_voters: Arc::new(HashMap::new()), + }; + bank_epoch_stakes.insert(epoch3, epoch_stakes3); + + let (old, versioned) = split_epoch_stakes(bank_epoch_stakes); + + assert_eq!(old.len(), 1); + assert!(old.contains_key(&epoch1)); + + assert_eq!(versioned.len(), 2); + assert!(versioned.contains_key(&epoch2)); + assert!(versioned.contains_key(&epoch3)); + } + #[test] fn test_epoch_stakes() { let num_nodes = 10; @@ -314,5 +540,5 @@ Some(*stake * num_vote_accounts_per_node as u64) ); } - } + } }
diff --git a/runtime/src/genesis_utils.rs b/runtime/src/genesis_utils.rs index 379750b1743381..aac8f56a75b699 100644 --- a/runtime/src/genesis_utils.rs +++ b/runtime/src/genesis_utils.rs @@ -1,8 +1,9 @@ use { + log::*, solana_sdk::{ account::{Account, AccountSharedData}, feature::{self, Feature}, - feature_set::FeatureSet, + feature_set::{FeatureSet, FEATURE_NAMES}, fee_calculator::FeeRateGovernor, genesis_config::{ClusterType, GenesisConfig}, native_token::sol_to_lamports, @@ -29,15 +30,13 @@ pub fn bootstrap_validator_stake_lamports() -> u64
{ pub const fn genesis_sysvar_and_builtin_program_lamports() -> u64 { const NUM_BUILTIN_PROGRAMS: u64 = 9; const NUM_PRECOMPILES: u64 = 2; - const FEES_SYSVAR_MIN_BALANCE: u64 = 946_560; const STAKE_HISTORY_MIN_BALANCE: u64 = 114_979_200; const CLOCK_SYSVAR_MIN_BALANCE: u64 = 1_169_280; const RENT_SYSVAR_MIN_BALANCE: u64 = 1_009_200; const EPOCH_SCHEDULE_SYSVAR_MIN_BALANCE: u64 = 1_120_560; const RECENT_BLOCKHASHES_SYSVAR_MIN_BALANCE: u64 = 42_706_560; - FEES_SYSVAR_MIN_BALANCE - + STAKE_HISTORY_MIN_BALANCE + STAKE_HISTORY_MIN_BALANCE + CLOCK_SYSVAR_MIN_BALANCE + RENT_SYSVAR_MIN_BALANCE + EPOCH_SCHEDULE_SYSVAR_MIN_BALANCE @@ -202,6 +201,23 @@ pub fn activate_all_features(genesis_config: &mut GenesisConfig) { } } +pub fn deactivate_features( + genesis_config: &mut GenesisConfig, + features_to_deactivate: &Vec<Pubkey>, +) { + // Remove all features in `features_to_deactivate` from genesis + for deactivate_feature_pk in features_to_deactivate { + if FEATURE_NAMES.contains_key(deactivate_feature_pk) { + genesis_config.accounts.remove(deactivate_feature_pk); + } else { + warn!( + "Feature {:?} set for deactivation is not a known Feature public key", + deactivate_feature_pk + ); + } + } +} + pub fn activate_feature(genesis_config: &mut GenesisConfig, feature_id: Pubkey) { genesis_config.accounts.insert( feature_id,
diff --git a/runtime/src/installed_scheduler_pool.rs b/runtime/src/installed_scheduler_pool.rs index aeededab8ee784..ee04859e3ea15e 100644 --- a/runtime/src/installed_scheduler_pool.rs +++ b/runtime/src/installed_scheduler_pool.rs @@ -23,12 +23,12 @@ use { crate::bank::Bank, log::*, - solana_program_runtime::timings::ExecuteTimings, solana_sdk::{ clock::Slot, hash::Hash, transaction::{Result, SanitizedTransaction, TransactionError}, }, + solana_timings::ExecuteTimings, std::{ fmt::{self, Debug}, mem,
diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 0261eee74ab3be..8e9f4b4c82d931 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -10,7 +10,6 @@ pub mod bank_client; pub mod bank_forks; pub mod bank_utils; pub mod commitment; -pub mod compute_budget_details; pub mod epoch_stakes; pub mod genesis_utils; pub mod installed_scheduler_pool;
diff --git a/runtime/src/prioritization_fee.rs b/runtime/src/prioritization_fee.rs index 45425059f98c15..b0e3480c4a6569 100644 --- a/runtime/src/prioritization_fee.rs +++ b/runtime/src/prioritization_fee.rs @@ -1,5 +1,5 @@ use { - solana_measure::measure, + solana_measure::measure_us, solana_sdk::{clock::Slot, pubkey::Pubkey, saturating_add_assign}, std::collections::HashMap, }; @@ -166,35 +166,31 @@ impl Default for PrioritizationFee { impl PrioritizationFee { /// Update self for minimum transaction fee in the block and minimum fee for each writable account.
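The `update` method shown next folds each transaction fee into a block-wide minimum plus a per-writable-account minimum. A minimal sketch of that fold, with `String` standing in for `Pubkey` (an assumption for brevity):

```rust
use std::collections::HashMap;

// Keep the smallest fee observed for every writable account in the block.
fn track_min_fees(
    min_account_fees: &mut HashMap<String, u64>,
    transaction_fee: u64,
    writable_accounts: &[String],
) {
    for account in writable_accounts {
        min_account_fees
            .entry(account.clone())
            .and_modify(|fee| *fee = (*fee).min(transaction_fee))
            .or_insert(transaction_fee);
    }
}
```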
pub fn update(&mut self, transaction_fee: u64, writable_accounts: Vec<Pubkey>) { - let (_, update_time) = measure!( - { - if !self.is_finalized { - if transaction_fee < self.min_transaction_fee { - self.min_transaction_fee = transaction_fee; - } - - for write_account in writable_accounts { - self.min_writable_account_fees - .entry(write_account) - .and_modify(|write_lock_fee| { - *write_lock_fee = std::cmp::min(*write_lock_fee, transaction_fee) - }) - .or_insert(transaction_fee); - } - - self.metrics - .accumulate_total_prioritization_fee(transaction_fee); - self.metrics.update_prioritization_fee(transaction_fee); - } else { - self.metrics - .increment_attempted_update_on_finalized_fee_count(1); + let (_, update_us) = measure_us!({ + if !self.is_finalized { + if transaction_fee < self.min_transaction_fee { + self.min_transaction_fee = transaction_fee; } - }, - "update_time", - ); - self.metrics - .accumulate_total_update_elapsed_us(update_time.as_us()); + for write_account in writable_accounts { + self.min_writable_account_fees + .entry(write_account) + .and_modify(|write_lock_fee| { + *write_lock_fee = std::cmp::min(*write_lock_fee, transaction_fee) + }) + .or_insert(transaction_fee); + } + + self.metrics + .accumulate_total_prioritization_fee(transaction_fee); + self.metrics.update_prioritization_fee(transaction_fee); + } else { + self.metrics + .increment_attempted_update_on_finalized_fee_count(1); + } + }); + + self.metrics.accumulate_total_update_elapsed_us(update_us); } /// Accounts that have minimum fees lesser or equal to the minimum fee in the block are redundant, they are
diff --git a/runtime/src/prioritization_fee_cache.rs b/runtime/src/prioritization_fee_cache.rs index 796fafbb41b62a..332449e81c04c1 100644 --- a/runtime/src/prioritization_fee_cache.rs +++ b/runtime/src/prioritization_fee_cache.rs @@ -1,8 +1,9 @@ use { - crate::{bank::Bank, compute_budget_details::GetComputeBudgetDetails, prioritization_fee::*}, + crate::{bank::Bank, prioritization_fee::*}, crossbeam_channel::{unbounded, Receiver, Sender}, log::*, - solana_measure::measure, + solana_measure::measure_us, + solana_runtime_transaction::instructions_processor::process_compute_budget_instructions, solana_sdk::{ clock::{BankId, Slot}, pubkey::Pubkey, @@ -193,59 +194,56 @@ impl PrioritizationFeeCache { /// transactions have both valid compute_budget_details and account_locks will be used to update /// fee_cache asynchronously. pub fn update<'a>(&self, bank: &Bank, txs: impl Iterator<Item = &'a SanitizedTransaction>) { - let (_, send_updates_time) = measure!( - { - for sanitized_transaction in txs { - // Vote transactions are not prioritized, therefore they are excluded from - // updating fee_cache. - if sanitized_transaction.is_simple_vote_transaction() { - continue; - } - - let round_compute_unit_price_enabled = false; // TODO: bank.feature_set.is_active(round_compute_unit_price) - let compute_budget_details = sanitized_transaction - .get_compute_budget_details(round_compute_unit_price_enabled); - let account_locks = sanitized_transaction - .get_account_locks(bank.get_transaction_account_lock_limit()); + let (_, send_updates_us) = measure_us!({ + for sanitized_transaction in txs { + // Vote transactions are not prioritized, therefore they are excluded from + // updating fee_cache.
+ if sanitized_transaction.is_simple_vote_transaction() { + continue; + } - if compute_budget_details.is_none() || account_locks.is_err() { - continue; - } - let compute_budget_details = compute_budget_details.unwrap(); + let compute_budget_limits = process_compute_budget_instructions( + sanitized_transaction.message().program_instructions_iter(), + ); + let account_locks = sanitized_transaction + .get_account_locks(bank.get_transaction_account_lock_limit()); - // filter out any transaction that requests zero compute_unit_limit - // since its priority fee amount is not instructive - if compute_budget_details.compute_unit_limit == 0 { - continue; - } + if compute_budget_limits.is_err() || account_locks.is_err() { + continue; + } + let compute_budget_limits = compute_budget_limits.unwrap(); - let writable_accounts = account_locks - .unwrap() - .writable - .iter() - .map(|key| **key) - .collect::<Vec<_>>(); - - self.sender - .send(CacheServiceUpdate::TransactionUpdate { - slot: bank.slot(), - bank_id: bank.bank_id(), - transaction_fee: compute_budget_details.compute_unit_price, - writable_accounts, - }) - .unwrap_or_else(|err| { - warn!( - "prioritization fee cache transaction updates failed: {:?}", - err - ); - }); + // filter out any transaction that requests zero compute_unit_limit + // since its priority fee amount is not instructive + if compute_budget_limits.compute_unit_limit == 0 { + continue; } - }, - "send_updates", - ); + + let writable_accounts = account_locks + .unwrap() + .writable + .iter() + .map(|key| **key) + .collect::<Vec<_>>(); + + self.sender + .send(CacheServiceUpdate::TransactionUpdate { + slot: bank.slot(), + bank_id: bank.bank_id(), + transaction_fee: compute_budget_limits.compute_unit_price, + writable_accounts, + }) + .unwrap_or_else(|err| { + warn!( + "prioritization fee cache transaction updates failed: {:?}", + err + ); + }); + } + }); self.metrics - .accumulate_total_update_elapsed_us(send_updates_time.as_us()); + .accumulate_total_update_elapsed_us(send_updates_us); } /// Finalize prioritization fee when its bank is completely replayed from blockstore, @@ -270,18 +268,13 @@ impl PrioritizationFeeCache { writable_accounts: Vec<Pubkey>, metrics: &PrioritizationFeeCacheMetrics, ) { - let (_, entry_update_time) = measure!( - { - unfinalized - .entry(slot) - .or_default() - .entry(bank_id) - .or_default() - .update(transaction_fee, writable_accounts); - }, - "entry_update_time" - ); - metrics.accumulate_total_entry_update_elapsed_us(entry_update_time.as_us()); + let (_, entry_update_us) = measure_us!(unfinalized + .entry(slot) + .or_default() + .entry(bank_id) + .or_default() + .update(transaction_fee, writable_accounts)); + metrics.accumulate_total_entry_update_elapsed_us(entry_update_us); metrics.accumulate_successful_transaction_update_count(1); } @@ -300,57 +293,51 @@ impl PrioritizationFeeCache { // prune cache by evicting write account entry from prioritization fee if its fee is less // or equal to block's minimum transaction fee, because they are irrelevant in calculating // block minimum fee.
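The comment above states the eviction rule: a per-account minimum that is not strictly above the block-wide minimum carries no extra information. A one-line sketch of it (toy key type, an assumption for brevity):

```rust
use std::collections::HashMap;

// Evict per-account minimums that are <= the block-wide minimum fee.
fn prune_redundant_entries(min_account_fees: &mut HashMap<String, u64>, block_min_fee: u64) {
    min_account_fees.retain(|_account, fee| *fee > block_min_fee);
}
```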
- let (slot_prioritization_fee, slot_finalize_time) = measure!( - { - // remove unfinalized slots - *unfinalized = unfinalized - .split_off(&slot.checked_sub(MAX_UNFINALIZED_SLOTS).unwrap_or_default()); - - let Some(mut slot_prioritization_fee) = unfinalized.remove(&slot) else { - return; - }; - - // Only retain priority fee reported from optimistically confirmed bank - let pre_purge_bank_count = slot_prioritization_fee.len() as u64; - let mut prioritization_fee = slot_prioritization_fee.remove(&bank_id); - let post_purge_bank_count = prioritization_fee.as_ref().map(|_| 1).unwrap_or(0); - metrics.accumulate_total_purged_duplicated_bank_count( - pre_purge_bank_count.saturating_sub(post_purge_bank_count), - ); - // It should be rare that optimistically confirmed bank had no prioritized - // transactions, but duplicated and unconfirmed bank had. - if pre_purge_bank_count > 0 && post_purge_bank_count == 0 { - warn!("Finalized bank has empty prioritization fee cache. slot {slot} bank id {bank_id}"); - } + let (slot_prioritization_fee, slot_finalize_us) = measure_us!({ + // remove unfinalized slots + *unfinalized = + unfinalized.split_off(&slot.checked_sub(MAX_UNFINALIZED_SLOTS).unwrap_or_default()); + + let Some(mut slot_prioritization_fee) = unfinalized.remove(&slot) else { + return; + }; + + // Only retain priority fee reported from optimistically confirmed bank + let pre_purge_bank_count = slot_prioritization_fee.len() as u64; + let mut prioritization_fee = slot_prioritization_fee.remove(&bank_id); + let post_purge_bank_count = prioritization_fee.as_ref().map(|_| 1).unwrap_or(0); + metrics.accumulate_total_purged_duplicated_bank_count( + pre_purge_bank_count.saturating_sub(post_purge_bank_count), + ); + // It should be rare that optimistically confirmed bank had no prioritized + // transactions, but duplicated and unconfirmed bank had. + if pre_purge_bank_count > 0 && post_purge_bank_count == 0 { + warn!("Finalized bank has empty prioritization fee cache. 
slot {slot} bank id {bank_id}"); + } - if let Some(prioritization_fee) = &mut prioritization_fee { - if let Err(err) = prioritization_fee.mark_block_completed() { - error!( - "Unsuccessful finalizing slot {slot}, bank ID {bank_id}: {:?}", - err - ); - } - prioritization_fee.report_metrics(slot); + if let Some(prioritization_fee) = &mut prioritization_fee { + if let Err(err) = prioritization_fee.mark_block_completed() { + error!( + "Unsuccessful finalizing slot {slot}, bank ID {bank_id}: {:?}", + err + ); } - prioritization_fee - }, - "slot_finalize_time" - ); - metrics.accumulate_total_block_finalize_elapsed_us(slot_finalize_time.as_us()); + prioritization_fee.report_metrics(slot); + } + prioritization_fee + }); + metrics.accumulate_total_block_finalize_elapsed_us(slot_finalize_us); // Create new cache entry if let Some(slot_prioritization_fee) = slot_prioritization_fee { - let (_, cache_lock_time) = measure!( - { - let mut cache = cache.write().unwrap(); - while cache.len() >= cache_max_size { - cache.pop_first(); - } - cache.insert(slot, slot_prioritization_fee); - }, - "cache_lock_time" - ); - metrics.accumulate_total_cache_lock_elapsed_us(cache_lock_time.as_us()); + let (_, cache_lock_us) = measure_us!({ + let mut cache = cache.write().unwrap(); + while cache.len() >= cache_max_size { + cache.pop_first(); + } + cache.insert(slot, slot_prioritization_fee); + }); + metrics.accumulate_total_cache_lock_elapsed_us(cache_lock_us); } }
diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 28964394c21785..a83ed12325741c 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -33,7 +33,7 @@ use { solana_measure::measure::Measure, solana_sdk::{ clock::{Epoch, Slot, UnixTimestamp}, - deserialize_utils::{default_on_eof, ignore_eof_error}, + deserialize_utils::default_on_eof, epoch_schedule::EpochSchedule, fee_calculator::{FeeCalculator, FeeRateGovernor}, genesis_config::GenesisConfig, @@ -284,7 +284,7 @@ impl From<BankFieldsToSerialize> for SerializableVersionedBank { } #[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] -impl<'a> solana_frozen_abi::abi_example::IgnoreAsHelper for SerializableVersionedBank {} +impl solana_frozen_abi::abi_example::TransparentAsHelper for SerializableVersionedBank {} /// Helper type to wrap BufReader streams when deserializing and reconstructing from either just a /// full snapshot, or both a full and incremental snapshot @@ -382,6 +382,41 @@ where deserialize_from::<_, _>(stream) } +/// Extra fields that are deserialized from the end of snapshots. +/// +/// Note that this struct's fields should stay synced with the fields in +/// ExtraFieldsToSerialize with the exception that new "extra fields" should be +/// added to this struct a minor release before they are added to the serialize +/// struct. +#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] +#[derive(Clone, Debug, Deserialize, PartialEq)] +struct ExtraFieldsToDeserialize { + #[serde(deserialize_with = "default_on_eof")] + lamports_per_signature: u64, + #[serde(deserialize_with = "default_on_eof")] + incremental_snapshot_persistence: Option<BankIncrementalSnapshotPersistence>, + #[serde(deserialize_with = "default_on_eof")] + epoch_accounts_hash: Option<EpochAccountsHash>, + #[serde(deserialize_with = "default_on_eof")] + versioned_epoch_stakes: HashMap<u64, VersionedEpochStakes>, +} + +/// Extra fields that are serialized at the end of snapshots.
+/// +/// Note that this struct's fields should stay synced with the fields in +/// ExtraFieldsToDeserialize with the exception that new "extra fields" should +/// be added to the deserialize struct a minor release before they are added to +/// this one. +#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] +#[cfg_attr(feature = "dev-context-only-utils", derive(Default))] +#[derive(Debug, Serialize, PartialEq)] +pub struct ExtraFieldsToSerialize<'a> { + pub lamports_per_signature: u64, + pub incremental_snapshot_persistence: Option<&'a BankIncrementalSnapshotPersistence>, + pub epoch_accounts_hash: Option<EpochAccountsHash>, + pub versioned_epoch_stakes: HashMap<u64, VersionedEpochStakes>, +} + fn deserialize_bank_fields<R>( mut stream: &mut BufReader<R>, ) -> Result< @@ -397,24 +432,26 @@ where let mut bank_fields: BankFieldsToDeserialize = deserialize_from::<_, DeserializableVersionedBank>(&mut stream)?.into(); let accounts_db_fields = deserialize_accounts_db_fields(stream)?; + let extra_fields = deserialize_from(stream)?; + // Process extra fields - let lamports_per_signature = ignore_eof_error(deserialize_from(&mut stream))?; + let ExtraFieldsToDeserialize { + lamports_per_signature, + incremental_snapshot_persistence, + epoch_accounts_hash, + versioned_epoch_stakes, + } = extra_fields; + bank_fields.fee_rate_governor = bank_fields .fee_rate_governor .clone_with_lamports_per_signature(lamports_per_signature); - - let incremental_snapshot_persistence = ignore_eof_error(deserialize_from(&mut stream))?; bank_fields.incremental_snapshot_persistence = incremental_snapshot_persistence; - - let epoch_accounts_hash = ignore_eof_error(deserialize_from(&mut stream))?; bank_fields.epoch_accounts_hash = epoch_accounts_hash; // If we deserialize the new epoch stakes, add all of the entries into the // other deserialized map which could still have old epoch stakes entries - let new_epoch_stakes: HashMap<u64, VersionedEpochStakes> = - ignore_eof_error(deserialize_from(&mut stream))?; bank_fields.epoch_stakes.extend( - new_epoch_stakes + versioned_epoch_stakes .into_iter() .map(|(epoch, versioned_epoch_stakes)| (epoch, versioned_epoch_stakes.into())), ); @@ -585,8 +622,7 @@ pub fn serialize_bank_snapshot_into( accounts_delta_hash: AccountsDeltaHash, accounts_hash: AccountsHash, account_storage_entries: &[Vec<Arc<AccountStorageEntry>>], - incremental_snapshot_persistence: Option<&BankIncrementalSnapshotPersistence>, - epoch_accounts_hash: Option<EpochAccountsHash>, + extra_fields: ExtraFieldsToSerialize, write_version: StoredMetaWriteVersion, ) -> Result<(), Error> where { serialize_bank_snapshot_with( stream, bank_fields, bank_hash_stats, @@ -603,8 +639,7 @@ where accounts_delta_hash, accounts_hash, account_storage_entries, - incremental_snapshot_persistence, - epoch_accounts_hash, + extra_fields, write_version, ) } @@ -617,15 +652,13 @@ pub fn serialize_bank_snapshot_with<S>( accounts_delta_hash: AccountsDeltaHash, accounts_hash: AccountsHash, account_storage_entries: &[Vec<Arc<AccountStorageEntry>>], - incremental_snapshot_persistence: Option<&BankIncrementalSnapshotPersistence>, - epoch_accounts_hash: Option<EpochAccountsHash>, + extra_fields: ExtraFieldsToSerialize, write_version: StoredMetaWriteVersion, ) -> Result<S::Ok, S::Error> where S: serde::Serializer, { let slot = bank_fields.slot; - let lamports_per_signature = bank_fields.fee_rate_governor.lamports_per_signature; let serializable_bank = SerializableVersionedBank::from(bank_fields); let serializable_accounts_db = SerializableAccountsDb::<'_> { slot, @@ -635,15 +668,7 @@ where accounts_hash, write_version, }; - - ( - serializable_bank, - serializable_accounts_db, - lamports_per_signature, - incremental_snapshot_persistence, - epoch_accounts_hash, - ) - .serialize(serializer) +
(serializable_bank, serializable_accounts_db, extra_fields).serialize(serializer) } #[cfg(test)] @@ -659,15 +684,16 @@ impl<'a> Serialize for SerializableBankAndStorage<'a> { S: serde::ser::Serializer, { let slot = self.bank.slot(); - let fields = self.bank.get_fields_to_serialize(); + let mut bank_fields = self.bank.get_fields_to_serialize(); let accounts_db = &self.bank.rc.accounts.accounts_db; let bank_hash_stats = accounts_db.get_bank_hash_stats(slot).unwrap(); let accounts_delta_hash = accounts_db.get_accounts_delta_hash(slot).unwrap(); let accounts_hash = accounts_db.get_accounts_hash(slot).unwrap().0; let write_version = accounts_db.write_version.load(Ordering::Acquire); - let lamports_per_signature = fields.fee_rate_governor.lamports_per_signature; + let lamports_per_signature = bank_fields.fee_rate_governor.lamports_per_signature; + let versioned_epoch_stakes = std::mem::take(&mut bank_fields.versioned_epoch_stakes); let bank_fields_to_serialize = ( - SerializableVersionedBank::from(fields), + SerializableVersionedBank::from(bank_fields), SerializableAccountsDb::<'_> { slot, account_storage_entries: self.snapshot_storages, @@ -676,11 +702,12 @@ accounts_hash, write_version, }, - lamports_per_signature, - None::<BankIncrementalSnapshotPersistence>, - self.bank - .get_epoch_accounts_hash_to_serialize() - .map(|epoch_accounts_hash| *epoch_accounts_hash.as_ref()), + ExtraFieldsToSerialize { + lamports_per_signature, + incremental_snapshot_persistence: None, + epoch_accounts_hash: self.bank.get_epoch_accounts_hash_to_serialize(), + versioned_epoch_stakes, + }, ); bank_fields_to_serialize.serialize(serializer) } @@ -699,14 +726,14 @@ impl<'a> Serialize for SerializableBankAndStorageNoExtra<'a> { S: serde::ser::Serializer, { let slot = self.bank.slot(); - let fields = self.bank.get_fields_to_serialize(); + let bank_fields = self.bank.get_fields_to_serialize(); let accounts_db = &self.bank.rc.accounts.accounts_db; let bank_hash_stats = accounts_db.get_bank_hash_stats(slot).unwrap(); let accounts_delta_hash = accounts_db.get_accounts_delta_hash(slot).unwrap(); let accounts_hash = accounts_db.get_accounts_hash(slot).unwrap().0; let write_version = accounts_db.write_version.load(Ordering::Acquire); ( - SerializableVersionedBank::from(fields), + SerializableVersionedBank::from(bank_fields), SerializableAccountsDb::<'_> { slot, account_storage_entries: self.snapshot_storages, @@ -790,7 +817,7 @@ impl<'a> Serialize for SerializableAccountsDb<'a> { } #[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] -impl<'a> solana_frozen_abi::abi_example::IgnoreAsHelper for SerializableAccountsDb<'a> {} +impl<'a> solana_frozen_abi::abi_example::TransparentAsHelper for SerializableAccountsDb<'a> {} #[allow(clippy::too_many_arguments)] fn reconstruct_bank_from_fields( @@ -907,7 +934,7 @@ pub(crate) fn remap_append_vec_file( let remapped_file_name = AccountsFile::file_name(slot, remapped_append_vec_id); remapped_append_vec_path = append_vec_path.parent().unwrap().join(remapped_file_name); - #[cfg(target_os = "linux")] + #[cfg(all(target_os = "linux", target_env = "gnu"))] { let remapped_append_vec_path_cstr = cstring_from_path(&remapped_append_vec_path)?; @@ -923,7 +950,10 @@ } } - #[cfg(not(target_os = "linux"))] + #[cfg(any( + not(target_os = "linux"), + all(target_os = "linux", not(target_env = "gnu")) + ))] if std::fs::metadata(&remapped_append_vec_path).is_err() { break (remapped_append_vec_id, remapped_append_vec_path); } @@ -935,7
+965,10 @@ pub(crate) fn remap_append_vec_file( // Only rename the file if the new ID is actually different from the original. In the target_os // = linux case, we have already renamed if necessary. - #[cfg(not(target_os = "linux"))] + #[cfg(any( + not(target_os = "linux"), + all(target_os = "linux", not(target_env = "gnu")) + ))] if old_append_vec_id != remapped_append_vec_id as SerializedAccountsFileId { std::fs::rename(append_vec_path, &remapped_append_vec_path)?; } @@ -1206,7 +1239,7 @@ where } // Rename `src` to `dest` only if `dest` doesn't already exist. -#[cfg(target_os = "linux")] +#[cfg(all(target_os = "linux", target_env = "gnu"))] fn rename_no_replace(src: &CStr, dest: &CStr) -> io::Result<()> { let ret = unsafe { libc::renameat2( diff --git a/runtime/src/serde_snapshot/storage.rs b/runtime/src/serde_snapshot/storage.rs index 5b8bed48fa90b1..7308e24c6e025a 100644 --- a/runtime/src/serde_snapshot/storage.rs +++ b/runtime/src/serde_snapshot/storage.rs @@ -30,11 +30,11 @@ impl SerializableStorage for SerializableAccountStorageEntry { impl From<&AccountStorageEntry> for SerializableAccountStorageEntry { fn from(rhs: &AccountStorageEntry) -> Self { Self { - id: rhs.append_vec_id() as SerializedAccountsFileId, + id: rhs.id() as SerializedAccountsFileId, accounts_current_len: rhs.accounts.len(), } } } #[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] -impl solana_frozen_abi::abi_example::IgnoreAsHelper for SerializableAccountStorageEntry {} +impl solana_frozen_abi::abi_example::TransparentAsHelper for SerializableAccountStorageEntry {} diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 72df611dcfbe4c..cfc283d54eba2d 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -140,8 +140,7 @@ mod serde_snapshot_tests { for storage_entry in storage_entries.into_iter() { // Copy file to new directory let storage_path = storage_entry.path(); - let file_name = - AccountsFile::file_name(storage_entry.slot(), storage_entry.append_vec_id()); + let file_name = AccountsFile::file_name(storage_entry.slot(), storage_entry.id()); let output_path = output_dir.as_ref().join(file_name); std::fs::copy(storage_path, &output_path)?; @@ -153,15 +152,15 @@ mod serde_snapshot_tests { )?; let new_storage_entry = AccountStorageEntry::new_existing( storage_entry.slot(), - storage_entry.append_vec_id(), + storage_entry.id(), accounts_file, num_accounts, ); - next_append_vec_id = next_append_vec_id.max(new_storage_entry.append_vec_id()); + next_append_vec_id = next_append_vec_id.max(new_storage_entry.id()); storage.insert( new_storage_entry.slot(), AccountStorageReference { - id: new_storage_entry.append_vec_id(), + id: new_storage_entry.id(), storage: Arc::new(new_storage_entry), }, ); @@ -824,7 +823,7 @@ mod serde_snapshot_tests { pubkey_count, accounts.all_account_count_in_accounts_file(shrink_slot) ); - accounts.shrink_all_slots(*startup, None, &EpochSchedule::default()); + accounts.shrink_all_slots(*startup, &EpochSchedule::default(), None); assert_eq!( pubkey_count_after_shrink, accounts.all_account_count_in_accounts_file(shrink_slot) @@ -852,7 +851,7 @@ mod serde_snapshot_tests { .unwrap(); // repeating should be no-op - accounts.shrink_all_slots(*startup, None, &epoch_schedule); + accounts.shrink_all_slots(*startup, &epoch_schedule, None); assert_eq!( pubkey_count_after_shrink, accounts.all_account_count_in_accounts_file(shrink_slot) diff --git a/runtime/src/serde_snapshot/utils.rs 
b/runtime/src/serde_snapshot/utils.rs index d12edd21222876..a9b953ba4851bf 100644 --- a/runtime/src/serde_snapshot/utils.rs +++ b/runtime/src/serde_snapshot/utils.rs @@ -3,7 +3,7 @@ use serde::{ Serialize, Serializer, }; #[cfg(all(test, RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] -use solana_frozen_abi::abi_example::IgnoreAsHelper; +use solana_frozen_abi::abi_example::TransparentAsHelper; // consumes an iterator and returns an object that will serialize as a serde seq #[allow(dead_code)] @@ -18,7 +18,7 @@ where } #[cfg(all(test, RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] - impl<I> IgnoreAsHelper for SerializableSequencedIterator<I> {} + impl<I> TransparentAsHelper for SerializableSequencedIterator<I> {} impl<I> Serialize for SerializableSequencedIterator<I> where @@ -57,7 +57,7 @@ where } #[cfg(all(test, RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] - impl<I> IgnoreAsHelper for SerializableSequencedIterator<I> {} + impl<I> TransparentAsHelper for SerializableSequencedIterator<I> {} impl<I> Serialize for SerializableSequencedIterator<I> where @@ -96,7 +96,7 @@ where } #[cfg(all(test, RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] - impl<I> IgnoreAsHelper for SerializableMappedIterator<I> {} + impl<I> TransparentAsHelper for SerializableMappedIterator<I> {} impl<I> Serialize for SerializableMappedIterator<I> where
diff --git a/runtime/src/snapshot_bank_utils.rs b/runtime/src/snapshot_bank_utils.rs index 5b3f83f28a259d..330669fcfa7e6d 100644 --- a/runtime/src/snapshot_bank_utils.rs +++ b/runtime/src/snapshot_bank_utils.rs @@ -35,7 +35,7 @@ use { accounts_update_notifier_interface::AccountsUpdateNotifier, utils::delete_contents_of_path, }, - solana_measure::{measure, measure::Measure}, + solana_measure::{measure::Measure, measure_time}, solana_sdk::{ clock::{Epoch, Slot}, genesis_config::GenesisConfig, @@ -369,7 +369,7 @@ pub fn bank_from_snapshot_dir( .map(|config| config.storage_access) .unwrap_or_default(); - let (storage, measure_rebuild_storages) = measure!( + let (storage, measure_rebuild_storages) = measure_time!( rebuild_storages_from_snapshot_dir( bank_snapshot, account_paths, @@ -386,7 +386,7 @@ storage, next_append_vec_id, }; - let (bank, measure_rebuild_bank) = measure!( + let (bank, measure_rebuild_bank) = measure_time!( rebuild_bank_from_snapshot( bank_snapshot, account_paths, @@ -906,10 +906,16 @@ fn bank_to_full_snapshot_archive_with( archive_format: ArchiveFormat, ) -> snapshot_utils::Result<FullSnapshotArchiveInfo> { assert!(bank.is_complete()); + // set accounts-db's latest full snapshot slot here to ensure zero lamport + // accounts are handled properly. + bank.rc + .accounts + .accounts_db + .set_latest_full_snapshot_slot(bank.slot()); bank.squash(); // Bank may not be a root bank.rehash(); // Bank accounts may have been manually modified by the caller bank.force_flush_accounts_cache(); - bank.clean_accounts(Some(bank.slot())); + bank.clean_accounts(); let calculated_accounts_hash = bank.update_accounts_hash(CalcAccountsHashDataSource::Storages, false, false); @@ -963,10 +969,16 @@ pub fn bank_to_incremental_snapshot_archive( assert!(bank.is_complete()); assert!(bank.slot() > full_snapshot_slot); + // set accounts-db's latest full snapshot slot here to ensure zero lamport + // accounts are handled properly.
+ bank.rc + .accounts + .accounts_db + .set_latest_full_snapshot_slot(full_snapshot_slot); bank.squash(); // Bank may not be a root bank.rehash(); // Bank accounts may have been manually modified by the caller bank.force_flush_accounts_cache(); - bank.clean_accounts(Some(full_snapshot_slot)); + bank.clean_accounts(); let calculated_incremental_accounts_hash = bank.update_incremental_accounts_hash(full_snapshot_slot); @@ -1707,7 +1719,7 @@ mod tests { // Ensure account1 has been cleaned/purged from everywhere bank4.squash(); - bank4.clean_accounts(Some(full_snapshot_slot)); + bank4.clean_accounts(); assert!( bank4.get_account_modified_slot(&key1.pubkey()).is_none(), "Ensure Account1 has been cleaned and purged from AccountsDb"
diff --git a/runtime/src/snapshot_minimizer.rs b/runtime/src/snapshot_minimizer.rs index 0404c3df99187f..b48f1832fc0256 100644 --- a/runtime/src/snapshot_minimizer.rs +++ b/runtime/src/snapshot_minimizer.rs @@ -13,7 +13,7 @@ use { accounts_partition, storable_accounts::StorableAccountsBySlot, }, - solana_measure::measure, + solana_measure::measure_time, solana_sdk::{ account::ReadableAccount, account_utils::StateMut, @@ -86,7 +86,7 @@ impl<'a> SnapshotMinimizer<'a> { F: Fn(&SnapshotMinimizer<'a>), { let initial_accounts_len = self.minimized_account_set.len(); - let (_, measure) = measure!(add_accounts_fn(self), name); + let (_, measure) = measure_time!(add_accounts_fn(self), name); let total_accounts_len = self.minimized_account_set.len(); let added_accounts = total_accounts_len - initial_accounts_len; @@ -212,10 +212,10 @@ impl<'a> SnapshotMinimizer<'a> { /// Remove accounts not in `minimized_account_set` from accounts_db fn minimize_accounts_db(&self) { let (minimized_slot_set, minimized_slot_set_measure) = - measure!(self.get_minimized_slot_set(), "generate minimized slot set"); + measure_time!(self.get_minimized_slot_set(), "generate minimized slot set"); info!("{minimized_slot_set_measure}"); - let ((dead_slots, dead_storages), process_snapshot_storages_measure) = measure!( + let ((dead_slots, dead_storages), process_snapshot_storages_measure) = measure_time!( self.process_snapshot_storages(minimized_slot_set), "process snapshot storages" ); @@ -227,10 +227,10 @@ .store(false, Ordering::Relaxed); let (_, purge_dead_slots_measure) = - measure!(self.purge_dead_slots(dead_slots), "purge dead slots"); + measure_time!(self.purge_dead_slots(dead_slots), "purge dead slots"); info!("{purge_dead_slots_measure}"); - let (_, drop_storages_measure) = measure!(drop(dead_storages), "drop storages"); + let (_, drop_storages_measure) = measure_time!(drop(dead_storages), "drop storages"); info!("{drop_storages_measure}"); // Turn logging back on after minimization @@ -342,10 +342,12 @@ .collect(); let _ = self.accounts_db().purge_keys_exact(purge_pubkeys.iter()); - let aligned_total: u64 = AccountsDb::page_align(total_bytes as u64); let mut shrink_in_progress = None; - if aligned_total > 0 { - shrink_in_progress = Some(self.accounts_db().get_store_for_shrink(slot, aligned_total)); + if total_bytes > 0 { + shrink_in_progress = Some( + self.accounts_db() + .get_store_for_shrink(slot, total_bytes as u64), + ); let new_storage = shrink_in_progress.as_ref().unwrap().new_storage(); let accounts = [(slot, &keep_accounts[..])];
diff --git a/runtime/src/snapshot_package.rs b/runtime/src/snapshot_package.rs index a8e7d92967d3c2..1d929227109772 100644 --- a/runtime/src/snapshot_package.rs +++ b/runtime/src/snapshot_package.rs
@@ -80,9 +80,10 @@ impl AccountsPackage {
         let accounts_delta_hash = accounts_db.get_accounts_delta_hash(slot).unwrap();
         // SAFETY: Every slot *must* have a BankHashStats entry in AccountsDb.
         let bank_hash_stats = accounts_db.get_bank_hash_stats(slot).unwrap();
+        let bank_fields_to_serialize = bank.get_fields_to_serialize();
         SupplementalSnapshotInfo {
             status_cache_slot_deltas,
-            bank_fields_to_serialize: bank.get_fields_to_serialize(),
+            bank_fields_to_serialize,
             bank_hash_stats,
             accounts_delta_hash,
             epoch_accounts_hash: bank.get_epoch_accounts_hash_to_serialize(),
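The `snapshot_utils.rs` hunks that follow migrate from the old `measure!` macro to the renamed `measure_time!` and to `measure_us!`, which skips the `Measure` object entirely when only a microsecond count is wanted. A self-contained sketch of the two shapes (these macros are simplified stand-ins, not the `solana-measure` implementations):

```rust
use std::time::Instant;

// Sketch of the `measure_time!` shape: returns the expression's value plus a
// timing artifact that can be queried or logged later.
macro_rules! measure_time {
    ($expr:expr, $name:expr) => {{
        let start = Instant::now();
        let result = $expr;
        (result, ($name, start.elapsed()))
    }};
}

// Sketch of the `measure_us!` shape: returns the value plus raw elapsed
// microseconds, ready to feed a metrics field without an `.as_us()` call.
macro_rules! measure_us {
    ($expr:expr) => {{
        let start = Instant::now();
        let result = $expr;
        (result, start.elapsed().as_micros() as u64)
    }};
}

fn main() {
    let (sum, (name, elapsed)) = measure_time!((0u64..1_000).sum::<u64>(), "sum");
    println!("{name} took {elapsed:?} (sum = {sum})");

    let (_, sleep_us) = measure_us!(std::thread::sleep(std::time::Duration::from_millis(1)));
    println!("sleep took {sleep_us}us");
}
```

The payoff is visible in the metrics block further down: fields like `flush_storages_us` become plain `u64` values, so the `.as_us()` conversions in the `datapoint_info!` call disappear.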
diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs
index 5d407eaf0ba795..1ff4c5096d0007 100644
--- a/runtime/src/snapshot_utils.rs
+++ b/runtime/src/snapshot_utils.rs
@@ -1,7 +1,9 @@
 use {
     crate::{
         bank::{BankFieldsToSerialize, BankSlotDelta},
-        serde_snapshot::{self, BankIncrementalSnapshotPersistence, SnapshotStreams},
+        serde_snapshot::{
+            self, BankIncrementalSnapshotPersistence, ExtraFieldsToSerialize, SnapshotStreams,
+        },
         snapshot_archive_info::{
             FullSnapshotArchiveInfo, IncrementalSnapshotArchiveInfo, SnapshotArchiveInfo,
             SnapshotArchiveInfoGetter,
@@ -30,7 +32,7 @@ use {
         shared_buffer_reader::{SharedBuffer, SharedBufferReader},
         utils::{move_and_async_delete_path, ACCOUNTS_RUN_DIR, ACCOUNTS_SNAPSHOT_DIR},
     },
-    solana_measure::{measure, measure::Measure},
+    solana_measure::{measure::Measure, measure_time, measure_us},
     solana_sdk::{
         clock::{Epoch, Slot},
         hash::Hash,
@@ -40,6 +42,7 @@ use {
         collections::{HashMap, HashSet},
         fmt, fs,
         io::{BufReader, BufWriter, Error as IoError, Read, Result as IoResult, Seek, Write},
+        mem,
         num::NonZeroUsize,
         ops::RangeInclusive,
         path::{Path, PathBuf},
@@ -824,7 +827,7 @@ fn serialize_snapshot(
     snapshot_version: SnapshotVersion,
     snapshot_storages: &[Arc<AccountStorageEntry>],
     slot_deltas: &[BankSlotDelta],
-    bank_fields: BankFieldsToSerialize,
+    mut bank_fields: BankFieldsToSerialize,
     bank_hash_stats: BankHashStats,
     accounts_delta_hash: AccountsDeltaHash,
     accounts_hash: AccountsHash,
@@ -855,7 +858,7 @@ fn serialize_snapshot(
         bank_snapshot_path.display(),
     );
 
-    let (_, measure_flush) = measure!({
+    let (_, flush_storages_us) = measure_us!({
         for storage in snapshot_storages {
             storage.flush().map_err(|err| {
                 AddBankSnapshotError::FlushStorage(err, storage.path().to_path_buf())
@@ -867,13 +870,21 @@
     // constructing a bank from this directory. It acts like an archive to include the full state.
     // The set of the account storages files is the necessary part of this snapshot state. Hard-link them
     // from the operational accounts/ directory to here.
-    let (_, measure_hard_linking) =
-        measure!(
-            hard_link_storages_to_snapshot(&bank_snapshot_dir, slot, snapshot_storages)
-                .map_err(AddBankSnapshotError::HardLinkStorages)?
-        );
+    let (_, hard_link_storages_us) = measure_us!(hard_link_storages_to_snapshot(
+        &bank_snapshot_dir,
+        slot,
+        snapshot_storages
+    )
+    .map_err(AddBankSnapshotError::HardLinkStorages)?);
 
     let bank_snapshot_serializer = move |stream: &mut BufWriter<File>| -> Result<()> {
+        let versioned_epoch_stakes = mem::take(&mut bank_fields.versioned_epoch_stakes);
+        let extra_fields = ExtraFieldsToSerialize {
+            lamports_per_signature: bank_fields.fee_rate_governor.lamports_per_signature,
+            incremental_snapshot_persistence: bank_incremental_snapshot_persistence,
+            epoch_accounts_hash,
+            versioned_epoch_stakes,
+        };
         serde_snapshot::serialize_bank_snapshot_into(
             stream,
             bank_fields,
@@ -881,26 +892,25 @@
             accounts_delta_hash,
             accounts_hash,
             &get_storages_to_serialize(snapshot_storages),
-            bank_incremental_snapshot_persistence,
-            epoch_accounts_hash,
+            extra_fields,
             write_version,
         )?;
         Ok(())
     };
-    let (bank_snapshot_consumed_size, bank_serialize) = measure!(
+    let (bank_snapshot_consumed_size, bank_serialize) = measure_time!(
         serialize_snapshot_data_file(&bank_snapshot_path, bank_snapshot_serializer)
             .map_err(|err| AddBankSnapshotError::SerializeBank(Box::new(err)))?,
         "bank serialize"
     );
 
     let status_cache_path = bank_snapshot_dir.join(SNAPSHOT_STATUS_CACHE_FILENAME);
-    let (status_cache_consumed_size, status_cache_serialize) = measure!(
+    let (status_cache_consumed_size, status_cache_serialize_us) = measure_us!(
         snapshot_bank_utils::serialize_status_cache(slot_deltas, &status_cache_path)
             .map_err(|err| AddBankSnapshotError::SerializeStatusCache(Box::new(err)))?
     );
 
     let version_path = bank_snapshot_dir.join(SNAPSHOT_VERSION_FILENAME);
-    let (_, measure_write_version_file) = measure!(fs::write(
+    let (_, write_version_file_us) = measure_us!(fs::write(
         &version_path,
         snapshot_version.as_str().as_bytes(),
     )
@@ -908,8 +918,8 @@
     // Mark this directory complete so it can be used. Check this flag first before selecting for deserialization.
let state_complete_path = bank_snapshot_dir.join(SNAPSHOT_STATE_COMPLETE_FILENAME); - let (_, measure_write_state_complete_file) = - measure!(fs::File::create(&state_complete_path).map_err(|err| { + let (_, write_state_complete_file_us) = measure_us!(fs::File::create(&state_complete_path) + .map_err(|err| { AddBankSnapshotError::CreateStateCompleteFile(err, state_complete_path) })?); @@ -921,22 +931,14 @@ fn serialize_snapshot( ("slot", slot, i64), ("bank_size", bank_snapshot_consumed_size, i64), ("status_cache_size", status_cache_consumed_size, i64), - ("flush_storages_us", measure_flush.as_us(), i64), - ("hard_link_storages_us", measure_hard_linking.as_us(), i64), + ("flush_storages_us", flush_storages_us, i64), + ("hard_link_storages_us", hard_link_storages_us, i64), ("bank_serialize_us", bank_serialize.as_us(), i64), - ( - "status_cache_serialize_us", - status_cache_serialize.as_us(), - i64 - ), - ( - "write_version_file_us", - measure_write_version_file.as_us(), - i64 - ), + ("status_cache_serialize_us", status_cache_serialize_us, i64), + ("write_version_file_us", write_version_file_us, i64), ( "write_state_complete_file_us", - measure_write_state_complete_file.as_us(), + write_state_complete_file_us, i64 ), ("total_us", measure_everything.as_us(), i64), @@ -1044,10 +1046,8 @@ fn archive_snapshot( .map_err(E::ArchiveSnapshotsDir)?; for storage in snapshot_storages { - let path_in_archive = Path::new(ACCOUNTS_DIR).join(AccountsFile::file_name( - storage.slot(), - storage.append_vec_id(), - )); + let path_in_archive = Path::new(ACCOUNTS_DIR) + .join(AccountsFile::file_name(storage.slot(), storage.id())); match storage.accounts.internals_for_archive() { InternalsForArchive::Mmap(data) => { let mut header = tar::Header::new_gnu(); @@ -1480,7 +1480,7 @@ pub fn hard_link_storages_to_snapshot( )?; // The appendvec could be recycled, so its filename may not be consistent to the slot and id. // Use the storage slot and id to compose a consistent file name for the hard-link file. 
-    let hardlink_filename = AccountsFile::file_name(storage.slot(), storage.append_vec_id());
+    let hardlink_filename = AccountsFile::file_name(storage.slot(), storage.id());
     let hard_link_path = snapshot_hardlink_dir.join(hardlink_filename);
     fs::hard_link(storage_path, &hard_link_path).map_err(|err| {
         HardLinkStoragesToSnapshotError::HardLinkStorage(
@@ -1695,7 +1695,7 @@ fn unarchive_snapshot(
     let num_rebuilder_threads = num_cpus::get_physical()
         .saturating_sub(parallel_divisions)
         .max(1);
-    let (version_and_storages, measure_untar) = measure!(
+    let (version_and_storages, measure_untar) = measure_time!(
         SnapshotStorageRebuilder::rebuild_storage(
             file_receiver,
             num_rebuilder_threads,
@@ -2537,10 +2537,10 @@ pub fn should_take_full_snapshot(
 pub fn should_take_incremental_snapshot(
     block_height: Slot,
     incremental_snapshot_archive_interval_slots: Slot,
-    last_full_snapshot_slot: Option<Slot>,
+    latest_full_snapshot_slot: Option<Slot>,
 ) -> bool {
     block_height % incremental_snapshot_archive_interval_slots == 0
-        && last_full_snapshot_slot.is_some()
+        && latest_full_snapshot_slot.is_some()
 }
 
 /// Creates an "accounts path" directory for tests
diff --git a/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs b/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs
index e237d7747403cb..69e6f421aae022 100644
--- a/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs
+++ b/runtime/src/snapshot_utils/snapshot_storage_rebuilder.rs
@@ -338,7 +338,7 @@ impl SnapshotStorageRebuilder {
                     )?,
                 };
 
-                Ok((storage_entry.append_vec_id(), storage_entry))
+                Ok((storage_entry.id(), storage_entry))
             })
             .collect::<Result<HashMap<AppendVecId, Arc<AccountStorageEntry>>, SnapshotError>>(
             )?;
diff --git a/runtime/src/stake_account.rs b/runtime/src/stake_account.rs
index ea4ed6dd0f624c..7bf30c088d9ea3 100644
--- a/runtime/src/stake_account.rs
+++ b/runtime/src/stake_account.rs
@@ -6,7 +6,7 @@ use {
         account_utils::StateMut,
         instruction::InstructionError,
         pubkey::Pubkey,
-        stake::state::{Delegation, StakeStateV2},
+        stake::state::{Delegation, Stake, StakeStateV2},
     },
     std::marker::PhantomData,
     thiserror::Error,
@@ -53,6 +53,13 @@ impl StakeAccount<Delegation> {
         // only wrap a stake-state which is a delegation.
         self.stake_state.delegation().unwrap()
     }
+
+    #[inline]
+    pub(crate) fn stake(&self) -> Stake {
+        // Safe to unwrap here because StakeAccount<Delegation> will always
+        // only wrap a stake-state.
+        self.stake_state.stake().unwrap()
+    }
 }
 
 impl TryFrom<AccountSharedData> for StakeAccount<Delegation> {
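The new `stake()` accessor added to `stake_account.rs` above leans on the same invariant as the existing `delegation()`: a `StakeAccount<Delegation>` can only be constructed from account data whose stake state parses as a delegated stake, so the `unwrap()` cannot panic. A minimal sketch of that construct-time-validation idea, with stand-in types (all names here are hypothetical simplifications, not the runtime's real definitions):

```rust
use std::marker::PhantomData;

enum StakeStateV2 {
    Uninitialized,
    Delegated { stake: u64 },
}

struct Delegation; // type-level marker: "the wrapped state is a delegation"

struct StakeAccount<T> {
    stake_state: StakeStateV2,
    _phantom: PhantomData<T>,
}

impl TryFrom<StakeStateV2> for StakeAccount<Delegation> {
    type Error = &'static str;

    // The single place the variant is checked; every accessor leans on it.
    fn try_from(stake_state: StakeStateV2) -> Result<Self, Self::Error> {
        match stake_state {
            StakeStateV2::Delegated { .. } => Ok(Self {
                stake_state,
                _phantom: PhantomData,
            }),
            StakeStateV2::Uninitialized => Err("account is not a delegated stake"),
        }
    }
}

impl StakeAccount<Delegation> {
    fn stake(&self) -> u64 {
        // Safe by construction: TryFrom only admits the Delegated variant.
        match &self.stake_state {
            StakeStateV2::Delegated { stake } => *stake,
            StakeStateV2::Uninitialized => unreachable!("constructor enforces delegation"),
        }
    }
}

fn main() {
    let account: StakeAccount<Delegation> =
        StakeStateV2::Delegated { stake: 42 }.try_into().unwrap();
    println!("stake: {}", account.stake());
    assert!(StakeAccount::<Delegation>::try_from(StakeStateV2::Uninitialized).is_err());
}
```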
diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs
index 3c0fb44e434a8b..0e4d7b6109ef41 100644
--- a/runtime/src/stakes.rs
+++ b/runtime/src/stakes.rs
@@ -79,8 +79,10 @@ impl StakesCache {
         // and so should be removed from cache as well.
         if account.lamports() == 0 {
             if solana_vote_program::check_id(owner) {
-                let mut stakes = self.0.write().unwrap();
-                stakes.remove_vote_account(pubkey);
+                let _old_vote_account = {
+                    let mut stakes = self.0.write().unwrap();
+                    stakes.remove_vote_account(pubkey)
+                };
             } else if solana_stake_program::check_id(owner) {
                 let mut stakes = self.0.write().unwrap();
                 stakes.remove_stake_delegation(pubkey, new_rate_activation_epoch);
@@ -96,17 +98,31 @@ impl StakesCache {
                         // Called to eagerly deserialize vote state
                         let _res = vote_account.vote_state();
                     }
-                    let mut stakes = self.0.write().unwrap();
-                    stakes.upsert_vote_account(pubkey, vote_account, new_rate_activation_epoch);
+
+                    // drop the old account after releasing the lock
+                    let _old_vote_account = {
+                        let mut stakes = self.0.write().unwrap();
+                        stakes.upsert_vote_account(
+                            pubkey,
+                            vote_account,
+                            new_rate_activation_epoch,
+                        )
+                    };
                 }
                 Err(_) => {
-                    let mut stakes = self.0.write().unwrap();
-                    stakes.remove_vote_account(pubkey)
+                    // drop the old account after releasing the lock
+                    let _old_vote_account = {
+                        let mut stakes = self.0.write().unwrap();
+                        stakes.remove_vote_account(pubkey)
+                    };
                 }
             }
         } else {
-            let mut stakes = self.0.write().unwrap();
-            stakes.remove_vote_account(pubkey)
+            // drop the old account after releasing the lock
+            let _old_vote_account = {
+                let mut stakes = self.0.write().unwrap();
+                stakes.remove_vote_account(pubkey)
+            };
         };
     } else if solana_stake_program::check_id(owner) {
         match StakeAccount::try_from(account.to_account_shared_data()) {
@@ -357,13 +373,13 @@ impl Stakes<StakeAccount> {
     /// Sum the stakes that point to the given voter_pubkey
     fn calculate_stake(
-        &self,
+        stake_delegations: &ImHashMap<Pubkey, StakeAccount>,
         voter_pubkey: &Pubkey,
         epoch: Epoch,
         stake_history: &StakeHistory,
         new_rate_activation_epoch: Option<Epoch>,
     ) -> u64 {
-        self.stake_delegations
+        stake_delegations
             .values()
             .map(StakeAccount::delegation)
             .filter(|delegation| &delegation.voter_pubkey == voter_pubkey)
@@ -380,8 +396,8 @@
             + self.vote_accounts.iter().map(get_lamports).sum::<u64>()
     }
 
-    fn remove_vote_account(&mut self, vote_pubkey: &Pubkey) {
-        self.vote_accounts.remove(vote_pubkey);
+    fn remove_vote_account(&mut self, vote_pubkey: &Pubkey) -> Option<VoteAccount> {
+        self.vote_accounts.remove(vote_pubkey).map(|(_, a)| a)
     }
 
     fn remove_stake_delegation(
@@ -406,23 +422,20 @@
         vote_pubkey: &Pubkey,
         vote_account: VoteAccount,
         new_rate_activation_epoch: Option<Epoch>,
-    ) {
+    ) -> Option<VoteAccount> {
         debug_assert_ne!(vote_account.lamports(), 0u64);
         debug_assert!(vote_account.is_deserialized());
-        // unconditionally remove existing at first; there is no dependent calculated state for
-        // votes, not like stakes (stake codepath maintains calculated stake value grouped by
-        // delegated vote pubkey)
-        let stake = match self.vote_accounts.remove(vote_pubkey) {
-            None => self.calculate_stake(
+
+        let stake_delegations = &self.stake_delegations;
+        self.vote_accounts.insert(*vote_pubkey, vote_account, || {
+            Self::calculate_stake(
+                stake_delegations,
                 vote_pubkey,
                 self.epoch,
                 &self.stake_history,
                 new_rate_activation_epoch,
-            ),
-            Some((stake, _)) => stake,
-        };
-        let entry = (stake, vote_account);
-        self.vote_accounts.insert(*vote_pubkey, entry);
+            )
+        })
     }
 
     fn upsert_stake_delegation(
@@ -492,7 +505,7 @@
     pub(crate) fn highest_staked_node(&self) -> Option<Pubkey> {
         let vote_account = self.vote_accounts.find_max_by_delegated_stake()?;
-        vote_account.node_pubkey()
+        vote_account.node_pubkey().copied()
     }
 }
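The `_old_vote_account` blocks above are a lock-scoping idiom: the `RwLock` write guard lives only inside the inner block, so the removed or replaced vote account, which may own a sizeable deserialized vote state, is dropped after the lock is released rather than inside the critical section. A small sketch of the pattern with std types (the `Registry` and `BigValue` names are hypothetical):

```rust
use std::{collections::HashMap, sync::RwLock};

struct BigValue(Vec<u8>); // stands in for a large deserialized vote account

struct Registry(RwLock<HashMap<u64, BigValue>>);

impl Registry {
    fn remove(&self, key: u64) {
        // The write guard is confined to the inner block; the removed value is
        // bound outside it, so its Drop (freeing the Vec) runs after the lock
        // has already been released, keeping the critical section short.
        let _old_value = {
            let mut map = self.0.write().unwrap();
            map.remove(&key)
        };
        // `_old_value: Option<BigValue>` is dropped here, outside the lock.
    }
}

fn main() {
    let registry = Registry(RwLock::new(HashMap::from([(1, BigValue(vec![0; 1 << 20]))])));
    registry.remove(1);
    assert!(registry.0.read().unwrap().is_empty());
}
```

Note that a binding named `_old_value` still runs `Drop` at end of scope; writing `let _ = map.remove(&key);` instead would drop the value immediately, putting the deallocation back under the lock.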
@@ -531,6 +544,23 @@ impl From<Stakes<StakeAccount>> for Stakes<Delegation> {
     }
 }
 
+impl From<Stakes<StakeAccount>> for Stakes<Stake> {
+    fn from(stakes: Stakes<StakeAccount>) -> Self {
+        let stake_delegations = stakes
+            .stake_delegations
+            .into_iter()
+            .map(|(pubkey, stake_account)| (pubkey, stake_account.stake()))
+            .collect();
+        Self {
+            vote_accounts: stakes.vote_accounts,
+            stake_delegations,
+            unused: stakes.unused,
+            epoch: stakes.epoch,
+            stake_history: stakes.stake_history,
+        }
+    }
+}
+
 impl From<Stakes<Stake>> for Stakes<Delegation> {
     fn from(stakes: Stakes<Stake>) -> Self {
         let stake_delegations = stakes
diff --git a/runtime/tests/accounts.rs b/runtime/tests/accounts.rs
index bcb6f7746ac905..48d195c5d13521 100644
--- a/runtime/tests/accounts.rs
+++ b/runtime/tests/accounts.rs
@@ -40,7 +40,7 @@ fn test_shrink_and_clean() {
             if exit_for_shrink.load(Ordering::Relaxed) {
                 break;
             }
-            accounts_for_shrink.shrink_all_slots(false, None, &EpochSchedule::default());
+            accounts_for_shrink.shrink_all_slots(false, &EpochSchedule::default(), None);
         });
 
     let mut alive_accounts = vec![];
diff --git a/runtime/tests/stake.rs b/runtime/tests/stake.rs
index 7c53e1e44a3af3..9922b8c9a5d075 100755
--- a/runtime/tests/stake.rs
+++ b/runtime/tests/stake.rs
@@ -142,7 +142,7 @@ fn test_stake_create_and_split_single_signature() {
 
     let staker_pubkey = staker_keypair.pubkey();
 
-    let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
+    let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let bank_client = BankClient::new_shared(bank.clone());
 
     let stake_address =
@@ -218,7 +218,7 @@ fn test_stake_create_and_split_to_existing_system_account() {
 
     let staker_pubkey = staker_keypair.pubkey();
 
-    let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
+    let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let bank_client = BankClient::new_shared(bank.clone());
 
     let stake_address =
@@ -593,7 +593,7 @@ fn test_create_stake_account_from_seed() {
         &solana_sdk::pubkey::new_rand(),
         1_000_000,
     );
-    let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0;
+    let (bank, _bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config);
     let mint_pubkey = mint_keypair.pubkey();
     let bank_client = BankClient::new_shared(bank.clone());
diff --git a/sanitize/Cargo.toml b/sanitize/Cargo.toml
index fb37ca8dc424a4..89f25ac149fbe9 100644
--- a/sanitize/Cargo.toml
+++ b/sanitize/Cargo.toml
@@ -9,8 +9,5 @@ homepage = { workspace = true }
 license = { workspace = true }
 edition = { workspace = true }
 
-[dependencies]
-thiserror = { workspace = true }
-
 [package.metadata.docs.rs]
 targets = ["x86_64-unknown-linux-gnu"]
diff --git a/sanitize/src/lib.rs b/sanitize/src/lib.rs
index 11ba9e79f1b27c..f0733f8636b63b 100644
--- a/sanitize/src/lib.rs
+++ b/sanitize/src/lib.rs
@@ -1,17 +1,26 @@
 //! A trait for sanitizing values and members of over the wire messages.
-use thiserror::Error;
+use {core::fmt, std::error::Error};
 
-#[derive(PartialEq, Debug, Error, Eq, Clone)]
+#[derive(PartialEq, Debug, Eq, Clone)]
 pub enum SanitizeError {
-    #[error("index out of bounds")]
     IndexOutOfBounds,
-    #[error("value out of bounds")]
     ValueOutOfBounds,
-    #[error("invalid value")]
     InvalidValue,
 }
 
+impl Error for SanitizeError {}
+
+impl fmt::Display for SanitizeError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            SanitizeError::IndexOutOfBounds => f.write_str("index out of bounds"),
+            SanitizeError::ValueOutOfBounds => f.write_str("value out of bounds"),
+            SanitizeError::InvalidValue => f.write_str("invalid value"),
+        }
+    }
+}
+
 /// A trait for sanitizing values and members of over-the-wire messages.
 ///
 /// Implementation should recursively descend through the data structure and
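Hand-rolling `Display` and `Error` above lets `solana-sanitize` drop its only dependency, `thiserror`, while remaining a drop-in for error handling that expects `std::error::Error`. A quick sketch of the unchanged call-side behavior (the enum and impls mirror the diff so the example is self-contained; `check_index` is a hypothetical caller):

```rust
use std::{error::Error, fmt};

#[derive(PartialEq, Debug, Eq, Clone)]
pub enum SanitizeError {
    IndexOutOfBounds,
    ValueOutOfBounds,
    InvalidValue,
}

impl Error for SanitizeError {}

impl fmt::Display for SanitizeError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            SanitizeError::IndexOutOfBounds => f.write_str("index out of bounds"),
            SanitizeError::ValueOutOfBounds => f.write_str("value out of bounds"),
            SanitizeError::InvalidValue => f.write_str("invalid value"),
        }
    }
}

// Hypothetical caller, to show the call side is unaffected by the change.
fn check_index(len: usize, index: usize) -> Result<(), SanitizeError> {
    if index < len {
        Ok(())
    } else {
        Err(SanitizeError::IndexOutOfBounds)
    }
}

fn main() -> Result<(), Box<dyn Error>> {
    // `?` still coerces into Box<dyn Error> because the manual Error impl exists,
    // and `to_string` still comes from Display, exactly as with thiserror.
    check_index(3, 2)?;
    assert_eq!(SanitizeError::InvalidValue.to_string(), "invalid value");
    Ok(())
}
```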
diff --git a/scripts/cargo-clippy-nightly.sh b/scripts/cargo-clippy-nightly.sh
index 12e0fc5b4ad0bb..756699408f4340 100755
--- a/scripts/cargo-clippy-nightly.sh
+++ b/scripts/cargo-clippy-nightly.sh
@@ -24,7 +24,7 @@ source "$here/../ci/rust-version.sh" nightly
 # ref: https://github.com/rust-lang/rust/issues/66287
 "$here/cargo-for-all-lock-files.sh" -- \
   "+${rust_nightly}" clippy \
-  --workspace --all-targets --features dummy-for-ci-check -- \
+  --workspace --all-targets --features dummy-for-ci-check,frozen-abi -- \
   --deny=warnings \
   --deny=clippy::default_trait_access \
   --deny=clippy::arithmetic_side_effects \
diff --git a/scripts/cargo-install-all.sh b/scripts/cargo-install-all.sh
index 029b1fbf27943d..ce0572b2af4652 100755
--- a/scripts/cargo-install-all.sh
+++ b/scripts/cargo-install-all.sh
@@ -86,9 +86,7 @@ if [[ $CI_OS_NAME = windows ]]; then
   # Limit windows to end-user command-line tools. Full validator support is not
   # yet available on windows
   BINS=(
-    cargo-build-bpf
     cargo-build-sbf
-    cargo-test-bpf
     cargo-test-sbf
     solana
     agave-install
@@ -118,9 +116,7 @@ else
   # Speed up net.sh deploys by excluding unused binaries
   if [[ -z "$validatorOnly" ]]; then
     BINS+=(
-      cargo-build-bpf
       cargo-build-sbf
-      cargo-test-bpf
       cargo-test-sbf
       solana-dos
       agave-install-init
@@ -179,40 +175,6 @@ if [[ -z "$validatorOnly" ]]; then
   cp -a sdk/sbf/* "$installDir"/bin/sdk/sbf
 fi
 
-# Add Solidity Compiler
-if [[ -z "$validatorOnly" ]]; then
-  base="https://github.com/hyperledger/solang/releases/download"
-  version="v0.3.3"
-  curlopt="-sSfL --retry 5 --retry-delay 2 --retry-connrefused"
-
-  case $(uname -s) in
-  "Linux")
-    if [[ $(uname -m) == "x86_64" ]]; then
-      arch="x86-64"
-    else
-      arch="arm64"
-    fi
-    # shellcheck disable=SC2086
-    curl $curlopt -o "$installDir/bin/solang" $base/$version/solang-linux-$arch
-    chmod 755 "$installDir/bin/solang"
-    ;;
-  "Darwin")
-    if [[ $(uname -m) == "x86_64" ]]; then
-      arch="intel"
-    else
-      arch="arm"
-    fi
-    # shellcheck disable=SC2086
-    curl $curlopt -o "$installDir/bin/solang" $base/$version/solang-mac-$arch
-    chmod 755 "$installDir/bin/solang"
-    ;;
-  *)
-    # shellcheck disable=SC2086
-    curl $curlopt -o "$installDir/bin/solang.exe" $base/$version/solang.exe
-    ;;
-  esac
-fi
-
 (
   set -x
   # deps dir can be empty
diff --git a/scripts/patch-spl-crates-for-anchor.sh b/scripts/patch-spl-crates-for-anchor.sh
index 4c3dfc2ae924fd..d90b69136e140b 100644
--- a/scripts/patch-spl-crates-for-anchor.sh
+++ b/scripts/patch-spl-crates-for-anchor.sh
@@ -70,7 +70,10 @@ patch_crates_io() {
     spl-memo = { path = "$spl_dir/memo/program" }
     spl-pod = { path = "$spl_dir/libraries/pod" }
     spl-token = { path = "$spl_dir/token/program" }
-    spl-token-2022 = { path = "$spl_dir/token/program-2022" }
+    # Avoid patching spl-token-2022 to avoid forcing anchor to use 4.0.1, which
+    # doesn't work with the monorepo forcing 4.0.0. Allow the patching again once
+    # the monorepo is on 4.0.1, or relax the dependency in the monorepo.
+    #spl-token-2022 = { path = "$spl_dir/token/program-2022" }
     spl-token-group-interface = { path = "$spl_dir/token-group/interface" }
     spl-token-metadata-interface = { path = "$spl_dir/token-metadata/interface" }
     spl-tlv-account-resolution = { path = "$spl_dir/libraries/tlv-account-resolution" }
diff --git a/scripts/reserve-cratesio-package-name.sh b/scripts/reserve-cratesio-package-name.sh
index e172e1640094d0..8b35d554985d7d 100755
--- a/scripts/reserve-cratesio-package-name.sh
+++ b/scripts/reserve-cratesio-package-name.sh
@@ -12,7 +12,7 @@ USAGE:
 FLAGS:
   --help                Display this help message
-  --no-solana-prefix    Do not require \`solana-\` prefix on PACKAGE_NAME
+  --no-prefix           Do not require \`agave-\` or \`solana-\` prefix on PACKAGE_NAME
   --publish             Upload the reserved package. Without this flag, a
                         dry-run is performed
@@ -26,7 +26,7 @@ ARGS:
 EOF
 }
 
-require_solana_prefix=true
+require_prefix=true
 maybe_publish='--dry-run'
 positional=()
 while [[ -n "$1" ]]; do
@@ -38,8 +38,8 @@ while [[ -n "$1" ]]; do
       display_help
       exit 0
       ;;
-    --no-solana-prefix)
-      require_solana_prefix=false
+    --no-prefix)
+      require_prefix=false
       ;;
     --publish)
       maybe_publish=''
@@ -89,9 +89,9 @@ if ! [[ "${package_name}" =~ ^[a-zA-Z0-9_-]{1,64} ]]; then
   exit 1
 fi
 
-if ${require_solana_prefix} && ! [[ "${package_name}" =~ ^solana- ]]; then
-  # shellcheck disable=SC2016 # backticks are not a command here
-  echo 'error: PACKAGE_NAME MUST start with `solana-`' 1>&2
+if ${require_prefix} && ! [[ "${package_name}" =~ ^(agave|solana)- ]]; then
+  # shellcheck disable=SC2016 # backticks are not a command here
+  echo 'error: PACKAGE_NAME MUST start with `agave-` or `solana-`' 1>&2
   display_help
   exit 1
 fi
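In the sdk changes that follow, `sdk/program/src/atomic_u64.rs` is promoted to a standalone `solana-atomic-u64` crate. Its job is to offer one `AtomicU64` API everywhere: a thin wrapper over `std::sync::atomic::AtomicU64` on 64-bit targets, and a `parking_lot::Mutex<u64>` fallback elsewhere, with `parking_lot::const_mutex` keeping `new` a `const fn` so the wrapper can still back a `static`. A sketch of the kind of static counter this enables (the `next_unique` helper is illustrative, not crate API):

```rust
use std::sync::atomic::{AtomicU64, Ordering};

// On 64-bit targets the crate's AtomicU64 has exactly this shape; `const fn
// new` is what allows keeping the counter in a `static`.
static COUNTER: AtomicU64 = AtomicU64::new(1);

// Hypothetical helper in the spirit of `new_unique`-style test constructors.
fn next_unique() -> u64 {
    COUNTER.fetch_add(1, Ordering::Relaxed)
}

fn main() {
    let a = next_unique();
    let b = next_unique();
    assert_ne!(a, b);
}
```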
diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml
index cba150eac570e7..7db5d780c76b07 100644
--- a/sdk/Cargo.toml
+++ b/sdk/Cargo.toml
@@ -17,8 +17,8 @@ edition = { workspace = true }
 program = []
 
 default = [
-    "borsh",
-    "full" # functionality that is not compatible or needed for on-chain programs
+  "borsh",
+  "full", # functionality that is not compatible or needed for on-chain programs
 ]
 full = [
     "byteorder",
@@ -34,14 +34,13 @@ full = [
     "sha3",
     "digest",
 ]
-borsh = ["dep:borsh", "solana-program/borsh"]
-dev-context-only-utils = [
-    "qualifier_attr"
-]
+borsh = ["dep:borsh", "solana-program/borsh", "solana-secp256k1-recover/borsh"]
+dev-context-only-utils = ["qualifier_attr"]
 frozen-abi = [
-    "dep:solana-frozen-abi",
-    "dep:solana-frozen-abi-macro",
-    "solana-program/frozen-abi",
+  "dep:solana-frozen-abi",
+  "dep:solana-frozen-abi-macro",
+  "solana-program/frozen-abi",
+  "solana-short-vec/frozen-abi",
 ]
 
 [dependencies]
@@ -58,9 +57,12 @@ derivation-path = { workspace = true }
 digest = { workspace = true, optional = true }
 ed25519-dalek = { workspace = true, optional = true }
 ed25519-dalek-bip32 = { workspace = true, optional = true }
-generic-array = { workspace = true, features = ["serde", "more_lengths"], optional = true }
+generic-array = { workspace = true, features = [
+  "serde",
+  "more_lengths",
+], optional = true }
 hmac = { workspace = true }
-itertools = { workspace = true }
+itertools = { workspace = true }
 lazy_static = { workspace = true }
 libsecp256k1 = { workspace = true, optional = true, features = ["hmac"] }
 log = { workspace = true }
@@ -79,11 +81,16 @@ serde_with = { workspace = true, features = ["macros"] }
 sha2 = { workspace = true }
 sha3 = { workspace = true, optional = true }
 siphasher = { workspace = true }
+solana-bn254 = { workspace = true }
+solana-decode-error = { workspace = true }
 solana-frozen-abi = { workspace = true, optional = true }
 solana-frozen-abi-macro = { workspace = true, optional = true }
 solana-program = { workspace = true }
+solana-program-memory = { workspace = true }
 solana-sanitize = { workspace = true }
 solana-sdk-macro = { workspace = true }
+solana-secp256k1-recover = { workspace = true }
+solana-short-vec = { workspace = true }
 thiserror = { workspace = true }
 uriparse = { workspace = true }
 
@@ -98,6 +105,7 @@ assert_matches = { workspace = true }
 curve25519-dalek = { workspace = true }
 hex = { workspace = true }
 solana-logger = { workspace = true }
+solana-program = { workspace = true, features = ["dev-context-only-utils"] }
 solana-sdk = { path = ".", features = ["dev-context-only-utils"] }
 static_assertions = { workspace = true }
 tiny-bip39 = { workspace = true }
diff --git a/sdk/atomic-u64/Cargo.toml b/sdk/atomic-u64/Cargo.toml
new file mode 100644
index 00000000000000..6b6d9ec51eed73
--- /dev/null
+++ b/sdk/atomic-u64/Cargo.toml
@@ -0,0 +1,16 @@
+[package]
+name = "solana-atomic-u64"
+description = "Solana atomic u64 implementation. For internal use only."
+documentation = "https://docs.rs/solana-atomic-u64"
+version = { workspace = true }
+authors = { workspace = true }
+repository = { workspace = true }
+homepage = { workspace = true }
+license = { workspace = true }
+edition = { workspace = true }
+
+[target.'cfg(not(target_pointer_width = "64"))'.dependencies]
+parking_lot = { workspace = true }
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
diff --git a/sdk/program/src/atomic_u64.rs b/sdk/atomic-u64/src/lib.rs
similarity index 60%
rename from sdk/program/src/atomic_u64.rs
rename to sdk/atomic-u64/src/lib.rs
index ea8695d0b51263..fae0dfa33d1ee7 100644
--- a/sdk/program/src/atomic_u64.rs
+++ b/sdk/atomic-u64/src/lib.rs
@@ -1,17 +1,17 @@
-pub(crate) use implementation::AtomicU64;
+pub use implementation::AtomicU64;
 
 #[cfg(target_pointer_width = "64")]
 mod implementation {
     use std::sync::atomic;
 
-    pub(crate) struct AtomicU64(atomic::AtomicU64);
+    pub struct AtomicU64(atomic::AtomicU64);
 
     impl AtomicU64 {
-        pub(crate) const fn new(initial: u64) -> Self {
+        pub const fn new(initial: u64) -> Self {
             Self(atomic::AtomicU64::new(initial))
         }
 
-        pub(crate) fn fetch_add(&self, v: u64) -> u64 {
+        pub fn fetch_add(&self, v: u64) -> u64 {
             self.0.fetch_add(v, atomic::Ordering::Relaxed)
         }
     }
@@ -21,14 +21,14 @@ mod implementation {
 mod implementation {
     use parking_lot::{const_mutex, Mutex};
 
-    pub(crate) struct AtomicU64(Mutex<u64>);
+    pub struct AtomicU64(Mutex<u64>);
 
     impl AtomicU64 {
-        pub(crate) const fn new(initial: u64) -> Self {
+        pub const fn new(initial: u64) -> Self {
             Self(const_mutex(initial))
         }
 
-        pub(crate) fn fetch_add(&self, v: u64) -> u64 {
+        pub fn fetch_add(&self, v: u64) -> u64 {
             let mut lock = self.0.lock();
             let i = *lock;
             *lock = i + v;
diff --git a/sdk/benches/short_vec.rs b/sdk/benches/short_vec.rs
index 0a73a7e0eb5ddb..648ae811257cb2 100644
--- a/sdk/benches/short_vec.rs
+++ b/sdk/benches/short_vec.rs
@@ -1,7 +1,7 @@
 #![feature(test)]
 extern crate test;
 
-use {bincode::deserialize, solana_sdk::short_vec::ShortVec, test::Bencher};
+use {bincode::deserialize, solana_short_vec::ShortVec, test::Bencher};
 
 // Return a ShortVec with 127 bytes
 fn create_encoded_short_vec() -> Vec<u8> {
diff --git a/sdk/bpf/.gitignore b/sdk/bpf/.gitignore
deleted file mode 100644
index e9ab3169be5448..00000000000000
--- a/sdk/bpf/.gitignore
+++ /dev/null
@@ -1,11 +0,0 @@
-/dependencies/criterion*
-/dependencies/hashbrown*
-/dependencies/llvm-native*
-/dependencies/rust-bpf-sysroot*
-/dependencies/bpf-tools*
-/dependencies/sbf-tools*
-/dependencies/xargo* -/dependencies/bin* -/dependencies/.crates.toml -/dependencies/.crates2.json -/syscalls.txt diff --git a/sdk/bpf/c/README.md b/sdk/bpf/c/README.md deleted file mode 100644 index f2c3a1022b0125..00000000000000 --- a/sdk/bpf/c/README.md +++ /dev/null @@ -1,42 +0,0 @@ -## Development - -### Quick start -To get started create a `makefile` containing: -```make -include path/to/bpf.mk -``` -and `src/program.c` containing: -```c -#include - -extern uint64_t entrypoint(const uint8_t *input) { - SolAccountInfo ka[1]; - SolParameters params = (SolParameters) { .ka = ka }; - - if (!sol_deserialize(input, ¶ms, SOL_ARRAY_SIZE(ka))) { - return ERROR_INVALID_ARGUMENT; - } - return SUCCESS; -} -``` - -Then run `make` to build `out/program.o`. -Run `make help` for more details. - -### Unit tests -Built-in support for unit testing is provided by the -[Criterion](https://criterion.readthedocs.io/en/master/index.html) test framework. -To get started create the file `test/example.c` containing: -```c -#include -#include "../src/program.c" - -Test(test_suite_name, test_case_name) { - cr_assert(true); -} -``` -Then run `make test`. - -### Limitations -* Programs must be fully contained within a single .c file -* No libc is available but `solana_sdk.h` provides a minimal set of primitives diff --git a/sdk/bpf/c/bpf.ld b/sdk/bpf/c/bpf.ld deleted file mode 100644 index 262fd549324e8e..00000000000000 --- a/sdk/bpf/c/bpf.ld +++ /dev/null @@ -1,24 +0,0 @@ -PHDRS -{ - text PT_LOAD ; - rodata PT_LOAD ; - data PT_LOAD ; - dynamic PT_DYNAMIC ; -} - -SECTIONS -{ - . = SIZEOF_HEADERS; - .text : { *(.text*) } :text - .rodata : { *(.rodata*) } :rodata - .data.rel.ro : { *(.data.rel.ro*) } :rodata - .dynamic : { *(.dynamic) } :dynamic - .dynsym : { *(.dynsym) } :data - .dynstr : { *(.dynstr) } :data - .rel.dyn : { *(.rel.dyn) } :data - /DISCARD/ : { - *(.eh_frame*) - *(.gnu.hash*) - *(.hash*) - } -} diff --git a/sdk/bpf/c/bpf.mk b/sdk/bpf/c/bpf.mk deleted file mode 100644 index 7f52f7b3080e30..00000000000000 --- a/sdk/bpf/c/bpf.mk +++ /dev/null @@ -1,313 +0,0 @@ -LOCAL_PATH := $(dir $(lastword $(MAKEFILE_LIST))) -INSTALL_SH := $(abspath $(LOCAL_PATH)/../scripts/install.sh) - -all: -.PHONY: help all clean - -ifneq ($(V),1) -_@ :=@ -endif - -INC_DIRS ?= -SRC_DIR ?= ./src -TEST_PREFIX ?= test_ -OUT_DIR ?= ./out -OS := $(shell uname) - -LLVM_DIR = $(LOCAL_PATH)../dependencies/bpf-tools/llvm -LLVM_SYSTEM_INC_DIRS := $(LLVM_DIR)/lib/clang/15.0.4/include -COMPILER_RT_DIR = $(LOCAL_PATH)../dependencies/bpf-tools/rust/lib/rustlib/bpfel-unknown-unknown/lib -STD_INC_DIRS := $(LLVM_DIR)/include -STD_LIB_DIRS := $(LLVM_DIR)/lib - -ifdef LLVM_DIR -CC := $(LLVM_DIR)/bin/clang -CXX := $(LLVM_DIR)/bin/clang++ -LLD := $(LLVM_DIR)/bin/ld.lld -OBJ_DUMP := $(LLVM_DIR)/bin/llvm-objdump -READ_ELF := $(LLVM_DIR)/bin/llvm-readelf -endif - -SYSTEM_INC_DIRS := \ - $(LOCAL_PATH)inc \ - $(LLVM_SYSTEM_INC_DIRS) \ - -C_FLAGS := \ - -Werror \ - -O2 \ - -fno-builtin \ - -std=c17 \ - $(addprefix -isystem,$(SYSTEM_INC_DIRS)) \ - $(addprefix -I,$(STD_INC_DIRS)) \ - $(addprefix -I,$(INC_DIRS)) \ - -ifeq ($(SOL_SBFV2),1) -C_FLAGS := \ - $(C_FLAGS) \ - -DSOL_SBFV2=1 -endif - -CXX_FLAGS := \ - $(C_FLAGS) \ - -std=c++17 \ - -BPF_C_FLAGS := \ - $(C_FLAGS) \ - -target bpf \ - -fPIC \ - -march=bpfel+solana - -BPF_CXX_FLAGS := \ - $(CXX_FLAGS) \ - -target bpf \ - -fPIC \ - -fomit-frame-pointer \ - -fno-exceptions \ - -fno-asynchronous-unwind-tables \ - -fno-unwind-tables \ - -march=bpfel+solana - -BPF_LLD_FLAGS := \ - -z notext \ - -shared \ - 
--Bdynamic \ - $(LOCAL_PATH)bpf.ld \ - --entry entrypoint \ - -L $(STD_LIB_DIRS) \ - -lc \ - -ifeq ($(SOL_SBFV2),1) -BPF_LLD_FLAGS := \ - $(BPF_LLD_FLAGS) \ - --pack-dyn-relocs=relr -endif - -OBJ_DUMP_FLAGS := \ - --source \ - --disassemble \ - -READ_ELF_FLAGS := \ - --all \ - -TESTFRAMEWORK_RPATH := $(abspath $(LOCAL_PATH)../dependencies/criterion/lib) -TESTFRAMEWORK_FLAGS := \ - -DSOL_TEST \ - -isystem $(LOCAL_PATH)../dependencies/criterion/include \ - -L $(LOCAL_PATH)../dependencies/criterion/lib \ - -rpath $(TESTFRAMEWORK_RPATH) \ - -lcriterion \ - -MACOS_ADJUST_TEST_DYLIB := \ -$(if $(filter $(OS),Darwin),\ - $(_@)install_name_tool -change libcriterion.3.dylib $(TESTFRAMEWORK_RPATH)/libcriterion.3.dylib, \ - : \ -) - -TEST_C_FLAGS := \ - $(C_FLAGS) \ - $(TESTFRAMEWORK_FLAGS) \ - -TEST_CXX_FLAGS := \ - $(CXX_FLAGS) \ - $(TESTFRAMEWORK_FLAGS) \ - -help: - @echo '' - @echo 'BPF Program makefile' - @echo '' - @echo 'This makefile will build BPF Programs from C or C++ source files into ELFs' - @echo '' - @echo 'Assumptions:' - @echo ' - Programs are located in the source directory: $(SRC_DIR)/' - @echo ' - Programs are named by their directory name (eg. directory name:src/foo/ -> program name:foo)' - @echo ' - Tests are located in their corresponding program directory and must being with "test_"' - @echo ' - Output files will be placed in the directory: $(OUT_DIR)' - @echo '' - @echo 'User settings' - @echo ' - The following setting are overridable on the command line, default values shown:' - @echo ' - Show commands while building: V=1' - @echo ' V=$(V)' - @echo ' - List of include directories:' - @echo ' INC_DIRS=$(INC_DIRS)' - @echo ' - List of system include directories:' - @echo ' SYSTEM_INC_DIRS=$(SYSTEM_INC_DIRS)' - @echo ' - List of standard library include directories:' - @echo ' STD_INC_DIRS=$(STD_INC_DIRS)' - @echo ' - List of standard library archive directories:' - @echo ' STD_LIB_DIRS=$(STD_LIB_DIRS)' - @echo ' - Location of source directories:' - @echo ' SRC_DIR=$(SRC_DIR)' - @echo ' - Location to place output files:' - @echo ' OUT_DIR=$(OUT_DIR)' - @echo ' - Location of LLVM:' - @echo ' LLVM_DIR=$(LLVM_DIR)' - @echo '' - @echo 'Usage:' - @echo ' - make help - This help message' - @echo ' - make all - Build all the programs and tests, run the tests' - @echo ' - make programs - Build all the programs' - @echo ' - make tests - Build and run all tests' - @echo ' - make dump_ - Dump the contents of the program to stdout' - @echo ' - make readelf_ - Display information about the ELF binary' - @echo ' - make - Build a single program by name' - @echo ' - make - Build and run a single test by name' - @echo '' - @echo 'Available programs:' - $(foreach name, $(PROGRAM_NAMES), @echo ' - $(name)'$(\n)) - @echo '' - @echo 'Available tests:' - $(foreach name, $(TEST_NAMES), @echo ' - $(name)'$(\n)) - @echo '' - @echo 'Example:' - @echo ' - Assuming a program named foo (src/foo/foo.c)' - @echo ' - make foo' - @echo ' - make dump_foo' - @echo '' - -define C_RULE -$1: $2 - @echo "[cc] $1 ($2)" - $(_@)mkdir -p $(dir $1) - $(_@)$(CC) $(BPF_C_FLAGS) -o $1 -c $2 -endef - -define CC_RULE -$1: $2 - @echo "[cxx] $1 ($2)" - $(_@)mkdir -p $(dir $1) - $(_@)$(CXX) $(BPF_CXX_FLAGS) -o $1 -c $2 -endef - -define D_RULE -$1: $2 $(LOCAL_PATH)/bpf.mk - @echo "[GEN] $1 ($2)" - $(_@)mkdir -p $(dir $1) - $(_@)$(CC) -M -MT '$(basename $1).o' $(BPF_C_FLAGS) $2 | sed 's,\($(basename $1)\)\.o[ :]*,\1.o $1 : ,g' > $1 -endef - -define DXX_RULE -$1: $2 $(LOCAL_PATH)/bpf.mk - @echo "[GEN] $1 ($2)" - $(_@)mkdir -p $(dir 
$1) - $(_@)$(CXX) -M -MT '$(basename $1).o' $(BPF_CXX_FLAGS) $2 | sed 's,\($(basename $1)\)\.o[ :]*,\1.o $1 : ,g' > $1 -endef - -define O_RULE -$1: $2 - @echo "[llc] $1 ($2)" - $(_@)mkdir -p $(dir $1) - $(_@)$(LLC) $(BPF_LLC_FLAGS) -o $1 $2 -endef - -define SO_RULE -$1: $2 - @echo "[lld] $1 ($2)" - $(_@)mkdir -p $(dir $1) - $(_@)$(LLD) $(BPF_LLD_FLAGS) -o $1 $2 $(COMPILER_RT_DIR)/libcompiler_builtins-*.rlib -ifeq (,$(wildcard $(subst .so,-keypair.json,$1))) - $(_@)solana-keygen new --no-passphrase --silent -o $(subst .so,-keypair.json,$1) -endif - @echo To deploy this program: - @echo $$$$ solana program deploy $(abspath $1) -endef - -define TEST_C_RULE -$1: $2 - @echo "[test cc] $1 ($2)" - $(_@)mkdir -p $(dir $1) - $(_@)$(CC) $(TEST_C_FLAGS) -o $1 $2 - $(_@)$(MACOS_ADJUST_TEST_DYLIB) $1 -endef - -define TEST_CC_RULE -$1: $2 - @echo "[test cxx] $1 ($2)" - $(_@)mkdir -p $(dir $1) - $(_@)$(CXX) $(TEST_CXX_FLAGS) -o $1 $2 - $(_@)$(MACOS_ADJUST_TEST_DYLIB) $1 -endef - -define TEST_D_RULE -$1: $2 $(LOCAL_PATH)/bpf.mk - @echo "[GEN] $1 ($2)" - $(_@)mkdir -p $(dir $1) - $(_@)$(CC) -M -MT '$(basename $1)' $(TEST_C_FLAGS) $2 | sed 's,\($(basename $1)\)[ :]*,\1 $1 : ,g' > $1 -endef - -define TEST_DXX_RULE -$1: $2 $(LOCAL_PATH)/bpf.mk - @echo "[GEN] $1 ($2)" - $(_@)mkdir -p $(dir $1) - $(_@)$(CXX) -M -MT '$(basename $1)' $(TEST_CXX_FLAGS) $2 | sed 's,\($(basename $1)\)[ :]*,\1 $1 : ,g' > $1 -endef - -define TEST_EXEC_RULE -$1: $2 - LD_LIBRARY_PATH=$(TESTFRAMEWORK_RPATH) \ - $2$(\n) -endef - -.PHONY: $(INSTALL_SH) -$(INSTALL_SH): - $(_@)$(INSTALL_SH) - -PROGRAM_NAMES := $(notdir $(basename $(wildcard $(SRC_DIR)/*))) - -define \n - - -endef - -all: programs tests - -$(foreach PROGRAM, $(PROGRAM_NAMES), \ - $(eval -include $(wildcard $(OUT_DIR)/$(PROGRAM)/*.d)) \ - \ - $(eval $(PROGRAM): %: $(addprefix $(OUT_DIR)/, %.so)) \ - $(eval $(PROGRAM)_SRCS := \ - $(addprefix $(SRC_DIR)/$(PROGRAM)/, \ - $(filter-out $(TEST_PREFIX)%,$(notdir $(wildcard $(SRC_DIR)/$(PROGRAM)/*.c $(SRC_DIR)/$(PROGRAM)/*.cc))))) \ - $(eval $(PROGRAM)_OBJS := $(subst $(SRC_DIR), $(OUT_DIR), \ - $(patsubst %.c,%.o, \ - $(patsubst %.cc,%.o,$($(PROGRAM)_SRCS))))) \ - $(eval $($(PROGRAM)_SRCS): $(INSTALL_SH)) \ - $(eval $(call SO_RULE,$(OUT_DIR)/$(PROGRAM).so,$($(PROGRAM)_OBJS))) \ - $(foreach _,$(filter %.c,$($(PROGRAM)_SRCS)), \ - $(eval $(call D_RULE,$(subst $(SRC_DIR),$(OUT_DIR),$(_:%.c=%.d)),$_)) \ - $(eval $(call C_RULE,$(subst $(SRC_DIR),$(OUT_DIR),$(_:%.c=%.o)),$_))) \ - $(foreach _,$(filter %.cc,$($(PROGRAM)_SRCS)), \ - $(eval $(call DXX_RULE,$(subst $(SRC_DIR),$(OUT_DIR),$(_:%.cc=%.d)),$_)) \ - $(eval $(call CC_RULE,$(subst $(SRC_DIR),$(OUT_DIR),$(_:%.cc=%.o)),$_))) \ - \ - $(eval TESTS := $(notdir $(basename $(wildcard $(SRC_DIR)/$(PROGRAM)/$(TEST_PREFIX)*.c)))) \ - $(eval $(TESTS) : %: $(addprefix $(OUT_DIR)/$(PROGRAM)/, %)) \ - $(eval TEST_NAMES := $(TEST_NAMES) $(TESTS)) \ - $(foreach TEST, $(TESTS), \ - $(eval $(TEST)_SRCS := \ - $(addprefix $(SRC_DIR)/$(PROGRAM)/, \ - $(notdir $(wildcard $(SRC_DIR)/$(PROGRAM)/$(TEST).c $(SRC_DIR)/$(PROGRAM)/$(TEST).cc)))) \ - $(eval $($(TEST)_SRCS): $(INSTALL_SH)) \ - $(foreach _,$(filter %.c,$($(TEST)_SRCS)), \ - $(eval $(call TEST_D_RULE,$(subst $(SRC_DIR),$(OUT_DIR),$(_:%.c=%.d)),$_)) \ - $(eval $(call TEST_C_RULE,$(subst $(SRC_DIR),$(OUT_DIR),$(_:%.c=%)),$_))) \ - $(foreach _,$(filter %.cc, $($(TEST)_SRCS)), \ - $(eval $(call TEST_DXX_RULE,$(subst $(SRC_DIR),$(OUT_DIR),$(_:%.cc=%.d)),$_)) \ - $(eval $(call TEST_CC_RULE,$(subst $(SRC_DIR),$(OUT_DIR),$(_:%.cc=%)),$_))) \ - $(eval $(call 
TEST_EXEC_RULE,$(TEST),$(addprefix $(OUT_DIR)/$(PROGRAM)/, $(TEST)))) \ - ) \ -) - -.PHONY: $(PROGRAM_NAMES) -programs: $(PROGRAM_NAMES) - -.PHONY: $(TEST_NAMES) -tests: $(TEST_NAMES) - -dump_%: % - $(_@)$(OBJ_DUMP) $(OBJ_DUMP_FLAGS) $(addprefix $(OUT_DIR)/, $(addsuffix .so, $<)) - -readelf_%: % - $(_@)$(READ_ELF) $(READ_ELF_FLAGS) $(addprefix $(OUT_DIR)/, $(addsuffix .so, $<)) - -clean: - rm -rf $(OUT_DIR) diff --git a/sdk/bpf/c/inc/deserialize_deprecated.h b/sdk/bpf/c/inc/deserialize_deprecated.h deleted file mode 100644 index db9c6de8aa402c..00000000000000 --- a/sdk/bpf/c/inc/deserialize_deprecated.h +++ /dev/null @@ -1 +0,0 @@ -#include \ No newline at end of file diff --git a/sdk/bpf/c/inc/sol/alt_bn128.h b/sdk/bpf/c/inc/sol/alt_bn128.h deleted file mode 100644 index 1d49b454c1fa9e..00000000000000 --- a/sdk/bpf/c/inc/sol/alt_bn128.h +++ /dev/null @@ -1,51 +0,0 @@ -#pragma once -/** - * @brief Solana bn128 elliptic curve addition, multiplication, and pairing -**/ - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Addition on elliptic curves alt_bn128 - * - * @param group_op ... - * @param input ... - * @param input_size ... - * @param result 64 byte array to hold the result. ... - * @return 0 if executed successfully - */ -/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/sbf/c/inc/sol/inc/alt_bn128.inc AND RUN `cargo run --bin gen-headers` */ -#ifndef SOL_SBFV2 -uint64_t sol_alt_bn128( - const uint64_t *group_op, - const uint8_t *input, - const uint64_t input_size, - uint8_t *result -); -#else -typedef uint64_t(*sol_alt_bn128_pointer_type)( - const uint64_t *group_op, - const uint8_t *input, - const uint64_t input_size, - uint8_t *result -); -static uint64_t sol_alt_bn128( - const uint64_t *group_op arg1, - const uint8_t *input arg2, - const uint64_t input_size arg3, - uint8_t *result - arg4) { - sol_alt_bn128_pointer_type sol_alt_bn128_pointer = (sol_alt_bn128_pointer_type) 2551807235; - return sol_alt_bn128_pointer(arg1, arg2, arg3, arg4); -} -#endif - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/assert.h b/sdk/bpf/c/inc/sol/assert.h deleted file mode 100644 index 77217d1025989b..00000000000000 --- a/sdk/bpf/c/inc/sol/assert.h +++ /dev/null @@ -1,56 +0,0 @@ -#pragma once -/** - * @brief Solana assert and panic utilities - */ - -#include - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * Panics - * - * Prints the line number where the panic occurred and then causes - * the BPF VM to immediately halt execution. No accounts' data are updated - */ -/* DO NOT MODIFY THIS GENERATED FILE. 
INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/assert.inc AND RUN `cargo run --bin gen-headers` */ -#ifndef SOL_SBFV2 -void sol_panic_(const char *, uint64_t, uint64_t, uint64_t); -#else -typedef void(*sol_panic__pointer_type)(const char *, uint64_t, uint64_t, uint64_t); -static void sol_panic_(const char * arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4) { - sol_panic__pointer_type sol_panic__pointer = (sol_panic__pointer_type) 1751159739; - sol_panic__pointer(arg1, arg2, arg3, arg4); -} -#endif -#define sol_panic() sol_panic_(__FILE__, sizeof(__FILE__), __LINE__, 0) - -/** - * Asserts - */ -#define sol_assert(expr) \ -if (!(expr)) { \ - sol_panic(); \ -} - -#ifdef SOL_TEST -/** - * Stub functions when building tests - */ -#include -#include - -void sol_panic_(const char *file, uint64_t len, uint64_t line, uint64_t column) { - printf("Panic in %s at %d:%d\n", file, line, column); - abort(); -} -#endif - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/big_mod_exp.h b/sdk/bpf/c/inc/sol/big_mod_exp.h deleted file mode 100644 index b90c66a1930b80..00000000000000 --- a/sdk/bpf/c/inc/sol/big_mod_exp.h +++ /dev/null @@ -1,32 +0,0 @@ -#pragma once -/** - * @brief Solana big_mod_exp system call -**/ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Big integer modular exponentiation - * - * @param bytes Pointer to BigModExpParam struct - * @param result 32 byte array to hold the result - * @return 0 if executed successfully - */ -/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/big_mod_exp.inc AND RUN `cargo run --bin gen-headers` */ -#ifndef SOL_SBFV2 -uint64_t sol_big_mod_exp(const uint8_t *, uint8_t *); -#else -typedef uint64_t(*sol_big_mod_exp_pointer_type)(const uint8_t *, uint8_t *); -static uint64_t sol_big_mod_exp(const uint8_t * arg1, uint8_t * arg2) { - sol_big_mod_exp_pointer_type sol_big_mod_exp_pointer = (sol_big_mod_exp_pointer_type) 2014202901; - return sol_big_mod_exp_pointer(arg1, arg2); -} -#endif - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/blake3.h b/sdk/bpf/c/inc/sol/blake3.h deleted file mode 100644 index 04c12989e98400..00000000000000 --- a/sdk/bpf/c/inc/sol/blake3.h +++ /dev/null @@ -1,39 +0,0 @@ -#pragma once -/** - * @brief Solana Blake3 system call - */ - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Length of a Blake3 hash result - */ -#define BLAKE3_RESULT_LENGTH 32 - -/** - * Blake3 - * - * @param bytes Array of byte arrays - * @param bytes_len Number of byte arrays - * @param result 32 byte array to hold the result - */ -/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/blake3.inc AND RUN `cargo run --bin gen-headers` */ -#ifndef SOL_SBFV2 -uint64_t sol_blake3(const SolBytes *, int, const uint8_t *); -#else -typedef uint64_t(*sol_blake3_pointer_type)(const SolBytes *, int, const uint8_t *); -static uint64_t sol_blake3(const SolBytes * arg1, int arg2, const uint8_t * arg3) { - sol_blake3_pointer_type sol_blake3_pointer = (sol_blake3_pointer_type) 390877474; - return sol_blake3_pointer(arg1, arg2, arg3); -} -#endif - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/constants.h b/sdk/bpf/c/inc/sol/constants.h deleted file mode 100644 index dcb7785bd1344c..00000000000000 --- a/sdk/bpf/c/inc/sol/constants.h +++ /dev/null @@ -1,18 +0,0 @@ -#pragma once -/** - * @brief Solana constants - */ - -/** - * The Solana runtime provides a memory region that is available to programs at - * a fixed virtual address and length. 
The builtin functions `sol_calloc` and - * `sol_free` call into the Solana runtime to allocate from this memory region - * for heap operations. Because the memory region is directly available to - * programs another option is a program can implement their own heap directly on - * top of that region. If a program chooses to implement their own heap they - * should not call the builtin heap functions because they will conflict. - * `HEAP_START_ADDRESS` and `HEAP_LENGTH` specify the memory region's start - * virtual address and length. - */ -#define HEAP_START_ADDRESS (uint64_t)0x300000000 -#define HEAP_LENGTH (uint64_t)(32 * 1024) diff --git a/sdk/bpf/c/inc/sol/cpi.h b/sdk/bpf/c/inc/sol/cpi.h deleted file mode 100644 index b3748cff2240f9..00000000000000 --- a/sdk/bpf/c/inc/sol/cpi.h +++ /dev/null @@ -1,138 +0,0 @@ -#pragma once -/** - * @brief Solana Cross-Program Invocation - */ - -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Maximum CPI instruction data size. 10 KiB was chosen to ensure that CPI - * instructions are not more limited than transaction instructions if the size - * of transactions is doubled in the future. - */ -static const uint64_t MAX_CPI_INSTRUCTION_DATA_LEN = 10240; - -/** - * Maximum CPI instruction accounts. 255 was chosen to ensure that instruction - * accounts are always within the maximum instruction account limit for BPF - * program instructions. - */ -static const uint8_t MAX_CPI_INSTRUCTION_ACCOUNTS = 255; - -/** - * Maximum number of account info structs that can be used in a single CPI - * invocation. A limit on account info structs is effectively the same as - * limiting the number of unique accounts. 128 was chosen to match the max - * number of locked accounts per transaction (MAX_TX_ACCOUNT_LOCKS). - */ -static const uint16_t MAX_CPI_ACCOUNT_INFOS = 128; - -/** - * Account Meta - */ -typedef struct { - SolPubkey *pubkey; /** An account's public key */ - bool is_writable; /** True if the `pubkey` can be loaded as a read-write account */ - bool is_signer; /** True if an Instruction requires a Transaction signature matching `pubkey` */ -} SolAccountMeta; - -/** - * Instruction - */ -typedef struct { - SolPubkey *program_id; /** Pubkey of the instruction processor that executes this instruction */ - SolAccountMeta *accounts; /** Metadata for what accounts should be passed to the instruction processor */ - uint64_t account_len; /** Number of SolAccountMetas */ - uint8_t *data; /** Opaque data passed to the instruction processor */ - uint64_t data_len; /** Length of the data in bytes */ -} SolInstruction; - -/** - * Internal cross-program invocation function - */ -/* DO NOT MODIFY THIS GENERATED FILE. 
INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/cpi.inc AND RUN `cargo run --bin gen-headers` */ -#ifndef SOL_SBFV2 -uint64_t sol_invoke_signed_c( - const SolInstruction *, - const SolAccountInfo *, - int, - const SolSignerSeeds *, - int -); -#else -typedef uint64_t(*sol_invoke_signed_c_pointer_type)( - const SolInstruction *, - const SolAccountInfo *, - int, - const SolSignerSeeds *, - int -); -static uint64_t sol_invoke_signed_c( - const SolInstruction * arg1, - const SolAccountInfo * arg2, - int arg3, - const SolSignerSeeds * arg4, - int - arg5) { - sol_invoke_signed_c_pointer_type sol_invoke_signed_c_pointer = (sol_invoke_signed_c_pointer_type) 2720767109; - return sol_invoke_signed_c_pointer(arg1, arg2, arg3, arg4, arg5); -} -#endif - -/** - * Invoke another program and sign for some of the keys - * - * @param instruction Instruction to process - * @param account_infos Accounts used by instruction - * @param account_infos_len Length of account_infos array - * @param seeds Seed bytes used to sign program accounts - * @param seeds_len Length of the seeds array - */ -static uint64_t sol_invoke_signed( - const SolInstruction *instruction, - const SolAccountInfo *account_infos, - int account_infos_len, - const SolSignerSeeds *signers_seeds, - int signers_seeds_len -) { - return sol_invoke_signed_c( - instruction, - account_infos, - account_infos_len, - signers_seeds, - signers_seeds_len - ); -} -/** - * Invoke another program - * - * @param instruction Instruction to process - * @param account_infos Accounts used by instruction - * @param account_infos_len Length of account_infos array -*/ -static uint64_t sol_invoke( - const SolInstruction *instruction, - const SolAccountInfo *account_infos, - int account_infos_len -) { - const SolSignerSeeds signers_seeds[] = {{}}; - return sol_invoke_signed( - instruction, - account_infos, - account_infos_len, - signers_seeds, - 0 - ); -} - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/deserialize.h b/sdk/bpf/c/inc/sol/deserialize.h deleted file mode 100644 index e5060549581dbe..00000000000000 --- a/sdk/bpf/c/inc/sol/deserialize.h +++ /dev/null @@ -1,137 +0,0 @@ -#pragma once -/** - * @brief Solana BPF loader deserializer to be used when deploying - * a program with `BPFLoader2111111111111111111111111111111111` or - * `BPFLoaderUpgradeab1e11111111111111111111111` - */ - -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Maximum number of bytes a program may add to an account during a single realloc - */ -#define MAX_PERMITTED_DATA_INCREASE (1024 * 10) - -/** - * De-serializes the input parameters into usable types - * - * Use this function to deserialize the buffer passed to the program entrypoint - * into usable types. This function does not perform copy deserialization, - * instead it populates the pointers and lengths in SolAccountInfo and data so - * that any modification to lamports or account data take place on the original - * buffer. Doing so also eliminates the need to serialize back into the buffer - * at the end of the program. - * - * @param input Source buffer containing serialized input parameters - * @param params Pointer to a SolParameters structure - * @return Boolean true if successful. 
- */ -static bool sol_deserialize( - const uint8_t *input, - SolParameters *params, - uint64_t ka_num -) { - if (NULL == input || NULL == params) { - return false; - } - params->ka_num = *(uint64_t *) input; - input += sizeof(uint64_t); - - for (int i = 0; i < params->ka_num; i++) { - uint8_t dup_info = input[0]; - input += sizeof(uint8_t); - - if (i >= ka_num) { - if (dup_info == UINT8_MAX) { - input += sizeof(uint8_t); - input += sizeof(uint8_t); - input += sizeof(uint8_t); - input += 4; // padding - input += sizeof(SolPubkey); - input += sizeof(SolPubkey); - input += sizeof(uint64_t); - uint64_t data_len = *(uint64_t *) input; - input += sizeof(uint64_t); - input += data_len; - input += MAX_PERMITTED_DATA_INCREASE; - input = (uint8_t*)(((uint64_t)input + 8 - 1) & ~(8 - 1)); // padding - input += sizeof(uint64_t); - } else { - input += 7; // padding - } - continue; - } - if (dup_info == UINT8_MAX) { - // is signer? - params->ka[i].is_signer = *(uint8_t *) input != 0; - input += sizeof(uint8_t); - - // is writable? - params->ka[i].is_writable = *(uint8_t *) input != 0; - input += sizeof(uint8_t); - - // executable? - params->ka[i].executable = *(uint8_t *) input; - input += sizeof(uint8_t); - - input += 4; // padding - - // key - params->ka[i].key = (SolPubkey *) input; - input += sizeof(SolPubkey); - - // owner - params->ka[i].owner = (SolPubkey *) input; - input += sizeof(SolPubkey); - - // lamports - params->ka[i].lamports = (uint64_t *) input; - input += sizeof(uint64_t); - - // account data - params->ka[i].data_len = *(uint64_t *) input; - input += sizeof(uint64_t); - params->ka[i].data = (uint8_t *) input; - input += params->ka[i].data_len; - input += MAX_PERMITTED_DATA_INCREASE; - input = (uint8_t*)(((uint64_t)input + 8 - 1) & ~(8 - 1)); // padding - - // rent epoch - params->ka[i].rent_epoch = *(uint64_t *) input; - input += sizeof(uint64_t); - } else { - params->ka[i].is_signer = params->ka[dup_info].is_signer; - params->ka[i].is_writable = params->ka[dup_info].is_writable; - params->ka[i].executable = params->ka[dup_info].executable; - params->ka[i].key = params->ka[dup_info].key; - params->ka[i].owner = params->ka[dup_info].owner; - params->ka[i].lamports = params->ka[dup_info].lamports; - params->ka[i].data_len = params->ka[dup_info].data_len; - params->ka[i].data = params->ka[dup_info].data; - params->ka[i].rent_epoch = params->ka[dup_info].rent_epoch; - input += 7; // padding - } - } - - params->data_len = *(uint64_t *) input; - input += sizeof(uint64_t); - params->data = input; - input += params->data_len; - - params->program_id = (SolPubkey *) input; - input += sizeof(SolPubkey); - - return true; -} - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/deserialize_deprecated.h b/sdk/bpf/c/inc/sol/deserialize_deprecated.h deleted file mode 100644 index 4a38eaed678530..00000000000000 --- a/sdk/bpf/c/inc/sol/deserialize_deprecated.h +++ /dev/null @@ -1,119 +0,0 @@ -#pragma once -/** - * @brief Solana deprecated BPF loader deserializer to be used when deploying - * a program with `BPFLoader1111111111111111111111111111111111` - */ - - #include - #include - #include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * De-serializes the input parameters into usable types - * - * Use this function to deserialize the buffer passed to the program entrypoint - * into usable types. 
This function does not perform copy deserialization, - * instead it populates the pointers and lengths in SolAccountInfo and data so - * that any modification to lamports or account data take place on the original - * buffer. Doing so also eliminates the need to serialize back into the buffer - * at the end of the program. - * - * @param input Source buffer containing serialized input parameters - * @param params Pointer to a SolParameters structure - * @return Boolean true if successful. - */ -static bool sol_deserialize_deprecated( - const uint8_t *input, - SolParameters *params, - uint64_t ka_num -) { - if (NULL == input || NULL == params) { - return false; - } - params->ka_num = *(uint64_t *) input; - input += sizeof(uint64_t); - - for (int i = 0; i < params->ka_num; i++) { - uint8_t dup_info = input[0]; - input += sizeof(uint8_t); - - if (i >= ka_num) { - if (dup_info == UINT8_MAX) { - input += sizeof(uint8_t); - input += sizeof(uint8_t); - input += sizeof(SolPubkey); - input += sizeof(uint64_t); - input += *(uint64_t *) input; - input += sizeof(uint64_t); - input += sizeof(SolPubkey); - input += sizeof(uint8_t); - input += sizeof(uint64_t); - } - continue; - } - if (dup_info == UINT8_MAX) { - // is signer? - params->ka[i].is_signer = *(uint8_t *) input != 0; - input += sizeof(uint8_t); - - // is writable? - params->ka[i].is_writable = *(uint8_t *) input != 0; - input += sizeof(uint8_t); - - // key - params->ka[i].key = (SolPubkey *) input; - input += sizeof(SolPubkey); - - // lamports - params->ka[i].lamports = (uint64_t *) input; - input += sizeof(uint64_t); - - // account data - params->ka[i].data_len = *(uint64_t *) input; - input += sizeof(uint64_t); - params->ka[i].data = (uint8_t *) input; - input += params->ka[i].data_len; - - // owner - params->ka[i].owner = (SolPubkey *) input; - input += sizeof(SolPubkey); - - // executable? 
- params->ka[i].executable = *(uint8_t *) input; - input += sizeof(uint8_t); - - // rent epoch - params->ka[i].rent_epoch = *(uint64_t *) input; - input += sizeof(uint64_t); - } else { - params->ka[i].is_signer = params->ka[dup_info].is_signer; - params->ka[i].key = params->ka[dup_info].key; - params->ka[i].lamports = params->ka[dup_info].lamports; - params->ka[i].data_len = params->ka[dup_info].data_len; - params->ka[i].data = params->ka[dup_info].data; - params->ka[i].owner = params->ka[dup_info].owner; - params->ka[i].executable = params->ka[dup_info].executable; - params->ka[i].rent_epoch = params->ka[dup_info].rent_epoch; - } - } - - params->data_len = *(uint64_t *) input; - input += sizeof(uint64_t); - params->data = input; - input += params->data_len; - - params->program_id = (SolPubkey *) input; - input += sizeof(SolPubkey); - - return true; -} - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/entrypoint.h b/sdk/bpf/c/inc/sol/entrypoint.h deleted file mode 100644 index d3ab21a953912f..00000000000000 --- a/sdk/bpf/c/inc/sol/entrypoint.h +++ /dev/null @@ -1,53 +0,0 @@ -#pragma once -/** - * @brief Solana program entrypoint - */ - -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Keyed Account - */ -typedef struct { - SolPubkey *key; /** Public key of the account */ - uint64_t *lamports; /** Number of lamports owned by this account */ - uint64_t data_len; /** Length of data in bytes */ - uint8_t *data; /** On-chain data within this account */ - SolPubkey *owner; /** Program that owns this account */ - uint64_t rent_epoch; /** The epoch at which this account will next owe rent */ - bool is_signer; /** Transaction was signed by this account's key? */ - bool is_writable; /** Is the account writable? */ - bool executable; /** This account's data contains a loaded program (and is now read-only) */ -} SolAccountInfo; - -/** - * Structure that the program's entrypoint input data is deserialized into. - */ -typedef struct { - SolAccountInfo* ka; /** Pointer to an array of SolAccountInfo, must already - point to an array of SolAccountInfos */ - uint64_t ka_num; /** Number of SolAccountInfo entries in `ka` */ - const uint8_t *data; /** pointer to the instruction data */ - uint64_t data_len; /** Length in bytes of the instruction data */ - const SolPubkey *program_id; /** program_id of the currently executing program */ -} SolParameters; - -/** - * Program instruction entrypoint - * - * @param input Buffer of serialized input parameters. Use sol_deserialize() to decode - * @return 0 if the instruction executed successfully - */ -uint64_t entrypoint(const uint8_t *input); - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/inc/alt_bn128.inc b/sdk/bpf/c/inc/sol/inc/alt_bn128.inc deleted file mode 100644 index 8ba4a925021cd2..00000000000000 --- a/sdk/bpf/c/inc/sol/inc/alt_bn128.inc +++ /dev/null @@ -1,67 +0,0 @@ -#pragma once -/** - * @brief Solana bn128 elliptic curve addition, multiplication, and pairing -**/ - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Output length for the add operation. - */ -#define ALT_BN128_ADDITION_OUTPUT_LEN 64 - -/** - * Output length for the add operation. - */ -#define ALT_BN128_MULTIPLICATION_OUTPUT_LEN 64 - -/** - * Output length for pairing operation. - */ -#define ALT_BN128_PAIRING_OUTPUT_LEN 32 - -/** - * Add operation. - */ -#define ALT_BN128_ADD 0 - -/** - * Subtraction operation. - */ -#define ALT_BN128_SUB 1 - -/** - * Multiplication operation. 
- */ -#define ALT_BN128_MUL 2 - -/** - * Pairing operation. - */ -#define ALT_BN128_PAIRING 3 - -/** - * Group operation on the alt_bn128 elliptic curve, selected by group_op - * - * @param group_op ... - * @param input ... - * @param input_size ... - * @param result 64 byte array to hold the result. ... - * @return 0 if executed successfully - */ -@SYSCALL uint64_t sol_alt_bn128_group_op( - const uint64_t group_op, - const uint8_t *input, - const uint64_t input_size, - uint8_t *result -); - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/inc/assert.inc b/sdk/bpf/c/inc/sol/inc/assert.inc deleted file mode 100644 index fa523115e2f73a..00000000000000 --- a/sdk/bpf/c/inc/sol/inc/assert.inc +++ /dev/null @@ -1,47 +0,0 @@ -#pragma once -/** - * @brief Solana assert and panic utilities - */ - -#include - -#ifdef __cplusplus -extern "C" { -#endif - - -/** - * Panics - * - * Prints the line number where the panic occurred and then causes - * the BPF VM to immediately halt execution. No accounts' data are updated - */ -@SYSCALL void sol_panic_(const char *, uint64_t, uint64_t, uint64_t); -#define sol_panic() sol_panic_(__FILE__, sizeof(__FILE__), __LINE__, 0) - -/** - * Asserts - */ -#define sol_assert(expr) \ -if (!(expr)) { \ - sol_panic(); \ -} - -#ifdef SOL_TEST -/** - * Stub functions when building tests - */ -#include -#include - -void sol_panic_(const char *file, uint64_t len, uint64_t line, uint64_t column) { - printf("Panic in %s at %llu:%llu\n", file, line, column); - abort(); -} -#endif - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/inc/big_mod_exp.inc b/sdk/bpf/c/inc/sol/inc/big_mod_exp.inc deleted file mode 100644 index ce5c6656296d3f..00000000000000 --- a/sdk/bpf/c/inc/sol/inc/big_mod_exp.inc +++ /dev/null @@ -1,23 +0,0 @@ -#pragma once -/** - * @brief Solana big_mod_exp system call -**/ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Big integer modular exponentiation - * - * @param bytes Pointer to BigModExpParam struct - * @param result 32 byte array to hold the result - * @return 0 if executed successfully - */ -@SYSCALL uint64_t sol_big_mod_exp(const uint8_t *, uint8_t *); - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/inc/blake3.inc b/sdk/bpf/c/inc/sol/inc/blake3.inc deleted file mode 100644 index b2cfd7b3362a46..00000000000000 --- a/sdk/bpf/c/inc/sol/inc/blake3.inc +++ /dev/null @@ -1,30 +0,0 @@ -#pragma once -/** - * @brief Solana Blake3 system call - */ - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Length of a Blake3 hash result - */ -#define BLAKE3_RESULT_LENGTH 32 - -/** - * Blake3 - * - * @param bytes Array of byte arrays - * @param bytes_len Number of byte arrays - * @param result 32 byte array to hold the result - */ -@SYSCALL uint64_t sol_blake3(const SolBytes *, int, const uint8_t *); - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/inc/cpi.inc b/sdk/bpf/c/inc/sol/inc/cpi.inc deleted file mode 100644 index 41ce4fb01a691b..00000000000000 --- a/sdk/bpf/c/inc/sol/inc/cpi.inc +++ /dev/null @@ -1,117 +0,0 @@ -#pragma once -/** - * @brief Solana Cross-Program Invocation - */ - -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Maximum CPI instruction data size. 10 KiB was chosen to ensure that CPI - * instructions are not more limited than transaction instructions if the size - * of transactions is doubled in the future. - */ -static const uint64_t MAX_CPI_INSTRUCTION_DATA_LEN = 10240; - -/** - * Maximum CPI instruction accounts.
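A usage sketch for the hashing syscalls above (sol_blake3 here; sol_sha256 and sol_keccak256 take the same arguments), assuming <solana_sdk.h> is on the include path; SolBytes and SOL_ARRAY_SIZE come from types.h later in this diff:

#include <solana_sdk.h>

/* Hash two byte slices in a single sol_blake3 call. */
static uint64_t hash_pair(const uint8_t *a, uint64_t a_len,
                          const uint8_t *b, uint64_t b_len,
                          uint8_t result[BLAKE3_RESULT_LENGTH]) {
  const SolBytes slices[] = {{a, a_len}, {b, b_len}};
  return sol_blake3(slices, SOL_ARRAY_SIZE(slices), result);
}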
255 was chosen to ensure that instruction - * accounts are always within the maximum instruction account limit for BPF - * program instructions. - */ -static const uint8_t MAX_CPI_INSTRUCTION_ACCOUNTS = 255; - -/** - * Maximum number of account info structs that can be used in a single CPI - * invocation. A limit on account info structs is effectively the same as - * limiting the number of unique accounts. 128 was chosen to match the max - * number of locked accounts per transaction (MAX_TX_ACCOUNT_LOCKS). - */ -static const uint16_t MAX_CPI_ACCOUNT_INFOS = 128; - -/** - * Account Meta - */ -typedef struct { - SolPubkey *pubkey; /** An account's public key */ - bool is_writable; /** True if the `pubkey` can be loaded as a read-write account */ - bool is_signer; /** True if an Instruction requires a Transaction signature matching `pubkey` */ -} SolAccountMeta; - -/** - * Instruction - */ -typedef struct { - SolPubkey *program_id; /** Pubkey of the instruction processor that executes this instruction */ - SolAccountMeta *accounts; /** Metadata for what accounts should be passed to the instruction processor */ - uint64_t account_len; /** Number of SolAccountMetas */ - uint8_t *data; /** Opaque data passed to the instruction processor */ - uint64_t data_len; /** Length of the data in bytes */ -} SolInstruction; - -/** - * Internal cross-program invocation function - */ -@SYSCALL uint64_t sol_invoke_signed_c( - const SolInstruction *, - const SolAccountInfo *, - int, - const SolSignerSeeds *, - int -); - -/** - * Invoke another program and sign for some of the keys - * - * @param instruction Instruction to process - * @param account_infos Accounts used by instruction - * @param account_infos_len Length of account_infos array - * @param seeds Seed bytes used to sign program accounts - * @param seeds_len Length of the seeds array - */ -static uint64_t sol_invoke_signed( - const SolInstruction *instruction, - const SolAccountInfo *account_infos, - int account_infos_len, - const SolSignerSeeds *signers_seeds, - int signers_seeds_len -) { - return sol_invoke_signed_c( - instruction, - account_infos, - account_infos_len, - signers_seeds, - signers_seeds_len - ); -} -/** - * Invoke another program - * - * @param instruction Instruction to process - * @param account_infos Accounts used by instruction - * @param account_infos_len Length of account_infos array -*/ -static uint64_t sol_invoke( - const SolInstruction *instruction, - const SolAccountInfo *account_infos, - int account_infos_len -) { - const SolSignerSeeds signers_seeds[] = {{}}; - return sol_invoke_signed( - instruction, - account_infos, - account_infos_len, - signers_seeds, - 0 - ); -} - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/inc/keccak.inc b/sdk/bpf/c/inc/sol/inc/keccak.inc deleted file mode 100644 index fc882372c1112d..00000000000000 --- a/sdk/bpf/c/inc/sol/inc/keccak.inc +++ /dev/null @@ -1,30 +0,0 @@ -#pragma once -/** - * @brief Solana keccak system call -**/ - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Length of a Keccak hash result - */ -#define KECCAK_RESULT_LENGTH 32 - -/** - * Keccak - * - * @param bytes Array of byte arrays - * @param bytes_len Number of byte arrays - * @param result 32 byte array to hold the result - */ -@SYSCALL uint64_t sol_keccak256(const SolBytes *, int, uint8_t *); - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/inc/log.inc b/sdk/bpf/c/inc/sol/inc/log.inc deleted file mode 100644 index c6f7e05da19d61..00000000000000 --- 
a/sdk/bpf/c/inc/sol/inc/log.inc +++ /dev/null @@ -1,103 +0,0 @@ -#pragma once -/** - * @brief Solana logging utilities - */ - -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Prints a string to stdout - */ -@SYSCALL void sol_log_(const char *, uint64_t); -#define sol_log(message) sol_log_(message, sol_strlen(message)) - -/** - * Prints 64-bit values represented in hexadecimal to stdout - */ -@SYSCALL void sol_log_64_(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t); -#define sol_log_64 sol_log_64_ - -/** - * Prints the current compute unit consumption to stdout - */ -@SYSCALL void sol_log_compute_units_(); -#define sol_log_compute_units() sol_log_compute_units_() - -/** - * Prints the hexadecimal representation of an array - * - * @param array The array to print - */ -static void sol_log_array(const uint8_t *array, int len) { - for (int j = 0; j < len; j++) { - sol_log_64(0, 0, 0, j, array[j]); - } -} - -/** - * Print the base64 representation of some arrays. - */ -@SYSCALL void sol_log_data(SolBytes *, uint64_t); - -/** - * Prints the program's input parameters - * - * @param params Pointer to a SolParameters structure - */ -static void sol_log_params(const SolParameters *params) { - sol_log("- Program identifier:"); - sol_log_pubkey(params->program_id); - - sol_log("- Number of KeyedAccounts"); - sol_log_64(0, 0, 0, 0, params->ka_num); - for (int i = 0; i < params->ka_num; i++) { - sol_log(" - Is signer"); - sol_log_64(0, 0, 0, 0, params->ka[i].is_signer); - sol_log(" - Is writable"); - sol_log_64(0, 0, 0, 0, params->ka[i].is_writable); - sol_log(" - Key"); - sol_log_pubkey(params->ka[i].key); - sol_log(" - Lamports"); - sol_log_64(0, 0, 0, 0, *params->ka[i].lamports); - sol_log(" - data"); - sol_log_array(params->ka[i].data, params->ka[i].data_len); - sol_log(" - Owner"); - sol_log_pubkey(params->ka[i].owner); - sol_log(" - Executable"); - sol_log_64(0, 0, 0, 0, params->ka[i].executable); - sol_log(" - Rent Epoch"); - sol_log_64(0, 0, 0, 0, params->ka[i].rent_epoch); - } - sol_log("- Instruction data\0"); - sol_log_array(params->data, params->data_len); -} - -#ifdef SOL_TEST -/** - * Stub functions when building tests - */ -#include - -void sol_log_(const char *s, uint64_t len) { - printf("Program log: %s\n", s); -} -void sol_log_64(uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5) { - printf("Program log: %llu, %llu, %llu, %llu, %llu\n", arg1, arg2, arg3, arg4, arg5); -} - -void sol_log_compute_units_() { - printf("Program consumption: __ units remaining\n"); -} -#endif - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/inc/pubkey.inc b/sdk/bpf/c/inc/sol/inc/pubkey.inc deleted file mode 100644 index 5e86e8d39675f3..00000000000000 --- a/sdk/bpf/c/inc/sol/inc/pubkey.inc +++ /dev/null @@ -1,107 +0,0 @@ -#pragma once -/** - * @brief Solana Public key - */ - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Size of Public key in bytes - */ -#define SIZE_PUBKEY 32 - -/** - * Public key - */ -typedef struct { - uint8_t x[SIZE_PUBKEY]; -} SolPubkey; - -/** - * Prints the hexadecimal representation of a public key - * - * @param key The public key to print - */ -@SYSCALL void sol_log_pubkey(const SolPubkey *); - -/** - * Compares two public keys - * - * @param one First public key - * @param two Second public key - * @return true if the same - */ -static bool SolPubkey_same(const SolPubkey *one, const SolPubkey *two) { - for (int i = 0; i < sizeof(*one); i++) { - if (one->x[i] != two->x[i]) {
- return false; - } - } - return true; -} - -/** - * Seed used to create a program address or passed to sol_invoke_signed - */ -typedef struct { - const uint8_t *addr; /** Seed bytes */ - uint64_t len; /** Length of the seed bytes */ -} SolSignerSeed; - -/** - * Seeds used by a signer to create a program address or passed to - * sol_invoke_signed - */ -typedef struct { - const SolSignerSeed *addr; /** An array of a signer's seeds */ - uint64_t len; /** Number of seeds */ -} SolSignerSeeds; - -/** - * Create a program address - * - * @param seeds Seed bytes used to sign program accounts - * @param seeds_len Length of the seeds array - * @param program_id Program id of the signer - * @param program_address Program address created, filled on return - */ -@SYSCALL uint64_t sol_create_program_address(const SolSignerSeed *, int, const SolPubkey *, SolPubkey *); - -/** - * Try to find a program address and return corresponding bump seed - * - * @param seeds Seed bytes used to sign program accounts - * @param seeds_len Length of the seeds array - * @param program_id Program id of the signer - * @param program_address Program address created, filled on return - * @param bump_seed Bump seed required to create a valid program address - */ -@SYSCALL uint64_t sol_try_find_program_address(const SolSignerSeed *, int, const SolPubkey *, SolPubkey *, uint8_t *); - -#ifdef SOL_TEST -/** - * Stub functions when building tests - */ -#include - -void sol_log_pubkey( - const SolPubkey *pubkey -) { - printf("Program log: "); - for (int i = 0; i < SIZE_PUBKEY; i++) { - printf("%02x ", pubkey->x[i]); - } - printf("\n"); -} - -#endif - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/inc/return_data.inc b/sdk/bpf/c/inc/sol/inc/return_data.inc deleted file mode 100644 index 9c571f489aeceb..00000000000000 --- a/sdk/bpf/c/inc/sol/inc/return_data.inc +++ /dev/null @@ -1,41 +0,0 @@ -#pragma once -/** - * @brief Solana return data system calls -**/ - -#include -#include - -#ifdef __cplusplus -extern "C" -{ -#endif - -/** - * Maximum size of return data - */ -#define MAX_RETURN_DATA 1024 - -/** - * Set the return data - * - * @param bytes byte array to set - * @param bytes_len length of byte array. This may not exceed MAX_RETURN_DATA. - */ -@SYSCALL void sol_set_return_data(const uint8_t *, uint64_t); - -/** - * Get the return data - * - * @param bytes byte buffer - * @param bytes_len maximum length of buffer - * @param program_id the program_id which set the return data. Only set if there was some return data (the function returns non-zero).
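A combined sketch of the seed types above with the CPI helpers from cpi.h earlier in this diff, assuming <solana_sdk.h> is on the include path; the "vault" seed and error mapping are illustrative:

#include <solana_sdk.h>

/* Derive a program address and sign a cross-program invocation with its seeds. */
static uint64_t invoke_as_pda(const SolInstruction *instruction,
                              const SolAccountInfo *account_infos,
                              int account_infos_len,
                              const SolPubkey *program_id) {
  SolPubkey derived;
  uint8_t bump;
  const SolSignerSeed seed = {(const uint8_t *)"vault", 5};
  if (SUCCESS != sol_try_find_program_address(&seed, 1, program_id,
                                              &derived, &bump)) {
    return ERROR_INVALID_ARGUMENT;
  }
  const SolSignerSeed signer_seeds[] = {seed, {&bump, 1}};
  const SolSignerSeeds signers = {signer_seeds, SOL_ARRAY_SIZE(signer_seeds)};
  return sol_invoke_signed(instruction, account_infos, account_infos_len,
                           &signers, 1);
}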
- * @return length of return data (may exceed bytes_len if the return data is longer) - */ -@SYSCALL uint64_t sol_get_return_data(uint8_t *, uint64_t, SolPubkey *); - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/inc/secp256k1.inc b/sdk/bpf/c/inc/sol/inc/secp256k1.inc deleted file mode 100644 index e42ac5fb94371b..00000000000000 --- a/sdk/bpf/c/inc/sol/inc/secp256k1.inc +++ /dev/null @@ -1,41 +0,0 @@ -#pragma once -/** - * @brief Solana secp256k1 system call - */ - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** Length of a secp256k1 recover input hash */ -#define SECP256K1_RECOVER_HASH_LENGTH 32 -/** Length of a secp256k1 input signature */ -#define SECP256K1_RECOVER_SIGNATURE_LENGTH 64 -/** Length of a secp256k1 recover result */ -#define SECP256K1_RECOVER_RESULT_LENGTH 64 - -/** The hash provided to a sol_secp256k1_recover is invalid */ -#define SECP256K1_RECOVER_ERROR_INVALID_HASH 1 -/** The recovery_id provided to a sol_secp256k1_recover is invalid */ -#define SECP256K1_RECOVER_ERROR_INVALID_RECOVERY_ID 2 -/** The signature provided to a sol_secp256k1_recover is invalid */ -#define SECP256K1_RECOVER_ERROR_INVALID_SIGNATURE 3 - -/** - * Recover public key from a signed message. - * - * @param hash Hashed message - * @param recovery_id Tag used for public key recovery from signatures. Can be 0 or 1 - * @param signature An ECDSA signature - * @param result 64 byte array to hold the result. A recovered public key - * @return 0 if executed successfully - */ -@SYSCALL uint64_t sol_secp256k1_recover(const uint8_t *, uint64_t, const uint8_t *, uint8_t *); - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/inc/sha.inc b/sdk/bpf/c/inc/sol/inc/sha.inc deleted file mode 100644 index 8acc35cc687868..00000000000000 --- a/sdk/bpf/c/inc/sol/inc/sha.inc +++ /dev/null @@ -1,30 +0,0 @@ -#pragma once -/** - * @brief Solana sha system call - */ - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Length of a sha256 hash result - */ -#define SHA256_RESULT_LENGTH 32 - -/** - * Sha256 - * - * @param bytes Array of byte arrays - * @param bytes_len Number of byte arrays - * @param result 32 byte array to hold the result - */ -@SYSCALL uint64_t sol_sha256(const SolBytes *, int, uint8_t *); - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/keccak.h b/sdk/bpf/c/inc/sol/keccak.h deleted file mode 100644 index 213eb4cae18ba6..00000000000000 --- a/sdk/bpf/c/inc/sol/keccak.h +++ /dev/null @@ -1,39 +0,0 @@ -#pragma once -/** - * @brief Solana keccak system call -**/ - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Length of a Keccak hash result - */ -#define KECCAK_RESULT_LENGTH 32 - -/** - * Keccak - * - * @param bytes Array of byte arrays - * @param bytes_len Number of byte arrays - * @param result 32 byte array to hold the result - */ -/* DO NOT MODIFY THIS GENERATED FILE.
INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/keccak.inc AND RUN `cargo run --bin gen-headers` */ -#ifndef SOL_SBFV2 -uint64_t sol_keccak256(const SolBytes *, int, uint8_t *); -#else -typedef uint64_t(*sol_keccak256_pointer_type)(const SolBytes *, int, uint8_t *); -static uint64_t sol_keccak256(const SolBytes * arg1, int arg2, uint8_t * arg3) { - sol_keccak256_pointer_type sol_keccak256_pointer = (sol_keccak256_pointer_type) 3615046331; - return sol_keccak256_pointer(arg1, arg2, arg3); -} -#endif - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/log.h b/sdk/bpf/c/inc/sol/log.h deleted file mode 100644 index 6dd594e14b4521..00000000000000 --- a/sdk/bpf/c/inc/sol/log.h +++ /dev/null @@ -1,139 +0,0 @@ -#pragma once -/** - * @brief Solana logging utilities - */ - -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Prints a string to stdout - */ -/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/log.inc AND RUN `cargo run --bin gen-headers` */ -#ifndef SOL_SBFV2 -void sol_log_(const char *, uint64_t); -#else -typedef void(*sol_log__pointer_type)(const char *, uint64_t); -static void sol_log_(const char * arg1, uint64_t arg2) { - sol_log__pointer_type sol_log__pointer = (sol_log__pointer_type) 544561597; - sol_log__pointer(arg1, arg2); -} -#endif -#define sol_log(message) sol_log_(message, sol_strlen(message)) - -/** - * Prints 64-bit values represented in hexadecimal to stdout - */ -/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/log.inc AND RUN `cargo run --bin gen-headers` */ -#ifndef SOL_SBFV2 -void sol_log_64_(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t); -#else -typedef void(*sol_log_64__pointer_type)(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t); -static void sol_log_64_(uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5) { - sol_log_64__pointer_type sol_log_64__pointer = (sol_log_64__pointer_type) 1546269048; - sol_log_64__pointer(arg1, arg2, arg3, arg4, arg5); -} -#endif -#define sol_log_64 sol_log_64_ - -/** - * Prints the current compute unit consumption to stdout - */ -/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/log.inc AND RUN `cargo run --bin gen-headers` */ -#ifndef SOL_SBFV2 -void sol_log_compute_units_(); -#else -typedef void(*sol_log_compute_units__pointer_type)(); -static void sol_log_compute_units_() { - sol_log_compute_units__pointer_type sol_log_compute_units__pointer = (sol_log_compute_units__pointer_type) 1387942038; - sol_log_compute_units__pointer(); -} -#endif -#define sol_log_compute_units() sol_log_compute_units_() - -/** - * Prints the hexadecimal representation of an array - * - * @param array The array to print - */ -static void sol_log_array(const uint8_t *array, int len) { - for (int j = 0; j < len; j++) { - sol_log_64(0, 0, 0, j, array[j]); - } -} - -/** - * Print the base64 representation of some arrays. - */ -/* DO NOT MODIFY THIS GENERATED FILE.
INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/log.inc AND RUN `cargo run --bin gen-headers` */ -#ifndef SOL_SBFV2 -void sol_log_data(SolBytes *, uint64_t); -#else -typedef void(*sol_log_data_pointer_type)(SolBytes *, uint64_t); -static void sol_log_data(SolBytes * arg1, uint64_t arg2) { - sol_log_data_pointer_type sol_log_data_pointer = (sol_log_data_pointer_type) 1930933300; - sol_log_data_pointer(arg1, arg2); -} -#endif - -/** - * Prints the program's input parameters - * - * @param params Pointer to a SolParameters structure - */ -static void sol_log_params(const SolParameters *params) { - sol_log("- Program identifier:"); - sol_log_pubkey(params->program_id); - - sol_log("- Number of KeyedAccounts"); - sol_log_64(0, 0, 0, 0, params->ka_num); - for (int i = 0; i < params->ka_num; i++) { - sol_log(" - Is signer"); - sol_log_64(0, 0, 0, 0, params->ka[i].is_signer); - sol_log(" - Is writable"); - sol_log_64(0, 0, 0, 0, params->ka[i].is_writable); - sol_log(" - Key"); - sol_log_pubkey(params->ka[i].key); - sol_log(" - Lamports"); - sol_log_64(0, 0, 0, 0, *params->ka[i].lamports); - sol_log(" - data"); - sol_log_array(params->ka[i].data, params->ka[i].data_len); - sol_log(" - Owner"); - sol_log_pubkey(params->ka[i].owner); - sol_log(" - Executable"); - sol_log_64(0, 0, 0, 0, params->ka[i].executable); - sol_log(" - Rent Epoch"); - sol_log_64(0, 0, 0, 0, params->ka[i].rent_epoch); - } - sol_log("- Instruction data\0"); - sol_log_array(params->data, params->data_len); -} - -#ifdef SOL_TEST -/** - * Stub functions when building tests - */ -#include - -void sol_log_(const char *s, uint64_t len) { - printf("Program log: %s\n", s); -} -void sol_log_64(uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5) { - printf("Program log: %llu, %llu, %llu, %llu, %llu\n", arg1, arg2, arg3, arg4, arg5); -} - -void sol_log_compute_units_() { - printf("Program consumption: __ units remaining\n"); -} -#endif - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/pubkey.h b/sdk/bpf/c/inc/sol/pubkey.h deleted file mode 100644 index a05ae210d58785..00000000000000 --- a/sdk/bpf/c/inc/sol/pubkey.h +++ /dev/null @@ -1,134 +0,0 @@ -#pragma once -/** - * @brief Solana Public key - */ - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Size of Public key in bytes - */ -#define SIZE_PUBKEY 32 - -/** - * Public key - */ -typedef struct { - uint8_t x[SIZE_PUBKEY]; -} SolPubkey; - -/** - * Prints the hexadecimal representation of a public key - * - * @param key The public key to print - */ -/* DO NOT MODIFY THIS GENERATED FILE. 
INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/pubkey.inc AND RUN `cargo run --bin gen-headers` */ -#ifndef SOL_SBFV2 -void sol_log_pubkey(const SolPubkey *); -#else -typedef void(*sol_log_pubkey_pointer_type)(const SolPubkey *); -static void sol_log_pubkey(const SolPubkey * arg1) { - sol_log_pubkey_pointer_type sol_log_pubkey_pointer = (sol_log_pubkey_pointer_type) 2129692874; - sol_log_pubkey_pointer(arg1); -} -#endif - -/** - * Compares two public keys - * - * @param one First public key - * @param two Second public key - * @return true if the same - */ -static bool SolPubkey_same(const SolPubkey *one, const SolPubkey *two) { - for (int i = 0; i < sizeof(*one); i++) { - if (one->x[i] != two->x[i]) { - return false; - } - } - return true; -} - -/** - * Seed used to create a program address or passed to sol_invoke_signed - */ -typedef struct { - const uint8_t *addr; /** Seed bytes */ - uint64_t len; /** Length of the seed bytes */ -} SolSignerSeed; - -/** - * Seeds used by a signer to create a program address or passed to - * sol_invoke_signed - */ -typedef struct { - const SolSignerSeed *addr; /** An array of a signer's seeds */ - uint64_t len; /** Number of seeds */ -} SolSignerSeeds; - -/** - * Create a program address - * - * @param seeds Seed bytes used to sign program accounts - * @param seeds_len Length of the seeds array - * @param program_id Program id of the signer - * @param program_address Program address created, filled on return - */ -/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/pubkey.inc AND RUN `cargo run --bin gen-headers` */ -#ifndef SOL_SBFV2 -uint64_t sol_create_program_address(const SolSignerSeed *, int, const SolPubkey *, SolPubkey *); -#else -typedef uint64_t(*sol_create_program_address_pointer_type)(const SolSignerSeed *, int, const SolPubkey *, SolPubkey *); -static uint64_t sol_create_program_address(const SolSignerSeed * arg1, int arg2, const SolPubkey * arg3, SolPubkey * arg4) { - sol_create_program_address_pointer_type sol_create_program_address_pointer = (sol_create_program_address_pointer_type) 2474062396; - return sol_create_program_address_pointer(arg1, arg2, arg3, arg4); -} -#endif - -/** - * Try to find a program address and return corresponding bump seed - * - * @param seeds Seed bytes used to sign program accounts - * @param seeds_len Length of the seeds array - * @param program_id Program id of the signer - * @param program_address Program address created, filled on return - * @param bump_seed Bump seed required to create a valid program address - */ -/* DO NOT MODIFY THIS GENERATED FILE. 
INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/pubkey.inc AND RUN `cargo run --bin gen-headers` */ -#ifndef SOL_SBFV2 -uint64_t sol_try_find_program_address(const SolSignerSeed *, int, const SolPubkey *, SolPubkey *, uint8_t *); -#else -typedef uint64_t(*sol_try_find_program_address_pointer_type)(const SolSignerSeed *, int, const SolPubkey *, SolPubkey *, uint8_t *); -static uint64_t sol_try_find_program_address(const SolSignerSeed * arg1, int arg2, const SolPubkey * arg3, SolPubkey * arg4, uint8_t * arg5) { - sol_try_find_program_address_pointer_type sol_try_find_program_address_pointer = (sol_try_find_program_address_pointer_type) 1213221432; - return sol_try_find_program_address_pointer(arg1, arg2, arg3, arg4, arg5); -} -#endif - -#ifdef SOL_TEST -/** - * Stub functions when building tests - */ -#include - -void sol_log_pubkey( - const SolPubkey *pubkey -) { - printf("Program log: "); - for (int i = 0; i < SIZE_PUBKEY; i++) { - printf("%02x ", pubkey->x[i]); - } - printf("\n"); -} - -#endif - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/return_data.h b/sdk/bpf/c/inc/sol/return_data.h deleted file mode 100644 index 6afb13513a4b3d..00000000000000 --- a/sdk/bpf/c/inc/sol/return_data.h +++ /dev/null @@ -1,59 +0,0 @@ -#pragma once -/** - * @brief Solana return data system calls -**/ - -#include -#include - -#ifdef __cplusplus -extern "C" -{ -#endif - -/** - * Maximum size of return data - */ -#define MAX_RETURN_DATA 1024 - -/** - * Set the return data - * - * @param bytes byte array to set - * @param bytes_len length of byte array. This may not exceed MAX_RETURN_DATA. - */ -/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/return_data.inc AND RUN `cargo run --bin gen-headers` */ -#ifndef SOL_SBFV2 -void sol_set_return_data(const uint8_t *, uint64_t); -#else -typedef void(*sol_set_return_data_pointer_type)(const uint8_t *, uint64_t); -static void sol_set_return_data(const uint8_t * arg1, uint64_t arg2) { - sol_set_return_data_pointer_type sol_set_return_data_pointer = (sol_set_return_data_pointer_type) 2720453611; - sol_set_return_data_pointer(arg1, arg2); -} -#endif - -/** - * Get the return data - * - * @param bytes byte buffer - * @param bytes_len maximum length of buffer - * @param program_id the program_id which set the return data. Only set if there was some return data (the function returns non-zero). - * @return length of return data (may exceed bytes_len if the return data is longer) - */ -/* DO NOT MODIFY THIS GENERATED FILE.
INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/return_data.inc AND RUN `cargo run --bin gen-headers` */ -#ifndef SOL_SBFV2 -uint64_t sol_get_return_data(uint8_t *, uint64_t, SolPubkey *); -#else -typedef uint64_t(*sol_get_return_data_pointer_type)(uint8_t *, uint64_t, SolPubkey *); -static uint64_t sol_get_return_data(uint8_t * arg1, uint64_t arg2, SolPubkey * arg3) { - sol_get_return_data_pointer_type sol_get_return_data_pointer = (sol_get_return_data_pointer_type) 1562527204; - return sol_get_return_data_pointer(arg1, arg2, arg3); -} -#endif - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/secp256k1.h b/sdk/bpf/c/inc/sol/secp256k1.h deleted file mode 100644 index f973defe0e3f78..00000000000000 --- a/sdk/bpf/c/inc/sol/secp256k1.h +++ /dev/null @@ -1,50 +0,0 @@ -#pragma once -/** - * @brief Solana secp256k1 system call - */ - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** Length of a secp256k1 recover input hash */ -#define SECP256K1_RECOVER_HASH_LENGTH 32 -/** Length of a secp256k1 input signature */ -#define SECP256K1_RECOVER_SIGNATURE_LENGTH 64 -/** Length of a secp256k1 recover result */ -#define SECP256K1_RECOVER_RESULT_LENGTH 64 - -/** The hash provided to a sol_secp256k1_recover is invalid */ -#define SECP256K1_RECOVER_ERROR_INVALID_HASH 1 -/** The recovery_id provided to a sol_secp256k1_recover is invalid */ -#define SECP256K1_RECOVER_ERROR_INVALID_RECOVERY_ID 2 -/** The signature provided to a sol_secp256k1_recover is invalid */ -#define SECP256K1_RECOVER_ERROR_INVALID_SIGNATURE 3 - -/** - * Recover public key from a signed message. - * - * @param hash Hashed message - * @param recovery_id Tag used for public key recovery from signatures. Can be 0 or 1 - * @param signature An ECDSA signature - * @param result 64 byte array to hold the result. A recovered public key - * @return 0 if executed successfully - */ -/* DO NOT MODIFY THIS GENERATED FILE. INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/secp256k1.inc AND RUN `cargo run --bin gen-headers` */ -#ifndef SOL_SBFV2 -uint64_t sol_secp256k1_recover(const uint8_t *, uint64_t, const uint8_t *, uint8_t *); -#else -typedef uint64_t(*sol_secp256k1_recover_pointer_type)(const uint8_t *, uint64_t, const uint8_t *, uint8_t *); -static uint64_t sol_secp256k1_recover(const uint8_t * arg1, uint64_t arg2, const uint8_t * arg3, uint8_t * arg4) { - sol_secp256k1_recover_pointer_type sol_secp256k1_recover_pointer = (sol_secp256k1_recover_pointer_type) 400819024; - return sol_secp256k1_recover_pointer(arg1, arg2, arg3, arg4); -} -#endif - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/sha.h b/sdk/bpf/c/inc/sol/sha.h deleted file mode 100644 index ad776e8e914713..00000000000000 --- a/sdk/bpf/c/inc/sol/sha.h +++ /dev/null @@ -1,39 +0,0 @@ -#pragma once -/** - * @brief Solana sha system call - */ - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Length of a sha256 hash result - */ -#define SHA256_RESULT_LENGTH 32 - -/** - * Sha256 - * - * @param bytes Array of byte arrays - * @param bytes_len Number of byte arrays - * @param result 32 byte array to hold the result - */ -/* DO NOT MODIFY THIS GENERATED FILE. 
INSTEAD CHANGE sdk/bpf/c/inc/sol/inc/sha.inc AND RUN `cargo run --bin gen-headers` */ -#ifndef SOL_SBFV2 -uint64_t sol_sha256(const SolBytes *, int, uint8_t *); -#else -typedef uint64_t(*sol_sha256_pointer_type)(const SolBytes *, int, uint8_t *); -static uint64_t sol_sha256(const SolBytes * arg1, int arg2, uint8_t * arg3) { - sol_sha256_pointer_type sol_sha256_pointer = (sol_sha256_pointer_type) 301243782; - return sol_sha256_pointer(arg1, arg2, arg3); -} -#endif - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/string.h b/sdk/bpf/c/inc/sol/string.h deleted file mode 100644 index 18de272c9bcbb6..00000000000000 --- a/sdk/bpf/c/inc/sol/string.h +++ /dev/null @@ -1,116 +0,0 @@ -#pragma once -/** - * @brief Solana string and memory system calls and utilities - */ - -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Copies memory - */ -static void *sol_memcpy(void *dst, const void *src, int len) { - for (int i = 0; i < len; i++) { - *((uint8_t *)dst + i) = *((const uint8_t *)src + i); - } - return dst; -} - -/** - * Compares memory - */ -static int sol_memcmp(const void *s1, const void *s2, int n) { - for (int i = 0; i < n; i++) { - uint8_t diff = *((uint8_t *)s1 + i) - *((const uint8_t *)s2 + i); - if (diff) { - return diff; - } - } - return 0; -} - -/** - * Fill a byte string with a byte value - */ -static void *sol_memset(void *b, int c, size_t len) { - uint8_t *a = (uint8_t *) b; - while (len > 0) { - *a = c; - a++; - len--; - } - return b; -} - -/** - * Find length of string - */ -static size_t sol_strlen(const char *s) { - size_t len = 0; - while (*s) { - len++; - s++; - } - return len; -} - -/** - * Alloc zero-initialized memory - */ -static void *sol_calloc(size_t nitems, size_t size) { - // Bump allocator - uint64_t* pos_ptr = (uint64_t*)HEAP_START_ADDRESS; - - uint64_t pos = *pos_ptr; - if (pos == 0) { - /** First time, set starting position */ - pos = HEAP_START_ADDRESS + HEAP_LENGTH; - } - - uint64_t bytes = (uint64_t)(nitems * size); - if (size == 0 || - (!(nitems == 0 || size == 0) && - !(nitems == bytes / size))) { - /** Overflow */ - return NULL; - } - if (pos < bytes) { - /** Saturated */ - pos = 0; - } else { - pos -= bytes; - } - - uint64_t align = size; - align--; - align |= align >> 1; - align |= align >> 2; - align |= align >> 4; - align |= align >> 8; - align |= align >> 16; - align |= align >> 32; - align++; - pos &= ~(align - 1); - if (pos < HEAP_START_ADDRESS + sizeof(uint8_t*)) { - return NULL; - } - *pos_ptr = pos; - return (void*)pos; -} - -/** - * Deallocates the memory previously allocated by sol_calloc - */ -static void sol_free(void *ptr) { - // I'm a bump allocator, I don't free -} - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/sol/types.h b/sdk/bpf/c/inc/sol/types.h deleted file mode 100644 index ea565f1f940c70..00000000000000 --- a/sdk/bpf/c/inc/sol/types.h +++ /dev/null @@ -1,141 +0,0 @@ -#pragma once -/** - * @brief Solana types for BPF programs - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Pick up static_assert if C11 or greater - * - * Inlined here until <assert.h> is available - */ -#if (defined _ISOC11_SOURCE || (defined __STDC_VERSION__ && __STDC_VERSION__ >= 201112L)) && !defined (__cplusplus) -#undef static_assert -#define static_assert _Static_assert -#endif - -/** - * Numeric types - */ -#ifndef __LP64__ -#error LP64 data model required -#endif - -typedef signed char int8_t; -typedef unsigned char uint8_t; -typedef signed short int16_t; -typedef unsigned short uint16_t;
-typedef signed int int32_t; -typedef unsigned int uint32_t; -typedef signed long int int64_t; -typedef unsigned long int uint64_t; -typedef int64_t ssize_t; -typedef uint64_t size_t; - -#if defined (__cplusplus) || defined(static_assert) -static_assert(sizeof(int8_t) == 1); -static_assert(sizeof(uint8_t) == 1); -static_assert(sizeof(int16_t) == 2); -static_assert(sizeof(uint16_t) == 2); -static_assert(sizeof(int32_t) == 4); -static_assert(sizeof(uint32_t) == 4); -static_assert(sizeof(int64_t) == 8); -static_assert(sizeof(uint64_t) == 8); -#endif - -/** - * Minimum of signed integral types - */ -#define INT8_MIN (-128) -#define INT16_MIN (-32767-1) -#define INT32_MIN (-2147483647-1) -#define INT64_MIN (-9223372036854775807L-1) - -/** - * Maximum of signed integral types - */ -#define INT8_MAX (127) -#define INT16_MAX (32767) -#define INT32_MAX (2147483647) -#define INT64_MAX (9223372036854775807L) - -/** - * Maximum of unsigned integral types - */ -#define UINT8_MAX (255) -#define UINT16_MAX (65535) -#define UINT32_MAX (4294967295U) -#define UINT64_MAX (18446744073709551615UL) - -/** - * NULL - */ -#define NULL 0 - -/** Indicates the instruction was processed successfully */ -#define SUCCESS 0 - -/** - * Builtin program status values occupy the upper 32 bits of the program return - * value. Programs may define their own error values but they must be confined - * to the lower 32 bits. - */ -#define TO_BUILTIN(error) ((uint64_t)(error) << 32) - -/** Note: Not applicable to programs written in C */ -#define ERROR_CUSTOM_ZERO TO_BUILTIN(1) -/** The arguments provided to a program instruction were invalid */ -#define ERROR_INVALID_ARGUMENT TO_BUILTIN(2) -/** An instruction's data contents were invalid */ -#define ERROR_INVALID_INSTRUCTION_DATA TO_BUILTIN(3) -/** An account's data contents were invalid */ -#define ERROR_INVALID_ACCOUNT_DATA TO_BUILTIN(4) -/** An account's data was too small */ -#define ERROR_ACCOUNT_DATA_TOO_SMALL TO_BUILTIN(5) -/** An account's balance was too small to complete the instruction */ -#define ERROR_INSUFFICIENT_FUNDS TO_BUILTIN(6) -/** The account did not have the expected program id */ -#define ERROR_INCORRECT_PROGRAM_ID TO_BUILTIN(7) -/** A signature was required but not found */ -#define ERROR_MISSING_REQUIRED_SIGNATURES TO_BUILTIN(8) -/** An initialize instruction was sent to an account that has already been initialized */ -#define ERROR_ACCOUNT_ALREADY_INITIALIZED TO_BUILTIN(9) -/** An attempt to operate on an account that hasn't been initialized */ -#define ERROR_UNINITIALIZED_ACCOUNT TO_BUILTIN(10) -/** The instruction expected additional account keys */ -#define ERROR_NOT_ENOUGH_ACCOUNT_KEYS TO_BUILTIN(11) -/** Note: Not applicable to programs written in C */ -#define ERROR_ACCOUNT_BORROW_FAILED TO_BUILTIN(12) -/** The length of the seed is too long for address generation */ -#define MAX_SEED_LENGTH_EXCEEDED TO_BUILTIN(13) -/** Provided seeds do not result in a valid address */ -#define INVALID_SEEDS TO_BUILTIN(14) - -/** - * Boolean type - */ -#ifndef __cplusplus -#include -#endif - -/** - * Computes the number of elements in an array - */ -#define SOL_ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) - -/** - * Byte array pointer and string - */ -typedef struct { - const uint8_t *addr; /** bytes */ - uint64_t len; /** number of bytes */ -} SolBytes; - -#ifdef __cplusplus -} -#endif - -/**@}*/ diff --git a/sdk/bpf/c/inc/solana_sdk.h b/sdk/bpf/c/inc/solana_sdk.h deleted file mode 100644 index 98476758fd432b..00000000000000 --- a/sdk/bpf/c/inc/solana_sdk.h +++ /dev/null
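A short sketch tying the error-code conventions above to the bump allocator from string.h earlier in this diff; the custom error code is illustrative (custom codes live in the lower 32 bits, and zero is reserved by ERROR_CUSTOM_ZERO):

#include <solana_sdk.h>

#define MY_ERROR_OUT_OF_MEMORY 42 /* illustrative custom code, lower 32 bits */

static uint64_t with_scratch_buffer(uint64_t n) {
  uint8_t *scratch = (uint8_t *)sol_calloc(n, 1);
  if (NULL == scratch) {
    return MY_ERROR_OUT_OF_MEMORY;
  }
  /* ... use scratch ... */
  sol_free(scratch); /* a no-op: the bump allocator never frees */
  return SUCCESS;
}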
@@ -1,23 +0,0 @@ -#pragma once -/** - * @brief Solana C-based BPF program types and utility functions - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/**@}*/ diff --git a/sdk/bpf/c/inc/stdio.h b/sdk/bpf/c/inc/stdio.h deleted file mode 100644 index cd3fda27071dd0..00000000000000 --- a/sdk/bpf/c/inc/stdio.h +++ /dev/null @@ -1,4 +0,0 @@ -#pragma once -typedef void *FILE; - -int printf(const char * restrict format, ... ); diff --git a/sdk/bpf/c/inc/stdlib.h b/sdk/bpf/c/inc/stdlib.h deleted file mode 100644 index 5d2570c5fed5d7..00000000000000 --- a/sdk/bpf/c/inc/stdlib.h +++ /dev/null @@ -1,2 +0,0 @@ -#pragma once -#include diff --git a/sdk/bpf/c/inc/string.h b/sdk/bpf/c/inc/string.h deleted file mode 100644 index 17cc5489c98fa4..00000000000000 --- a/sdk/bpf/c/inc/string.h +++ /dev/null @@ -1,7 +0,0 @@ -#pragma once -#include - -#define memcpy sol_memcpy -#define memset sol_memset -#define strlen sol_strlen - diff --git a/sdk/bpf/c/inc/sys/param.h b/sdk/bpf/c/inc/sys/param.h deleted file mode 100644 index 6f70f09beec221..00000000000000 --- a/sdk/bpf/c/inc/sys/param.h +++ /dev/null @@ -1 +0,0 @@ -#pragma once diff --git a/sdk/bpf/c/inc/wchar.h b/sdk/bpf/c/inc/wchar.h deleted file mode 100644 index 6f70f09beec221..00000000000000 --- a/sdk/bpf/c/inc/wchar.h +++ /dev/null @@ -1 +0,0 @@ -#pragma once diff --git a/sdk/bpf/env.sh b/sdk/bpf/env.sh deleted file mode 100644 index 3bdf0555f877a6..00000000000000 --- a/sdk/bpf/env.sh +++ /dev/null @@ -1,16 +0,0 @@ -# -# Configures the BPF SDK environment -# - -if [ -z "$bpf_sdk" ]; then - bpf_sdk=. -fi - -# Ensure the sdk is installed -"$bpf_sdk"/scripts/install.sh - -# Use the SDK's version of llvm to build the compiler-builtins for BPF -export CC="$bpf_sdk/dependencies/bpf-tools/llvm/bin/clang" -export AR="$bpf_sdk/dependencies/bpf-tools/llvm/bin/llvm-ar" -export OBJDUMP="$bpf_sdk/dependencies/bpf-tools/llvm/bin/llvm-objdump" -export OBJCOPY="$bpf_sdk/dependencies/bpf-tools/llvm/bin/llvm-objcopy" diff --git a/sdk/bpf/scripts/dump.sh b/sdk/bpf/scripts/dump.sh deleted file mode 100755 index 98686c4f14847f..00000000000000 --- a/sdk/bpf/scripts/dump.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env bash - -bpf_sdk=$(cd "$(dirname "$0")/.." && pwd) -# shellcheck source=sdk/bpf/env.sh -source "$bpf_sdk"/env.sh - -so=$1 -dump=$2 -if [[ -z $so ]] || [[ -z $dump ]]; then - echo "Usage: $0 bpf-program.so dump.txt" >&2 - exit 1 -fi - -if [[ ! -r $so ]]; then - echo "Error: File not found or not readable: $so" >&2 - exit 1 -fi - -if ! command -v rustfilt > /dev/null; then - echo "Error: rustfilt not found. It can be installed by running: cargo install rustfilt" >&2 - exit 1 -fi - -set -e -out_dir=$(dirname "$dump") -if [[ ! -d $out_dir ]]; then - mkdir -p "$out_dir" -fi -dump_mangled=$dump.mangled - -( - set -ex - ls -la "$so" > "$dump_mangled" - "$bpf_sdk"/dependencies/bpf-tools/llvm/bin/llvm-readelf -aW "$so" >>"$dump_mangled" - "$OBJDUMP" --print-imm-hex --source --disassemble "$so" >> "$dump_mangled" - sed s/://g < "$dump_mangled" | rustfilt > "$dump" -) -rm -f "$dump_mangled" - -if [[ !
-f "$dump" ]]; then - echo "Error: Failed to create $dump" >&2 - exit 1 -fi - -echo >&2 -echo "Wrote $dump" >&2 diff --git a/sdk/bpf/scripts/install.sh b/sdk/bpf/scripts/install.sh deleted file mode 100755 index b7b59362c07a38..00000000000000 --- a/sdk/bpf/scripts/install.sh +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/env bash - -mkdir -p "$(dirname "$0")"/../dependencies -cd "$(dirname "$0")"/../dependencies - -unameOut="$(uname -s)" -case "${unameOut}" in - Linux*) - criterion_suffix= - machine=linux;; - Darwin*) - criterion_suffix= - machine=osx;; - MINGW*) - criterion_suffix=-mingw - machine=windows;; - *) - criterion_suffix= - machine=linux -esac -unameOut="$(uname -m)" -case "${unameOut}" in - arm64*) - arch=aarch64;; - *) - arch=x86_64 -esac - -download() { - declare url="$1/$2/$3" - declare filename=$3 - declare wget_args=( - "$url" -O "$filename" - "--progress=dot:giga" - "--retry-connrefused" - "--read-timeout=30" - ) - declare curl_args=( - -L "$url" -o "$filename" - ) - if hash wget 2>/dev/null; then - wget_or_curl="wget ${wget_args[*]}" - elif hash curl 2>/dev/null; then - wget_or_curl="curl ${curl_args[*]}" - else - echo "Error: Neither curl nor wget were found" >&2 - return 1 - fi - - set -x - if $wget_or_curl; then - tar --strip-components 1 -jxf "$filename" || return 1 - { set +x; } 2>/dev/null - rm -rf "$filename" - return 0 - fi - return 1 -} - -get() { - declare version=$1 - declare dirname=$2 - declare job=$3 - declare cache_root=~/.cache/solana - declare cache_dirname="$cache_root/$version/$dirname" - declare cache_partial_dirname="$cache_dirname"_partial - - if [[ -r $cache_dirname ]]; then - ln -sf "$cache_dirname" "$dirname" || return 1 - return 0 - fi - - rm -rf "$cache_partial_dirname" || return 1 - mkdir -p "$cache_partial_dirname" || return 1 - pushd "$cache_partial_dirname" - - if $job; then - popd - mv "$cache_partial_dirname" "$cache_dirname" || return 1 - ln -sf "$cache_dirname" "$dirname" || return 1 - return 0 - fi - popd - return 1 -} - -# Install Criterion -if [[ $machine == "linux" ]]; then - version=v2.3.3 -else - version=v2.3.2 -fi -if [[ ! -e criterion-$version.md || ! -e criterion ]]; then - ( - set -e - rm -rf criterion* - job="download \ - https://github.com/Snaipe/Criterion/releases/download \ - $version \ - criterion-$version-$machine$criterion_suffix-x86_64.tar.bz2 \ - criterion" - get $version criterion "$job" - ) - exitcode=$? - if [[ $exitcode -ne 0 ]]; then - exit 1 - fi - touch criterion-$version.md -fi - -# Install Rust-BPF -version=v1.41 -if [[ ! -e bpf-tools-$version.md || ! -e bpf-tools ]]; then - ( - set -e - rm -rf bpf-tools* - rm -rf xargo - job="download \ - https://github.com/anza-xyz/platform-tools/releases/download \ - $version \ - platform-tools-${machine}-${arch}.tar.bz2 \ - bpf-tools" - get $version bpf-tools "$job" - ) - exitcode=$? - if [[ $exitcode -ne 0 ]]; then - exit 1 - fi - touch bpf-tools-$version.md - set -ex - ./bpf-tools/rust/bin/rustc --version - ./bpf-tools/rust/bin/rustc --print sysroot - set +e - rustup toolchain uninstall bpf - set -e - rustup toolchain link bpf bpf-tools/rust -fi - -exit 0 diff --git a/sdk/bpf/scripts/objcopy.sh b/sdk/bpf/scripts/objcopy.sh deleted file mode 100755 index c818991ccffd3b..00000000000000 --- a/sdk/bpf/scripts/objcopy.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env bash - -bpf_sdk=$(cd "$(dirname "$0")/.." 
&& pwd) -# shellcheck source=sdk/bpf/env.sh -source "$bpf_sdk"/env.sh -exec "$bpf_sdk"/dependencies/bpf-tools/llvm/bin/llvm-objcopy "$@" diff --git a/sdk/bpf/scripts/package.sh b/sdk/bpf/scripts/package.sh deleted file mode 100755 index 040ac1cb1efef6..00000000000000 --- a/sdk/bpf/scripts/package.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env bash -set -ex - -cd "$(dirname "$0")"/../../.. - -echo --- Creating bpf-sdk tarball - -rm -rf bpf-sdk.tar.bz2 bpf-sdk/ -mkdir bpf-sdk/ -cp LICENSE bpf-sdk/ - -( - ci/crate-version.sh sdk/Cargo.toml - git rev-parse HEAD -) > bpf-sdk/version.txt - -cp -a sdk/bpf/* bpf-sdk/ - -tar jvcf bpf-sdk.tar.bz2 bpf-sdk/ diff --git a/sdk/bpf/scripts/strip.sh b/sdk/bpf/scripts/strip.sh deleted file mode 100755 index eafa4fd822057d..00000000000000 --- a/sdk/bpf/scripts/strip.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash - -so=$1 -if [[ ! -r $so ]]; then - echo "Error: file not found: $so" - exit 1 -fi -so_stripped=$2 -if [[ -z $so_stripped ]]; then - echo "Usage: $0 unstripped.so stripped.so" - exit 1 -fi - -bpf_sdk=$(cd "$(dirname "$0")/.." && pwd) -# shellcheck source=sdk/bpf/env.sh -source "$bpf_sdk"/env.sh - -set -e -out_dir=$(dirname "$so_stripped") -if [[ ! -d $out_dir ]]; then - mkdir -p "$out_dir" -fi -"$bpf_sdk"/dependencies/bpf-tools/llvm/bin/llvm-objcopy --strip-all "$so" "$so_stripped" diff --git a/sdk/cargo-build-bpf/.gitignore b/sdk/cargo-build-bpf/.gitignore deleted file mode 100644 index 03314f77b5aa45..00000000000000 --- a/sdk/cargo-build-bpf/.gitignore +++ /dev/null @@ -1 +0,0 @@ -Cargo.lock diff --git a/sdk/cargo-build-bpf/Cargo.toml b/sdk/cargo-build-bpf/Cargo.toml deleted file mode 100644 index a609ee13c9c3d2..00000000000000 --- a/sdk/cargo-build-bpf/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "solana-cargo-build-bpf" -description = "Compile a local package and all of its dependencies using the Solana SBF SDK" -publish = false -version = { workspace = true } -authors = { workspace = true } -repository = { workspace = true } -homepage = { workspace = true } -license = { workspace = true } -edition = { workspace = true } - -[dependencies] -log = { workspace = true, features = ["std"] } -solana-logger = { workspace = true } - -[features] -program = [] - -[[bin]] -name = "cargo-build-bpf" -path = "src/main.rs" diff --git a/sdk/cargo-build-bpf/src/main.rs b/sdk/cargo-build-bpf/src/main.rs deleted file mode 100644 index 3635901eeceb4d..00000000000000 --- a/sdk/cargo-build-bpf/src/main.rs +++ /dev/null @@ -1,58 +0,0 @@ -use { - log::*, - std::{ - env, - path::PathBuf, - process::{exit, Command, Stdio}, - }, -}; - -fn main() { - solana_logger::setup(); - warn!("cargo-build-bpf is deprecated. Please, use cargo-build-sbf"); - let mut args = env::args() - .map(|x| { - let s = x; - s.replace("--bpf", "--sbf") - }) - .collect::>(); - let program = if let Some(arg0) = args.first() { - let arg0 = arg0.replace("build-bpf", "build-sbf"); - args.remove(0); - PathBuf::from(arg0) - } else { - PathBuf::from("cargo-build-sbf") - }; - // When run as a cargo subcommand, the first program argument is the subcommand name. 
- // Remove it - if let Some(arg0) = args.first() { - if arg0 == "build-bpf" { - args.remove(0); - } - } - info!("cargo-build-bpf child: {}", program.display()); - for a in &args { - info!(" {}", a); - } - let child = Command::new(&program) - .args(&args) - .stdout(Stdio::piped()) - .spawn() - .unwrap_or_else(|err| { - error!("Failed to execute {}: {}", program.display(), err); - exit(1); - }); - - let output = child.wait_with_output().expect("failed to wait on child"); - info!( - "{}", - output - .stdout - .as_slice() - .iter() - .map(|&c| c as char) - .collect::<String>() - ); - let code = output.status.code().unwrap_or(1); - exit(code); -} diff --git a/sdk/cargo-build-sbf/src/main.rs b/sdk/cargo-build-sbf/src/main.rs index 0da59ff230b385..6d02499c8fbc6d 100644 --- a/sdk/cargo-build-sbf/src/main.rs +++ b/sdk/cargo-build-sbf/src/main.rs @@ -156,7 +156,7 @@ fn find_installed_platform_tools() -> Vec<String> { } fn get_latest_platform_tools_version() -> Result<String, String> { - let url = "https://github.com/solana-labs/platform-tools/releases/latest"; + let url = "https://github.com/anza-xyz/platform-tools/releases/latest"; let resp = reqwest::blocking::get(url).map_err(|err| format!("Failed to GET {url}: {err}"))?; let path = std::path::Path::new(resp.url().path()); let version = path.file_name().unwrap().to_string_lossy().to_string(); @@ -618,7 +618,7 @@ fn build_solana_package( install_if_missing( config, package, - "https://github.com/solana-labs/platform-tools/releases/download", + "https://github.com/anza-xyz/platform-tools/releases/download", platform_tools_download_file_name.as_str(), &target_path, ) @@ -913,7 +913,7 @@ fn main() { // The following line is scanned by CI configuration script to // separate cargo caches according to the version of platform-tools. - let platform_tools_version = String::from("v1.41"); + let platform_tools_version = String::from("v1.42"); let rust_base_version = get_base_rust_version(platform_tools_version.as_str()); let version = format!( "{}\nplatform-tools {}\n{}", diff --git a/sdk/cargo-test-bpf/src/main.rs b/sdk/cargo-test-bpf/src/main.rs deleted file mode 100644 index 21b8735787de19..00000000000000 --- a/sdk/cargo-test-bpf/src/main.rs +++ /dev/null @@ -1,61 +0,0 @@ -use std::{ - env, - path::PathBuf, - process::{exit, Command, Stdio}, -}; - -fn main() { - println!("Warning: cargo-test-bpf is deprecated. Please, use cargo-test-sbf"); - let mut args = env::args() - .map(|x| { - let s = x; - s.replace("--bpf", "--sbf") - }) - .collect::<Vec<String>>(); - if let Ok(cargo_build_bpf) = env::var("CARGO_BUILD_BPF") { - let cargo_build_sbf = cargo_build_bpf.replace("build-bpf", "build-sbf"); - env::set_var("CARGO_BUILD_SBF", cargo_build_sbf); - } - let program = if let Some(arg0) = args.first() { - let cargo_test_sbf = arg0.replace("test-bpf", "test-sbf"); - let cargo_build_sbf = cargo_test_sbf.replace("test-sbf", "build-sbf"); - env::set_var("CARGO_BUILD_SBF", cargo_build_sbf); - args.remove(0); - PathBuf::from(cargo_test_sbf) - } else { - PathBuf::from("cargo-test-sbf") - }; - // When run as a cargo subcommand, the first program argument is the subcommand name.
- // Remove it - if let Some(arg0) = args.first() { - if arg0 == "test-bpf" { - args.remove(0); - } - } - print!("cargo-test-bpf child: {}", program.display()); - for a in &args { - print!(" {a}"); - } - println!(); - let child = Command::new(&program) - .args(&args) - .stdout(Stdio::piped()) - .spawn() - .unwrap_or_else(|err| { - eprintln!("Failed to execute {}: {}", program.display(), err); - exit(1); - }); - - let output = child.wait_with_output().expect("failed to wait on child"); - println!( - "{}", - output - .stdout - .as_slice() - .iter() - .map(|&c| c as char) - .collect::<String>() - ); - let code = output.status.code().unwrap_or(1); - exit(code); -} diff --git a/sdk/decode-error/Cargo.toml b/sdk/decode-error/Cargo.toml new file mode 100644 index 00000000000000..13fb6370791604 --- /dev/null +++ b/sdk/decode-error/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "solana-decode-error" +description = "Solana DecodeError Trait" +documentation = "https://docs.rs/solana-decode-error" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +num-traits = { workspace = true } + +[dev-dependencies] +num-derive = { workspace = true } diff --git a/sdk/program/src/decode_error.rs b/sdk/decode-error/src/lib.rs similarity index 100% rename from sdk/program/src/decode_error.rs rename to sdk/decode-error/src/lib.rs diff --git a/sdk/docker-solana/Dockerfile b/sdk/docker-solana/Dockerfile index 81f2ed9d0d22d1..03baa60fe5d060 100644 --- a/sdk/docker-solana/Dockerfile +++ b/sdk/docker-solana/Dockerfile @@ -33,7 +33,7 @@ EXPOSE 8008/udp # tpu_vote EXPOSE 8009/udp -RUN apt update && \ +RUN apt-get update && \ apt-get install -y bzip2 libssl-dev ca-certificates && \ rm -rf /var/lib/apt/lists/* diff --git a/sdk/msg/Cargo.toml b/sdk/msg/Cargo.toml new file mode 100644 index 00000000000000..afa4a94d07c790 --- /dev/null +++ b/sdk/msg/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "solana-msg" +description = "Solana msg macro." +documentation = "https://docs.rs/solana-msg" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[target.'cfg(target_os = "solana")'.dependencies] +solana-define-syscall = { workspace = true } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/sdk/msg/src/lib.rs b/sdk/msg/src/lib.rs new file mode 100644 index 00000000000000..f686d03aeed4b9 --- /dev/null +++ b/sdk/msg/src/lib.rs @@ -0,0 +1,51 @@ +#[cfg(target_os = "solana")] +use solana_define_syscall::define_syscall; +/// Print a message to the log. +/// +/// Supports simple strings as well as Rust [format strings][fs]. When passed a +/// single expression it will be passed directly to [`sol_log`]. The expression +/// must have type `&str`, and is typically used for logging static strings. +/// When passed something other than an expression, particularly +/// a sequence of expressions, the tokens will be passed through the +/// [`format!`] macro before being logged with `sol_log`. +/// +/// [fs]: https://doc.rust-lang.org/std/fmt/ +/// [`format!`]: https://doc.rust-lang.org/std/fmt/fn.format.html +/// +/// Note that Rust's formatting machinery is relatively CPU-intensive +/// for constrained environments like the Solana VM.
+/// +/// # Examples +/// +/// ``` +/// use solana_msg::msg; +/// +/// // The fast form +/// msg!("verifying multisig"); +/// +/// // With formatting +/// let err = "not enough signers"; +/// msg!("multisig failed: {}", err); +/// ``` +#[macro_export] +macro_rules! msg { + ($msg:expr) => { + $crate::sol_log($msg) + }; + ($($arg:tt)*) => ($crate::sol_log(&format!($($arg)*))); +} + +#[cfg(target_os = "solana")] +define_syscall!(fn sol_log_(message: *const u8, len: u64)); + +/// Print a string to the log. +#[inline] +pub fn sol_log(message: &str) { + #[cfg(target_os = "solana")] + unsafe { + sol_log_(message.as_ptr(), message.len() as u64); + } + + #[cfg(not(target_os = "solana"))] + println!("{message}"); +} diff --git a/sdk/program-memory/Cargo.toml b/sdk/program-memory/Cargo.toml new file mode 100644 index 00000000000000..f47f6adfd792ab --- /dev/null +++ b/sdk/program-memory/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "solana-program-memory" +description = "Basic low-level memory operations for Solana." +documentation = "https://docs.rs/solana-program-memory" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +num-traits = { workspace = true } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[target.'cfg(target_os = "solana")'.dependencies] +solana-define-syscall = { workspace = true } diff --git a/sdk/program/src/program_memory.rs b/sdk/program-memory/src/lib.rs similarity index 54% rename from sdk/program/src/program_memory.rs rename to sdk/program-memory/src/lib.rs index f0b023fa82ad53..d8bf1d47f7b2d7 100644 --- a/sdk/program/src/program_memory.rs +++ b/sdk/program-memory/src/lib.rs @@ -3,6 +3,72 @@ //! Within the SBF environment, these are implemented as syscalls and executed by //! the runtime in native code. +#[cfg(target_os = "solana")] +pub mod syscalls { + use solana_define_syscall::define_syscall; + define_syscall!(fn sol_memcpy_(dst: *mut u8, src: *const u8, n: u64)); + define_syscall!(fn sol_memmove_(dst: *mut u8, src: *const u8, n: u64)); + define_syscall!(fn sol_memcmp_(s1: *const u8, s2: *const u8, n: u64, result: *mut i32)); + define_syscall!(fn sol_memset_(s: *mut u8, c: u8, n: u64)); +} + +/// Check that two regions do not overlap. +/// +/// Hidden to share with bpf_loader without being part of the API surface. +#[doc(hidden)] +pub fn is_nonoverlapping<N>(src: N, src_len: N, dst: N, dst_len: N) -> bool +where + N: Ord + num_traits::SaturatingSub, +{ + // If the absolute distance between the ptrs is at least as big as the size of the other, + // they do not overlap.
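+ // For example, src = 10 with src_len = 3 covers [10, 13) and dst = 8 with dst_len = 3 covers [8, 11): + // src > dst but src - dst = 2 < dst_len = 3, so the regions overlap and this returns false.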
+ if src > dst { + src.saturating_sub(&dst) >= dst_len + } else { + dst.saturating_sub(&src) >= src_len + } +} + +#[cfg(not(target_os = "solana"))] +#[allow(clippy::arithmetic_side_effects)] +pub mod stubs { + use super::is_nonoverlapping; + /// # Safety + pub unsafe fn sol_memcpy(dst: *mut u8, src: *const u8, n: usize) { + // cannot be overlapping + assert!( + is_nonoverlapping(src as usize, n, dst as usize, n), + "memcpy does not support overlapping regions" + ); + std::ptr::copy_nonoverlapping(src, dst, n); + } + /// # Safety + pub unsafe fn sol_memmove(dst: *mut u8, src: *const u8, n: usize) { + std::ptr::copy(src, dst, n); + } + /// # Safety + pub unsafe fn sol_memcmp(s1: *const u8, s2: *const u8, n: usize, result: *mut i32) { + let mut i = 0; + while i < n { + let a = *s1.add(i); + let b = *s2.add(i); + if a != b { + *result = a as i32 - b as i32; + return; + } + i += 1; + } + *result = 0 + } + /// # Safety + pub unsafe fn sol_memset(s: *mut u8, c: u8, n: usize) { + let s = std::slice::from_raw_parts_mut(s, n); + for val in s.iter_mut().take(n) { + *val = c; + } + } +} + /// Like C `memcpy`. /// /// # Arguments @@ -35,11 +101,13 @@ pub fn sol_memcpy(dst: &mut [u8], src: &[u8], n: usize) { #[cfg(target_os = "solana")] unsafe { - crate::syscalls::sol_memcpy_(dst.as_mut_ptr(), src.as_ptr(), n as u64); + syscalls::sol_memcpy_(dst.as_mut_ptr(), src.as_ptr(), n as u64); } #[cfg(not(target_os = "solana"))] - crate::program_stubs::sol_memcpy(dst.as_mut_ptr(), src.as_ptr(), n); + unsafe { + stubs::sol_memcpy(dst.as_mut_ptr(), src.as_ptr(), n); + } } /// Like C `memmove`. @@ -64,10 +132,10 @@ pub fn sol_memcpy(dst: &mut [u8], src: &[u8], n: usize) { #[inline] pub unsafe fn sol_memmove(dst: *mut u8, src: *mut u8, n: usize) { #[cfg(target_os = "solana")] - crate::syscalls::sol_memmove_(dst, src, n as u64); + syscalls::sol_memmove_(dst, src, n as u64); #[cfg(not(target_os = "solana"))] - crate::program_stubs::sol_memmove(dst, src, n); + stubs::sol_memmove(dst, src, n); } /// Like C `memcmp`. 
@@ -100,11 +168,13 @@ pub fn sol_memcmp(s1: &[u8], s2: &[u8], n: usize) -> i32 {
     #[cfg(target_os = "solana")]
     unsafe {
-        crate::syscalls::sol_memcmp_(s1.as_ptr(), s2.as_ptr(), n as u64, &mut result as *mut i32);
+        syscalls::sol_memcmp_(s1.as_ptr(), s2.as_ptr(), n as u64, &mut result as *mut i32);
     }
 
     #[cfg(not(target_os = "solana"))]
-    crate::program_stubs::sol_memcmp(s1.as_ptr(), s2.as_ptr(), n, &mut result as *mut i32);
+    unsafe {
+        stubs::sol_memcmp(s1.as_ptr(), s2.as_ptr(), n, &mut result as *mut i32);
+    }
 
     result
 }
@@ -137,9 +207,31 @@ pub fn sol_memcmp(s1: &[u8], s2: &[u8], n: usize) -> i32 {
 pub fn sol_memset(s: &mut [u8], c: u8, n: usize) {
     #[cfg(target_os = "solana")]
     unsafe {
-        crate::syscalls::sol_memset_(s.as_mut_ptr(), c, n as u64);
+        syscalls::sol_memset_(s.as_mut_ptr(), c, n as u64);
     }
 
     #[cfg(not(target_os = "solana"))]
-    crate::program_stubs::sol_memset(s.as_mut_ptr(), c, n);
+    unsafe {
+        stubs::sol_memset(s.as_mut_ptr(), c, n);
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_is_nonoverlapping() {
+        for dst in 0..8 {
+            assert!(is_nonoverlapping(10, 3, dst, 3));
+        }
+        for dst in 8..13 {
+            assert!(!is_nonoverlapping(10, 3, dst, 3));
+        }
+        for dst in 13..20 {
+            assert!(is_nonoverlapping(10, 3, dst, 3));
+        }
+        assert!(is_nonoverlapping::<u8>(255, 3, 254, 1));
+        assert!(!is_nonoverlapping::<u8>(255, 2, 254, 3));
+    }
+}
diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml
index a798bcada04608..e6c76f6d2a42a5 100644
--- a/sdk/program/Cargo.toml
+++ b/sdk/program/Cargo.toml
@@ -9,7 +9,7 @@ repository = { workspace = true }
 homepage = { workspace = true }
 license = { workspace = true }
 edition = { workspace = true }
-rust-version = "1.75.0" # solana platform-tools rust version
+rust-version = "1.75.0"  # solana platform-tools rust version
 
 [dependencies]
 bincode = { workspace = true }
@@ -25,15 +25,22 @@ log = { workspace = true }
 memoffset = { workspace = true }
 num-derive = { workspace = true }
 num-traits = { workspace = true, features = ["i128"] }
+qualifier_attr = { workspace = true, optional = true }
 serde = { workspace = true }
 serde_bytes = { workspace = true }
 serde_derive = { workspace = true }
 sha2 = { workspace = true }
 sha3 = { workspace = true }
+solana-atomic-u64 = { workspace = true }
+solana-decode-error = { workspace = true }
 solana-frozen-abi = { workspace = true, optional = true }
 solana-frozen-abi-macro = { workspace = true, optional = true }
+solana-msg = { workspace = true }
+solana-program-memory = { workspace = true }
 solana-sanitize = { workspace = true }
 solana-sdk-macro = { workspace = true }
+solana-secp256k1-recover = { workspace = true }
+solana-short-vec = { workspace = true }
 thiserror = { workspace = true }
 
 # This is currently needed to build on-chain programs reliably.
@@ -47,14 +54,9 @@ getrandom = { workspace = true, features = ["custom"] } solana-define-syscall = { workspace = true } [target.'cfg(not(target_os = "solana"))'.dependencies] -ark-bn254 = { workspace = true } -ark-ec = { workspace = true } -ark-ff = { workspace = true } -ark-serialize = { workspace = true } base64 = { workspace = true, features = ["alloc", "std"] } bitflags = { workspace = true } curve25519-dalek = { workspace = true } -libsecp256k1 = { workspace = true } num-bigint = { workspace = true } rand = { workspace = true } @@ -94,4 +96,5 @@ crate-type = ["cdylib", "rlib"] [features] default = ["borsh"] borsh = ["dep:borsh", "dep:borsh0-10"] -frozen-abi = ["dep:solana-frozen-abi", "dep:solana-frozen-abi-macro"] +dev-context-only-utils = ["dep:qualifier_attr"] +frozen-abi = ["dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", "solana-short-vec/frozen-abi"] diff --git a/sdk/program/src/account_info.rs b/sdk/program/src/account_info.rs index 8be88349b0fd22..485195381a9f64 100644 --- a/sdk/program/src/account_info.rs +++ b/sdk/program/src/account_info.rs @@ -3,8 +3,9 @@ use { crate::{ clock::Epoch, debug_account_data::*, entrypoint::MAX_PERMITTED_DATA_INCREASE, - program_error::ProgramError, program_memory::sol_memset, pubkey::Pubkey, + program_error::ProgramError, pubkey::Pubkey, }, + solana_program_memory::sol_memset, std::{ cell::{Ref, RefCell, RefMut}, fmt, diff --git a/sdk/program/src/blake3.rs b/sdk/program/src/blake3.rs index 90e19b19e74679..7bbb90768da050 100644 --- a/sdk/program/src/blake3.rs +++ b/sdk/program/src/blake3.rs @@ -102,7 +102,7 @@ impl Hash { /// unique Hash for tests and benchmarks. pub fn new_unique() -> Self { - use crate::atomic_u64::AtomicU64; + use solana_atomic_u64::AtomicU64; static I: AtomicU64 = AtomicU64::new(1); let mut b = [0u8; HASH_BYTES]; diff --git a/sdk/program/src/borsh0_9.rs b/sdk/program/src/borsh0_9.rs deleted file mode 100644 index d7d1e97013f898..00000000000000 --- a/sdk/program/src/borsh0_9.rs +++ /dev/null @@ -1,44 +0,0 @@ -#![allow(clippy::arithmetic_side_effects)] -//! Utilities for the [borsh] serialization format, version 0.9. -//! -//! This file is provided for backwards compatibility with types that still use -//! borsh 0.9, even though this crate canonically uses borsh 0.10. -//! -//! 
[borsh]: https://borsh.io/
-use {
-    crate::borsh::{
-        impl_get_instance_packed_len, impl_get_packed_len_v0, impl_try_from_slice_unchecked,
-    },
-    borsh0_9::maybestd::io,
-};
-
-impl_get_packed_len_v0!(
-    borsh0_9,
-    #[deprecated(
-        since = "1.17.0",
-        note = "Please upgrade to Borsh 1.X and use `borsh1::get_packed_len` instead"
-    )]
-);
-impl_try_from_slice_unchecked!(
-    borsh0_9,
-    io,
-    #[deprecated(
-        since = "1.17.0",
-        note = "Please upgrade to Borsh 1.X and use `borsh1::try_from_slice_unchecked` instead"
-    )]
-);
-impl_get_instance_packed_len!(
-    borsh0_9,
-    io,
-    #[deprecated(
-        since = "1.17.0",
-        note = "Please upgrade to Borsh 1.X and use `borsh1::get_instance_packed_len` instead"
-    )]
-);
-
-#[cfg(test)]
-#[allow(deprecated)]
-mod tests {
-    use {crate::borsh::impl_tests, borsh0_9::maybestd::io};
-    impl_tests!(borsh0_9, io);
-}
diff --git a/sdk/program/src/bpf_loader_upgradeable.rs b/sdk/program/src/bpf_loader_upgradeable.rs
index d0f95ffe166db5..82e9292fde2429 100644
--- a/sdk/program/src/bpf_loader_upgradeable.rs
+++ b/sdk/program/src/bpf_loader_upgradeable.rs
@@ -82,42 +82,6 @@ impl UpgradeableLoaderState {
     pub const fn size_of_programdata(program_len: usize) -> usize {
         Self::size_of_programdata_metadata().saturating_add(program_len)
     }
-
-    /// Length of a Buffer account's data.
-    #[deprecated(since = "1.11.0", note = "Please use `size_of_buffer` instead")]
-    pub fn buffer_len(program_len: usize) -> Result<usize, InstructionError> {
-        Ok(Self::size_of_buffer(program_len))
-    }
-
-    /// Offset into the Buffer account's data of the program bits.
-    #[deprecated(
-        since = "1.11.0",
-        note = "Please use `size_of_buffer_metadata` instead"
-    )]
-    pub fn buffer_data_offset() -> Result<usize, InstructionError> {
-        Ok(Self::size_of_buffer_metadata())
-    }
-
-    /// Length of a Program account's data.
-    #[deprecated(since = "1.11.0", note = "Please use `size_of_program` instead")]
-    pub fn program_len() -> Result<usize, InstructionError> {
-        Ok(Self::size_of_program())
-    }
-
-    /// Length of a ProgramData account's data.
-    #[deprecated(since = "1.11.0", note = "Please use `size_of_programdata` instead")]
-    pub fn programdata_len(program_len: usize) -> Result<usize, InstructionError> {
-        Ok(Self::size_of_programdata(program_len))
-    }
-
-    /// Offset into the ProgramData account's data of the program bits.
-    #[deprecated(
-        since = "1.11.0",
-        note = "Please use `size_of_programdata_metadata` instead"
-    )]
-    pub fn programdata_data_offset() -> Result<usize, InstructionError> {
-        Ok(Self::size_of_programdata_metadata())
-    }
 }
 
 /// Returns the program data address for a program ID
@@ -425,24 +389,6 @@ mod tests {
         assert_eq!(UpgradeableLoaderState::size_of_program() as u64, size);
     }
 
-    #[test]
-    #[allow(deprecated)]
-    fn test_account_lengths() {
-        assert_eq!(
-            4,
-            serialized_size(&UpgradeableLoaderState::Uninitialized).unwrap()
-        );
-        assert_eq!(36, UpgradeableLoaderState::program_len().unwrap());
-        assert_eq!(
-            45,
-            UpgradeableLoaderState::programdata_data_offset().unwrap()
-        );
-        assert_eq!(
-            45 + 42,
-            UpgradeableLoaderState::programdata_len(42).unwrap()
-        );
-    }
-
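Migration off the removed helpers is mechanical; a sketch (illustrative only, `account_sizes` is a hypothetical helper, not from the patch):

```rust
use solana_program::bpf_loader_upgradeable::UpgradeableLoaderState;

fn account_sizes(program_len: usize) -> (usize, usize) {
    // The removed buffer_len()/programdata_len() wrappers returned Result
    // but could never fail; the const size_of_* fns they delegated to are
    // the drop-in replacements.
    (
        UpgradeableLoaderState::size_of_buffer(program_len),
        UpgradeableLoaderState::size_of_programdata(program_len),
    )
}
```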
     fn assert_is_instruction<F>(
         is_instruction_fn: F,
         expected_instruction: UpgradeableLoaderInstruction,
diff --git a/sdk/program/src/clock.rs b/sdk/program/src/clock.rs
index e19c4c84486ced..5cf609d3000c26 100644
--- a/sdk/program/src/clock.rs
+++ b/sdk/program/src/clock.rs
@@ -33,10 +33,6 @@ static_assertions::const_assert_eq!(MS_PER_TICK, 6);
 
 /// The number of milliseconds per tick (6).
 pub const MS_PER_TICK: u64 = 1000 / DEFAULT_TICKS_PER_SECOND;
 
-#[deprecated(since = "1.15.0", note = "Please use DEFAULT_MS_PER_SLOT instead")]
-/// The expected duration of a slot (400 milliseconds).
-pub const SLOT_MS: u64 = DEFAULT_MS_PER_SLOT;
-
 // At 160 ticks/s, 64 ticks per slot implies that leader rotation and voting will happen
 // every 400 ms. A fast voting cadence ensures faster finality and convergence
 pub const DEFAULT_TICKS_PER_SLOT: u64 = 64;
diff --git a/sdk/program/src/example_mocks.rs b/sdk/program/src/example_mocks.rs
index ebde291ced11bd..b528812e36f6b3 100644
--- a/sdk/program/src/example_mocks.rs
+++ b/sdk/program/src/example_mocks.rs
@@ -274,44 +274,5 @@ pub mod solana_sdk {
         }
     }
 
-    #[deprecated(
-        since = "1.17.0",
-        note = "Please use `solana_sdk::address_lookup_table` instead"
-    )]
-    pub use crate::address_lookup_table as address_lookup_table_account;
-}
-
-#[deprecated(
-    since = "1.17.0",
-    note = "Please use `solana_sdk::address_lookup_table` instead"
-)]
-pub mod solana_address_lookup_table_program {
-    pub use crate::address_lookup_table::program::{check_id, id, ID};
-
-    pub mod state {
-        use {
-            crate::{instruction::InstructionError, pubkey::Pubkey},
-            std::borrow::Cow,
-        };
-
-        pub struct AddressLookupTable<'a> {
-            pub addresses: Cow<'a, [Pubkey]>,
-        }
-
-        impl<'a> AddressLookupTable<'a> {
-            pub fn serialize_for_tests(self) -> Result<Vec<u8>, InstructionError> {
-                let mut data = vec![];
-                self.addresses.iter().for_each(|address| {
-                    data.extend_from_slice(address.as_ref());
-                });
-                Ok(data)
-            }
-
-            pub fn deserialize(data: &'a [u8]) -> Result<AddressLookupTable<'a>, InstructionError> {
-                Ok(Self {
-                    addresses: Cow::Borrowed(bytemuck::try_cast_slice(data).unwrap()),
-                })
-            }
-        }
-    }
+    pub use crate::address_lookup_table;
 }
diff --git a/sdk/program/src/fee_calculator.rs b/sdk/program/src/fee_calculator.rs
index 361e00c98b6b47..5d753e4acaed3a 100644
--- a/sdk/program/src/fee_calculator.rs
+++ b/sdk/program/src/fee_calculator.rs
@@ -1,10 +1,7 @@
 //! Calculation of transaction fees.
 
 #![allow(clippy::arithmetic_side_effects)]
-use {
-    crate::{clock::DEFAULT_MS_PER_SLOT, ed25519_program, message::Message, secp256k1_program},
-    log::*,
-};
+use {crate::clock::DEFAULT_MS_PER_SLOT, log::*};
 
 #[repr(C)]
 #[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
@@ -24,29 +21,6 @@ impl FeeCalculator {
             lamports_per_signature,
         }
     }
-
-    #[deprecated(
-        since = "1.9.0",
-        note = "Please do not use, will no longer be available in the future"
-    )]
-    pub fn calculate_fee(&self, message: &Message) -> u64 {
-        let mut num_signatures: u64 = 0;
-        for instruction in &message.instructions {
-            let program_index = instruction.program_id_index as usize;
-            // Message may not be sanitized here
-            if program_index < message.account_keys.len() {
-                let id = message.account_keys[program_index];
-                if (secp256k1_program::check_id(&id) || ed25519_program::check_id(&id))
-                    && !instruction.data.is_empty()
-                {
-                    num_signatures += instruction.data[0] as u64;
-                }
-            }
-        }
-
-        self.lamports_per_signature
-            * (u64::from(message.header.num_required_signatures) + num_signatures)
-    }
 }
 
 #[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
@@ -188,10 +162,7 @@ impl FeeRateGovernor {
 
 #[cfg(test)]
 mod tests {
-    use {
-        super::*,
-        crate::{pubkey::Pubkey, system_instruction},
-    };
+    use super::*;
 
     #[test]
     fn test_fee_rate_governor_burn() {
@@ -205,64 +176,6 @@ mod tests {
         assert_eq!(fee_rate_governor.burn(2), (0, 2));
     }
 
-    #[test]
-    #[allow(deprecated)]
-    fn test_fee_calculator_calculate_fee() {
-        // Default: no fee.
-        let message = Message::default();
-        assert_eq!(FeeCalculator::default().calculate_fee(&message), 0);
-
-        // No signature, no fee.
-        assert_eq!(FeeCalculator::new(1).calculate_fee(&message), 0);
-
-        // One signature, a fee.
-        let pubkey0 = Pubkey::from([0; 32]);
-        let pubkey1 = Pubkey::from([1; 32]);
-        let ix0 = system_instruction::transfer(&pubkey0, &pubkey1, 1);
-        let message = Message::new(&[ix0], Some(&pubkey0));
-        assert_eq!(FeeCalculator::new(2).calculate_fee(&message), 2);
-
-        // Two signatures, double the fee.
-        let ix0 = system_instruction::transfer(&pubkey0, &pubkey1, 1);
-        let ix1 = system_instruction::transfer(&pubkey1, &pubkey0, 1);
-        let message = Message::new(&[ix0, ix1], Some(&pubkey0));
-        assert_eq!(FeeCalculator::new(2).calculate_fee(&message), 4);
-    }
-
-    #[test]
-    #[allow(deprecated)]
-    fn test_fee_calculator_calculate_fee_secp256k1() {
-        use crate::instruction::Instruction;
-        let pubkey0 = Pubkey::from([0; 32]);
-        let pubkey1 = Pubkey::from([1; 32]);
-        let ix0 = system_instruction::transfer(&pubkey0, &pubkey1, 1);
-        let mut secp_instruction = Instruction {
-            program_id: crate::secp256k1_program::id(),
-            accounts: vec![],
-            data: vec![],
-        };
-        let mut secp_instruction2 = Instruction {
-            program_id: crate::secp256k1_program::id(),
-            accounts: vec![],
-            data: vec![1],
-        };
-
-        let message = Message::new(
-            &[
-                ix0.clone(),
-                secp_instruction.clone(),
-                secp_instruction2.clone(),
-            ],
-            Some(&pubkey0),
-        );
-        assert_eq!(FeeCalculator::new(1).calculate_fee(&message), 2);
-
-        secp_instruction.data = vec![0];
-        secp_instruction2.data = vec![10];
-        let message = Message::new(&[ix0, secp_instruction, secp_instruction2], Some(&pubkey0));
-        assert_eq!(FeeCalculator::new(1).calculate_fee(&message), 11);
-    }
-
     #[test]
     fn test_fee_rate_governor_derived_default() {
         solana_logger::setup();
diff --git a/sdk/program/src/hash.rs b/sdk/program/src/hash.rs
index db2d9ff840dab1..27967c850376bb 100644
--- a/sdk/program/src/hash.rs
+++ b/sdk/program/src/hash.rs
@@ -136,7 +136,7 @@ impl Hash {
     /// unique Hash for tests and benchmarks.
     pub fn new_unique() -> Self {
-        use crate::atomic_u64::AtomicU64;
+        use solana_atomic_u64::AtomicU64;
         static I: AtomicU64 = AtomicU64::new(1);
 
         let mut b = [0u8; HASH_BYTES];
diff --git a/sdk/program/src/instruction.rs b/sdk/program/src/instruction.rs
index 2e63a29ad56c2e..2a686c75dec2e6 100644
--- a/sdk/program/src/instruction.rs
+++ b/sdk/program/src/instruction.rs
@@ -18,11 +18,8 @@ use crate::wasm_bindgen;
 #[cfg(feature = "borsh")]
 use borsh::BorshSerialize;
 use {
-    crate::{pubkey::Pubkey, short_vec},
-    bincode::serialize,
-    serde::Serialize,
-    solana_sanitize::Sanitize,
-    thiserror::Error,
+    crate::pubkey::Pubkey, bincode::serialize, serde::Serialize, solana_sanitize::Sanitize,
+    solana_short_vec as short_vec, thiserror::Error,
 };
 
 /// Reasons the runtime might have rejected an instruction.
@@ -518,14 +515,6 @@ impl Instruction {
             data: data.to_vec(),
         }
     }
-
-    #[deprecated(
-        since = "1.6.0",
-        note = "Please use another Instruction constructor instead, such as `Instruction::new_with_borsh`"
-    )]
-    pub fn new<T: Serialize>(program_id: Pubkey, data: &T, accounts: Vec<AccountMeta>) -> Self {
-        Self::new_with_bincode(program_id, data, accounts)
-    }
 }
 
 /// Addition that returns [`InstructionError::InsufficientFunds`] on overflow.
diff --git a/sdk/program/src/keccak.rs b/sdk/program/src/keccak.rs
index f246944b921a91..680d9bc63b2a16 100644
--- a/sdk/program/src/keccak.rs
+++ b/sdk/program/src/keccak.rs
@@ -100,7 +100,7 @@ impl Hash {
     /// unique Hash for tests and benchmarks.
pub fn new_unique() -> Self { - use crate::atomic_u64::AtomicU64; + use solana_atomic_u64::AtomicU64; static I: AtomicU64 = AtomicU64::new(1); let mut b = [0u8; HASH_BYTES]; diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index 1a0a7537f70dec..591c4563a973ae 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -471,8 +471,6 @@ extern crate self as solana_program; pub mod account_info; pub mod address_lookup_table; -pub mod alt_bn128; -pub(crate) mod atomic_u64; pub mod big_mod_exp; pub mod blake3; #[cfg(feature = "borsh")] @@ -487,7 +485,6 @@ pub mod bpf_loader_upgradeable; pub mod clock; pub mod compute_units; pub mod debug_account_data; -pub mod decode_error; pub mod ed25519_program; pub mod entrypoint; pub mod entrypoint_deprecated; @@ -512,7 +509,6 @@ pub mod native_token; pub mod nonce; pub mod program; pub mod program_error; -pub mod program_memory; pub mod program_option; pub mod program_pack; pub mod program_stubs; @@ -520,10 +516,8 @@ pub mod program_utils; pub mod pubkey; pub mod rent; pub mod secp256k1_program; -pub mod secp256k1_recover; pub mod serde_varint; pub mod serialize_utils; -pub mod short_vec; pub mod slot_hashes; pub mod slot_history; pub mod stable_layout; @@ -536,16 +530,15 @@ pub mod sysvar; pub mod vote; pub mod wasm; -#[deprecated( - since = "1.17.0", - note = "Please use `solana_sdk::address_lookup_table::AddressLookupTableAccount` instead" -)] -pub mod address_lookup_table_account { - pub use crate::address_lookup_table::AddressLookupTableAccount; -} - +pub use solana_msg::msg; +#[deprecated(since = "2.1.0", note = "Use `solana-program-memory` crate instead")] +pub use solana_program_memory as program_memory; #[deprecated(since = "2.1.0", note = "Use `solana-sanitize` crate instead")] pub use solana_sanitize as sanitize; +#[deprecated(since = "2.1.0", note = "Use `solana-secp256k1-recover` crate instead")] +pub use solana_secp256k1_recover as secp256k1_recover; +#[deprecated(since = "2.1.0", note = "Use `solana-short-vec` crate instead")] +pub use solana_short_vec as short_vec; #[cfg(target_arch = "wasm32")] pub use wasm_bindgen::prelude::wasm_bindgen; @@ -600,6 +593,8 @@ pub mod sdk_ids { } } +#[deprecated(since = "2.1.0", note = "Use `solana-decode-error` crate instead")] +pub use solana_decode_error as decode_error; /// Same as [`declare_id`] except that it reports that this ID has been deprecated. pub use solana_sdk_macro::program_declare_deprecated_id as declare_deprecated_id; /// Convenience macro to declare a static public key and functions to interact with it. diff --git a/sdk/program/src/log.rs b/sdk/program/src/log.rs index 4f3463f8dc1201..049e286c2a624a 100644 --- a/sdk/program/src/log.rs +++ b/sdk/program/src/log.rs @@ -34,71 +34,7 @@ //! [`Pubkey::log`]: crate::pubkey::Pubkey::log use crate::account_info::AccountInfo; - -/// Print a message to the log. -#[macro_export] -#[deprecated(since = "1.4.14", note = "Please use `msg` macro instead")] -macro_rules! info { - ($msg:expr) => { - $crate::log::sol_log($msg) - }; - ($arg1:expr, $arg2:expr, $arg3:expr, $arg4:expr, $arg5:expr) => { - $crate::log::sol_log_64( - $arg1 as u64, - $arg2 as u64, - $arg3 as u64, - $arg4 as u64, - $arg5 as u64, - ) - }; -} - -/// Print a message to the log. -/// -/// Supports simple strings as well as Rust [format strings][fs]. When passed a -/// single expression it will be passed directly to [`sol_log`]. The expression -/// must have type `&str`, and is typically used for logging static strings. 
-/// When passed something other than an expression, particularly
-/// a sequence of expressions, the tokens will be passed through the
-/// [`format!`] macro before being logged with `sol_log`.
-///
-/// [fs]: https://doc.rust-lang.org/std/fmt/
-/// [`format!`]: https://doc.rust-lang.org/std/fmt/fn.format.html
-///
-/// Note that Rust's formatting machinery is relatively CPU-intensive
-/// for constrained environments like the Solana VM.
-///
-/// # Examples
-///
-/// ```
-/// use solana_program::msg;
-///
-/// // The fast form
-/// msg!("verifying multisig");
-///
-/// // With formatting
-/// let err = "not enough signers";
-/// msg!("multisig failed: {}", err);
-/// ```
-#[macro_export]
-macro_rules! msg {
-    ($msg:expr) => {
-        $crate::log::sol_log($msg)
-    };
-    ($($arg:tt)*) => ($crate::log::sol_log(&format!($($arg)*)));
-}
-
-/// Print a string to the log.
-#[inline]
-pub fn sol_log(message: &str) {
-    #[cfg(target_os = "solana")]
-    unsafe {
-        crate::syscalls::sol_log_(message.as_ptr(), message.len() as u64);
-    }
-
-    #[cfg(not(target_os = "solana"))]
-    crate::program_stubs::sol_log(message);
-}
+pub use solana_msg::{msg, sol_log};
 
 /// Print 64-bit values represented as hexadecimal to the log.
 #[inline]
diff --git a/sdk/program/src/message/account_keys.rs b/sdk/program/src/message/account_keys.rs
index 6f80c3c68e6186..e7bb569d03643b 100644
--- a/sdk/program/src/message/account_keys.rs
+++ b/sdk/program/src/message/account_keys.rs
@@ -33,7 +33,7 @@ impl<'a> AccountKeys<'a> {
     /// Returns an iterator of account key segments. The ordering of segments
     /// affects how account indexes from compiled instructions are resolved and
     /// so should not be changed.
-    fn key_segment_iter(&self) -> impl Iterator<Item = &'a [Pubkey]> {
+    fn key_segment_iter(&self) -> impl Iterator<Item = &'a [Pubkey]> + Clone {
         if let Some(dynamic_keys) = self.dynamic_keys {
             [
                 self.static_keys,
@@ -77,7 +77,7 @@ impl<'a> AccountKeys<'a> {
     }
 
     /// Iterator for the addresses of the loaded accounts for a message
-    pub fn iter(&self) -> impl Iterator<Item = &'a Pubkey> {
+    pub fn iter(&self) -> impl Iterator<Item = &'a Pubkey> + Clone {
         self.key_segment_iter().flatten()
     }
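The added `Clone` bound lets one pass over the keys be forked for a nested scan without collecting into a `Vec` first; a sketch (`has_duplicates` is a hypothetical helper, not from the patch):

```rust
use solana_program::message::AccountKeys;

fn has_duplicates(keys: &AccountKeys) -> bool {
    let mut iter = keys.iter();
    // Because the iterator is Clone, each inner scan restarts from a cheap
    // clone of the outer cursor instead of rebuilding and re-walking it.
    while let Some(key) = iter.next() {
        if iter.clone().any(|other| other == key) {
            return true;
        }
    }
    false
}
```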
diff --git a/sdk/program/src/message/compiled_keys.rs b/sdk/program/src/message/compiled_keys.rs
index 7e9b19a10591e1..a9964c33448be2 100644
--- a/sdk/program/src/message/compiled_keys.rs
+++ b/sdk/program/src/message/compiled_keys.rs
@@ -1,6 +1,6 @@
 #[cfg(not(target_os = "solana"))]
 use crate::{
-    address_lookup_table_account::AddressLookupTableAccount,
+    address_lookup_table::AddressLookupTableAccount,
     message::v0::{LoadedAddresses, MessageAddressTableLookup},
 };
 use {
diff --git a/sdk/program/src/message/legacy.rs b/sdk/program/src/message/legacy.rs
index 5f5e808f90cfea..4c1d4c5a9da418 100644
--- a/sdk/program/src/message/legacy.rs
+++ b/sdk/program/src/message/legacy.rs
@@ -22,9 +22,10 @@ use {
         instruction::{CompiledInstruction, Instruction},
         message::{compiled_keys::CompiledKeys, MessageHeader},
         pubkey::Pubkey,
-        short_vec, system_instruction, system_program, sysvar,
+        system_instruction, system_program, sysvar,
     },
     solana_sanitize::{Sanitize, SanitizeError},
+    solana_short_vec as short_vec,
     std::{collections::HashSet, convert::TryFrom, str::FromStr},
 };
 
@@ -122,7 +123,7 @@ fn compile_instructions(ixs: &[Instruction], keys: &[Pubkey]) -> Vec<CompiledInstruction> {
-    #[deprecated]
-    pub fn get_account_keys_by_lock_type(&self) -> (Vec<&Pubkey>, Vec<&Pubkey>) {
-        let mut writable_keys = vec![];
-        let mut readonly_keys = vec![];
-        for (i, key) in self.account_keys.iter().enumerate() {
-            if self.is_maybe_writable(i, None) {
-                writable_keys.push(key);
-            } else {
-                readonly_keys.push(key);
-            }
-        }
-        (writable_keys, readonly_keys)
-    }
-
-    #[deprecated]
-    pub fn deserialize_instruction(
-        index: usize,
-        data: &[u8],
-    ) -> Result<Instruction, SanitizeError> {
-        #[allow(deprecated)]
-        sysvar::instructions::load_instruction_at(index, data)
-    }
-
     pub fn signer_keys(&self) -> Vec<&Pubkey> {
         // Clamp in case we're working on un-`sanitize()`ed input
         let last_key = self
@@ -907,36 +885,6 @@ mod tests {
         assert!(!message.is_account_maybe_reserved(2, None));
     }
 
-    #[test]
-    fn test_get_account_keys_by_lock_type() {
-        let program_id = Pubkey::default();
-        let id0 = Pubkey::new_unique();
-        let id1 = Pubkey::new_unique();
-        let id2 = Pubkey::new_unique();
-        let id3 = Pubkey::new_unique();
-        let message = Message::new(
-            &[
-                Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id0, false)]),
-                Instruction::new_with_bincode(program_id, &0, vec![AccountMeta::new(id1, true)]),
-                Instruction::new_with_bincode(
-                    program_id,
-                    &0,
-                    vec![AccountMeta::new_readonly(id2, false)],
-                ),
-                Instruction::new_with_bincode(
-                    program_id,
-                    &0,
-                    vec![AccountMeta::new_readonly(id3, true)],
-                ),
-            ],
-            Some(&id1),
-        );
-        assert_eq!(
-            message.get_account_keys_by_lock_type(),
-            (vec![&id1, &id0], vec![&id3, &program_id, &id2])
-        );
-    }
-
     #[test]
     fn test_program_ids() {
         let key0 = Pubkey::new_unique();
diff --git a/sdk/program/src/message/sanitized.rs b/sdk/program/src/message/sanitized.rs
index 06bb90ad8395fb..33b8c3fc3495d7 100644
--- a/sdk/program/src/message/sanitized.rs
+++ b/sdk/program/src/message/sanitized.rs
@@ -370,7 +370,18 @@ impl SanitizedMessage {
         })
     }
 
+    #[deprecated(
+        since = "2.1.0",
+        note = "Please use `SanitizedMessage::num_total_signatures` instead."
+    )]
     pub fn num_signatures(&self) -> u64 {
+        self.num_total_signatures()
+    }
+
+    /// Returns the total number of signatures in the message.
+    /// This includes required transaction signatures as well as any
+    /// pre-compile signatures that are attached in instructions.
+    pub fn num_total_signatures(&self) -> u64 {
         self.get_signature_details().total_signatures()
     }
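Callers migrate with a rename; e.g. (sketch, `signatures_to_pay_for` is hypothetical):

```rust
use solana_program::message::SanitizedMessage;

fn signatures_to_pay_for(message: &SanitizedMessage) -> u64 {
    // The new name makes the semantics explicit: the total includes
    // precompile (ed25519/secp256k1) signatures attached in instructions,
    // not just the signatures required by the transaction header.
    message.num_total_signatures()
}
```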
#[cfg_attr(
     feature = "frozen-abi",
-    frozen_abi(digest = "G4EAiqmGgBprgf5ePYemLJcoFfx4R7rhC1Weo2FVJ7fn"),
+    frozen_abi(digest = "8wyn6rxrJ1WwsUJkVxtDH9VEmd7djwqMfBLL3EpuY7H4"),
     derive(AbiEnumVisitor, AbiExample)
 )]
 #[derive(Debug, PartialEq, Eq, Clone)]
diff --git a/sdk/program/src/message/versions/v0/mod.rs b/sdk/program/src/message/versions/v0/mod.rs
index 76c866bf6a602a..9eaad5a9b39944 100644
--- a/sdk/program/src/message/versions/v0/mod.rs
+++ b/sdk/program/src/message/versions/v0/mod.rs
@@ -12,7 +12,7 @@ pub use loaded::*;
 use {
     crate::{
-        address_lookup_table_account::AddressLookupTableAccount,
+        address_lookup_table::AddressLookupTableAccount,
         bpf_loader_upgradeable,
         hash::Hash,
         instruction::{CompiledInstruction, Instruction},
@@ -21,9 +21,9 @@ use {
             AccountKeys, MessageHeader, MESSAGE_VERSION_PREFIX,
         },
         pubkey::Pubkey,
-        short_vec,
     },
     solana_sanitize::SanitizeError,
+    solana_short_vec as short_vec,
     std::collections::HashSet,
 };
@@ -200,7 +200,7 @@ impl Message {
     /// use solana_rpc_client::rpc_client::RpcClient;
     /// use solana_program::address_lookup_table::{self, state::{AddressLookupTable, LookupTableMeta}};
     /// use solana_sdk::{
-    ///     address_lookup_table_account::AddressLookupTableAccount,
+    ///     address_lookup_table::AddressLookupTableAccount,
     ///     instruction::{AccountMeta, Instruction},
     ///     message::{VersionedMessage, v0},
     ///     pubkey::Pubkey,
diff --git a/sdk/program/src/program_error.rs b/sdk/program/src/program_error.rs
index f80827ca6c953c..f225561a52d389 100644
--- a/sdk/program/src/program_error.rs
+++ b/sdk/program/src/program_error.rs
@@ -4,8 +4,9 @@
 #[cfg(feature = "borsh")]
 use borsh::io::Error as BorshIoError;
 use {
-    crate::{decode_error::DecodeError, instruction::InstructionError, msg, pubkey::PubkeyError},
+    crate::{instruction::InstructionError, msg, pubkey::PubkeyError},
     num_traits::{FromPrimitive, ToPrimitive},
+    solana_decode_error::DecodeError,
     std::convert::TryFrom,
     thiserror::Error,
 };
diff --git a/sdk/program/src/program_stubs.rs b/sdk/program/src/program_stubs.rs
index e06f6b1d201a42..77dabd37c3418c 100644
--- a/sdk/program/src/program_stubs.rs
+++ b/sdk/program/src/program_stubs.rs
@@ -8,6 +8,7 @@ use {
         program_error::UNSUPPORTED_SYSVAR, pubkey::Pubkey,
     },
     base64::{prelude::BASE64_STANDARD, Engine},
+    solana_program_memory::stubs,
     std::sync::{Arc, RwLock},
 };
 
@@ -21,7 +22,6 @@ pub fn set_syscall_stubs(syscall_stubs: Box<dyn SyscallStubs>) -> Box<dyn SyscallStubs> {
     fn sol_get_return_data(&self) -> Option<(Pubkey, Vec<u8>)> {
         None
@@ -206,30 +188,6 @@ pub(crate) fn sol_get_epoch_stake(vote_address: *const u8) -> u64 {
         .sol_get_epoch_stake(vote_address)
 }
 
-pub(crate) fn sol_memcpy(dst: *mut u8, src: *const u8, n: usize) {
-    unsafe {
-        SYSCALL_STUBS.read().unwrap().sol_memcpy(dst, src, n);
-    }
-}
-
-pub(crate) fn sol_memmove(dst: *mut u8, src: *const u8, n: usize) {
-    unsafe {
-        SYSCALL_STUBS.read().unwrap().sol_memmove(dst, src, n);
-    }
-}
-
-pub(crate) fn sol_memcmp(s1: *const u8, s2: *const u8, n: usize, result: *mut i32) {
-    unsafe {
-        SYSCALL_STUBS.read().unwrap().sol_memcmp(s1, s2, n, result);
-    }
-}
-
-pub(crate) fn sol_memset(s: *mut u8, c: u8, n: usize) {
-    unsafe {
-        SYSCALL_STUBS.read().unwrap().sol_memset(s, c, n);
-    }
-}
-
 pub(crate) fn sol_get_return_data() -> Option<(Pubkey, Vec<u8>)> {
     SYSCALL_STUBS.read().unwrap().sol_get_return_data()
 }
@@ -259,40 +217,3 @@ pub(crate) fn sol_get_epoch_rewards_sysvar(var_addr: *mut u8) -> u64 {
         .unwrap()
         .sol_get_epoch_rewards_sysvar(var_addr)
 }
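Host-side code keeps working through the public wrappers, which now route to `solana_program_memory::stubs` rather than the `SyscallStubs` trait; a sketch (illustration only):

```rust
use solana_program_memory::{sol_memcmp, sol_memcpy};

fn main() {
    let src = [1u8, 2, 3, 4];
    let mut dst = [0u8; 4];
    // Off-chain this delegates to stubs::sol_memcpy, which asserts the two
    // regions are non-overlapping before ptr::copy_nonoverlapping.
    sol_memcpy(&mut dst, &src, 4);
    assert_eq!(sol_memcmp(&dst, &src, 4), 0);
}
```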
-
-/// Check that two regions do not overlap.
-///
-/// Hidden to share with bpf_loader without being part of the API surface.
-#[doc(hidden)]
-pub fn is_nonoverlapping<N>(src: N, src_len: N, dst: N, dst_len: N) -> bool
-where
-    N: Ord + num_traits::SaturatingSub,
-{
-    // If the absolute distance between the ptrs is at least as big as the size of the other,
-    // they do not overlap.
-    if src > dst {
-        src.saturating_sub(&dst) >= dst_len
-    } else {
-        dst.saturating_sub(&src) >= src_len
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_is_nonoverlapping() {
-        for dst in 0..8 {
-            assert!(is_nonoverlapping(10, 3, dst, 3));
-        }
-        for dst in 8..13 {
-            assert!(!is_nonoverlapping(10, 3, dst, 3));
-        }
-        for dst in 13..20 {
-            assert!(is_nonoverlapping(10, 3, dst, 3));
-        }
-        assert!(is_nonoverlapping::<u8>(255, 3, 254, 1));
-        assert!(!is_nonoverlapping::<u8>(255, 2, 254, 3));
-    }
-}
diff --git a/sdk/program/src/pubkey.rs b/sdk/program/src/pubkey.rs
index 206efd18f41e8e..5d3433c1247f90 100644
--- a/sdk/program/src/pubkey.rs
+++ b/sdk/program/src/pubkey.rs
@@ -9,9 +9,10 @@ use arbitrary::Arbitrary;
 #[cfg(feature = "borsh")]
 use borsh::{BorshDeserialize, BorshSchema, BorshSerialize};
 use {
-    crate::{decode_error::DecodeError, hash::hashv},
+    crate::hash::hashv,
     bytemuck_derive::{Pod, Zeroable},
     num_derive::{FromPrimitive, ToPrimitive},
+    solana_decode_error::DecodeError,
     std::{
         convert::{Infallible, TryFrom},
         fmt, mem,
@@ -180,28 +181,13 @@ pub fn bytes_are_curve_point<T: AsRef<[u8]>>(_bytes: T) -> bool {
 }
 
 impl Pubkey {
-    #[deprecated(
-        since = "1.14.14",
-        note = "Please use 'Pubkey::from' or 'Pubkey::try_from' instead"
-    )]
-    pub fn new(pubkey_vec: &[u8]) -> Self {
-        Self::try_from(pubkey_vec).expect("Slice must be the same length as a Pubkey")
-    }
-
     pub const fn new_from_array(pubkey_array: [u8; 32]) -> Self {
         Self(pubkey_array)
     }
 
-    #[deprecated(since = "1.3.9", note = "Please use 'Pubkey::new_unique' instead")]
-    #[cfg(not(target_os = "solana"))]
-    pub fn new_rand() -> Self {
-        // Consider removing Pubkey::new_rand() entirely in the v1.5 or v1.6 timeframe
-        Pubkey::from(rand::random::<[u8; 32]>())
-    }
-
     /// unique Pubkey for tests and benchmarks.
     pub fn new_unique() -> Self {
-        use crate::atomic_u64::AtomicU64;
+        use solana_atomic_u64::AtomicU64;
         static I: AtomicU64 = AtomicU64::new(1);
 
         let mut b = [0u8; 32];
diff --git a/sdk/program/src/serde_varint.rs b/sdk/program/src/serde_varint.rs
index 419dfe209d4502..7df84540a5f12a 100644
--- a/sdk/program/src/serde_varint.rs
+++ b/sdk/program/src/serde_varint.rs
@@ -120,7 +120,7 @@ impl_var_int!(u64);
 
 #[cfg(test)]
 mod tests {
-    use {crate::short_vec::ShortU16, rand::Rng};
+    use {rand::Rng, solana_short_vec::ShortU16};
 
     #[derive(Debug, Eq, PartialEq, Serialize, Deserialize)]
     struct Dummy {
diff --git a/sdk/program/src/serialize_utils/cursor.rs b/sdk/program/src/serialize_utils/cursor.rs
index 6e78d88ef9a73a..3d4dedd092ed3a 100644
--- a/sdk/program/src/serialize_utils/cursor.rs
+++ b/sdk/program/src/serialize_utils/cursor.rs
@@ -1,6 +1,12 @@
 use {
-    crate::{instruction::InstructionError, pubkey::Pubkey},
-    std::io::{Cursor, Read},
+    crate::{
+        instruction::InstructionError,
+        pubkey::{Pubkey, PUBKEY_BYTES},
+    },
+    std::{
+        io::{BufRead as _, Cursor, Read},
+        ptr,
+    },
 };
 
 pub(crate) fn read_u8<T: AsRef<[u8]>>(cursor: &mut Cursor<T>) -> Result<u8, InstructionError> {
@@ -50,6 +56,27 @@ pub(crate) fn read_i64<T: AsRef<[u8]>>(cursor: &mut Cursor<T>) -> Result<i64, InstructionError> {
+pub(crate) fn read_pubkey_into<T: AsRef<[u8]>>(
+    cursor: &mut Cursor<T>,
+    pubkey: *mut Pubkey,
+) -> Result<(), InstructionError> {
+    match cursor.fill_buf() {
+        Ok(buf) if buf.len() >= PUBKEY_BYTES => {
+            // Safety: `buf` is guaranteed to be at least `PUBKEY_BYTES` bytes
+            // long. Pubkey is a #[repr(transparent)] wrapper around a byte array,
+            // so this is a byte to byte copy and it's safe.
+            unsafe {
+                ptr::copy_nonoverlapping(buf.as_ptr(), pubkey as *mut u8, PUBKEY_BYTES);
+            }
+
+            cursor.consume(PUBKEY_BYTES);
+        }
+        _ => return Err(InstructionError::InvalidAccountData),
+    }
+
+    Ok(())
+}
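The point of the `_into` reader is to write the 32 bytes in place instead of returning them by value; the same pattern in miniature (`Demo` is an illustrative stand-in type, not from the patch):

```rust
use std::{mem::MaybeUninit, ptr::addr_of_mut};

struct Demo {
    key: [u8; 32],
}

fn main() {
    let bytes = [7u8; 32];
    let mut demo = MaybeUninit::<Demo>::uninit();
    let demo_ptr = demo.as_mut_ptr();
    // Write straight into the uninitialized field: no &mut Demo reference is
    // formed and the key is never staged in a temporary on the stack.
    unsafe {
        let field = addr_of_mut!((*demo_ptr).key) as *mut u8;
        std::ptr::copy_nonoverlapping(bytes.as_ptr(), field, 32);
        assert_eq!(demo.assume_init().key, bytes);
    }
}
```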
+
 pub(crate) fn read_pubkey<T: AsRef<[u8]>>(
     cursor: &mut Cursor<T>,
 ) -> Result<Pubkey, InstructionError> {
diff --git a/sdk/program/src/stake/instruction.rs b/sdk/program/src/stake/instruction.rs
index e2a5b056e70618..89357050e93ca8 100644
--- a/sdk/program/src/stake/instruction.rs
+++ b/sdk/program/src/stake/instruction.rs
@@ -1,13 +1,16 @@
-#[allow(deprecated)]
-use crate::stake::config;
+// Remove the following `allow` when the `Redelegate` variant is renamed to
+// `Unused` starting from v3.
+// Required to avoid warnings from uses of deprecated types during trait derivations.
+#![allow(deprecated)]
+
 use {
     crate::{
         clock::{Epoch, UnixTimestamp},
-        decode_error::DecodeError,
         instruction::{AccountMeta, Instruction},
         program_error::ProgramError,
         pubkey::Pubkey,
         stake::{
+            config,
             program::id,
             state::{Authorized, Lockup, StakeAuthorize, StakeStateV2},
         },
@@ -16,6 +19,7 @@ use {
     log::*,
     num_derive::{FromPrimitive, ToPrimitive},
     serde_derive::{Deserialize, Serialize},
+    solana_decode_error::DecodeError,
     thiserror::Error,
 };
 
@@ -306,6 +310,7 @@ pub enum StakeInstruction {
     ///   3. `[]` Unused account, formerly the stake config
     ///   4. `[SIGNER]` Stake authority
     ///
+    #[deprecated(since = "2.1.0", note = "Redelegate will not be enabled")]
     Redelegate,
 
     /// Move stake between accounts with the same authorities and lockups, using Staker authority.
@@ -726,7 +731,6 @@ pub fn delegate_stake(
         AccountMeta::new_readonly(*vote_pubkey, false),
         AccountMeta::new_readonly(sysvar::clock::id(), false),
         AccountMeta::new_readonly(sysvar::stake_history::id(), false),
-        #[allow(deprecated)]
         // For backwards compatibility we pass the stake config, although this account is unused
         AccountMeta::new_readonly(config::id(), false),
         AccountMeta::new_readonly(*authorized_pubkey, true),
@@ -832,7 +836,6 @@ fn _redelegate(
         AccountMeta::new(*stake_pubkey, false),
         AccountMeta::new(*uninitialized_stake_pubkey, false),
         AccountMeta::new_readonly(*vote_pubkey, false),
-        #[allow(deprecated)]
         // For backwards compatibility we pass the stake config, although this account is unused
         AccountMeta::new_readonly(config::id(), false),
         AccountMeta::new_readonly(*authorized_pubkey, true),
     ];
     Instruction::new_with_bincode(id(), &StakeInstruction::Redelegate, account_metas)
 }
 
+#[deprecated(since = "2.1.0", note = "Redelegate will not be enabled")]
 pub fn redelegate(
     stake_pubkey: &Pubkey,
     authorized_pubkey: &Pubkey,
@@ -858,6 +862,7 @@ pub fn redelegate(
     ]
 }
 
+#[deprecated(since = "2.1.0", note = "Redelegate will not be enabled")]
 pub fn redelegate_with_seed(
     stake_pubkey: &Pubkey,
     authorized_pubkey: &Pubkey,
diff --git a/sdk/program/src/stake/stake_flags.rs b/sdk/program/src/stake/stake_flags.rs
index 0da426f12f6387..072305d48d619d 100644
--- a/sdk/program/src/stake/stake_flags.rs
+++ b/sdk/program/src/stake/stake_flags.rs
@@ -65,6 +65,10 @@ impl borsh0_10::ser::BorshSerialize for StakeFlags {
 /// Currently, only bit 1 is used. The other 7 bits are reserved for future usage.
 impl StakeFlags {
     /// Stake must be fully activated before deactivation is allowed (bit 1).
+    #[deprecated(
+        since = "2.1.0",
+        note = "This flag will be removed because it was only used for `redelegate`, which will not be enabled."
+ )] pub const MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED: Self = Self { bits: 0b0000_0001 }; @@ -102,6 +106,7 @@ mod test { use super::*; #[test] + #[allow(deprecated)] fn test_stake_flags() { let mut f = StakeFlags::empty(); assert!(!f.contains(StakeFlags::MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED)); diff --git a/sdk/program/src/stake/state.rs b/sdk/program/src/stake/state.rs index b2a03d68c3d672..22fc5ea44645c6 100644 --- a/sdk/program/src/stake/state.rs +++ b/sdk/program/src/stake/state.rs @@ -1071,6 +1071,7 @@ mod test { }, lockup: Lockup::default(), })); + #[allow(deprecated)] check_borsh_serialization(StakeStateV2::Stake( Meta { rent_exempt_reserve: 1, @@ -1152,6 +1153,7 @@ mod test { assert_eq!(bincode_serialized[FLAG_OFFSET], expected); assert_eq!(borsh_serialized[FLAG_OFFSET], expected); }; + #[allow(deprecated)] check_flag( StakeFlags::MUST_FULLY_ACTIVATE_BEFORE_DEACTIVATION_IS_PERMITTED, 1, diff --git a/sdk/program/src/syscalls/definitions.rs b/sdk/program/src/syscalls/definitions.rs index 5988cbb76d4073..fd22296e03ff59 100644 --- a/sdk/program/src/syscalls/definitions.rs +++ b/sdk/program/src/syscalls/definitions.rs @@ -1,5 +1,17 @@ #[cfg(target_feature = "static-syscalls")] pub use solana_define_syscall::sys_hash; +#[deprecated(since = "2.1.0", note = "Use `solana-msg::sol_log` instead.")] +pub use solana_msg::sol_log; +#[deprecated( + since = "2.1.0", + note = "Use `solana_program_memory::syscalls` instead" +)] +pub use solana_program_memory::syscalls::{sol_memcmp_, sol_memcpy_, sol_memmove_, sol_memset_}; +#[deprecated( + since = "2.1.0", + note = "Use `solana_secp256k1_recover::sol_secp256k1_recover` instead" +)] +pub use solana_secp256k1_recover::sol_secp256k1_recover; use { crate::{ instruction::{AccountMeta, ProcessedSiblingInstruction}, @@ -7,8 +19,6 @@ use { }, solana_define_syscall::define_syscall, }; - -define_syscall!(fn sol_log_(message: *const u8, len: u64)); define_syscall!(fn sol_log_64_(arg1: u64, arg2: u64, arg3: u64, arg4: u64, arg5: u64)); define_syscall!(fn sol_log_compute_units_()); define_syscall!(fn sol_log_pubkey(pubkey_addr: *const u8)); @@ -16,12 +26,7 @@ define_syscall!(fn sol_create_program_address(seeds_addr: *const u8, seeds_len: define_syscall!(fn sol_try_find_program_address(seeds_addr: *const u8, seeds_len: u64, program_id_addr: *const u8, address_bytes_addr: *const u8, bump_seed_addr: *const u8) -> u64); define_syscall!(fn sol_sha256(vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64); define_syscall!(fn sol_keccak256(vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64); -define_syscall!(fn sol_secp256k1_recover(hash: *const u8, recovery_id: u64, signature: *const u8, result: *mut u8) -> u64); define_syscall!(fn sol_blake3(vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64); -define_syscall!(fn sol_memcpy_(dst: *mut u8, src: *const u8, n: u64)); -define_syscall!(fn sol_memmove_(dst: *mut u8, src: *const u8, n: u64)); -define_syscall!(fn sol_memcmp_(s1: *const u8, s2: *const u8, n: u64, result: *mut i32)); -define_syscall!(fn sol_memset_(s: *mut u8, c: u8, n: u64)); define_syscall!(fn sol_invoke_signed_c(instruction_addr: *const u8, account_infos_addr: *const u8, account_infos_len: u64, signers_seeds_addr: *const u8, signers_seeds_len: u64) -> u64); define_syscall!(fn sol_invoke_signed_rust(instruction_addr: *const u8, account_infos_addr: *const u8, account_infos_len: u64, signers_seeds_addr: *const u8, signers_seeds_len: u64) -> u64); define_syscall!(fn sol_set_return_data(data: *const u8, 
length: u64));
diff --git a/sdk/program/src/system_instruction.rs b/sdk/program/src/system_instruction.rs
index 47fc97fbe0f69b..a1c301b85cdab9 100644
--- a/sdk/program/src/system_instruction.rs
+++ b/sdk/program/src/system_instruction.rs
@@ -42,7 +42,6 @@
 #[allow(deprecated)]
 use {
     crate::{
-        decode_error::DecodeError,
         instruction::{AccountMeta, Instruction},
         nonce,
         pubkey::Pubkey,
@@ -50,6 +49,7 @@ use {
         sysvar::{recent_blockhashes, rent},
     },
     num_derive::{FromPrimitive, ToPrimitive},
+    solana_decode_error::DecodeError,
     thiserror::Error,
 };
 
diff --git a/sdk/program/src/sysvar/instructions.rs b/sdk/program/src/sysvar/instructions.rs
index 855b14b54a7e30..9bc1d001c349d5 100644
--- a/sdk/program/src/sysvar/instructions.rs
+++ b/sdk/program/src/sysvar/instructions.rs
@@ -29,6 +29,8 @@
 
 #![allow(clippy::arithmetic_side_effects)]
 
+#[cfg(feature = "dev-context-only-utils")]
+use qualifier_attr::qualifiers;
 #[cfg(not(target_os = "solana"))]
 use {
     crate::serialize_utils::{append_slice, append_u16, append_u8},
@@ -149,11 +151,10 @@ fn serialize_instructions(instructions: &[BorrowedInstruction]) -> Vec<u8> {
 /// `Transaction`.
 ///
 /// `data` is the instructions sysvar account data.
-#[deprecated(
-    since = "1.8.0",
-    note = "Unsafe because the sysvar accounts address is not checked, please use `load_current_index_checked` instead"
-)]
-pub fn load_current_index(data: &[u8]) -> u16 {
+///
+/// Unsafe because the sysvar accounts address is not checked; only used
+/// internally after such a check.
+fn load_current_index(data: &[u8]) -> u16 {
     let mut instr_fixed_data = [0u8; 2];
     let len = data.len();
     instr_fixed_data.copy_from_slice(&data[len - 2..len]);
@@ -174,10 +175,8 @@ pub fn load_current_index_checked(
     }
 
     let instruction_sysvar = instruction_sysvar_account_info.try_borrow_data()?;
-    let mut instr_fixed_data = [0u8; 2];
-    let len = instruction_sysvar.len();
-    instr_fixed_data.copy_from_slice(&instruction_sysvar[len - 2..len]);
-    Ok(u16::from_le_bytes(instr_fixed_data))
+    let index = load_current_index(&instruction_sysvar);
+    Ok(index)
 }
 
 /// Store the current `Instruction`'s index in the instructions sysvar data.
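On-chain callers are expected to stay on the checked entry points, which verify the sysvar address before deferring to the now-private readers; a sketch (`current_and_previous` is a hypothetical helper):

```rust
use solana_program::{
    account_info::AccountInfo,
    program_error::ProgramError,
    sysvar::instructions::{get_instruction_relative, load_current_index_checked},
};

fn current_and_previous(ix_sysvar: &AccountInfo) -> Result<u16, ProgramError> {
    // The checked variants validate that the account really is the
    // instructions sysvar before touching its data.
    let index = load_current_index_checked(ix_sysvar)?;
    let _prev = get_instruction_relative(-1, ix_sysvar)?;
    Ok(index)
}
```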
+#[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] +fn load_instruction_at(index: usize, data: &[u8]) -> Result { deserialize_instruction(index, data) } @@ -257,7 +256,7 @@ pub fn load_instruction_at_checked( } let instruction_sysvar = instruction_sysvar_account_info.try_borrow_data()?; - deserialize_instruction(index, &instruction_sysvar).map_err(|err| match err { + load_instruction_at(index, &instruction_sysvar).map_err(|err| match err { SanitizeError::IndexOutOfBounds => ProgramError::InvalidArgument, _ => ProgramError::InvalidInstructionData, }) @@ -278,13 +277,11 @@ pub fn get_instruction_relative( } let instruction_sysvar = instruction_sysvar_account_info.data.borrow(); - #[allow(deprecated)] let current_index = load_current_index(&instruction_sysvar) as i64; let index = current_index.saturating_add(index_relative_to_current); if index < 0 { return Err(ProgramError::InvalidArgument); } - #[allow(deprecated)] load_instruction_at( current_index.saturating_add(index_relative_to_current) as usize, &instruction_sysvar, diff --git a/sdk/program/src/vote/error.rs b/sdk/program/src/vote/error.rs index a5ec60ce5232fa..1883186f6e2b65 100644 --- a/sdk/program/src/vote/error.rs +++ b/sdk/program/src/vote/error.rs @@ -1,8 +1,8 @@ //! Vote program errors use { - crate::decode_error::DecodeError, num_derive::{FromPrimitive, ToPrimitive}, + solana_decode_error::DecodeError, thiserror::Error, }; diff --git a/sdk/program/src/vote/state/mod.rs b/sdk/program/src/vote/state/mod.rs index 1342fb9deea4a3..abac8f5abff61f 100644 --- a/sdk/program/src/vote/state/mod.rs +++ b/sdk/program/src/vote/state/mod.rs @@ -18,9 +18,14 @@ use { sysvar::clock::Clock, vote::{authorized_voters::AuthorizedVoters, error::VoteError}, }, - bincode::{serialize_into, serialized_size, ErrorKind}, + bincode::{serialize_into, ErrorKind}, serde_derive::{Deserialize, Serialize}, - std::{collections::VecDeque, fmt::Debug, io::Cursor}, + std::{ + collections::VecDeque, + fmt::Debug, + io::Cursor, + mem::{self, MaybeUninit}, + }, }; mod vote_state_0_23_5; @@ -323,6 +328,7 @@ const MAX_ITEMS: usize = 32; #[cfg_attr(feature = "frozen-abi", derive(AbiExample))] #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] +#[cfg_attr(test, derive(Arbitrary))] pub struct CircBuf { buf: [I; MAX_ITEMS], /// next pointer @@ -368,23 +374,6 @@ impl CircBuf { } } -#[cfg(test)] -impl<'a, I: Default + Copy> Arbitrary<'a> for CircBuf -where - I: Arbitrary<'a>, -{ - fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { - let mut circbuf = Self::default(); - - let len = u.arbitrary_len::()?; - for _ in 0..len { - circbuf.append(I::arbitrary(u)?); - } - - Ok(circbuf) - } -} - #[cfg_attr( feature = "frozen-abi", frozen_abi(digest = "EeenjJaSrm9hRM39gK6raRNtzG61hnk7GciUCJJRDUSQ"), @@ -475,8 +464,11 @@ impl VoteState { 3762 // see test_vote_state_size_of. 
     }
 
-    // we retain bincode deserialize for not(target_os = "solana")
-    // because the hand-written parser does not support V0_23_5
+    // NOTE we retain `bincode::deserialize` for `not(target_os = "solana")` pending testing on mainnet-beta
+    // once that testing is done, `VoteState::deserialize_into` may be used for all targets
+    // conversion of V0_23_5 to current must be handled specially, however
+    // because it inserts a null voter into `authorized_voters`
+    // which `VoteStateVersions::is_uninitialized` erroneously reports as initialized
     pub fn deserialize(input: &[u8]) -> Result<Self, InstructionError> {
         #[cfg(not(target_os = "solana"))]
         {
@@ -492,26 +484,108 @@
         }
     }
 
-    /// Deserializes the input buffer into the provided `VoteState`
+    /// Deserializes the input `VoteStateVersions` buffer directly into the provided `VoteState`.
+    ///
+    /// In a SBPF context, V0_23_5 is not supported, but in non-SBPF, all versions are supported for
+    /// compatibility with `bincode::deserialize`.
     ///
-    /// This function exists to deserialize `VoteState` in a BPF context without going above
-    /// the compute limit, and must be kept up to date with `bincode::deserialize`.
+    /// On success, `vote_state` reflects the state of the input data. On failure, `vote_state` is
+    /// reset to `VoteState::default()`.
     pub fn deserialize_into(
         input: &[u8],
         vote_state: &mut VoteState,
     ) -> Result<(), InstructionError> {
-        let minimum_size =
-            serialized_size(vote_state).map_err(|_| InstructionError::InvalidAccountData)?;
-        if (input.len() as u64) < minimum_size {
-            return Err(InstructionError::InvalidAccountData);
+        // Rebind vote_state to *mut VoteState so that the &mut binding isn't
+        // accessible anymore, preventing accidental use after this point.
+        //
+        // NOTE: switch to ptr::from_mut() once platform-tools moves to rustc >= 1.76
+        let vote_state = vote_state as *mut VoteState;
+
+        // Safety: vote_state is valid to drop (see drop_in_place() docs). After
+        // dropping, the pointer is treated as uninitialized and only accessed
+        // through ptr::write, which is safe as per drop_in_place docs.
+        unsafe {
+            std::ptr::drop_in_place(vote_state);
+        }
+
+        // This is to reset vote_state to VoteState::default() if deserialize fails or panics.
+        struct DropGuard {
+            vote_state: *mut VoteState,
         }
+
+        impl Drop for DropGuard {
+            fn drop(&mut self) {
+                // Safety:
+                //
+                // Deserialize failed or panicked so at this point vote_state is uninitialized. We
+                // must write a new _valid_ value into it or after returning (or unwinding) from
+                // this function the caller is left with an uninitialized `&mut VoteState`, which is
+                // UB (references must always be valid).
+                //
+                // This is always safe and doesn't leak memory because deserialize_into_ptr() writes
+                // into the fields that heap alloc only when it returns Ok().
+                unsafe {
+                    self.vote_state.write(VoteState::default());
+                }
+            }
+        }
+
+        let guard = DropGuard { vote_state };
+
+        let res = VoteState::deserialize_into_ptr(input, vote_state);
+        if res.is_ok() {
+            mem::forget(guard);
+        }
+
+        res
+    }
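A sketch of the intended call pattern when a previously initialized `VoteState` is reused (`parse` is a hypothetical helper, not from the patch):

```rust
use solana_program::vote::state::VoteState;

fn parse(account_data: &[u8]) -> Option<VoteState> {
    // Reuse one allocation-heavy VoteState across parses; on failure the
    // internal guard resets it to VoteState::default(), so it stays valid.
    let mut vote_state = VoteState::default();
    VoteState::deserialize_into(account_data, &mut vote_state)
        .ok()
        .map(|()| vote_state)
}
```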
+
+    /// Deserializes the input `VoteStateVersions` buffer directly into the provided
+    /// `MaybeUninit<VoteState>`.
+    ///
+    /// In a SBPF context, V0_23_5 is not supported, but in non-SBPF, all versions are supported for
+    /// compatibility with `bincode::deserialize`.
+    ///
+    /// On success, `vote_state` is fully initialized and can be converted to `VoteState` using
+    /// [MaybeUninit::assume_init]. On failure, `vote_state` may still be uninitialized and must not
+    /// be converted to `VoteState`.
+    pub fn deserialize_into_uninit(
+        input: &[u8],
+        vote_state: &mut MaybeUninit<VoteState>,
+    ) -> Result<(), InstructionError> {
+        VoteState::deserialize_into_ptr(input, vote_state.as_mut_ptr())
+    }
+
+    fn deserialize_into_ptr(
+        input: &[u8],
+        vote_state: *mut VoteState,
+    ) -> Result<(), InstructionError> {
         let mut cursor = Cursor::new(input);
 
         let variant = read_u32(&mut cursor)?;
         match variant {
-            // V0_23_5. not supported; these should not exist on mainnet
-            0 => Err(InstructionError::InvalidAccountData),
+            // V0_23_5. not supported for bpf targets; these should not exist on mainnet
+            // supported for non-bpf targets for backwards compatibility
+            0 => {
+                #[cfg(not(target_os = "solana"))]
+                {
+                    // Safety: vote_state is valid as it comes from `&mut MaybeUninit<VoteState>` or
+                    // `&mut VoteState`. In the first case, the value is uninitialized so we write()
+                    // to avoid dropping invalid data; in the latter case, we `drop_in_place()`
+                    // before writing so the value has already been dropped and we just write a new
+                    // one in place.
+                    unsafe {
+                        vote_state.write(
+                            bincode::deserialize::<VoteStateVersions>(input)
+                                .map(|versioned| versioned.convert_to_current())
+                                .map_err(|_| InstructionError::InvalidAccountData)?,
+                        );
+                    }
+                    Ok(())
+                }
+                #[cfg(target_os = "solana")]
+                Err(InstructionError::InvalidAccountData)
+            }
             // V1_14_11. substantially different layout and data from V0_23_5
             1 => deserialize_vote_state_into(&mut cursor, vote_state, false),
             // Current. the only difference from V1_14_11 is the addition of a slot-latency to each vote
@@ -519,10 +593,6 @@ impl VoteState {
             _ => Err(InstructionError::InvalidAccountData),
         }?;
 
-        if cursor.position() > input.len() as u64 {
-            return Err(InstructionError::InvalidAccountData);
-        }
-
         Ok(())
     }
 
@@ -895,10 +965,11 @@ pub mod serde_compact_vote_state_update {
     use {
         super::*,
         crate::{
             clock::{Slot, UnixTimestamp},
-            serde_varint, short_vec,
+            serde_varint,
             vote::state::Lockout,
         },
         serde::{Deserialize, Deserializer, Serialize, Serializer},
+        solana_short_vec as short_vec,
     };
 
 #[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
@@ -992,10 +1063,11 @@ pub mod serde_tower_sync {
     use {
         super::*,
         crate::{
             clock::{Slot, UnixTimestamp},
-            serde_varint, short_vec,
+            serde_varint,
             vote::state::Lockout,
         },
         serde::{Deserialize, Deserializer, Serialize, Serializer},
+        solana_short_vec as short_vec,
     };
 
 #[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
@@ -1087,7 +1159,7 @@ pub mod serde_tower_sync {
 
 #[cfg(test)]
 mod tests {
-    use {super::*, itertools::Itertools, rand::Rng};
+    use {super::*, bincode::serialized_size, itertools::Itertools, rand::Rng};
 
     #[test]
     fn test_vote_serialize() {
@@ -1138,23 +1210,125 @@
     }
 
     #[test]
-    fn test_vote_deserialize_into_nopanic() {
-        // base case
+    fn test_vote_deserialize_into_error() {
+        let target_vote_state = VoteState::new_rand_for_tests(Pubkey::new_unique(), 42);
+        let mut vote_state_buf =
+            bincode::serialize(&VoteStateVersions::new_current(target_vote_state.clone())).unwrap();
+        let len = vote_state_buf.len();
+        vote_state_buf.truncate(len - 1);
+
         let mut test_vote_state = VoteState::default();
-        let e = VoteState::deserialize_into(&[], &mut test_vote_state).unwrap_err();
+        VoteState::deserialize_into(&vote_state_buf, &mut test_vote_state).unwrap_err();
+        assert_eq!(test_vote_state, VoteState::default());
+    }
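When no previous value exists, the `_uninit` variant skips constructing a default first; a sketch (`parse_uninit` is hypothetical):

```rust
use solana_program::vote::state::VoteState;
use std::mem::MaybeUninit;

fn parse_uninit(account_data: &[u8]) -> Option<VoteState> {
    let mut vote_state = MaybeUninit::uninit();
    VoteState::deserialize_into_uninit(account_data, &mut vote_state).ok()?;
    // Safety: deserialize_into_uninit fully initialized vote_state on Ok.
    Some(unsafe { vote_state.assume_init() })
}
```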
+
+    #[test]
+    fn test_vote_deserialize_into_uninit() {
+        // base case
+        let target_vote_state = VoteState::default();
+        let vote_state_buf =
+            bincode::serialize(&VoteStateVersions::new_current(target_vote_state.clone())).unwrap();
+
+        let mut test_vote_state = MaybeUninit::uninit();
+        VoteState::deserialize_into_uninit(&vote_state_buf, &mut test_vote_state).unwrap();
+        let test_vote_state = unsafe { test_vote_state.assume_init() };
+
+        assert_eq!(target_vote_state, test_vote_state);
+
+        // variant
+        // provide 4x the minimum struct size in bytes to ensure we typically touch every field
+        let struct_bytes_x4 = std::mem::size_of::<VoteState>() * 4;
+        for _ in 0..1000 {
+            let raw_data: Vec<u8> = (0..struct_bytes_x4).map(|_| rand::random::<u8>()).collect();
+            let mut unstructured = Unstructured::new(&raw_data);
+
+            let target_vote_state_versions =
+                VoteStateVersions::arbitrary(&mut unstructured).unwrap();
+            let vote_state_buf = bincode::serialize(&target_vote_state_versions).unwrap();
+            let target_vote_state = target_vote_state_versions.convert_to_current();
+
+            let mut test_vote_state = MaybeUninit::uninit();
+            VoteState::deserialize_into_uninit(&vote_state_buf, &mut test_vote_state).unwrap();
+            let test_vote_state = unsafe { test_vote_state.assume_init() };
+
+            assert_eq!(target_vote_state, test_vote_state);
+        }
+    }
+
+    #[test]
+    fn test_vote_deserialize_into_uninit_nopanic() {
+        // base case
+        let mut test_vote_state = MaybeUninit::uninit();
+        let e = VoteState::deserialize_into_uninit(&[], &mut test_vote_state).unwrap_err();
         assert_eq!(e, InstructionError::InvalidAccountData);
 
         // variant
-        let serialized_len_x4 = serialized_size(&test_vote_state).unwrap() * 4;
+        let serialized_len_x4 = serialized_size(&VoteState::default()).unwrap() * 4;
         let mut rng = rand::thread_rng();
         for _ in 0..1000 {
             let raw_data_length = rng.gen_range(1..serialized_len_x4);
-            let raw_data: Vec<u8> = (0..raw_data_length).map(|_| rng.gen::<u8>()).collect();
+            let mut raw_data: Vec<u8> = (0..raw_data_length).map(|_| rng.gen::<u8>()).collect();
+
+            // pure random data will ~never have a valid enum tag, so let's help it out
+            if raw_data_length >= 4 && rng.gen::<bool>() {
+                let tag = rng.gen::<u8>() % 3;
+                raw_data[0] = tag;
+                raw_data[1] = 0;
+                raw_data[2] = 0;
+                raw_data[3] = 0;
+            }
 
             // it is extremely improbable, though theoretically possible, for random bytes to be syntactically valid
-            // so we only check that the deserialize function does not panic
-            let mut test_vote_state = VoteState::default();
-            let _ = VoteState::deserialize_into(&raw_data, &mut test_vote_state);
+            // so we only check that the parser does not panic and that it succeeds or fails exactly in line with bincode
+            let mut test_vote_state = MaybeUninit::uninit();
+            let test_res = VoteState::deserialize_into_uninit(&raw_data, &mut test_vote_state);
+            let bincode_res = bincode::deserialize::<VoteStateVersions>(&raw_data)
+                .map(|versioned| versioned.convert_to_current());
+
+            if test_res.is_err() {
+                assert!(bincode_res.is_err());
+            } else {
+                let test_vote_state = unsafe { test_vote_state.assume_init() };
+                assert_eq!(test_vote_state, bincode_res.unwrap());
+            }
+        }
+    }
+
+    #[test]
+    fn test_vote_deserialize_into_uninit_ill_sized() {
+        // provide 4x the minimum struct size in bytes to ensure we typically touch every field
+        let struct_bytes_x4 = std::mem::size_of::<VoteState>() * 4;
+        for _ in 0..1000 {
+            let raw_data: Vec<u8> = (0..struct_bytes_x4).map(|_| rand::random::<u8>()).collect();
+            let mut unstructured = Unstructured::new(&raw_data);
+
+            let original_vote_state_versions =
+                VoteStateVersions::arbitrary(&mut unstructured).unwrap();
+            let original_buf = bincode::serialize(&original_vote_state_versions).unwrap();
+
+            let mut truncated_buf = original_buf.clone();
+            let mut expanded_buf = original_buf.clone();
+
+            truncated_buf.resize(original_buf.len() - 8, 0);
+            expanded_buf.resize(original_buf.len() + 8, 0);
+
+            // truncated fails
+            let mut test_vote_state = MaybeUninit::uninit();
+            let test_res = VoteState::deserialize_into_uninit(&truncated_buf, &mut test_vote_state);
+            let bincode_res = bincode::deserialize::<VoteStateVersions>(&truncated_buf)
+                .map(|versioned| versioned.convert_to_current());
+
+            assert!(test_res.is_err());
+            assert!(bincode_res.is_err());
+
+            // expanded succeeds
+            let mut test_vote_state = MaybeUninit::uninit();
+            VoteState::deserialize_into_uninit(&expanded_buf, &mut test_vote_state).unwrap();
+            let bincode_res = bincode::deserialize::<VoteStateVersions>(&expanded_buf)
+                .map(|versioned| versioned.convert_to_current());
+
+            let test_vote_state = unsafe { test_vote_state.assume_init() };
+            assert_eq!(test_vote_state, bincode_res.unwrap());
+        }
     }
diff --git a/sdk/program/src/vote/state/vote_state_0_23_5.rs b/sdk/program/src/vote/state/vote_state_0_23_5.rs
index ae3b9207fe494e..efc3e89bcf0e37 100644
--- a/sdk/program/src/vote/state/vote_state_0_23_5.rs
+++ b/sdk/program/src/vote/state/vote_state_0_23_5.rs
@@ -1,9 +1,12 @@
 #![allow(clippy::arithmetic_side_effects)]
 use super::*;
+#[cfg(test)]
+use arbitrary::{Arbitrary, Unstructured};
 
 const MAX_ITEMS: usize = 32;
 
 #[derive(Debug, Default, Serialize, Deserialize, PartialEq, Eq, Clone)]
+#[cfg_attr(test, derive(Arbitrary))]
 pub struct VoteState0_23_5 {
     /// the node that votes in this account
     pub node_pubkey: Pubkey,
@@ -35,6 +38,7 @@ pub struct VoteState0_23_5 {
 }
 
 #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
+#[cfg_attr(test, derive(Arbitrary))]
 pub struct CircBuf<I> {
     pub buf: [I; MAX_ITEMS],
     /// next pointer
@@ -59,3 +63,46 @@ impl<I> CircBuf<I> {
         self.buf[self.idx] = item;
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_vote_deserialize_0_23_5() {
+        // base case
+        let target_vote_state = VoteState0_23_5::default();
+        let target_vote_state_versions = VoteStateVersions::V0_23_5(Box::new(target_vote_state));
+        let vote_state_buf = bincode::serialize(&target_vote_state_versions).unwrap();
+
+        let mut test_vote_state = MaybeUninit::uninit();
+        VoteState::deserialize_into_uninit(&vote_state_buf, &mut test_vote_state).unwrap();
+        let test_vote_state = unsafe { test_vote_state.assume_init() };
+
+        assert_eq!(
+            target_vote_state_versions.convert_to_current(),
+            test_vote_state
+        );
+
+        // variant
+        // provide 4x the minimum struct size in bytes to ensure we typically touch every field
+        let struct_bytes_x4 = std::mem::size_of::<VoteState0_23_5>() * 4;
+        for _ in 0..100 {
+            let raw_data: Vec<u8> = (0..struct_bytes_x4).map(|_| rand::random::<u8>()).collect();
+            let mut unstructured = Unstructured::new(&raw_data);
+
+            let arbitrary_vote_state = VoteState0_23_5::arbitrary(&mut unstructured).unwrap();
+            let target_vote_state_versions =
+                VoteStateVersions::V0_23_5(Box::new(arbitrary_vote_state));
+
+            let vote_state_buf = bincode::serialize(&target_vote_state_versions).unwrap();
+            let target_vote_state = target_vote_state_versions.convert_to_current();
+
+            let mut test_vote_state = MaybeUninit::uninit();
+            VoteState::deserialize_into_uninit(&vote_state_buf, &mut test_vote_state).unwrap();
+            let test_vote_state = unsafe { test_vote_state.assume_init() };
+
+            assert_eq!(target_vote_state, test_vote_state);
+        }
+    }
+}
diff --git a/sdk/program/src/vote/state/vote_state_1_14_11.rs b/sdk/program/src/vote/state/vote_state_1_14_11.rs
index 9a2365674171c2..285272a3ab646f 100644
--- a/sdk/program/src/vote/state/vote_state_1_14_11.rs
+++ b/sdk/program/src/vote/state/vote_state_1_14_11.rs
@@ -82,3 +82,46 @@ impl From<VoteState> for VoteState1_14_11 {
VoteState1_14_11 {
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_vote_deserialize_1_14_11() {
+        // base case
+        let target_vote_state = VoteState1_14_11::default();
+        let target_vote_state_versions = VoteStateVersions::V1_14_11(Box::new(target_vote_state));
+        let vote_state_buf = bincode::serialize(&target_vote_state_versions).unwrap();
+
+        let mut test_vote_state = MaybeUninit::uninit();
+        VoteState::deserialize_into_uninit(&vote_state_buf, &mut test_vote_state).unwrap();
+        let test_vote_state = unsafe { test_vote_state.assume_init() };
+
+        assert_eq!(
+            target_vote_state_versions.convert_to_current(),
+            test_vote_state
+        );
+
+        // variant
+        // provide 4x the minimum struct size in bytes to ensure we typically touch every field
+        let struct_bytes_x4 = std::mem::size_of::<VoteState1_14_11>() * 4;
+        for _ in 0..1000 {
+            let raw_data: Vec<u8> = (0..struct_bytes_x4).map(|_| rand::random::<u8>()).collect();
+            let mut unstructured = Unstructured::new(&raw_data);
+
+            let arbitrary_vote_state = VoteState1_14_11::arbitrary(&mut unstructured).unwrap();
+            let target_vote_state_versions =
+                VoteStateVersions::V1_14_11(Box::new(arbitrary_vote_state));
+
+            let vote_state_buf = bincode::serialize(&target_vote_state_versions).unwrap();
+            let target_vote_state = target_vote_state_versions.convert_to_current();
+
+            let mut test_vote_state = MaybeUninit::uninit();
+            VoteState::deserialize_into_uninit(&vote_state_buf, &mut test_vote_state).unwrap();
+            let test_vote_state = unsafe { test_vote_state.assume_init() };
+
+            assert_eq!(target_vote_state, test_vote_state);
+        }
+    }
+}
diff --git a/sdk/program/src/vote/state/vote_state_deserialize.rs b/sdk/program/src/vote/state/vote_state_deserialize.rs
index b457395ccbd38a..268341513a72b8 100644
--- a/sdk/program/src/vote/state/vote_state_deserialize.rs
+++ b/sdk/program/src/vote/state/vote_state_deserialize.rs
@@ -1,38 +1,68 @@
 use {
+    super::{MAX_EPOCH_CREDITS_HISTORY, MAX_LOCKOUT_HISTORY},
     crate::{
+        clock::Epoch,
         instruction::InstructionError,
         pubkey::Pubkey,
         serialize_utils::cursor::*,
-        vote::state::{BlockTimestamp, LandedVote, Lockout, VoteState, MAX_ITEMS},
+        vote::{
+            authorized_voters::AuthorizedVoters,
+            state::{BlockTimestamp, LandedVote, Lockout, VoteState, MAX_ITEMS},
+        },
     },
-    bincode::serialized_size,
-    std::io::Cursor,
+    std::{collections::VecDeque, io::Cursor, ptr::addr_of_mut},
 };
 
 pub(super) fn deserialize_vote_state_into(
     cursor: &mut Cursor<&[u8]>,
-    vote_state: &mut VoteState,
+    vote_state: *mut VoteState,
     has_latency: bool,
 ) -> Result<(), InstructionError> {
-    vote_state.node_pubkey = read_pubkey(cursor)?;
-    vote_state.authorized_withdrawer = read_pubkey(cursor)?;
-    vote_state.commission = read_u8(cursor)?;
-    read_votes_into(cursor, vote_state, has_latency)?;
-    vote_state.root_slot = read_option_u64(cursor)?;
-    read_authorized_voters_into(cursor, vote_state)?;
+    // General safety note: we must use addr_of_mut! to access the `vote_state` fields as the value
+    // is assumed to be _uninitialized_, so creating references to the state or any of its inner
+    // fields is UB.
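A minimal sketch of the pattern this note describes, using a hypothetical two-field struct rather than `VoteState` itself: each field of the `MaybeUninit` value is written through a raw pointer obtained with `addr_of_mut!`, and a reference is only created once the whole value is initialized.

use std::{mem::MaybeUninit, ptr::addr_of_mut};

struct Pair {
    a: u64,
    b: Vec<u8>,
}

fn init_pair(out: &mut MaybeUninit<Pair>) {
    let p = out.as_mut_ptr();
    // Safety: addr_of_mut! computes each field address without materializing
    // a &mut Pair, so uninitialized memory is never referenced.
    unsafe {
        addr_of_mut!((*p).a).write(42);
        addr_of_mut!((*p).b).write(vec![1, 2, 3]);
    }
}

fn main() {
    let mut slot = MaybeUninit::uninit();
    init_pair(&mut slot);
    // Safety: init_pair wrote every field, so the value is fully initialized.
    let pair = unsafe { slot.assume_init() };
    assert_eq!(pair.a, 42);
}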
+
+    read_pubkey_into(
+        cursor,
+        // Safety: if vote_state is non-null, node_pubkey is guaranteed to be valid too
+        unsafe { addr_of_mut!((*vote_state).node_pubkey) },
+    )?;
+    read_pubkey_into(
+        cursor,
+        // Safety: if vote_state is non-null, authorized_withdrawer is guaranteed to be valid too
+        unsafe { addr_of_mut!((*vote_state).authorized_withdrawer) },
+    )?;
+    let commission = read_u8(cursor)?;
+    let votes = read_votes(cursor, has_latency)?;
+    let root_slot = read_option_u64(cursor)?;
+    let authorized_voters = read_authorized_voters(cursor)?;
     read_prior_voters_into(cursor, vote_state)?;
-    read_epoch_credits_into(cursor, vote_state)?;
+    let epoch_credits = read_epoch_credits(cursor)?;
     read_last_timestamp_into(cursor, vote_state)?;
 
+    // Safety: if vote_state is non-null, all the fields are guaranteed to be
+    // valid pointers.
+    //
+    // Heap allocated collections - votes, authorized_voters and epoch_credits -
+    // are guaranteed not to leak after this point as the VoteState is fully
+    // initialized and will be regularly dropped.
+    unsafe {
+        addr_of_mut!((*vote_state).commission).write(commission);
+        addr_of_mut!((*vote_state).votes).write(votes);
+        addr_of_mut!((*vote_state).root_slot).write(root_slot);
+        addr_of_mut!((*vote_state).authorized_voters).write(authorized_voters);
+        addr_of_mut!((*vote_state).epoch_credits).write(epoch_credits);
+    }
+
     Ok(())
 }
 
-fn read_votes_into<T: AsRef<[u8]>>(
+fn read_votes<T: AsRef<[u8]>>(
     cursor: &mut Cursor<T>,
-    vote_state: &mut VoteState,
     has_latency: bool,
-) -> Result<(), InstructionError> {
-    let vote_count = read_u64(cursor)?;
+) -> Result<VecDeque<LandedVote>, InstructionError> {
+    let vote_count = read_u64(cursor)? as usize;
+    let mut votes = VecDeque::with_capacity(vote_count.min(MAX_LOCKOUT_HISTORY));
 
     for _ in 0..vote_count {
         let latency = if has_latency { read_u8(cursor)? 
} else { 0 };
 
@@ -41,102 +71,81 @@ fn read_votes_into<T: AsRef<[u8]>>(
         let confirmation_count = read_u32(cursor)?;
         let lockout = Lockout::new_with_confirmation_count(slot, confirmation_count);
 
-        vote_state.votes.push_back(LandedVote { latency, lockout });
+        votes.push_back(LandedVote { latency, lockout });
     }
 
-    Ok(())
+    Ok(votes)
 }
 
-fn read_authorized_voters_into<T: AsRef<[u8]>>(
+fn read_authorized_voters<T: AsRef<[u8]>>(
     cursor: &mut Cursor<T>,
-    vote_state: &mut VoteState,
-) -> Result<(), InstructionError> {
+) -> Result<AuthorizedVoters, InstructionError> {
     let authorized_voter_count = read_u64(cursor)?;
+    let mut authorized_voters = AuthorizedVoters::default();
 
     for _ in 0..authorized_voter_count {
         let epoch = read_u64(cursor)?;
         let authorized_voter = read_pubkey(cursor)?;
-
-        vote_state.authorized_voters.insert(epoch, authorized_voter);
+        authorized_voters.insert(epoch, authorized_voter);
     }
 
-    Ok(())
+    Ok(authorized_voters)
 }
 
 fn read_prior_voters_into<T: AsRef<[u8]>>(
     cursor: &mut Cursor<T>,
-    vote_state: &mut VoteState,
+    vote_state: *mut VoteState,
 ) -> Result<(), InstructionError> {
-    // record our position at the start of the struct
-    let prior_voters_position = cursor.position();
-
-    // `serialized_size()` must be used over `mem::size_of()` because of alignment
-    let is_empty_position = serialized_size(&vote_state.prior_voters)
-        .ok()
-        .and_then(|v| v.checked_add(prior_voters_position))
-        .and_then(|v| v.checked_sub(1))
-        .ok_or(InstructionError::InvalidAccountData)?;
-
-    // move to the end, to check if we need to parse the data
-    cursor.set_position(is_empty_position);
-
-    // if empty, we already read past the end of this struct and need to do no further work
-    // otherwise we go back to the start and proceed to decode the data
-    let is_empty = read_bool(cursor)?;
-    if !is_empty {
-        cursor.set_position(prior_voters_position);
-
-        let mut encountered_null_voter = false;
+    // Safety: if vote_state is non-null, prior_voters is guaranteed to be valid too
+    unsafe {
+        let prior_voters = addr_of_mut!((*vote_state).prior_voters);
+        let prior_voters_buf = addr_of_mut!((*prior_voters).buf) as *mut (Pubkey, Epoch, Epoch);
+
         for i in 0..MAX_ITEMS {
             let prior_voter = read_pubkey(cursor)?;
             let from_epoch = read_u64(cursor)?;
             let until_epoch = read_u64(cursor)?;
-            let item = (prior_voter, from_epoch, until_epoch);
-
-            if item == (Pubkey::default(), 0, 0) {
-                encountered_null_voter = true;
-            } else if encountered_null_voter {
-                // `prior_voters` should never be sparse
-                return Err(InstructionError::InvalidAccountData);
-            } else {
-                vote_state.prior_voters.buf[i] = item;
-            }
+
+            prior_voters_buf
+                .add(i)
+                .write((prior_voter, from_epoch, until_epoch));
         }
 
-        vote_state.prior_voters.idx = read_u64(cursor)? as usize;
-        vote_state.prior_voters.is_empty = read_bool(cursor)?;
+        (*vote_state).prior_voters.idx = read_u64(cursor)? as usize;
+        (*vote_state).prior_voters.is_empty = read_bool(cursor)?;
     }
 
     Ok(())
 }
 
-fn read_epoch_credits_into<T: AsRef<[u8]>>(
+fn read_epoch_credits<T: AsRef<[u8]>>(
     cursor: &mut Cursor<T>,
-    vote_state: &mut VoteState,
-) -> Result<(), InstructionError> {
-    let epoch_credit_count = read_u64(cursor)?;
+) -> Result<Vec<(Epoch, u64, u64)>, InstructionError> {
+    let epoch_credit_count = read_u64(cursor)? 
as usize;
+    let mut epoch_credits = Vec::with_capacity(epoch_credit_count.min(MAX_EPOCH_CREDITS_HISTORY));
 
     for _ in 0..epoch_credit_count {
         let epoch = read_u64(cursor)?;
         let credits = read_u64(cursor)?;
         let prev_credits = read_u64(cursor)?;
-
-        vote_state
-            .epoch_credits
-            .push((epoch, credits, prev_credits));
+        epoch_credits.push((epoch, credits, prev_credits));
     }
 
-    Ok(())
+    Ok(epoch_credits)
 }
 
 fn read_last_timestamp_into<T: AsRef<[u8]>>(
     cursor: &mut Cursor<T>,
-    vote_state: &mut VoteState,
+    vote_state: *mut VoteState,
 ) -> Result<(), InstructionError> {
     let slot = read_u64(cursor)?;
     let timestamp = read_i64(cursor)?;
 
-    vote_state.last_timestamp = BlockTimestamp { slot, timestamp };
+    let last_timestamp = BlockTimestamp { slot, timestamp };
+
+    // Safety: if vote_state is non-null, last_timestamp is guaranteed to be valid too
+    unsafe {
+        addr_of_mut!((*vote_state).last_timestamp).write(last_timestamp);
+    }
 
     Ok(())
 }
diff --git a/sdk/sbf/scripts/install.sh b/sdk/sbf/scripts/install.sh
index 12343a413ed7b7..e51f46d58c418a 100755
--- a/sdk/sbf/scripts/install.sh
+++ b/sdk/sbf/scripts/install.sh
@@ -109,7 +109,7 @@ if [[ ! -e criterion-$version.md || ! -e criterion ]]; then
 fi
 
 # Install platform tools
-version=v1.41
+version=v1.42
 if [[ ! -e platform-tools-$version.md || ! -e platform-tools ]]; then
   (
     set -e
diff --git a/sdk/src/commitment_config.rs b/sdk/src/commitment_config.rs
index f0068659f4d7b4..7aca56b8947dbd 100644
--- a/sdk/src/commitment_config.rs
+++ b/sdk/src/commitment_config.rs
@@ -1,6 +1,5 @@
 //! Definitions of commitment levels.
 
-#![allow(deprecated)]
 #![cfg(feature = "full")]
 
 use {std::str::FromStr, thiserror::Error};
@@ -12,56 +11,6 @@ pub struct CommitmentConfig {
 }
 
 impl CommitmentConfig {
-    #[deprecated(
-        since = "1.5.5",
-        note = "Please use CommitmentConfig::processed() instead"
-    )]
-    pub fn recent() -> Self {
-        Self {
-            commitment: CommitmentLevel::Recent,
-        }
-    }
-
-    #[deprecated(
-        since = "1.5.5",
-        note = "Please use CommitmentConfig::finalized() instead"
-    )]
-    pub fn max() -> Self {
-        Self {
-            commitment: CommitmentLevel::Max,
-        }
-    }
-
-    #[deprecated(
-        since = "1.5.5",
-        note = "Please use CommitmentConfig::finalized() instead"
-    )]
-    pub fn root() -> Self {
-        Self {
-            commitment: CommitmentLevel::Root,
-        }
-    }
-
-    #[deprecated(
-        since = "1.5.5",
-        note = "Please use CommitmentConfig::confirmed() instead"
-    )]
-    pub fn single() -> Self {
-        Self {
-            commitment: CommitmentLevel::Single,
-        }
-    }
-
-    #[deprecated(
-        since = "1.5.5",
-        note = "Please use CommitmentConfig::confirmed() instead"
-    )]
-    pub fn single_gossip() -> Self {
-        Self {
-            commitment: CommitmentLevel::SingleGossip,
-        }
-    }
-
     pub const fn finalized() -> Self {
         Self {
             commitment: CommitmentLevel::Finalized,
@@ -89,37 +38,27 @@ impl CommitmentConfig {
     }
 
     pub fn is_finalized(&self) -> bool {
-        matches!(
-            &self.commitment,
-            CommitmentLevel::Finalized | CommitmentLevel::Max | CommitmentLevel::Root
-        )
+        self.commitment == CommitmentLevel::Finalized
     }
 
     pub fn is_confirmed(&self) -> bool {
-        matches!(
-            &self.commitment,
-            CommitmentLevel::Confirmed | CommitmentLevel::SingleGossip | CommitmentLevel::Single
-        )
+        self.commitment == CommitmentLevel::Confirmed
     }
 
     pub fn is_processed(&self) -> bool {
-        matches!(
-            &self.commitment,
-            CommitmentLevel::Processed | CommitmentLevel::Recent
-        )
+        self.commitment == CommitmentLevel::Processed
    }
 
     pub fn is_at_least_confirmed(&self) -> bool {
         self.is_confirmed() || self.is_finalized()
     }
 
+    #[deprecated(
+        since = "2.0.2",
+        note = "Returns self. Please do not use. 
Will be removed in the future." + )] pub fn use_deprecated_commitment(commitment: CommitmentConfig) -> Self { - match commitment.commitment { - CommitmentLevel::Finalized => CommitmentConfig::max(), - CommitmentLevel::Confirmed => CommitmentConfig::single_gossip(), - CommitmentLevel::Processed => CommitmentConfig::recent(), - _ => commitment, - } + commitment } } @@ -138,48 +77,6 @@ impl FromStr for CommitmentConfig { /// finalized. When querying the ledger state, use lower levels of commitment to report progress and higher /// levels to ensure state changes will not be rolled back. pub enum CommitmentLevel { - /// (DEPRECATED) The highest slot having reached max vote lockout, as recognized by a supermajority of the cluster. - #[deprecated( - since = "1.5.5", - note = "Please use CommitmentLevel::Finalized instead" - )] - Max, - - /// (DEPRECATED) The highest slot of the heaviest fork. Ledger state at this slot is not derived from a finalized - /// block, but if multiple forks are present, is from the fork the validator believes is most likely - /// to finalize. - #[deprecated( - since = "1.5.5", - note = "Please use CommitmentLevel::Processed instead" - )] - Recent, - - /// (DEPRECATED) The highest slot having reached max vote lockout. - #[deprecated( - since = "1.5.5", - note = "Please use CommitmentLevel::Finalized instead" - )] - Root, - - /// (DEPRECATED) The highest slot having reached 1 confirmation by supermajority of the cluster. - #[deprecated( - since = "1.5.5", - note = "Please use CommitmentLevel::Confirmed instead" - )] - Single, - - /// (DEPRECATED) The highest slot that has been voted on by supermajority of the cluster - /// This differs from `single` in that: - /// 1) It incorporates votes from gossip and replay. - /// 2) It does not count votes on descendants of a block, only direct votes on that block. - /// 3) This confirmation level also upholds "optimistic confirmation" guarantees in - /// release 1.3 and onwards. - #[deprecated( - since = "1.5.5", - note = "Please use CommitmentLevel::Confirmed instead" - )] - SingleGossip, - /// The highest slot of the heaviest fork processed by the node. Ledger state at this slot is /// not derived from a confirmed or finalized block, but if multiple forks are present, is from /// the fork the validator believes is most likely to finalize. 
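With only `processed`, `confirmed`, and `finalized` left, each predicate now matches exactly one level. A brief usage sketch of the surviving API (assuming the usual `solana_sdk::commitment_config` import path):

use solana_sdk::commitment_config::{CommitmentConfig, CommitmentLevel};
use std::str::FromStr;

fn main() {
    // string round-trip for the remaining levels
    assert_eq!(
        CommitmentLevel::from_str("confirmed").unwrap(),
        CommitmentLevel::Confirmed
    );
    assert_eq!(CommitmentLevel::Finalized.to_string(), "finalized");

    // each predicate checks a single level; is_at_least_confirmed composes two
    let config = CommitmentConfig::confirmed();
    assert!(config.is_confirmed());
    assert!(config.is_at_least_confirmed());
    assert!(!config.is_finalized());
}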
@@ -207,11 +104,6 @@ impl FromStr for CommitmentLevel {
 
     fn from_str(s: &str) -> Result<Self, Self::Err> {
         match s {
-            "max" => Ok(CommitmentLevel::Max),
-            "recent" => Ok(CommitmentLevel::Recent),
-            "root" => Ok(CommitmentLevel::Root),
-            "single" => Ok(CommitmentLevel::Single),
-            "singleGossip" => Ok(CommitmentLevel::SingleGossip),
             "processed" => Ok(CommitmentLevel::Processed),
             "confirmed" => Ok(CommitmentLevel::Confirmed),
             "finalized" => Ok(CommitmentLevel::Finalized),
@@ -223,11 +115,6 @@ impl FromStr for CommitmentLevel {
 impl std::fmt::Display for CommitmentLevel {
     fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
         let s = match self {
-            CommitmentLevel::Max => "max",
-            CommitmentLevel::Recent => "recent",
-            CommitmentLevel::Root => "root",
-            CommitmentLevel::Single => "single",
-            CommitmentLevel::SingleGossip => "singleGossip",
             CommitmentLevel::Processed => "processed",
             CommitmentLevel::Confirmed => "confirmed",
             CommitmentLevel::Finalized => "finalized",
diff --git a/sdk/src/compute_budget.rs b/sdk/src/compute_budget.rs
index a0b766b3514019..8a3614f07e8f93 100644
--- a/sdk/src/compute_budget.rs
+++ b/sdk/src/compute_budget.rs
@@ -12,7 +12,8 @@ crate::declare_id!("ComputeBudget111111111111111111111111111111");
 
 /// Compute Budget Instructions
 #[cfg_attr(feature = "frozen-abi", derive(AbiExample, AbiEnumVisitor))]
-#[derive(BorshDeserialize, BorshSerialize, Clone, Debug, Deserialize, PartialEq, Eq, Serialize)]
+#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))]
+#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)]
 pub enum ComputeBudgetInstruction {
     Unused, // deprecated variant, reserved value.
     /// Request a specific transaction-wide program heap region size in bytes.
diff --git a/sdk/src/ed25519_instruction.rs b/sdk/src/ed25519_instruction.rs
index 10ae533f478171..bdc4d4d0c4681b 100644
--- a/sdk/src/ed25519_instruction.rs
+++ b/sdk/src/ed25519_instruction.rs
@@ -5,7 +5,11 @@
 #![cfg(feature = "full")]
 
 use {
-    crate::{feature_set::FeatureSet, instruction::Instruction, precompiles::PrecompileError},
+    crate::{
+        feature_set::{ed25519_precompile_verify_strict, FeatureSet},
+        instruction::Instruction,
+        precompiles::PrecompileError,
+    },
     bytemuck::bytes_of,
     bytemuck_derive::{Pod, Zeroable},
     ed25519_dalek::{ed25519::signature::Signature, Signer, Verifier},
@@ -86,7 +90,7 @@ pub fn new_ed25519_instruction(keypair: &ed25519_dalek::Keypair, message: &[u8])
 pub fn verify(
     data: &[u8],
     instruction_datas: &[&[u8]],
-    _feature_set: &FeatureSet,
+    feature_set: &FeatureSet,
 ) -> Result<(), PrecompileError> {
     if data.len() < SIGNATURE_OFFSETS_START {
         return Err(PrecompileError::InvalidInstructionDataSize);
@@ -145,9 +149,15 @@ pub fn verify(
             offsets.message_data_size as usize,
         )?;
 
-        publickey
-            .verify(message, &signature)
-            .map_err(|_| PrecompileError::InvalidSignature)?;
+        if feature_set.is_active(&ed25519_precompile_verify_strict::id()) {
+            publickey
+                .verify_strict(message, &signature)
+                .map_err(|_| PrecompileError::InvalidSignature)?;
+        } else {
+            publickey
+                .verify(message, &signature)
+                .map_err(|_| PrecompileError::InvalidSignature)?;
+        }
     }
     Ok(())
 }
@@ -189,9 +199,64 @@ pub mod test {
             signature::{Keypair, Signer},
             transaction::Transaction,
         },
+        hex,
         rand0_7::{thread_rng, Rng},
     };
 
+    pub fn new_ed25519_instruction_raw(
+        pubkey: &[u8],
+        signature: &[u8],
+        message: &[u8],
+    ) -> Instruction {
+        assert_eq!(pubkey.len(), PUBKEY_SERIALIZED_SIZE);
+        assert_eq!(signature.len(), SIGNATURE_SERIALIZED_SIZE);
+
+        let mut instruction_data = Vec::with_capacity(
+            DATA_START 
+ .saturating_add(SIGNATURE_SERIALIZED_SIZE) + .saturating_add(PUBKEY_SERIALIZED_SIZE) + .saturating_add(message.len()), + ); + + let num_signatures: u8 = 1; + let public_key_offset = DATA_START; + let signature_offset = public_key_offset.saturating_add(PUBKEY_SERIALIZED_SIZE); + let message_data_offset = signature_offset.saturating_add(SIGNATURE_SERIALIZED_SIZE); + + // add padding byte so that offset structure is aligned + instruction_data.extend_from_slice(bytes_of(&[num_signatures, 0])); + + let offsets = Ed25519SignatureOffsets { + signature_offset: signature_offset as u16, + signature_instruction_index: u16::MAX, + public_key_offset: public_key_offset as u16, + public_key_instruction_index: u16::MAX, + message_data_offset: message_data_offset as u16, + message_data_size: message.len() as u16, + message_instruction_index: u16::MAX, + }; + + instruction_data.extend_from_slice(bytes_of(&offsets)); + + debug_assert_eq!(instruction_data.len(), public_key_offset); + + instruction_data.extend_from_slice(pubkey); + + debug_assert_eq!(instruction_data.len(), signature_offset); + + instruction_data.extend_from_slice(signature); + + debug_assert_eq!(instruction_data.len(), message_data_offset); + + instruction_data.extend_from_slice(message); + + Instruction { + program_id: solana_sdk::ed25519_program::id(), + accounts: vec![], + data: instruction_data, + } + } + fn test_case( num_signatures: u16, offsets: &Ed25519SignatureOffsets, @@ -380,4 +445,50 @@ pub mod test { ); assert!(tx.verify_precompiles(&feature_set).is_err()); } + + #[test] + fn test_ed25519_malleability() { + solana_logger::setup(); + let mint_keypair = Keypair::new(); + + // sig created via ed25519_dalek: both pass + let privkey = ed25519_dalek::Keypair::generate(&mut thread_rng()); + let message_arr = b"hello"; + let instruction = new_ed25519_instruction(&privkey, message_arr); + let tx = Transaction::new_signed_with_payer( + &[instruction.clone()], + Some(&mint_keypair.pubkey()), + &[&mint_keypair], + Hash::default(), + ); + + let feature_set = FeatureSet::default(); + assert!(tx.verify_precompiles(&feature_set).is_ok()); + + let feature_set = FeatureSet::all_enabled(); + assert!(tx.verify_precompiles(&feature_set).is_ok()); + + // malleable sig: verify_strict does NOT pass + // for example, test number 5: + // https://github.com/C2SP/CCTV/tree/main/ed25519 + // R has low order (in fact R == 0) + let pubkey = + &hex::decode("10eb7c3acfb2bed3e0d6ab89bf5a3d6afddd1176ce4812e38d9fd485058fdb1f") + .unwrap(); + let signature = &hex::decode("00000000000000000000000000000000000000000000000000000000000000009472a69cd9a701a50d130ed52189e2455b23767db52cacb8716fb896ffeeac09").unwrap(); + let message = b"ed25519vectors 3"; + let instruction = new_ed25519_instruction_raw(pubkey, signature, message); + let tx = Transaction::new_signed_with_payer( + &[instruction.clone()], + Some(&mint_keypair.pubkey()), + &[&mint_keypair], + Hash::default(), + ); + + let feature_set = FeatureSet::default(); + assert!(tx.verify_precompiles(&feature_set).is_ok()); + + let feature_set = FeatureSet::all_enabled(); + assert!(tx.verify_precompiles(&feature_set).is_err()); // verify_strict does NOT pass + } } diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index a2732f37e1e526..887d2e547f19b2 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -282,10 +282,6 @@ pub mod stake_deactivate_delinquent_instruction { solana_sdk::declare_id!("437r62HoAdUb63amq3D7ENnBLDhHT2xY8eFkLJYVKK4x"); } -pub mod stake_redelegate_instruction { - 
solana_sdk::declare_id!("2KKG3C6RBnxQo9jVVrbzsoSh41TDXLK7gBc9gduyxSzW"); -} - pub mod vote_withdraw_authority_may_change_authorized_voter { solana_sdk::declare_id!("AVZS3ZsN4gi6Rkx2QUibYuSJG3S6QHib7xCYhG6vGJxU"); } @@ -837,6 +833,14 @@ pub mod move_stake_and_move_lamports_ixs { solana_sdk::declare_id!("7bTK6Jis8Xpfrs8ZoUfiMDPazTcdPcTWheZFJTA5Z6X4"); } +pub mod ed25519_precompile_verify_strict { + solana_sdk::declare_id!("ed9tNscbWLYBooxWA7FE2B5KHWs8A6sxfY8EzezEcoo"); +} + +pub mod vote_only_retransmitter_signed_fec_sets { + solana_sdk::declare_id!("RfEcA95xnhuwooVAhUUksEJLZBF7xKCLuqrJoqk4Zph"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -942,7 +946,6 @@ lazy_static! { (nonce_must_be_authorized::id(), "nonce must be authorized"), (nonce_must_be_advanceable::id(), "durable nonces must be advanceable"), (vote_authorize_with_seed::id(), "An instruction you can use to change a vote accounts authority when the current authority is a derived key #25860"), - (stake_redelegate_instruction::id(), "enable the redelegate stake instruction #26294"), (preserve_rent_epoch_for_rent_exempt_accounts::id(), "preserve rent epoch for rent exempt accounts #26479"), (enable_bpf_loader_extend_program_ix::id(), "enable bpf upgradeable loader ExtendProgram instruction #25234"), (skip_rent_rewrites::id(), "skip rewriting rent exempt accounts during rent collection #26491"), @@ -1041,6 +1044,8 @@ lazy_static! { (zk_elgamal_proof_program_enabled::id(), "Enable ZkElGamalProof program SIMD-0153"), (verify_retransmitter_signature::id(), "Verify retransmitter signature #1840"), (move_stake_and_move_lamports_ixs::id(), "Enable MoveStake and MoveLamports stake program instructions #1610"), + (ed25519_precompile_verify_strict::id(), "Use strict verification in ed25519 precompile SIMD-0152"), + (vote_only_retransmitter_signed_fec_sets::id(), "vote only on retransmitter signed fec sets"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() diff --git a/sdk/src/fee.rs b/sdk/src/fee.rs index e7bbaa15ebec93..24e61e1d14848f 100644 --- a/sdk/src/fee.rs +++ b/sdk/src/fee.rs @@ -1,8 +1,8 @@ //! Fee structures. -use crate::native_token::sol_to_lamports; #[cfg(not(target_os = "solana"))] use solana_program::message::SanitizedMessage; +use {crate::native_token::sol_to_lamports, std::num::NonZeroU32}; /// A fee and its associated compute unit limit #[derive(Debug, Default, Clone, Eq, PartialEq)] @@ -14,7 +14,7 @@ pub struct FeeBin { } pub struct FeeBudgetLimits { - pub loaded_accounts_data_size_limit: usize, + pub loaded_accounts_data_size_limit: NonZeroU32, pub heap_cost: u64, pub compute_unit_limit: u64, pub prioritization_fee: u64, @@ -39,8 +39,7 @@ pub struct FeeDetails { } impl FeeDetails { - #[cfg(feature = "dev-context-only-utils")] - pub fn new_for_tests( + pub fn new( transaction_fee: u64, prioritization_fee: u64, remove_rounding_in_fee_calculation: bool, @@ -115,7 +114,7 @@ impl FeeStructure { } pub fn calculate_memory_usage_cost( - loaded_accounts_data_size_limit: usize, + loaded_accounts_data_size_limit: u32, heap_cost: u64, ) -> u64 { (loaded_accounts_data_size_limit as u64) @@ -126,6 +125,10 @@ impl FeeStructure { /// Calculate fee for `SanitizedMessage` #[cfg(not(target_os = "solana"))] + #[deprecated( + since = "2.1.0", + note = "Please use `solana_fee::calculate_fee` instead." 
+ )] pub fn calculate_fee( &self, message: &SanitizedMessage, @@ -134,6 +137,7 @@ impl FeeStructure { include_loaded_account_data_size_in_fee: bool, remove_rounding_in_fee_calculation: bool, ) -> u64 { + #[allow(deprecated)] self.calculate_fee_details( message, lamports_per_signature, @@ -146,6 +150,10 @@ impl FeeStructure { /// Calculate fee details for `SanitizedMessage` #[cfg(not(target_os = "solana"))] + #[deprecated( + since = "2.1.0", + note = "Please use `solana_fee::calculate_fee_details` instead." + )] pub fn calculate_fee_details( &self, message: &SanitizedMessage, @@ -161,7 +169,7 @@ impl FeeStructure { } let signature_fee = message - .num_signatures() + .num_total_signatures() .saturating_mul(self.lamports_per_signature); let write_lock_fee = message .num_write_locks() @@ -171,7 +179,7 @@ impl FeeStructure { // requested_loaded_account_data_size let loaded_accounts_data_size_cost = if include_loaded_account_data_size_in_fee { FeeStructure::calculate_memory_usage_cost( - budget_limits.loaded_accounts_data_size_limit, + budget_limits.loaded_accounts_data_size_limit.get(), budget_limits.heap_cost, ) } else { @@ -221,7 +229,7 @@ mod tests { #[test] fn test_calculate_memory_usage_cost() { let heap_cost = 99; - const K: usize = 1024; + const K: u32 = 1024; // accounts data size are priced in block of 32K, ... diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 9c97a2c3d9a2ed..c021789b507da8 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -41,25 +41,23 @@ pub use signer::signers; pub use solana_program::program_stubs; // These solana_program imports could be *-imported, but that causes a bunch of // confusing duplication in the docs due to a rustdoc bug. #26211 +#[allow(deprecated)] +pub use solana_program::sdk_ids; #[cfg(target_arch = "wasm32")] pub use solana_program::wasm_bindgen; pub use solana_program::{ - account_info, address_lookup_table, alt_bn128, big_mod_exp, blake3, bpf_loader, - bpf_loader_deprecated, bpf_loader_upgradeable, clock, config, custom_heap_default, - custom_panic_default, debug_account_data, declare_deprecated_sysvar_id, declare_sysvar_id, - decode_error, ed25519_program, epoch_rewards, epoch_schedule, fee_calculator, impl_sysvar_get, - incinerator, instruction, keccak, lamports, loader_instruction, loader_upgradeable_instruction, - loader_v4, loader_v4_instruction, message, msg, native_token, nonce, program, program_error, - program_memory, program_option, program_pack, rent, secp256k1_program, secp256k1_recover, - serde_varint, serialize_utils, short_vec, slot_hashes, slot_history, stable_layout, stake, - stake_history, syscalls, system_instruction, system_program, sysvar, unchecked_div_by_const, - vote, + account_info, address_lookup_table, big_mod_exp, blake3, bpf_loader, bpf_loader_deprecated, + bpf_loader_upgradeable, clock, config, custom_heap_default, custom_panic_default, + debug_account_data, declare_deprecated_sysvar_id, declare_sysvar_id, ed25519_program, + epoch_rewards, epoch_schedule, fee_calculator, impl_sysvar_get, incinerator, instruction, + keccak, lamports, loader_instruction, loader_upgradeable_instruction, loader_v4, + loader_v4_instruction, message, msg, native_token, nonce, program, program_error, + program_option, program_pack, rent, secp256k1_program, serde_varint, serialize_utils, + slot_hashes, slot_history, stable_layout, stake, stake_history, syscalls, system_instruction, + system_program, sysvar, unchecked_div_by_const, vote, }; -#[allow(deprecated)] -pub use solana_program::{address_lookup_table_account, sdk_ids}; 
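The crate extractions in this file all use the same shim, visible in the additions just below: the old `solana_sdk` path survives as a deprecated re-export, so downstream code keeps compiling but gets a warning pointing at the standalone crate. Reduced to its essentials (hypothetical crate and alias names):

// one attribute on a `pub use` is the whole migration shim
#[deprecated(since = "2.1.0", note = "Use `solana-example` crate instead")]
pub use solana_example as example;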
#[cfg(feature = "borsh")] pub use solana_program::{borsh, borsh0_10, borsh1}; - pub mod account; pub mod account_utils; pub mod client; @@ -93,7 +91,6 @@ pub mod precompiles; pub mod program_utils; pub mod pubkey; pub mod quic; -pub mod recent_blockhashes_account; pub mod rent_collector; pub mod rent_debits; pub mod reserved_account_keys; @@ -112,6 +109,12 @@ pub mod transaction_context; pub mod transport; pub mod wasm; +#[deprecated(since = "2.1.0", note = "Use `solana-bn254` crate instead")] +pub use solana_bn254 as alt_bn128; +#[deprecated(since = "2.1.0", note = "Use `solana-decode-error` crate instead")] +pub use solana_decode_error as decode_error; +#[deprecated(since = "2.1.0", note = "Use `solana-program-memory` crate instead")] +pub use solana_program_memory as program_memory; #[deprecated(since = "2.1.0", note = "Use `solana-sanitize` crate instead")] pub use solana_sanitize as sanitize; /// Same as `declare_id` except report that this id has been deprecated. @@ -156,6 +159,10 @@ pub use solana_sdk_macro::declare_id; pub use solana_sdk_macro::pubkey; /// Convenience macro to define multiple static public keys. pub use solana_sdk_macro::pubkeys; +#[deprecated(since = "2.1.0", note = "Use `solana-secp256k1-recover` crate instead")] +pub use solana_secp256k1_recover as secp256k1_recover; +#[deprecated(since = "2.1.0", note = "Use `solana-short-vec` crate instead")] +pub use solana_short_vec as short_vec; /// Convenience macro for `AddAssign` with saturating arithmetic. /// Replace by `std::num::Saturating` once stable diff --git a/sdk/src/packet.rs b/sdk/src/packet.rs index 0f45dd4e6311c0..8d3ef8b3e539cf 100644 --- a/sdk/src/packet.rs +++ b/sdk/src/packet.rs @@ -58,7 +58,7 @@ impl ::solana_frozen_abi::abi_example::AbiExample for PacketFlags { } #[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] -impl ::solana_frozen_abi::abi_example::IgnoreAsHelper for PacketFlags {} +impl ::solana_frozen_abi::abi_example::TransparentAsHelper for PacketFlags {} #[cfg(all(RUSTC_WITH_SPECIALIZATION, feature = "frozen-abi"))] impl ::solana_frozen_abi::abi_example::EvenAsOpaque for PacketFlags { diff --git a/sdk/src/precompiles.rs b/sdk/src/precompiles.rs index 34885b05e287a9..04e1e2ea2ec389 100644 --- a/sdk/src/precompiles.rs +++ b/sdk/src/precompiles.rs @@ -3,11 +3,9 @@ #![cfg(feature = "full")] use { - crate::{ - decode_error::DecodeError, feature_set::FeatureSet, instruction::CompiledInstruction, - pubkey::Pubkey, - }, + crate::{feature_set::FeatureSet, instruction::CompiledInstruction, pubkey::Pubkey}, lazy_static::lazy_static, + solana_decode_error::DecodeError, thiserror::Error, }; diff --git a/sdk/src/signer/mod.rs b/sdk/src/signer/mod.rs index 4c90d2d10ff293..66e95267fa2ca7 100644 --- a/sdk/src/signer/mod.rs +++ b/sdk/src/signer/mod.rs @@ -229,7 +229,7 @@ mod tests { let _ref_signer = Foo { signer: &Keypair::new(), }; - foo(&Keypair::new()); + foo(Keypair::new()); let _box_signer = Foo { signer: Box::new(Keypair::new()), diff --git a/sdk/src/transaction/mod.rs b/sdk/src/transaction/mod.rs index 44fa7630b06302..535c8dd9bf665e 100644 --- a/sdk/src/transaction/mod.rs +++ b/sdk/src/transaction/mod.rs @@ -122,7 +122,6 @@ use { precompiles::verify_if_precompile, program_utils::limited_deserialize, pubkey::Pubkey, - short_vec, signature::{Signature, SignerError}, signers::Signers, }, @@ -130,6 +129,7 @@ use { solana_program::{system_instruction::SystemInstruction, system_program}, solana_sanitize::{Sanitize, SanitizeError}, solana_sdk::feature_set, + solana_short_vec as short_vec, 
std::result,
 };
 
@@ -172,7 +172,7 @@ pub type Result<T> = result::Result<T, TransactionError>;
 #[cfg_attr(
     feature = "frozen-abi",
     derive(AbiExample),
-    frozen_abi(digest = "FZtncnS1Xk8ghHfKiXE5oGiUbw2wJhmfXQuNgQR3K6Mc")
+    frozen_abi(digest = "5LPHxp7TKPeV7GZ9pcT4NxNxJa3ZhvToDekCMAPvNWLv")
 )]
 #[derive(Debug, PartialEq, Default, Eq, Clone, Serialize, Deserialize)]
 pub struct Transaction {
@@ -200,7 +200,7 @@ pub struct Transaction {
 #[cfg_attr(
     feature = "frozen-abi",
     derive(AbiExample),
-    frozen_abi(digest = "FZtncnS1Xk8ghHfKiXE5oGiUbw2wJhmfXQuNgQR3K6Mc")
+    frozen_abi(digest = "5LPHxp7TKPeV7GZ9pcT4NxNxJa3ZhvToDekCMAPvNWLv")
 )]
 #[derive(Debug, PartialEq, Default, Eq, Clone, Serialize, Deserialize)]
 pub struct Transaction {
diff --git a/sdk/src/transaction/versioned/mod.rs b/sdk/src/transaction/versioned/mod.rs
index 94d4ca2cb3b22f..1305b6ab514587 100644
--- a/sdk/src/transaction/versioned/mod.rs
+++ b/sdk/src/transaction/versioned/mod.rs
@@ -6,7 +6,6 @@ use {
     crate::{
         hash::Hash,
         message::VersionedMessage,
-        short_vec,
         signature::Signature,
         signer::SignerError,
         signers::Signers,
@@ -14,6 +13,7 @@ use {
     },
     serde::Serialize,
     solana_sanitize::SanitizeError,
+    solana_short_vec as short_vec,
     std::cmp::Ordering,
 };
diff --git a/send-transaction-service/Cargo.toml b/send-transaction-service/Cargo.toml
index 35e76524d9017a..a69c366a358fdc 100644
--- a/send-transaction-service/Cargo.toml
+++ b/send-transaction-service/Cargo.toml
@@ -13,6 +13,7 @@ edition = { workspace = true }
 crossbeam-channel = { workspace = true }
 log = { workspace = true }
 solana-client = { workspace = true }
+solana-connection-cache = { workspace = true }
 solana-measure = { workspace = true }
 solana-metrics = { workspace = true }
 solana-runtime = { workspace = true }
diff --git a/send-transaction-service/src/send_transaction_service.rs b/send-transaction-service/src/send_transaction_service.rs
index abe53b236d2e75..8cc21b12359639 100644
--- a/send-transaction-service/src/send_transaction_service.rs
+++ b/send-transaction-service/src/send_transaction_service.rs
@@ -2,10 +2,8 @@ use {
     crate::tpu_info::TpuInfo,
     crossbeam_channel::{Receiver, RecvTimeoutError},
     log::*,
-    solana_client::{
-        connection_cache::{ConnectionCache, Protocol},
-        tpu_connection::TpuConnection,
-    },
+    solana_client::connection_cache::{ConnectionCache, Protocol},
+    solana_connection_cache::client_connection::ClientConnection as TpuConnection,
     solana_measure::measure::Measure,
     solana_runtime::{bank::Bank, bank_forks::BankForks},
     solana_sdk::{
diff --git a/send-transaction-service/src/tpu_info.rs b/send-transaction-service/src/tpu_info.rs
index 456a0ced0d3304..262a4b74babe5c 100644
--- a/send-transaction-service/src/tpu_info.rs
+++ b/send-transaction-service/src/tpu_info.rs
@@ -1,4 +1,7 @@
-use {solana_client::connection_cache::Protocol, solana_sdk::clock::Slot, std::net::SocketAddr};
+use {
+    solana_connection_cache::connection_cache::Protocol, solana_sdk::clock::Slot,
+    std::net::SocketAddr,
+};
 
 pub trait TpuInfo {
     fn refresh_recent_peers(&mut self);
diff --git a/short-vec/Cargo.toml b/short-vec/Cargo.toml
new file mode 100644
index 00000000000000..32af444ac95e9c
--- /dev/null
+++ b/short-vec/Cargo.toml
@@ -0,0 +1,29 @@
+[package]
+name = "solana-short-vec"
+description = "Solana compact serde-encoding of vectors with small length." 
+documentation = "https://docs.rs/solana-short-vec" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[build-dependencies] +rustc_version = { workspace = true } + +[dependencies] +serde = { workspace = true } +solana-frozen-abi = { workspace = true, optional = true } +solana-frozen-abi-macro = { workspace = true, optional = true } + +[dev-dependencies] +assert_matches = { workspace = true } +bincode = { workspace = true } +serde_json = { workspace = true } + +[features] +frozen-abi = ["dep:solana-frozen-abi", "dep:solana-frozen-abi-macro"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/short-vec/build.rs b/short-vec/build.rs new file mode 120000 index 00000000000000..ae66c237c5f4fd --- /dev/null +++ b/short-vec/build.rs @@ -0,0 +1 @@ +../frozen-abi/build.rs \ No newline at end of file diff --git a/sdk/program/src/short_vec.rs b/short-vec/src/lib.rs similarity index 97% rename from sdk/program/src/short_vec.rs rename to short-vec/src/lib.rs index a90e3cf0426111..f1f9f554e28882 100644 --- a/sdk/program/src/short_vec.rs +++ b/short-vec/src/lib.rs @@ -1,6 +1,8 @@ //! Compact serde-encoding of vectors with small length. - +#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(min_specialization))] #![allow(clippy::arithmetic_side_effects)] +#[cfg(feature = "frozen-abi")] +use solana_frozen_abi_macro::AbiExample; use { serde::{ de::{self, Deserializer, SeqAccess, Visitor}, @@ -13,8 +15,8 @@ use { /// Same as u16, but serialized with 1 to 3 bytes. If the value is above /// 0x7f, the top bit is set and the remaining value is stored in the next /// bytes. Each byte follows the same pattern until the 3rd byte. The 3rd -/// byte, if needed, uses all 8 bits to store the last byte of the original -/// value. +/// byte may only have the 2 least-significant bits set, otherwise the encoded +/// value will overflow the u16. 
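A sketch of the encoding that doc comment describes (an illustration, not the crate's implementation): each byte carries 7 payload bits plus a continuation bit, least-significant group first, so a u16 needs at most three bytes and the third byte can only use its two low bits.

fn encode_short_u16(mut n: u16) -> Vec<u8> {
    let mut out = Vec::with_capacity(3);
    loop {
        let mut byte = (n & 0x7f) as u8;
        n >>= 7;
        if n != 0 {
            // more bits follow: set the continuation bit
            byte |= 0x80;
        }
        out.push(byte);
        if n == 0 {
            return out;
        }
    }
}

// 0x007f -> [0x7f]; 0x0080 -> [0x80, 0x01]; 0xffff -> [0xff, 0xff, 0x03]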
#[cfg_attr(feature = "frozen-abi", derive(AbiExample))] pub struct ShortU16(pub u16); diff --git a/stake-accounts/src/stake_accounts.rs b/stake-accounts/src/stake_accounts.rs index 084b7837cd99f4..bcea83eee9a627 100644 --- a/stake-accounts/src/stake_accounts.rs +++ b/stake-accounts/src/stake_accounts.rs @@ -283,7 +283,7 @@ pub(crate) fn move_stake_accounts( mod tests { use { super::*, - solana_runtime::{bank::Bank, bank_client::BankClient}, + solana_runtime::{bank::Bank, bank_client::BankClient, bank_forks::BankForks}, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, client::SyncClient, @@ -292,16 +292,16 @@ mod tests { stake::state::StakeStateV2, }, solana_stake_program::stake_state, - std::sync::Arc, + std::sync::{Arc, RwLock}, }; - fn create_bank(lamports: u64) -> (Arc, Keypair, u64, u64) { + fn create_bank(lamports: u64) -> (Arc, Arc>, Keypair, u64, u64) { let (mut genesis_config, mint_keypair) = create_genesis_config(lamports); genesis_config.fee_rate_governor = solana_sdk::fee_calculator::FeeRateGovernor::new(0, 0); - let bank = Bank::new_with_bank_forks_for_tests(&genesis_config).0; + let (bank, bank_forks) = Bank::new_with_bank_forks_for_tests(&genesis_config); let stake_rent = bank.get_minimum_balance_for_rent_exemption(StakeStateV2::size_of()); let system_rent = bank.get_minimum_balance_for_rent_exemption(0); - (bank, mint_keypair, stake_rent, system_rent) + (bank, bank_forks, mint_keypair, stake_rent, system_rent) } fn create_account( @@ -355,7 +355,7 @@ mod tests { #[test] fn test_new_derived_stake_account() { - let (bank, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); + let (bank, _bank_forks, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); let funding_pubkey = funding_keypair.pubkey(); let bank_client = BankClient::new_shared(bank); let fee_payer_keypair = create_account(&bank_client, &funding_keypair, system_rent); @@ -392,7 +392,7 @@ mod tests { #[test] fn test_authorize_stake_accounts() { - let (bank, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); + let (bank, _bank_forks, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); let funding_pubkey = funding_keypair.pubkey(); let bank_client = BankClient::new_shared(bank); let fee_payer_keypair = create_account(&bank_client, &funding_keypair, system_rent); @@ -454,7 +454,7 @@ mod tests { #[test] fn test_lockup_stake_accounts() { - let (bank, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); + let (bank, _bank_forks, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); let funding_pubkey = funding_keypair.pubkey(); let bank_client = BankClient::new_shared(bank); let fee_payer_keypair = create_account(&bank_client, &funding_keypair, system_rent); @@ -545,7 +545,7 @@ mod tests { #[test] fn test_rebase_stake_accounts() { - let (bank, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); + let (bank, _bank_forks, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); let funding_pubkey = funding_keypair.pubkey(); let bank_client = BankClient::new_shared(bank); let fee_payer_keypair = create_account(&bank_client, &funding_keypair, system_rent); @@ -608,7 +608,7 @@ mod tests { #[test] fn test_move_stake_accounts() { - let (bank, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); + let (bank, _bank_forks, funding_keypair, stake_rent, system_rent) = create_bank(10_000_000); let funding_pubkey = funding_keypair.pubkey(); let bank_client = 
BankClient::new_shared(bank);
         let fee_payer_keypair = create_account(&bank_client, &funding_keypair, system_rent);
diff --git a/streamer/Cargo.toml b/streamer/Cargo.toml
index 8ffa23299ba5b2..b6051bc604451f 100644
--- a/streamer/Cargo.toml
+++ b/streamer/Cargo.toml
@@ -29,6 +29,7 @@ quinn-proto = { workspace = true }
 rand = { workspace = true }
 rustls = { workspace = true, features = ["dangerous_configuration"] }
 smallvec = { workspace = true }
+socket2 = { workspace = true }
 solana-measure = { workspace = true }
 solana-metrics = { workspace = true }
 solana-perf = { workspace = true }
@@ -40,7 +41,6 @@ x509-parser = { workspace = true }
 
 [dev-dependencies]
 assert_matches = { workspace = true }
-socket2 = { workspace = true }
 solana-logger = { workspace = true }
 
 [lib]
diff --git a/streamer/src/nonblocking/connection_rate_limiter.rs b/streamer/src/nonblocking/connection_rate_limiter.rs
index bc53e49e201a97..b14b88f6ee3af0 100644
--- a/streamer/src/nonblocking/connection_rate_limiter.rs
+++ b/streamer/src/nonblocking/connection_rate_limiter.rs
@@ -28,7 +28,7 @@ impl ConnectionRateLimiter {
         }
     }
 
-    /// retain only keys whose throttle start date is within the throttle interval.
+    /// retain only keys whose rate-limiting start date is within the rate-limiting interval.
     /// Otherwise drop them as inactive
     pub fn retain_recent(&self) {
         self.limiter.retain_recent()
diff --git a/streamer/src/nonblocking/keyed_rate_limiter.rs b/streamer/src/nonblocking/keyed_rate_limiter.rs
index 790fda72701081..c73682c8add542 100644
--- a/streamer/src/nonblocking/keyed_rate_limiter.rs
+++ b/streamer/src/nonblocking/keyed_rate_limiter.rs
@@ -40,13 +40,12 @@ where
         allowed
     }
 
-    /// retain only keys whose throttle start date is within the throttle interval.
+    /// retain only keys whose rate-limiting start date is within the configured interval.
     /// Otherwise drop them as inactive
     pub fn retain_recent(&self) {
         let now = tokio::time::Instant::now();
-        self.limiters.retain(|_key, limiter| {
-            now.duration_since(*limiter.throttle_start_instant()) <= self.interval
-        });
+        self.limiters
+            .retain(|_key, limiter| now.duration_since(*limiter.start_instant()) <= self.interval);
     }
 
     /// Returns the number of "live" keys in the rate limiter.
@@ -79,7 +78,7 @@ pub mod test {
         assert!(!limiter.check_and_update(2));
         assert!(limiter.len() == 2);
 
-        // sleep 150 ms, the throttle parameters should have been reset.
+        // sleep 150 ms, the rate-limiting parameters should have been reset.
         sleep(Duration::from_millis(150)).await;
         assert!(limiter.len() == 2);
diff --git a/streamer/src/nonblocking/mod.rs b/streamer/src/nonblocking/mod.rs
index 9eed5e402c5f25..d7205e42468235 100644
--- a/streamer/src/nonblocking/mod.rs
+++ b/streamer/src/nonblocking/mod.rs
@@ -5,3 +5,4 @@ pub mod rate_limiter;
 pub mod recvmmsg;
 pub mod sendmmsg;
 mod stream_throttle;
+pub mod testing_utilities;
diff --git a/streamer/src/nonblocking/quic.rs b/streamer/src/nonblocking/quic.rs
index aa0de316c732e2..4d5c2326f5a0a6 100644
--- a/streamer/src/nonblocking/quic.rs
+++ b/streamer/src/nonblocking/quic.rs
@@ -7,7 +7,7 @@ use {
             STREAM_THROTTLING_INTERVAL_MS,
         },
     },
-    quic::{configure_server, QuicServerError, StreamStats},
+    quic::{configure_server, QuicServerError, StreamerStats},
     streamer::StakedNodes,
     tls_certificates::get_pubkey_from_tls_certificate,
 },
@@ -99,7 +99,7 @@ const TOTAL_CONNECTIONS_PER_SECOND: u64 = 2500;
 
 /// The threshold of the size of the connection rate limiter map. When
When /// the map size is above this, we will trigger a cleanup of older /// entries used by past requests. -const CONNECITON_RATE_LIMITER_CLEANUP_SIZE_THRESHOLD: usize = 100_000; +const CONNECTION_RATE_LIMITER_CLEANUP_SIZE_THRESHOLD: usize = 100_000; // A sequence of bytes that is part of a packet // along with where in the packet it is @@ -140,7 +140,7 @@ impl ConnectionPeerType { pub struct SpawnNonBlockingServerResult { pub endpoints: Vec, - pub stats: Arc, + pub stats: Arc, pub thread: JoinHandle<()>, pub max_concurrent_connections: usize, } @@ -212,7 +212,7 @@ pub fn spawn_server_multi( .map_err(QuicServerError::EndpointFailed) }) .collect::, _>>()?; - let stats = Arc::::default(); + let stats = Arc::::default(); let handle = tokio::spawn(run_server( name, endpoints.clone(), @@ -248,7 +248,7 @@ async fn run_server( max_unstaked_connections: usize, max_streams_per_ms: u64, max_connections_per_ipaddr_per_min: u64, - stats: Arc, + stats: Arc, wait_for_chunk_timeout: Duration, coalesce: Duration, ) { @@ -266,6 +266,9 @@ async fn run_server( max_unstaked_connections, max_streams_per_ms, )); + stats + .quic_endpoints_count + .store(incoming.len(), Ordering::Relaxed); let staked_connection_table: Arc> = Arc::new(Mutex::new(ConnectionTable::new())); let (sender, receiver) = async_unbounded(); @@ -326,12 +329,12 @@ async fn run_server( remote_address.ip() ); stats - .connection_throttled_across_all + .connection_rate_limited_across_all .fetch_add(1, Ordering::Relaxed); continue; } - if rate_limiter.len() > CONNECITON_RATE_LIMITER_CLEANUP_SIZE_THRESHOLD { + if rate_limiter.len() > CONNECTION_RATE_LIMITER_CLEANUP_SIZE_THRESHOLD { rate_limiter.retain_recent(); } stats @@ -344,7 +347,7 @@ async fn run_server( remote_address ); stats - .connection_throttled_per_ipaddr + .connection_rate_limited_per_ipaddr .fetch_add(1, Ordering::Relaxed); continue; } @@ -374,7 +377,7 @@ async fn run_server( fn prune_unstaked_connection_table( unstaked_connection_table: &mut ConnectionTable, max_unstaked_connections: usize, - stats: Arc, + stats: Arc, ) { if unstaked_connection_table.total_size >= max_unstaked_connections { const PRUNE_TABLE_TO_PERCENTAGE: u8 = 90; @@ -457,7 +460,7 @@ struct NewConnectionHandlerParams { peer_type: ConnectionPeerType, total_stake: u64, max_connections_per_peer: usize, - stats: Arc, + stats: Arc, max_stake: u64, min_stake: u64, } @@ -466,7 +469,7 @@ impl NewConnectionHandlerParams { fn new_unstaked( packet_sender: AsyncSender, max_connections_per_peer: usize, - stats: Arc, + stats: Arc, ) -> NewConnectionHandlerParams { NewConnectionHandlerParams { packet_sender, @@ -640,7 +643,7 @@ async fn setup_connection( max_staked_connections: usize, max_unstaked_connections: usize, max_streams_per_ms: u64, - stats: Arc, + stats: Arc, wait_for_chunk_timeout: Duration, stream_load_ema: Arc, ) { @@ -769,7 +772,7 @@ async fn setup_connection( } } -fn handle_connection_error(e: quinn::ConnectionError, stats: &StreamStats, from: SocketAddr) { +fn handle_connection_error(e: quinn::ConnectionError, stats: &StreamerStats, from: SocketAddr) { debug!("error: {:?} from: {:?}", e, from); stats.connection_setup_error.fetch_add(1, Ordering::Relaxed); match e { @@ -807,11 +810,13 @@ fn handle_connection_error(e: quinn::ConnectionError, stats: &StreamStats, from: } } +// Holder(s) of the AsyncSender on the other end should not +// wait for this function to exit to exit async fn packet_batch_sender( packet_sender: Sender, packet_receiver: AsyncReceiver, exit: Arc, - stats: Arc, + stats: Arc, coalesce: Duration, 
) {
     trace!("enter packet_batch_sender");
@@ -862,7 +867,19 @@ async fn packet_batch_sender(
             break;
         }
 
-        let timeout_res = timeout(Duration::from_micros(250), packet_receiver.recv()).await;
+        let timeout_res = if !packet_batch.is_empty() {
+            // If we get here, elapsed < coalesce (see above if condition)
+            timeout(coalesce - elapsed, packet_receiver.recv()).await
+        } else {
+            // Small bit of non-idealness here: the holder(s) of the other end
+            // of packet_receiver must drop it (without waiting for us to exit)
+            // or we have a chance of sleeping here forever
+            // and never polling exit. Not a huge deal in practice as the
+            // only time this happens is when we tear down the server
+            // and at that time the other end does indeed not wait for us
+            // to exit here
+            Ok(packet_receiver.recv().await)
+        };
 
         if let Ok(Ok(packet_accumulator)) = timeout_res {
             // Start the timeout from when the packet batch first becomes non-empty
@@ -902,7 +919,7 @@ async fn packet_batch_sender(
 
 fn track_streamer_fetch_packet_performance(
     packet_perf_measure: &[([u8; 64], Instant)],
-    stats: &StreamStats,
+    stats: &StreamerStats,
 ) {
     if packet_perf_measure.is_empty() {
         return;
     }
@@ -1075,7 +1092,7 @@ async fn handle_chunk(
     packet_accum: &mut Option<PacketAccumulator>,
     remote_addr: &SocketAddr,
     packet_sender: &AsyncSender<PacketAccumulator>,
-    stats: Arc<StreamStats>,
+    stats: Arc<StreamerStats>,
     peer_type: ConnectionPeerType,
 ) -> bool {
     match chunk {
@@ -1422,159 +1439,23 @@ pub mod test {
     use {
         super::*,
         crate::{
-            nonblocking::quic::compute_max_allowed_uni_streams,
+            nonblocking::{
+                quic::compute_max_allowed_uni_streams,
+                testing_utilities::{
+                    get_client_config, make_client_endpoint, setup_quic_server,
+                    SpawnTestServerResult, TestServerConfig,
+                },
+            },
             quic::{MAX_STAKED_CONNECTIONS, MAX_UNSTAKED_CONNECTIONS},
-            tls_certificates::new_dummy_x509_certificate,
         },
         assert_matches::assert_matches,
         async_channel::unbounded as async_unbounded,
         crossbeam_channel::{unbounded, Receiver},
-        quinn::{ClientConfig, IdleTimeout, TransportConfig},
-        solana_sdk::{
-            net::DEFAULT_TPU_COALESCE,
-            quic::{QUIC_KEEP_ALIVE, QUIC_MAX_TIMEOUT},
-            signature::Keypair,
-            signer::Signer,
-        },
+        solana_sdk::{net::DEFAULT_TPU_COALESCE, signature::Keypair, signer::Signer},
         std::collections::HashMap,
         tokio::time::sleep,
     };
 
-    struct SkipServerVerification;
-
-    impl SkipServerVerification {
-        fn new() -> Arc<Self> {
-            Arc::new(Self)
-        }
-    }
-
-    impl rustls::client::ServerCertVerifier for SkipServerVerification {
-        fn verify_server_cert(
-            &self,
-            _end_entity: &rustls::Certificate,
-            _intermediates: &[rustls::Certificate],
-            _server_name: &rustls::ServerName,
-            _scts: &mut dyn Iterator<Item = &[u8]>,
-            _ocsp_response: &[u8],
-            _now: std::time::SystemTime,
-        ) -> Result<rustls::client::ServerCertVerified, rustls::Error> {
-            Ok(rustls::client::ServerCertVerified::assertion())
-        }
-    }
-
-    pub fn get_client_config(keypair: &Keypair) -> ClientConfig {
-        let (cert, key) = new_dummy_x509_certificate(keypair);
-
-        let mut crypto = rustls::ClientConfig::builder()
-            .with_safe_defaults()
-            .with_custom_certificate_verifier(SkipServerVerification::new())
-            .with_client_auth_cert(vec![cert], key)
-            .expect("Failed to use client certificate");
-
-        crypto.enable_early_data = true;
-        crypto.alpn_protocols = vec![ALPN_TPU_PROTOCOL_ID.to_vec()];
-
-        let mut config = ClientConfig::new(Arc::new(crypto));
-
-        let mut transport_config = TransportConfig::default();
-        let timeout = IdleTimeout::try_from(QUIC_MAX_TIMEOUT).unwrap();
-        transport_config.max_idle_timeout(Some(timeout));
-        transport_config.keep_alive_interval(Some(QUIC_KEEP_ALIVE));
-        config.transport_config(Arc::new(transport_config));
-
-        
config
-    }
-
-    fn setup_quic_server(
-        option_staked_nodes: Option<StakedNodes>,
-        max_connections_per_peer: usize,
-    ) -> (
-        JoinHandle<()>,
-        Arc<AtomicBool>,
-        crossbeam_channel::Receiver<PacketBatch>,
-        SocketAddr,
-        Arc<StreamStats>,
-    ) {
-        let sockets = {
-            #[cfg(not(target_os = "windows"))]
-            {
-                use std::{
-                    os::fd::{FromRawFd, IntoRawFd},
-                    str::FromStr as _,
-                };
-                (0..10)
-                    .map(|_| {
-                        let sock = socket2::Socket::new(
-                            socket2::Domain::IPV4,
-                            socket2::Type::DGRAM,
-                            Some(socket2::Protocol::UDP),
-                        )
-                        .unwrap();
-                        sock.set_reuse_port(true).unwrap();
-                        sock.bind(&SocketAddr::from_str("127.0.0.1:0").unwrap().into())
-                            .unwrap();
-                        unsafe { UdpSocket::from_raw_fd(sock.into_raw_fd()) }
-                    })
-                    .collect::<Vec<_>>()
-            }
-            #[cfg(target_os = "windows")]
-            {
-                vec![UdpSocket::bind("127.0.0.1:0").unwrap()]
-            }
-        };
-
-        let exit = Arc::new(AtomicBool::new(false));
-        let (sender, receiver) = unbounded();
-        let keypair = Keypair::new();
-        let server_address = sockets[0].local_addr().unwrap();
-        let staked_nodes = Arc::new(RwLock::new(option_staked_nodes.unwrap_or_default()));
-        let SpawnNonBlockingServerResult {
-            endpoints: _,
-            stats,
-            thread: t,
-            max_concurrent_connections: _,
-        } = spawn_server_multi(
-            "quic_streamer_test",
-            sockets,
-            &keypair,
-            sender,
-            exit.clone(),
-            max_connections_per_peer,
-            staked_nodes,
-            MAX_STAKED_CONNECTIONS,
-            MAX_UNSTAKED_CONNECTIONS,
-            DEFAULT_MAX_STREAMS_PER_MS,
-            DEFAULT_MAX_CONNECTIONS_PER_IPADDR_PER_MINUTE,
-            Duration::from_secs(2),
-            DEFAULT_TPU_COALESCE,
-        )
-        .unwrap();
-        (t, exit, receiver, server_address, stats)
-    }
-
-    pub async fn make_client_endpoint(
-        addr: &SocketAddr,
-        client_keypair: Option<&Keypair>,
-    ) -> Connection {
-        let client_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
-        let mut endpoint = quinn::Endpoint::new(
-            EndpointConfig::default(),
-            None,
-            client_socket,
-            Arc::new(TokioRuntime),
-        )
-        .unwrap();
-        let default_keypair = Keypair::new();
-        endpoint.set_default_client_config(get_client_config(
-            client_keypair.unwrap_or(&default_keypair),
-        ));
-        endpoint
-            .connect(*addr, "localhost")
-            .expect("Failed in connecting")
-            .await
-            .expect("Failed in waiting")
-    }
-
     pub async fn check_timeout(receiver: Receiver<PacketBatch>, server_address: SocketAddr) {
         let conn1 = make_client_endpoint(&server_address, None).await;
         let total = 30;
@@ -1722,18 +1603,31 @@ pub mod test {
 
     #[tokio::test]
     async fn test_quic_server_exit() {
-        let (t, exit, _receiver, _server_address, _stats) = setup_quic_server(None, 1);
+        let SpawnTestServerResult {
+            join_handle,
+            exit,
+            receiver: _,
+            server_address: _,
+            stats: _,
+        } = setup_quic_server(None, TestServerConfig::default());
         exit.store(true, Ordering::Relaxed);
-        t.await.unwrap();
+        join_handle.await.unwrap();
     }
 
     #[tokio::test]
     async fn test_quic_timeout() {
         solana_logger::setup();
-        let (t, exit, receiver, server_address, _stats) = setup_quic_server(None, 1);
+        let SpawnTestServerResult {
+            join_handle,
+            exit,
+            receiver,
+            server_address,
+            stats: _,
+        } = setup_quic_server(None, TestServerConfig::default());
+
         check_timeout(receiver, server_address).await;
         exit.store(true, Ordering::Relaxed);
-        t.await.unwrap();
+        join_handle.await.unwrap();
     }
 
     #[tokio::test]
@@ -1742,7 +1636,7 @@ pub mod test {
         let (pkt_batch_sender, pkt_batch_receiver) = unbounded();
         let (ptk_sender, pkt_receiver) = async_unbounded();
         let exit = Arc::new(AtomicBool::new(false));
-        let stats = Arc::new(StreamStats::default());
+        let stats = Arc::new(StreamerStats::default());
 
         let handle = tokio::spawn(packet_batch_sender(
             pkt_batch_sender,
@@ -1782,13 +1676,21 @@ pub mod test {
}
         assert_eq!(i, num_packets);
         exit.store(true, Ordering::Relaxed);
+        // Explicit drop to wake up packet_batch_sender
+        drop(ptk_sender);
         handle.await.unwrap();
     }
 
     #[tokio::test]
     async fn test_quic_stream_timeout() {
         solana_logger::setup();
-        let (t, exit, _receiver, server_address, stats) = setup_quic_server(None, 1);
+        let SpawnTestServerResult {
+            join_handle,
+            exit,
+            receiver: _,
+            server_address,
+            stats,
+        } = setup_quic_server(None, TestServerConfig::default());
 
         let conn1 = make_client_endpoint(&server_address, None).await;
         assert_eq!(stats.total_streams.load(Ordering::Relaxed), 0);
@@ -1812,22 +1714,41 @@ pub mod test {
         assert!(s1.finish().await.is_err());
 
         exit.store(true, Ordering::Relaxed);
-        t.await.unwrap();
+        join_handle.await.unwrap();
     }
 
     #[tokio::test]
     async fn test_quic_server_block_multiple_connections() {
         solana_logger::setup();
-        let (t, exit, _receiver, server_address, _stats) = setup_quic_server(None, 1);
+        let SpawnTestServerResult {
+            join_handle,
+            exit,
+            receiver: _,
+            server_address,
+            stats: _,
+        } = setup_quic_server(None, TestServerConfig::default());
         check_block_multiple_connections(server_address).await;
         exit.store(true, Ordering::Relaxed);
-        t.await.unwrap();
+        join_handle.await.unwrap();
     }
 
     #[tokio::test]
     async fn test_quic_server_multiple_connections_on_single_client_endpoint() {
         solana_logger::setup();
-        let (t, exit, _receiver, server_address, stats) = setup_quic_server(None, 2);
+
+        let SpawnTestServerResult {
+            join_handle,
+            exit,
+            receiver: _,
+            server_address,
+            stats,
+        } = setup_quic_server(
+            None,
+            TestServerConfig {
+                max_connections_per_peer: 2,
+                ..Default::default()
+            },
+        );
 
         let client_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
         let mut endpoint = quinn::Endpoint::new(
@@ -1880,16 +1801,22 @@ pub mod test {
         assert_eq!(stats.connection_removed.load(Ordering::Relaxed), 2);
 
         exit.store(true, Ordering::Relaxed);
-        t.await.unwrap();
+        join_handle.await.unwrap();
     }
 
     #[tokio::test]
     async fn test_quic_server_multiple_writes() {
         solana_logger::setup();
-        let (t, exit, receiver, server_address, _stats) = setup_quic_server(None, 1);
+        let SpawnTestServerResult {
+            join_handle,
+            exit,
+            receiver,
+            server_address,
+            stats: _,
+        } = setup_quic_server(None, TestServerConfig::default());
         check_multiple_writes(receiver, server_address, None).await;
         exit.store(true, Ordering::Relaxed);
-        t.await.unwrap();
+        join_handle.await.unwrap();
     }
 
     #[tokio::test]
@@ -1902,10 +1829,16 @@ pub mod test {
             Arc::new(stakes),
             HashMap::<Pubkey, u64>::default(), // overrides
         );
-        let (t, exit, receiver, server_address, stats) = setup_quic_server(Some(staked_nodes), 1);
+        let SpawnTestServerResult {
+            join_handle,
+            exit,
+            receiver,
+            server_address,
+            stats,
+        } = setup_quic_server(Some(staked_nodes), TestServerConfig::default());
         check_multiple_writes(receiver, server_address, Some(&client_keypair)).await;
         exit.store(true, Ordering::Relaxed);
-        t.await.unwrap();
+        join_handle.await.unwrap();
         sleep(Duration::from_millis(100)).await;
         assert_eq!(
             stats
@@ -1928,10 +1861,16 @@ pub mod test {
             Arc::new(stakes),
             HashMap::<Pubkey, u64>::default(), // overrides
         );
-        let (t, exit, receiver, server_address, stats) = setup_quic_server(Some(staked_nodes), 1);
+        let SpawnTestServerResult {
+            join_handle,
+            exit,
+            receiver,
+            server_address,
+            stats,
+        } = setup_quic_server(Some(staked_nodes), TestServerConfig::default());
         check_multiple_writes(receiver, server_address, Some(&client_keypair)).await;
         exit.store(true, Ordering::Relaxed);
-        t.await.unwrap();
+        join_handle.await.unwrap(); 
         sleep(Duration::from_millis(100)).await;
         assert_eq!(
             stats
@@ -1946,10 +1885,16 @@ pub mod test {
     #[tokio::test]
     async fn test_quic_server_unstaked_connection_removal() {
         solana_logger::setup();
-        let (t, exit, receiver, server_address, stats) = setup_quic_server(None, 1);
+        let SpawnTestServerResult {
+            join_handle,
+            exit,
+            receiver,
+            server_address,
+            stats,
+        } = setup_quic_server(None, TestServerConfig::default());
         check_multiple_writes(receiver, server_address, None).await;
         exit.store(true, Ordering::Relaxed);
-        t.await.unwrap();
+        join_handle.await.unwrap();
         sleep(Duration::from_millis(100)).await;
         assert_eq!(
             stats
@@ -2340,4 +2285,53 @@ pub mod test {
         compute_receive_window_ratio_for_staked_node(max_stake, min_stake, max_stake + 10);
         assert_eq!(ratio, max_ratio);
     }
+
+    #[tokio::test]
+    async fn test_throttling_check_no_packet_drop() {
+        solana_logger::setup_with_default_filter();
+
+        let SpawnTestServerResult {
+            join_handle,
+            exit,
+            receiver,
+            server_address,
+            stats,
+        } = setup_quic_server(None, TestServerConfig::default());
+
+        let client_connection = make_client_endpoint(&server_address, None).await;
+
+        // an unstaked connection can handle up to 100 TPS, so sending should take ~1s.
+        let expected_num_txs = 100;
+        let start_time = tokio::time::Instant::now();
+        for i in 0..expected_num_txs {
+            let mut send_stream = client_connection.open_uni().await.unwrap();
+            let data = format!("{i}").into_bytes();
+            send_stream.write_all(&data).await.unwrap();
+            send_stream.finish().await.unwrap();
+        }
+        let elapsed_sending: f64 = start_time.elapsed().as_secs_f64();
+        info!("Elapsed sending: {elapsed_sending}");
+
+        // check that all of them were delivered
+        let start_time = tokio::time::Instant::now();
+        let mut num_txs_received = 0;
+        while num_txs_received < expected_num_txs && start_time.elapsed() < Duration::from_secs(2) {
+            if let Ok(packets) = receiver.try_recv() {
+                num_txs_received += packets.len();
+            } else {
+                sleep(Duration::from_millis(100)).await;
+            }
+        }
+        assert_eq!(expected_num_txs, num_txs_received);
+
+        // stop it
+        exit.store(true, Ordering::Relaxed);
+        join_handle.await.unwrap();
+
+        assert_eq!(
+            stats.total_new_streams.load(Ordering::Relaxed),
+            expected_num_txs
+        );
+        assert!(stats.throttled_unstaked_streams.load(Ordering::Relaxed) > 0);
+    }
 }
diff --git a/streamer/src/nonblocking/rate_limiter.rs b/streamer/src/nonblocking/rate_limiter.rs
index e9e7e126545a48..96ce89391fa1ac 100644
--- a/streamer/src/nonblocking/rate_limiter.rs
+++ b/streamer/src/nonblocking/rate_limiter.rs
@@ -5,28 +5,28 @@ pub struct RateLimiter {
     /// count of requests in an interval
     pub(crate) count: u64,

-    /// Throttle start time
-    throttle_start_instant: Instant,
+    /// Rate limit start time
+    start_instant: Instant,
     interval: Duration,
     limit: u64,
 }

 /// A naive rate limiter, to be replaced by using governor which has more even
-/// distribution of requests passing through using GCRA algroithm.
+/// distribution of requests passing through using the GCRA algorithm.
 impl RateLimiter {
     pub fn new(limit: u64, interval: Duration) -> Self {
         Self {
             count: 0,
-            throttle_start_instant: Instant::now(),
+            start_instant: Instant::now(),
             interval,
             limit,
         }
     }

-    /// Reset the counter and throttling start instant if needed.
+    /// Reset the counter and start instant if needed.
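+    /// A window is considered expired once more than `interval` has elapsed
+    /// since `start_instant`; the count then restarts from zero.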
+    pub fn reset_params_if_needed(&mut self) {
+        if Instant::now().duration_since(self.start_instant) > self.interval {
+            self.start_instant = Instant::now();
             self.count = 0;
         }
     }
@@ -35,7 +35,7 @@ impl RateLimiter {
     /// When it is allowed, the rate limiter state is updated to reflect it has been
     /// allowed. For a unique request, the caller should call it only once when it is allowed.
     pub fn check_and_update(&mut self) -> bool {
-        self.reset_throttling_params_if_needed();
+        self.reset_params_if_needed();
         if self.count >= self.limit {
             return false;
         }
@@ -44,9 +44,9 @@ impl RateLimiter {
         true
     }

-    /// Return the start instant for the current throttle interval.
-    pub fn throttle_start_instant(&self) -> &Instant {
-        &self.throttle_start_instant
+    /// Return the start instant for the current rate-limiting interval.
+    pub fn start_instant(&self) -> &Instant {
+        &self.start_instant
     }
 }

@@ -60,15 +60,15 @@ pub mod test {
         assert!(limiter.check_and_update());
         assert!(limiter.check_and_update());
         assert!(!limiter.check_and_update());
-        let instant1 = *limiter.throttle_start_instant();
+        let instant1 = *limiter.start_instant();

-        // sleep 150 ms, the throttle parameters should have been reset.
+        // sleep 150 ms, the rate-limiting parameters should have been reset.
         sleep(Duration::from_millis(150)).await;
         assert!(limiter.check_and_update());
         assert!(limiter.check_and_update());
         assert!(!limiter.check_and_update());
-        let instant2 = *limiter.throttle_start_instant();
+        let instant2 = *limiter.start_instant();
         assert!(instant2 > instant1);
     }
 }
diff --git a/streamer/src/nonblocking/stream_throttle.rs b/streamer/src/nonblocking/stream_throttle.rs
index 699d8d7faf33fb..a9db82874b9c21 100644
--- a/streamer/src/nonblocking/stream_throttle.rs
+++ b/streamer/src/nonblocking/stream_throttle.rs
@@ -1,5 +1,5 @@
 use {
-    crate::{nonblocking::quic::ConnectionPeerType, quic::StreamStats},
+    crate::{nonblocking::quic::ConnectionPeerType, quic::StreamerStats},
     percentage::Percentage,
     std::{
         cmp,
@@ -23,7 +23,7 @@ pub(crate) struct StakedStreamLoadEMA {
     current_load_ema: AtomicU64,
     load_in_recent_interval: AtomicU64,
     last_update: RwLock<Instant>,
-    stats: Arc<StreamStats>,
+    stats: Arc<StreamerStats>,
     // Maximum number of streams for a staked connection in EMA window
     // Note: EMA window can be different than stream throttling window. EMA is being calculated
     // specifically for staked connections. Unstaked connections have fixed limit on
@@ -35,7 +35,7 @@ pub(crate) struct StakedStreamLoadEMA {

 impl StakedStreamLoadEMA {
     pub(crate) fn new(
-        stats: Arc<StreamStats>,
+        stats: Arc<StreamerStats>,
         max_unstaked_connections: usize,
         max_streams_per_ms: u64,
     ) -> Self {
@@ -239,7 +239,7 @@ pub mod test {
         nonblocking::{
             quic::DEFAULT_MAX_STREAMS_PER_MS, stream_throttle::STREAM_LOAD_EMA_INTERVAL_MS,
         },
-        quic::{StreamStats, MAX_UNSTAKED_CONNECTIONS},
+        quic::{StreamerStats, MAX_UNSTAKED_CONNECTIONS},
     },
     std::{
         sync::{atomic::Ordering, Arc},
@@ -250,7 +250,7 @@ pub mod test {
     #[test]
     fn test_max_streams_for_unstaked_connection() {
         let load_ema = Arc::new(StakedStreamLoadEMA::new(
-            Arc::new(StreamStats::default()),
+            Arc::new(StreamerStats::default()),
             MAX_UNSTAKED_CONNECTIONS,
             DEFAULT_MAX_STREAMS_PER_MS,
         ));
@@ -267,7 +267,7 @@ pub mod test {
     #[test]
     fn test_max_streams_for_staked_connection() {
         let load_ema = Arc::new(StakedStreamLoadEMA::new(
-            Arc::new(StreamStats::default()),
+            Arc::new(StreamerStats::default()),
             MAX_UNSTAKED_CONNECTIONS,
             DEFAULT_MAX_STREAMS_PER_MS,
         ));
@@ -359,7 +359,7 @@ pub mod test {
     #[test]
     fn test_max_streams_for_staked_connection_with_no_unstaked_connections() {
         let load_ema = Arc::new(StakedStreamLoadEMA::new(
-            Arc::new(StreamStats::default()),
+            Arc::new(StreamerStats::default()),
             0,
             DEFAULT_MAX_STREAMS_PER_MS,
         ));
@@ -447,7 +447,7 @@ pub mod test {
     #[test]
     fn test_update_ema() {
         let stream_load_ema = Arc::new(StakedStreamLoadEMA::new(
-            Arc::new(StreamStats::default()),
+            Arc::new(StreamerStats::default()),
             MAX_UNSTAKED_CONNECTIONS,
             DEFAULT_MAX_STREAMS_PER_MS,
         ));
@@ -476,7 +476,7 @@ pub mod test {
     #[test]
     fn test_update_ema_missing_interval() {
         let stream_load_ema = Arc::new(StakedStreamLoadEMA::new(
-            Arc::new(StreamStats::default()),
+            Arc::new(StreamerStats::default()),
             MAX_UNSTAKED_CONNECTIONS,
             DEFAULT_MAX_STREAMS_PER_MS,
         ));
@@ -496,7 +496,7 @@ pub mod test {
     #[test]
     fn test_update_ema_if_needed() {
         let stream_load_ema = Arc::new(StakedStreamLoadEMA::new(
-            Arc::new(StreamStats::default()),
+            Arc::new(StreamerStats::default()),
             MAX_UNSTAKED_CONNECTIONS,
             DEFAULT_MAX_STREAMS_PER_MS,
         ));
diff --git a/streamer/src/nonblocking/testing_utilities.rs b/streamer/src/nonblocking/testing_utilities.rs
new file mode 100644
index 00000000000000..d0a1fa98d6d182
--- /dev/null
+++ b/streamer/src/nonblocking/testing_utilities.rs
@@ -0,0 +1,196 @@
+//! Contains utility functions to create a server and client for test purposes.
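+//!
+//! A typical flow, sketched from how the tests in `nonblocking/quic.rs` use
+//! these helpers (illustrative only, not a doc-test):
+//!
+//! ```ignore
+//! let SpawnTestServerResult { join_handle, exit, receiver, server_address, stats } =
+//!     setup_quic_server(None, TestServerConfig::default());
+//! let connection = make_client_endpoint(&server_address, None).await;
+//! // ... open streams on `connection` and read the resulting packets from `receiver` ...
+//! exit.store(true, std::sync::atomic::Ordering::Relaxed);
+//! join_handle.await.unwrap();
+//! ```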
+use {
+    super::quic::{
+        spawn_server_multi, SpawnNonBlockingServerResult, ALPN_TPU_PROTOCOL_ID,
+        DEFAULT_MAX_CONNECTIONS_PER_IPADDR_PER_MINUTE, DEFAULT_MAX_STREAMS_PER_MS,
+    },
+    crate::{
+        quic::{StreamerStats, MAX_STAKED_CONNECTIONS, MAX_UNSTAKED_CONNECTIONS},
+        streamer::StakedNodes,
+        tls_certificates::new_dummy_x509_certificate,
+    },
+    crossbeam_channel::unbounded,
+    quinn::{ClientConfig, Connection, EndpointConfig, IdleTimeout, TokioRuntime, TransportConfig},
+    solana_perf::packet::PacketBatch,
+    solana_sdk::{
+        net::DEFAULT_TPU_COALESCE,
+        quic::{QUIC_KEEP_ALIVE, QUIC_MAX_TIMEOUT},
+        signer::keypair::Keypair,
+    },
+    std::{
+        net::{SocketAddr, UdpSocket},
+        sync::{atomic::AtomicBool, Arc, RwLock},
+        time::Duration,
+    },
+    tokio::task::JoinHandle,
+};
+
+struct SkipServerVerification;
+
+impl SkipServerVerification {
+    fn new() -> Arc<Self> {
+        Arc::new(Self)
+    }
+}
+
+impl rustls::client::ServerCertVerifier for SkipServerVerification {
+    fn verify_server_cert(
+        &self,
+        _end_entity: &rustls::Certificate,
+        _intermediates: &[rustls::Certificate],
+        _server_name: &rustls::ServerName,
+        _scts: &mut dyn Iterator<Item = &[u8]>,
+        _ocsp_response: &[u8],
+        _now: std::time::SystemTime,
+    ) -> Result<rustls::client::ServerCertVerified, rustls::Error> {
+        Ok(rustls::client::ServerCertVerified::assertion())
+    }
+}
+
+pub fn get_client_config(keypair: &Keypair) -> ClientConfig {
+    let (cert, key) = new_dummy_x509_certificate(keypair);
+
+    let mut crypto = rustls::ClientConfig::builder()
+        .with_safe_defaults()
+        .with_custom_certificate_verifier(SkipServerVerification::new())
+        .with_client_auth_cert(vec![cert], key)
+        .expect("Provided key should be correctly set.");
+
+    crypto.enable_early_data = true;
+    crypto.alpn_protocols = vec![ALPN_TPU_PROTOCOL_ID.to_vec()];
+
+    let mut config = ClientConfig::new(Arc::new(crypto));
+
+    let mut transport_config = TransportConfig::default();
+    let timeout = IdleTimeout::try_from(QUIC_MAX_TIMEOUT).unwrap();
+    transport_config.max_idle_timeout(Some(timeout));
+    transport_config.keep_alive_interval(Some(QUIC_KEEP_ALIVE));
+    config.transport_config(Arc::new(transport_config));
+
+    config
+}
+
+#[derive(Debug, Clone)]
+pub struct TestServerConfig {
+    pub max_connections_per_peer: usize,
+    pub max_staked_connections: usize,
+    pub max_unstaked_connections: usize,
+    pub max_streams_per_ms: u64,
+    pub max_connections_per_ipaddr_per_minute: u64,
+}
+
+impl Default for TestServerConfig {
+    fn default() -> Self {
+        Self {
+            max_connections_per_peer: 1,
+            max_staked_connections: MAX_STAKED_CONNECTIONS,
+            max_unstaked_connections: MAX_UNSTAKED_CONNECTIONS,
+            max_streams_per_ms: DEFAULT_MAX_STREAMS_PER_MS,
+            max_connections_per_ipaddr_per_minute: DEFAULT_MAX_CONNECTIONS_PER_IPADDR_PER_MINUTE,
+        }
+    }
+}
+
+pub struct SpawnTestServerResult {
+    pub join_handle: JoinHandle<()>,
+    pub exit: Arc<AtomicBool>,
+    pub receiver: crossbeam_channel::Receiver<PacketBatch>,
+    pub server_address: SocketAddr,
+    pub stats: Arc<StreamerStats>,
+}
+
+pub fn setup_quic_server(
+    option_staked_nodes: Option<StakedNodes>,
+    TestServerConfig {
+        max_connections_per_peer,
+        max_staked_connections,
+        max_unstaked_connections,
+        max_streams_per_ms,
+        max_connections_per_ipaddr_per_minute,
+    }: TestServerConfig,
+) -> SpawnTestServerResult {
+    let sockets = {
+        #[cfg(not(target_os = "windows"))]
+        {
+            use std::{
+                os::fd::{FromRawFd, IntoRawFd},
+                str::FromStr as _,
+            };
+            (0..10)
+                .map(|_| {
+                    let sock = socket2::Socket::new(
+                        socket2::Domain::IPV4,
+                        socket2::Type::DGRAM,
+                        Some(socket2::Protocol::UDP),
+                    )
+                    .unwrap();
+                    sock.set_reuse_port(true).unwrap();
+                    sock.bind(&SocketAddr::from_str("127.0.0.1:0").unwrap().into())
+                        .unwrap();
+                    unsafe { UdpSocket::from_raw_fd(sock.into_raw_fd()) }
+                })
+                .collect::<Vec<_>>()
+        }
+        #[cfg(target_os = "windows")]
+        {
+            vec![UdpSocket::bind("127.0.0.1:0").unwrap()]
+        }
+    };
+
+    let exit = Arc::new(AtomicBool::new(false));
+    let (sender, receiver) = unbounded();
+    let keypair = Keypair::new();
+    let server_address = sockets[0].local_addr().unwrap();
+    let staked_nodes = Arc::new(RwLock::new(option_staked_nodes.unwrap_or_default()));
+    let SpawnNonBlockingServerResult {
+        endpoints: _,
+        stats,
+        thread: handle,
+        max_concurrent_connections: _,
+    } = spawn_server_multi(
+        "quic_streamer_test",
+        sockets,
+        &keypair,
+        sender,
+        exit.clone(),
+        max_connections_per_peer,
+        staked_nodes,
+        max_staked_connections,
+        max_unstaked_connections,
+        max_streams_per_ms,
+        max_connections_per_ipaddr_per_minute,
+        Duration::from_secs(2),
+        DEFAULT_TPU_COALESCE,
+    )
+    .unwrap();
+    SpawnTestServerResult {
+        join_handle: handle,
+        exit,
+        receiver,
+        server_address,
+        stats,
+    }
+}
+
+pub async fn make_client_endpoint(
+    addr: &SocketAddr,
+    client_keypair: Option<&Keypair>,
+) -> Connection {
+    let client_socket = UdpSocket::bind("127.0.0.1:0").unwrap();
+    let mut endpoint = quinn::Endpoint::new(
+        EndpointConfig::default(),
+        None,
+        client_socket,
+        Arc::new(TokioRuntime),
+    )
+    .unwrap();
+    let default_keypair = Keypair::new();
+    endpoint.set_default_client_config(get_client_config(
+        client_keypair.unwrap_or(&default_keypair),
+    ));
+    endpoint
+        .connect(*addr, "localhost")
+        .expect("Endpoint configuration should be correct")
+        .await
+        .expect("Test server should be already listening on 'localhost'")
+}
diff --git a/streamer/src/quic.rs b/streamer/src/quic.rs
index 9ab5d9cb7b46ed..08bcf7cd3f7f00 100644
--- a/streamer/src/quic.rs
+++ b/streamer/src/quic.rs
@@ -28,6 +28,9 @@ use {
 pub const MAX_STAKED_CONNECTIONS: usize = 2000;
 pub const MAX_UNSTAKED_CONNECTIONS: usize = 500;

+// This will be adjusted and parameterized in follow-on PRs.
+pub const DEFAULT_QUIC_ENDPOINTS: usize = 1;
+
 pub struct SkipClientVerification;

 impl SkipClientVerification {
@@ -136,7 +139,7 @@ impl NotifyKeyUpdate for EndpointKeyUpdater {
 }

 #[derive(Default)]
-pub struct StreamStats {
+pub struct StreamerStats {
     pub(crate) total_connections: AtomicUsize,
     pub(crate) total_new_connections: AtomicUsize,
     pub(crate) total_streams: AtomicUsize,
@@ -178,8 +181,12 @@ pub struct StreamStats {
     pub(crate) connection_setup_error_locally_closed: AtomicUsize,
     pub(crate) connection_removed: AtomicUsize,
     pub(crate) connection_remove_failed: AtomicUsize,
-    pub(crate) connection_throttled_across_all: AtomicUsize,
-    pub(crate) connection_throttled_per_ipaddr: AtomicUsize,
+    // Number of connections to the endpoint that exceeded the allowed limit,
+    // regardless of the source IP address.
+    pub(crate) connection_rate_limited_across_all: AtomicUsize,
+    // Per-IP rate limiting is triggered each time too many connections are
+    // opened from a particular IP address.
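+    // Like the counter above, this value is swapped to zero each time stats
+    // are reported, so each datapoint covers the events since the last report.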
+ pub(crate) connection_rate_limited_per_ipaddr: AtomicUsize, pub(crate) throttled_streams: AtomicUsize, pub(crate) stream_load_ema: AtomicUsize, pub(crate) stream_load_ema_overflow: AtomicUsize, @@ -193,9 +200,10 @@ pub struct StreamStats { pub(crate) connection_rate_limiter_length: AtomicUsize, pub(crate) outstanding_incoming_connection_attempts: AtomicUsize, pub(crate) total_incoming_connection_attempts: AtomicUsize, + pub(crate) quic_endpoints_count: AtomicUsize, } -impl StreamStats { +impl StreamerStats { pub fn report(&self, name: &'static str) { let process_sampled_packets_us_hist = { let mut metrics = self.process_sampled_packets_us_hist.lock().unwrap(); @@ -328,14 +336,14 @@ impl StreamStats { i64 ), ( - "connection_throttled_across_all", - self.connection_throttled_across_all + "connection_rate_limited_across_all", + self.connection_rate_limited_across_all .swap(0, Ordering::Relaxed), i64 ), ( - "connection_throttled_per_ipaddr", - self.connection_throttled_per_ipaddr + "connection_rate_limited_per_ipaddr", + self.connection_rate_limited_per_ipaddr .swap(0, Ordering::Relaxed), i64 ), @@ -533,6 +541,11 @@ impl StreamStats { .load(Ordering::Relaxed), i64 ), + ( + "quic_endpoints_count", + self.quic_endpoints_count.load(Ordering::Relaxed), + i64 + ), ); } } diff --git a/sdk/cargo-test-bpf/Cargo.toml b/svm-transaction/Cargo.toml similarity index 50% rename from sdk/cargo-test-bpf/Cargo.toml rename to svm-transaction/Cargo.toml index a639225b3a2539..7eb82ae0d99d1d 100644 --- a/sdk/cargo-test-bpf/Cargo.toml +++ b/svm-transaction/Cargo.toml @@ -1,7 +1,7 @@ [package] -name = "solana-cargo-test-bpf" -description = "Execute all unit and integration tests after building with the Solana SBF SDK" -publish = false +name = "solana-svm-transaction" +description = "Solana SVM Transaction" +documentation = "https://docs.rs/solana-svm-transaction" version = { workspace = true } authors = { workspace = true } repository = { workspace = true } @@ -9,6 +9,5 @@ homepage = { workspace = true } license = { workspace = true } edition = { workspace = true } -[[bin]] -name = "cargo-test-bpf" -path = "src/main.rs" +[dependencies] +solana-sdk = { workspace = true } diff --git a/svm-transaction/src/instruction.rs b/svm-transaction/src/instruction.rs new file mode 100644 index 00000000000000..b99eeebea7ded1 --- /dev/null +++ b/svm-transaction/src/instruction.rs @@ -0,0 +1,24 @@ +use solana_sdk::instruction::CompiledInstruction; + +/// A non-owning version of [`CompiledInstruction`] that references +/// slices of account indexes and data. +// `program_id_index` is still owned, as it is a simple u8. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct SVMInstruction<'a> { + /// Index into the transaction keys array indicating the program account that executes this instruction. + pub program_id_index: u8, + /// Ordered indices into the transaction keys array indicating which accounts to pass to the program. + pub accounts: &'a [u8], + /// The program input data. 
+    pub data: &'a [u8],
+}
+
+impl<'a> From<&'a CompiledInstruction> for SVMInstruction<'a> {
+    fn from(ix: &'a CompiledInstruction) -> Self {
+        Self {
+            program_id_index: ix.program_id_index,
+            accounts: ix.accounts.as_slice(),
+            data: ix.data.as_slice(),
+        }
+    }
+}
diff --git a/svm-transaction/src/lib.rs b/svm-transaction/src/lib.rs
new file mode 100644
index 00000000000000..68860a9bd2a6c1
--- /dev/null
+++ b/svm-transaction/src/lib.rs
@@ -0,0 +1,4 @@
+pub mod instruction;
+pub mod message_address_table_lookup;
+pub mod svm_message;
+pub mod svm_transaction;
diff --git a/svm-transaction/src/message_address_table_lookup.rs b/svm-transaction/src/message_address_table_lookup.rs
new file mode 100644
index 00000000000000..71268a270f5f3d
--- /dev/null
+++ b/svm-transaction/src/message_address_table_lookup.rs
@@ -0,0 +1,23 @@
+use solana_sdk::{message::v0, pubkey::Pubkey};
+
+/// A non-owning version of [`v0::MessageAddressTableLookup`].
+/// This simply references the data in the original message.
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub struct SVMMessageAddressTableLookup<'a> {
+    /// Address lookup table account key
+    pub account_key: &'a Pubkey,
+    /// List of indexes used to load writable account addresses
+    pub writable_indexes: &'a [u8],
+    /// List of indexes used to load readonly account addresses
+    pub readonly_indexes: &'a [u8],
+}
+
+impl<'a> From<&'a v0::MessageAddressTableLookup> for SVMMessageAddressTableLookup<'a> {
+    fn from(lookup: &'a v0::MessageAddressTableLookup) -> Self {
+        Self {
+            account_key: &lookup.account_key,
+            writable_indexes: &lookup.writable_indexes,
+            readonly_indexes: &lookup.readonly_indexes,
+        }
+    }
+}
diff --git a/svm-transaction/src/svm_message.rs b/svm-transaction/src/svm_message.rs
new file mode 100644
index 00000000000000..85f5249998929e
--- /dev/null
+++ b/svm-transaction/src/svm_message.rs
@@ -0,0 +1,68 @@
+use {
+    crate::{
+        instruction::SVMInstruction, message_address_table_lookup::SVMMessageAddressTableLookup,
+    },
+    core::fmt::Debug,
+    solana_sdk::{hash::Hash, message::AccountKeys, pubkey::Pubkey},
+};
+
+mod sanitized_message;
+mod sanitized_transaction;
+
+// - Debug to support legacy logging
+pub trait SVMMessage: Debug {
+    /// Returns the total number of signatures in the message.
+    /// This includes required transaction signatures as well as any
+    /// pre-compile signatures that are attached in instructions.
+    fn num_total_signatures(&self) -> u64;
+
+    /// Returns the number of requested write-locks in this message.
+    /// This does not consider if write-locks are demoted.
+    fn num_write_locks(&self) -> u64;
+
+    /// Return the recent blockhash.
+    fn recent_blockhash(&self) -> &Hash;
+
+    /// Return the number of instructions in the message.
+    fn num_instructions(&self) -> usize;
+
+    /// Return an iterator over the instructions in the message.
+    fn instructions_iter(&self) -> impl Iterator<Item = SVMInstruction>;
+
+    /// Return an iterator over the instructions in the message, paired with
+    /// the pubkey of the program.
+    fn program_instructions_iter(&self) -> impl Iterator<Item = (&Pubkey, SVMInstruction)>;
+
+    /// Return the account keys.
+    fn account_keys(&self) -> AccountKeys;
+
+    /// Return the fee-payer.
+    fn fee_payer(&self) -> &Pubkey;
+
+    /// Returns `true` if the account at `index` is writable.
+    fn is_writable(&self, index: usize) -> bool;
+
+    /// Returns `true` if the account at `index` is a signer.
+    fn is_signer(&self, index: usize) -> bool;
+
+    /// Returns true if the account at the specified index is invoked as a
+    /// program in top-level instructions of this message.
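+    /// ("Invoked" here means the account is referenced as the program id of
+    /// some top-level instruction in the message.)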
+    fn is_invoked(&self, key_index: usize) -> bool;
+
+    /// Returns true if the account at the specified index is an input to some
+    /// program instruction in this message.
+    fn is_instruction_account(&self, key_index: usize) -> bool {
+        if let Ok(key_index) = u8::try_from(key_index) {
+            self.instructions_iter()
+                .any(|ix| ix.accounts.contains(&key_index))
+        } else {
+            false
+        }
+    }
+
+    /// Get the number of lookup tables.
+    fn num_lookup_tables(&self) -> usize;
+
+    /// Get message address table lookups used in the message.
+    fn message_address_table_lookups(&self) -> impl Iterator<Item = SVMMessageAddressTableLookup>;
+}
diff --git a/svm-transaction/src/svm_message/sanitized_message.rs b/svm-transaction/src/svm_message/sanitized_message.rs
new file mode 100644
index 00000000000000..66546a6d45f95b
--- /dev/null
+++ b/svm-transaction/src/svm_message/sanitized_message.rs
@@ -0,0 +1,71 @@
+use {
+    crate::{
+        instruction::SVMInstruction, message_address_table_lookup::SVMMessageAddressTableLookup,
+        svm_message::SVMMessage,
+    },
+    solana_sdk::{
+        hash::Hash,
+        message::{AccountKeys, SanitizedMessage},
+        pubkey::Pubkey,
+    },
+};
+
+// Implement for the "reference" `SanitizedMessage` type.
+impl SVMMessage for SanitizedMessage {
+    fn num_total_signatures(&self) -> u64 {
+        SanitizedMessage::num_total_signatures(self)
+    }
+
+    fn num_write_locks(&self) -> u64 {
+        SanitizedMessage::num_write_locks(self)
+    }
+
+    fn recent_blockhash(&self) -> &Hash {
+        SanitizedMessage::recent_blockhash(self)
+    }
+
+    fn num_instructions(&self) -> usize {
+        SanitizedMessage::instructions(self).len()
+    }
+
+    fn instructions_iter(&self) -> impl Iterator<Item = SVMInstruction> {
+        SanitizedMessage::instructions(self)
+            .iter()
+            .map(SVMInstruction::from)
+    }
+
+    fn program_instructions_iter(&self) -> impl Iterator<Item = (&Pubkey, SVMInstruction)> {
+        SanitizedMessage::program_instructions_iter(self)
+            .map(|(pubkey, ix)| (pubkey, SVMInstruction::from(ix)))
+    }
+
+    fn account_keys(&self) -> AccountKeys {
+        SanitizedMessage::account_keys(self)
+    }
+
+    fn fee_payer(&self) -> &Pubkey {
+        SanitizedMessage::fee_payer(self)
+    }
+
+    fn is_writable(&self, index: usize) -> bool {
+        SanitizedMessage::is_writable(self, index)
+    }
+
+    fn is_signer(&self, index: usize) -> bool {
+        SanitizedMessage::is_signer(self, index)
+    }
+
+    fn is_invoked(&self, key_index: usize) -> bool {
+        SanitizedMessage::is_invoked(self, key_index)
+    }
+
+    fn num_lookup_tables(&self) -> usize {
+        SanitizedMessage::message_address_table_lookups(self).len()
+    }
+
+    fn message_address_table_lookups(&self) -> impl Iterator<Item = SVMMessageAddressTableLookup> {
+        SanitizedMessage::message_address_table_lookups(self)
+            .iter()
+            .map(SVMMessageAddressTableLookup::from)
+    }
+}
diff --git a/svm-transaction/src/svm_message/sanitized_transaction.rs b/svm-transaction/src/svm_message/sanitized_transaction.rs
new file mode 100644
index 00000000000000..6321f27ca88e4f
--- /dev/null
+++ b/svm-transaction/src/svm_message/sanitized_transaction.rs
@@ -0,0 +1,63 @@
+use {
+    crate::{
+        instruction::SVMInstruction, message_address_table_lookup::SVMMessageAddressTableLookup,
+        svm_message::SVMMessage,
+    },
+    solana_sdk::{
+        hash::Hash, message::AccountKeys, pubkey::Pubkey, transaction::SanitizedTransaction,
+    },
+};
+
+impl SVMMessage for SanitizedTransaction {
+    fn num_total_signatures(&self) -> u64 {
+        SVMMessage::num_total_signatures(SanitizedTransaction::message(self))
+    }
+
+    fn num_write_locks(&self) -> u64 {
+        SVMMessage::num_write_locks(SanitizedTransaction::message(self))
+    }
+
+    fn recent_blockhash(&self) -> &Hash {
+        SVMMessage::recent_blockhash(SanitizedTransaction::message(self))
+    }
+
+    fn num_instructions(&self) -> usize {
+        SVMMessage::num_instructions(SanitizedTransaction::message(self))
+    }
+
+    fn instructions_iter(&self) -> impl Iterator<Item = SVMInstruction> {
+        SVMMessage::instructions_iter(SanitizedTransaction::message(self))
+    }
+
+    fn program_instructions_iter(&self) -> impl Iterator<Item = (&Pubkey, SVMInstruction)> {
+        SVMMessage::program_instructions_iter(SanitizedTransaction::message(self))
+    }
+
+    fn account_keys(&self) -> AccountKeys {
+        SVMMessage::account_keys(SanitizedTransaction::message(self))
+    }
+
+    fn fee_payer(&self) -> &Pubkey {
+        SVMMessage::fee_payer(SanitizedTransaction::message(self))
+    }
+
+    fn is_writable(&self, index: usize) -> bool {
+        SVMMessage::is_writable(SanitizedTransaction::message(self), index)
+    }
+
+    fn is_signer(&self, index: usize) -> bool {
+        SVMMessage::is_signer(SanitizedTransaction::message(self), index)
+    }
+
+    fn is_invoked(&self, key_index: usize) -> bool {
+        SVMMessage::is_invoked(SanitizedTransaction::message(self), key_index)
+    }
+
+    fn num_lookup_tables(&self) -> usize {
+        SVMMessage::num_lookup_tables(SanitizedTransaction::message(self))
+    }
+
+    fn message_address_table_lookups(&self) -> impl Iterator<Item = SVMMessageAddressTableLookup> {
+        SVMMessage::message_address_table_lookups(SanitizedTransaction::message(self))
+    }
+}
diff --git a/svm-transaction/src/svm_transaction.rs b/svm-transaction/src/svm_transaction.rs
new file mode 100644
index 00000000000000..4805453dbbef1e
--- /dev/null
+++ b/svm-transaction/src/svm_transaction.rs
@@ -0,0 +1,11 @@
+use {crate::svm_message::SVMMessage, solana_sdk::signature::Signature};
+
+mod sanitized_transaction;
+
+pub trait SVMTransaction: SVMMessage {
+    /// Get the first signature of the message.
+    fn signature(&self) -> &Signature;
+
+    /// Get all the signatures of the message.
+    fn signatures(&self) -> &[Signature];
+}
diff --git a/svm-transaction/src/svm_transaction/sanitized_transaction.rs b/svm-transaction/src/svm_transaction/sanitized_transaction.rs
new file mode 100644
index 00000000000000..dadc3244333b9e
--- /dev/null
+++ b/svm-transaction/src/svm_transaction/sanitized_transaction.rs
@@ -0,0 +1,14 @@
+use {
+    crate::svm_transaction::SVMTransaction,
+    solana_sdk::{signature::Signature, transaction::SanitizedTransaction},
+};
+
+impl SVMTransaction for SanitizedTransaction {
+    fn signature(&self) -> &Signature {
+        SanitizedTransaction::signature(self)
+    }
+
+    fn signatures(&self) -> &[Signature] {
+        SanitizedTransaction::signatures(self)
+    }
+}
diff --git a/svm/Cargo.toml b/svm/Cargo.toml
index 1416cbec9be11c..5f1cbd544eed10 100644
--- a/svm/Cargo.toml
+++ b/svm/Cargo.toml
@@ -13,20 +13,27 @@ edition = { workspace = true }
 itertools = { workspace = true }
 log = { workspace = true }
 percentage = { workspace = true }
+qualifier_attr = { workspace = true, optional = true }
 serde = { workspace = true, features = ["rc"] }
 serde_derive = { workspace = true }
 solana-bpf-loader-program = { workspace = true }
 solana-compute-budget = { workspace = true }
+solana-fee = { workspace = true }
 solana-frozen-abi = { workspace = true, optional = true }
 solana-frozen-abi-macro = { workspace = true, optional = true }
 solana-loader-v4-program = { workspace = true }
+solana-log-collector = { workspace = true }
 solana-measure = { workspace = true }
 solana-metrics = { workspace = true }
 solana-program-runtime = { workspace = true }
+solana-runtime-transaction = { workspace = true }
 solana-sdk = { workspace = true }
+solana-svm-transaction = { workspace = true }
 solana-system-program = { workspace = true }
+solana-timings = { workspace = true }
 solana-type-overrides = { workspace = true
} solana-vote = { workspace = true } +thiserror = { workspace = true } [lib] crate-type = ["lib"] @@ -38,6 +45,7 @@ lazy_static = { workspace = true } libsecp256k1 = { workspace = true } prost = { workspace = true } rand = { workspace = true } +shuttle = { workspace = true } solana-bpf-loader-program = { workspace = true } solana-compute-budget-program = { workspace = true } solana-logger = { workspace = true } @@ -53,7 +61,7 @@ targets = ["x86_64-unknown-linux-gnu"] rustc_version = { workspace = true } [features] -dev-context-only-utils = [] +dev-context-only-utils = ["dep:qualifier_attr"] frozen-abi = [ "dep:solana-frozen-abi", "dep:solana-frozen-abi-macro", diff --git a/svm/doc/spec.md b/svm/doc/spec.md index 8f148b557debf0..a851b051bb909a 100644 --- a/svm/doc/spec.md +++ b/svm/doc/spec.md @@ -209,8 +209,6 @@ The output of the transaction batch processor's - `execution_results`: Vector of results indicating whether a transaction was executed or could not be executed. Note executed transactions can still have failed! -- `loaded_transactions`: Vector of loaded transactions from transactions that - were processed. # Functional Model @@ -232,12 +230,6 @@ In bank context `load_and_execute_sanitized_transactions` is called from from `load_execute_and_commit_transactions` which receives a batch of transactions from its caller. -Multiple results of `load_and_execute_sanitized_transactions` are aggregated in -the struct `LoadAndExecuteSanitizedTransactionsOutput` - - `LoadAndExecuteSanitizedTransactionsOutput` contains - - vector of `TransactionLoadResult` - - vector of `TransactionExecutionResult` - Steps of `load_and_execute_sanitized_transactions` 1. Steps of preparation for execution diff --git a/svm/src/account_loader.rs b/svm/src/account_loader.rs index c62963c09e0934..b62833e404004b 100644 --- a/svm/src/account_loader.rs +++ b/svm/src/account_loader.rs @@ -1,20 +1,16 @@ use { crate::{ - account_overrides::AccountOverrides, account_rent_state::RentState, - nonce_info::NoncePartial, rollback_accounts::RollbackAccounts, - transaction_error_metrics::TransactionErrorMetrics, + account_overrides::AccountOverrides, account_rent_state::RentState, nonce_info::NonceInfo, + rollback_accounts::RollbackAccounts, transaction_error_metrics::TransactionErrorMetrics, transaction_processing_callback::TransactionProcessingCallback, }, itertools::Itertools, - solana_compute_budget::compute_budget_processor::{ - process_compute_budget_instructions, ComputeBudgetLimits, - }, + solana_compute_budget::compute_budget_limits::ComputeBudgetLimits, solana_program_runtime::loaded_programs::{ProgramCacheEntry, ProgramCacheForTxBatch}, solana_sdk::{ account::{Account, AccountSharedData, ReadableAccount, WritableAccount}, feature_set::{self, FeatureSet}, fee::FeeDetails, - message::SanitizedMessage, native_loader, nonce::State as NonceState, pubkey::Pubkey, @@ -22,12 +18,16 @@ use { rent_collector::{CollectedInfo, RentCollector, RENT_EXEMPT_RENT_EPOCH}, rent_debits::RentDebits, saturating_add_assign, - sysvar::{self, instructions::construct_instructions_data}, - transaction::{Result, SanitizedTransaction, TransactionError}, + sysvar::{ + self, + instructions::{construct_instructions_data, BorrowedAccountMeta, BorrowedInstruction}, + }, + transaction::{Result, TransactionError}, transaction_context::{IndexOfAccount, TransactionAccount}, }, + solana_svm_transaction::svm_message::SVMMessage, solana_system_program::{get_system_account_kind, SystemAccountKind}, - std::num::NonZeroUsize, + std::num::NonZeroU32, }; // for the 
load instructions
@@ -39,7 +39,7 @@ pub type TransactionLoadResult = Result<LoadedTransaction>;

 #[derive(PartialEq, Eq, Debug, Clone)]
 pub struct CheckedTransactionDetails {
-    pub nonce: Option<NoncePartial>,
+    pub nonce: Option<NonceInfo>,
     pub lamports_per_signature: u64,
 }

@@ -54,6 +54,7 @@ pub struct ValidatedTransactionDetails {
 }

 #[derive(PartialEq, Eq, Debug, Clone)]
+#[cfg_attr(feature = "dev-context-only-utils", derive(Default))]
 pub struct LoadedTransaction {
     pub accounts: Vec<TransactionAccount>,
     pub program_indices: TransactionProgramIndices,
@@ -62,7 +63,7 @@ pub struct LoadedTransaction {
     pub compute_budget_limits: ComputeBudgetLimits,
     pub rent: TransactionRent,
     pub rent_debits: RentDebits,
-    pub loaded_accounts_data_size: usize,
+    pub loaded_accounts_data_size: u32,
 }

 /// Collect rent from an account if rent is still enabled and regardless of
@@ -156,7 +157,7 @@ pub fn validate_fee_payer(
 /// second element.
 pub(crate) fn load_accounts<CB: TransactionProcessingCallback>(
     callbacks: &CB,
-    txs: &[SanitizedTransaction],
+    txs: &[impl SVMMessage],
     validation_results: Vec<TransactionValidationResult>,
     error_metrics: &mut TransactionErrorMetrics,
     account_overrides: Option<&AccountOverrides>,
@@ -168,12 +169,10 @@ pub(crate) fn load_accounts(
         .zip(validation_results)
         .map(|etx| match etx {
             (tx, Ok(tx_details)) => {
-                let message = tx.message();
-
                 // load transactions
                 load_transaction_accounts(
                     callbacks,
-                    message,
+                    tx,
                     tx_details,
                     error_metrics,
                     account_overrides,
@@ -189,7 +188,7 @@ pub(crate) fn load_accounts(

 fn load_transaction_accounts<CB: TransactionProcessingCallback>(
     callbacks: &CB,
-    message: &SanitizedMessage,
+    message: &impl SVMMessage,
     tx_details: ValidatedTransactionDetails,
     error_metrics: &mut TransactionErrorMetrics,
     account_overrides: Option<&AccountOverrides>,
@@ -201,15 +200,11 @@ fn load_transaction_accounts(
     let account_keys = message.account_keys();
     let mut accounts_found = Vec::with_capacity(account_keys.len());
     let mut rent_debits = RentDebits::default();
-
-    let requested_loaded_accounts_data_size_limit =
-        get_requested_loaded_accounts_data_size_limit(message)?;
-    let mut accumulated_accounts_data_size: usize = 0;
+    let mut accumulated_accounts_data_size: u32 = 0;

     let instruction_accounts = message
-        .instructions()
-        .iter()
-        .flat_map(|instruction| &instruction.accounts)
+        .instructions_iter()
+        .flat_map(|instruction| instruction.accounts)
         .unique()
         .collect::<Vec<&u8>>();

@@ -278,7 +273,7 @@ fn load_transaction_accounts(
                 accumulate_and_check_loaded_account_data_size(
                     &mut accumulated_accounts_data_size,
                     account_size,
-                    requested_loaded_accounts_data_size_limit,
+                    tx_details.compute_budget_limits.loaded_accounts_bytes,
                     error_metrics,
                 )?;

@@ -295,11 +290,10 @@ fn load_transaction_accounts(
     let builtins_start_index = accounts.len();
     let program_indices = message
-        .instructions()
-        .iter()
+        .instructions_iter()
         .map(|instruction| {
             let mut account_indices = Vec::with_capacity(2);
-            let mut program_index = instruction.program_id_index as usize;
+            let program_index = instruction.program_id_index as usize;
             // This command may never return error, because the transaction is sanitized
             let (program_id, program_account) = accounts
                 .get(program_index)
@@ -323,15 +317,12 @@ fn load_transaction_accounts(
             if native_loader::check_id(owner_id) {
                 return Ok(account_indices);
             }
-            program_index = if let Some(owner_index) = accounts
+            if !accounts
                 .get(builtins_start_index..)
                 .ok_or(TransactionError::ProgramAccountNotFound)?
                 .iter()
-                .position(|(key, _)| key == owner_id)
+                .any(|(key, _)| key == owner_id)
             {
-                builtins_start_index.saturating_add(owner_index)
-            } else {
-                let owner_index = accounts.len();
                 if let Some(owner_account) = callbacks.get_account_shared_data(owner_id) {
                     if !native_loader::check_id(owner_account.owner())
                         || !owner_account.executable()
@@ -342,7 +333,7 @@ fn load_transaction_accounts(
                     accumulate_and_check_loaded_account_data_size(
                         &mut accumulated_accounts_data_size,
                         owner_account.data().len(),
-                        requested_loaded_accounts_data_size_limit,
+                        tx_details.compute_budget_limits.loaded_accounts_bytes,
                         error_metrics,
                     )?;
                     accounts.push((*owner_id, owner_account));
@@ -350,9 +341,7 @@ fn load_transaction_accounts(
                     error_metrics.account_not_found += 1;
                     return Err(TransactionError::ProgramAccountNotFound);
                 }
-                owner_index
-            };
-            account_indices.insert(0, program_index as IndexOfAccount);
+            }
             Ok(account_indices)
         })
         .collect::<Result<Vec<Vec<IndexOfAccount>>>>()?;

@@ -369,28 +358,6 @@ fn load_transaction_accounts(
     })
 }

-/// Total accounts data a transaction can load is limited to
-///   if `set_tx_loaded_accounts_data_size` instruction is not activated or not used, then
-///     default value of 64MiB to not break anyone in Mainnet-beta today
-///   else
-///     user requested loaded accounts size.
-/// Note, requesting zero bytes will result transaction error
-fn get_requested_loaded_accounts_data_size_limit(
-    sanitized_message: &SanitizedMessage,
-) -> Result<Option<NonZeroUsize>> {
-    let compute_budget_limits =
-        process_compute_budget_instructions(sanitized_message.program_instructions_iter())
-            .unwrap_or_default();
-    // sanitize against setting size limit to zero
-    NonZeroUsize::new(
-        usize::try_from(compute_budget_limits.loaded_accounts_bytes).unwrap_or_default(),
-    )
-    .map_or(
-        Err(TransactionError::InvalidLoadedAccountsDataSizeLimit),
-        |v| Ok(Some(v)),
-    )
-}
-
 fn account_shared_data_from_program(loaded_program: &ProgramCacheEntry) -> AccountSharedData {
     // It's an executable program account. The program is already loaded in the cache.
     // So the account data is not needed. Return a dummy AccountSharedData with meta
@@ -403,30 +370,53 @@ fn account_shared_data_from_program(loaded_program: &ProgramCacheEntry) -> AccountSharedData

 /// Accumulate loaded account data size into `accumulated_accounts_data_size`.
 /// Returns TransactionError::MaxLoadedAccountsDataSizeExceeded if
-/// `requested_loaded_accounts_data_size_limit` is specified and
-/// `accumulated_accounts_data_size` exceeds it.
+/// `accumulated_accounts_data_size` exceeds
+/// `requested_loaded_accounts_data_size_limit`.
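+/// For example, with a limit of `NonZeroU32::new(100)`, accumulating 60 and
+/// then 40 bytes succeeds (reaching the limit exactly is allowed), while one
+/// further byte fails the check.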
 fn accumulate_and_check_loaded_account_data_size(
-    accumulated_loaded_accounts_data_size: &mut usize,
+    accumulated_loaded_accounts_data_size: &mut u32,
     account_data_size: usize,
-    requested_loaded_accounts_data_size_limit: Option<NonZeroUsize>,
+    requested_loaded_accounts_data_size_limit: NonZeroU32,
     error_metrics: &mut TransactionErrorMetrics,
 ) -> Result<()> {
-    if let Some(requested_loaded_accounts_data_size) = requested_loaded_accounts_data_size_limit {
-        saturating_add_assign!(*accumulated_loaded_accounts_data_size, account_data_size);
-        if *accumulated_loaded_accounts_data_size > requested_loaded_accounts_data_size.get() {
-            error_metrics.max_loaded_accounts_data_size_exceeded += 1;
-            Err(TransactionError::MaxLoadedAccountsDataSizeExceeded)
-        } else {
-            Ok(())
-        }
+    let Ok(account_data_size) = u32::try_from(account_data_size) else {
+        error_metrics.max_loaded_accounts_data_size_exceeded += 1;
+        return Err(TransactionError::MaxLoadedAccountsDataSizeExceeded);
+    };
+    saturating_add_assign!(*accumulated_loaded_accounts_data_size, account_data_size);
+    if *accumulated_loaded_accounts_data_size > requested_loaded_accounts_data_size_limit.get() {
+        error_metrics.max_loaded_accounts_data_size_exceeded += 1;
+        Err(TransactionError::MaxLoadedAccountsDataSizeExceeded)
     } else {
         Ok(())
     }
 }

-fn construct_instructions_account(message: &SanitizedMessage) -> AccountSharedData {
+fn construct_instructions_account(message: &impl SVMMessage) -> AccountSharedData {
+    let account_keys = message.account_keys();
+    let mut decompiled_instructions = Vec::with_capacity(message.num_instructions());
+    for (program_id, instruction) in message.program_instructions_iter() {
+        let accounts = instruction
+            .accounts
+            .iter()
+            .map(|account_index| {
+                let account_index = usize::from(*account_index);
+                BorrowedAccountMeta {
+                    is_signer: message.is_signer(account_index),
+                    is_writable: message.is_writable(account_index),
+                    pubkey: account_keys.get(account_index).unwrap(),
+                }
+            })
+            .collect();
+
+        decompiled_instructions.push(BorrowedInstruction {
+            accounts,
+            data: instruction.data,
+            program_id,
+        });
+    }
+
     AccountSharedData::from(Account {
-        data: construct_instructions_data(&message.decompile_instructions()),
+        data: construct_instructions_data(&decompiled_instructions),
         owner: sysvar::id(),
         ..Account::default()
     })
@@ -441,7 +431,7 @@ mod tests {
             transaction_processing_callback::TransactionProcessingCallback,
         },
         nonce::state::Versions as NonceVersions,
-        solana_compute_budget::{compute_budget::ComputeBudget, compute_budget_processor},
+        solana_compute_budget::{compute_budget::ComputeBudget, compute_budget_limits},
         solana_program_runtime::loaded_programs::{ProgramCacheEntry, ProgramCacheForTxBatch},
         solana_sdk::{
             account::{Account, AccountSharedData, ReadableAccount, WritableAccount},
@@ -467,7 +457,7 @@ mod tests {
             transaction::{Result, SanitizedTransaction, Transaction, TransactionError},
             transaction_context::{TransactionAccount, TransactionContext},
         },
-        std::{borrow::Cow, collections::HashMap, convert::TryFrom, sync::Arc},
+        std::{borrow::Cow, collections::HashMap, sync::Arc},
     };

     #[derive(Default)]
@@ -755,21 +745,8 @@ mod tests {
                 assert_eq!(loaded_transaction.accounts.len(), 4);
                 assert_eq!(loaded_transaction.accounts[0].1, accounts[0].1);
                 assert_eq!(loaded_transaction.program_indices.len(), 2);
-                assert_eq!(loaded_transaction.program_indices[0].len(), 1);
-                assert_eq!(loaded_transaction.program_indices[1].len(), 2);
-                for program_indices in loaded_transaction.program_indices.iter() {
-                    for (i, program_index) in program_indices.iter().enumerate() {
-                        // +1 to skip first not loader account
-                        assert_eq!(
-                            loaded_transaction.accounts[*program_index as usize].0,
-                            accounts[i + 1].0
-                        );
-                        assert_eq!(
-                            loaded_transaction.accounts[*program_index as usize].1,
-                            accounts[i + 1].1
-                        );
-                    }
-                }
+                assert_eq!(loaded_transaction.program_indices[0], &[1]);
+                assert_eq!(loaded_transaction.program_indices[1], &[2]);
             }
             Err(e) => panic!("{e}"),
         }
@@ -851,105 +828,31 @@ mod tests {
     #[test]
     fn test_accumulate_and_check_loaded_account_data_size() {
         let mut error_metrics = TransactionErrorMetrics::default();
+        let mut accumulated_data_size: u32 = 0;
+        let data_size: usize = 123;
+        let requested_data_size_limit = NonZeroU32::new(data_size as u32).unwrap();
+
+        // OK - loaded data size is up to limit
+        assert!(accumulate_and_check_loaded_account_data_size(
+            &mut accumulated_data_size,
+            data_size,
+            requested_data_size_limit,
+            &mut error_metrics
+        )
+        .is_ok());
+        assert_eq!(data_size as u32, accumulated_data_size);

-        // assert check is OK if data limit is not enabled
-        {
-            let mut accumulated_data_size: usize = 0;
-            let data_size = usize::MAX;
-            let requested_data_size_limit = None;
-
-            assert!(accumulate_and_check_loaded_account_data_size(
-                &mut accumulated_data_size,
-                data_size,
-                requested_data_size_limit,
-                &mut error_metrics
-            )
-            .is_ok());
-        }
-
-        // assert check will fail with correct error if loaded data exceeds limit
-        {
-            let mut accumulated_data_size: usize = 0;
-            let data_size: usize = 123;
-            let requested_data_size_limit = NonZeroUsize::new(data_size);
-
-            // OK - loaded data size is up to limit
-            assert!(accumulate_and_check_loaded_account_data_size(
+        // fail - loading more data that would exceed limit
+        let another_byte: usize = 1;
+        assert_eq!(
+            accumulate_and_check_loaded_account_data_size(
                 &mut accumulated_data_size,
-                data_size,
+                another_byte,
                 requested_data_size_limit,
                 &mut error_metrics
-            )
-            .is_ok());
-            assert_eq!(data_size, accumulated_data_size);
-
-            // fail - loading more data that would exceed limit
-            let another_byte: usize = 1;
-            assert_eq!(
-                accumulate_and_check_loaded_account_data_size(
-                    &mut accumulated_data_size,
-                    another_byte,
-                    requested_data_size_limit,
-                    &mut error_metrics
-                ),
-                Err(TransactionError::MaxLoadedAccountsDataSizeExceeded)
-            );
-        }
-    }
-
-    #[test]
-    fn test_get_requested_loaded_accounts_data_size_limit() {
-        // an prrivate helper function
-        fn test(
-            instructions: &[solana_sdk::instruction::Instruction],
-            expected_result: &Result<Option<NonZeroUsize>>,
-        ) {
-            let payer_keypair = Keypair::new();
-            let tx = SanitizedTransaction::from_transaction_for_tests(Transaction::new(
-                &[&payer_keypair],
-                Message::new(instructions, Some(&payer_keypair.pubkey())),
-                Hash::default(),
-            ));
-            assert_eq!(
-                *expected_result,
-                get_requested_loaded_accounts_data_size_limit(tx.message())
-            );
-        }
-
-        let tx_not_set_limit = &[solana_sdk::instruction::Instruction::new_with_bincode(
-            Pubkey::new_unique(),
-            &0_u8,
-            vec![],
-        )];
-        let tx_set_limit_99 =
-            &[
-                solana_sdk::compute_budget::ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(99u32),
-                solana_sdk::instruction::Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]),
-            ];
-        let tx_set_limit_0 =
-            &[
-                solana_sdk::compute_budget::ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(0u32),
-                solana_sdk::instruction::Instruction::new_with_bincode(Pubkey::new_unique(), &0_u8, vec![]),
-            ];
-
-        let result_default_limit = Ok(Some(
-            NonZeroUsize::new(
-                usize::try_from(compute_budget_processor::MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES)
-                    .unwrap(),
-            )
-            .unwrap(),
-        ));
-        let result_requested_limit: Result<Option<NonZeroUsize>> =
-            Ok(Some(NonZeroUsize::new(99).unwrap()));
-        let result_invalid_limit = Err(TransactionError::InvalidLoadedAccountsDataSizeLimit);
-
-        // the results should be:
-        //    if tx doesn't set limit, then default limit (64MiB)
-        //    if tx sets limit, then requested limit
-        //    if tx sets limit to zero, then TransactionError::InvalidLoadedAccountsDataSizeLimit
-        test(tx_not_set_limit, &result_default_limit);
-        test(tx_set_limit_99, &result_requested_limit);
-        test(tx_set_limit_0, &result_invalid_limit);
+            ),
+            Err(TransactionError::MaxLoadedAccountsDataSizeExceeded)
+        );
     }

     struct ValidateFeePayerTestParameter {
@@ -1625,7 +1528,7 @@ mod tests {
                     mock_bank.accounts_map[&key3.pubkey()].clone()
                 ),
             ],
-            program_indices: vec![vec![2, 1]],
+            program_indices: vec![vec![1]],
             fee_details: FeeDetails::default(),
             rollback_accounts: RollbackAccounts::default(),
             compute_budget_limits: ComputeBudgetLimits::default(),
@@ -1718,7 +1621,7 @@ mod tests {
                     mock_bank.accounts_map[&key3.pubkey()].clone()
                 ),
             ],
-            program_indices: vec![vec![3, 1], vec![3, 1]],
+            program_indices: vec![vec![1], vec![1]],
            fee_details: FeeDetails::default(),
             rollback_accounts: RollbackAccounts::default(),
             compute_budget_limits: ComputeBudgetLimits::default(),
@@ -1770,7 +1673,7 @@ mod tests {
         );

         let compute_budget = ComputeBudget::new(u64::from(
-            compute_budget_processor::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT,
+            compute_budget_limits::DEFAULT_INSTRUCTION_COMPUTE_UNIT_LIMIT,
         ));
         let transaction_context = TransactionContext::new(
             loaded_txs[0].as_ref().unwrap().accounts.clone(),
@@ -1880,7 +1783,7 @@ mod tests {
                     mock_bank.accounts_map[&key3.pubkey()].clone()
                 ),
             ],
-            program_indices: vec![vec![3, 1], vec![3, 1]],
+            program_indices: vec![vec![1], vec![1]],
             fee_details: FeeDetails::default(),
             rollback_accounts: RollbackAccounts::default(),
             compute_budget_limits: ComputeBudgetLimits::default(),
diff --git a/svm/src/account_saver.rs b/svm/src/account_saver.rs
new file mode 100644
index 00000000000000..4ba2ec259fd87b
--- /dev/null
+++ b/svm/src/account_saver.rs
@@ -0,0 +1,529 @@
+use {
+    crate::{
+        rollback_accounts::RollbackAccounts,
+        transaction_execution_result::TransactionExecutionResult,
+    },
+    solana_sdk::{
+        account::AccountSharedData, nonce::state::DurableNonce, pubkey::Pubkey,
+        transaction::SanitizedTransaction, transaction_context::TransactionAccount,
+    },
+};
+
+// Used to approximate how many accounts will be collected for storage so that
+// vectors are allocated with an appropriate capacity. Doesn't account for some
+// optimization edge cases where some write-locked accounts skip storage.
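+// For example, a batch holding one successful transaction with 3 write locks
+// and one failed transaction rolling back 2 accounts yields a capacity
+// estimate of 5.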
+fn max_number_of_accounts_to_collect(
+    txs: &[SanitizedTransaction],
+    execution_results: &[TransactionExecutionResult],
+) -> usize {
+    execution_results
+        .iter()
+        .zip(txs)
+        .filter_map(|(execution_result, tx)| {
+            execution_result
+                .executed_transaction()
+                .map(|executed_tx| (executed_tx, tx))
+        })
+        .map(
+            |(executed_tx, tx)| match executed_tx.execution_details.status {
+                Ok(_) => tx.message().num_write_locks() as usize,
+                Err(_) => executed_tx.loaded_transaction.rollback_accounts.count(),
+            },
+        )
+        .sum()
+}
+
+pub fn collect_accounts_to_store<'a>(
+    txs: &'a [SanitizedTransaction],
+    execution_results: &'a mut [TransactionExecutionResult],
+    durable_nonce: &DurableNonce,
+    lamports_per_signature: u64,
+) -> (
+    Vec<(&'a Pubkey, &'a AccountSharedData)>,
+    Vec<Option<&'a SanitizedTransaction>>,
+) {
+    let collect_capacity = max_number_of_accounts_to_collect(txs, execution_results);
+    let mut accounts = Vec::with_capacity(collect_capacity);
+    let mut transactions = Vec::with_capacity(collect_capacity);
+    for (execution_result, tx) in execution_results.iter_mut().zip(txs) {
+        let Some(executed_tx) = execution_result.executed_transaction_mut() else {
+            // Don't store any accounts if tx wasn't executed
+            continue;
+        };
+
+        if executed_tx.execution_details.status.is_ok() {
+            collect_accounts_for_successful_tx(
+                &mut accounts,
+                &mut transactions,
+                tx,
+                &executed_tx.loaded_transaction.accounts,
+            );
+        } else {
+            collect_accounts_for_failed_tx(
+                &mut accounts,
+                &mut transactions,
+                tx,
+                &mut executed_tx.loaded_transaction.rollback_accounts,
+                durable_nonce,
+                lamports_per_signature,
+            );
+        }
+    }
+    (accounts, transactions)
+}
+
+fn collect_accounts_for_successful_tx<'a>(
+    collected_accounts: &mut Vec<(&'a Pubkey, &'a AccountSharedData)>,
+    collected_account_transactions: &mut Vec<Option<&'a SanitizedTransaction>>,
+    transaction: &'a SanitizedTransaction,
+    transaction_accounts: &'a [TransactionAccount],
+) {
+    let message = transaction.message();
+    for (_, (address, account)) in (0..message.account_keys().len())
+        .zip(transaction_accounts)
+        .filter(|(i, _)| {
+            message.is_writable(*i) && {
+                // Accounts that are invoked and also not passed as an instruction
+                // account to a program don't need to be stored because it's assumed
+                // to be impossible for a committable transaction to modify an
+                // invoked account if said account isn't passed to some program.
+                !message.is_invoked(*i) || message.is_instruction_account(*i)
+            }
+        })
+    {
+        collected_accounts.push((address, account));
+        collected_account_transactions.push(Some(transaction));
+    }
+}
+
+fn collect_accounts_for_failed_tx<'a>(
+    collected_accounts: &mut Vec<(&'a Pubkey, &'a AccountSharedData)>,
+    collected_account_transactions: &mut Vec<Option<&'a SanitizedTransaction>>,
+    transaction: &'a SanitizedTransaction,
+    rollback_accounts: &'a mut RollbackAccounts,
+    durable_nonce: &DurableNonce,
+    lamports_per_signature: u64,
+) {
+    let message = transaction.message();
+    let fee_payer_address = message.fee_payer();
+    match rollback_accounts {
+        RollbackAccounts::FeePayerOnly { fee_payer_account } => {
+            collected_accounts.push((fee_payer_address, &*fee_payer_account));
+            collected_account_transactions.push(Some(transaction));
+        }
+        RollbackAccounts::SameNonceAndFeePayer { nonce } => {
+            // Since we know we are dealing with a valid nonce account,
+            // unwrap is safe here
+            nonce
+                .try_advance_nonce(*durable_nonce, lamports_per_signature)
+                .unwrap();
+            collected_accounts.push((nonce.address(), nonce.account()));
+            collected_account_transactions.push(Some(transaction));
+        }
+        RollbackAccounts::SeparateNonceAndFeePayer {
+            nonce,
+            fee_payer_account,
+        } => {
+            collected_accounts.push((fee_payer_address, &*fee_payer_account));
+            collected_account_transactions.push(Some(transaction));
+
+            // Since we know we are dealing with a valid nonce account,
+            // unwrap is safe here
+            nonce
+                .try_advance_nonce(*durable_nonce, lamports_per_signature)
+                .unwrap();
+            collected_accounts.push((nonce.address(), nonce.account()));
+            collected_account_transactions.push(Some(transaction));
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use {
+        super::*,
+        crate::{
+            account_loader::LoadedTransaction,
+            nonce_info::NonceInfo,
+            transaction_execution_result::{ExecutedTransaction, TransactionExecutionDetails},
+        },
+        solana_compute_budget::compute_budget_limits::ComputeBudgetLimits,
+        solana_sdk::{
+            account::{AccountSharedData, ReadableAccount},
+            fee::FeeDetails,
+            hash::Hash,
+            instruction::{CompiledInstruction, InstructionError},
+            message::Message,
+            native_loader,
+            nonce::{
+                state::{Data as NonceData, Versions as NonceVersions},
+                State as NonceState,
+            },
+            nonce_account,
+            rent_debits::RentDebits,
+            signature::{keypair_from_seed, signers::Signers, Keypair, Signer},
+            system_instruction, system_program,
+            transaction::{Result, Transaction, TransactionError},
+        },
+        std::collections::HashMap,
+    };
+
+    fn new_sanitized_tx<T: Signers>(
+        from_keypairs: &T,
+        message: Message,
+        recent_blockhash: Hash,
+    ) -> SanitizedTransaction {
+        SanitizedTransaction::from_transaction_for_tests(Transaction::new(
+            from_keypairs,
+            message,
+            recent_blockhash,
+        ))
+    }
+
+    fn new_execution_result(
+        status: Result<()>,
+        loaded_transaction: LoadedTransaction,
+    ) -> TransactionExecutionResult {
+        TransactionExecutionResult::Executed(Box::new(ExecutedTransaction {
+            execution_details: TransactionExecutionDetails {
+                status,
+                log_messages: None,
+                inner_instructions: None,
+                return_data: None,
+                executed_units: 0,
+                accounts_data_len_delta: 0,
+            },
+            loaded_transaction,
+            programs_modified_by_tx: HashMap::new(),
+        }))
+    }
+
+    #[test]
+    fn test_collect_accounts_to_store() {
+        let keypair0 = Keypair::new();
+        let keypair1 = Keypair::new();
+        let pubkey = solana_sdk::pubkey::new_rand();
+        let account0 = AccountSharedData::new(1, 0, &Pubkey::default());
+        let account1 = AccountSharedData::new(2, 0, &Pubkey::default());
+        let account2 = AccountSharedData::new(3, 0,
&Pubkey::default()); + + let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])]; + let message = Message::new_with_compiled_instructions( + 1, + 0, + 2, + vec![keypair0.pubkey(), pubkey, native_loader::id()], + Hash::default(), + instructions, + ); + let transaction_accounts0 = vec![ + (message.account_keys[0], account0), + (message.account_keys[1], account2.clone()), + ]; + let tx0 = new_sanitized_tx(&[&keypair0], message, Hash::default()); + + let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])]; + let message = Message::new_with_compiled_instructions( + 1, + 0, + 2, + vec![keypair1.pubkey(), pubkey, native_loader::id()], + Hash::default(), + instructions, + ); + let transaction_accounts1 = vec![ + (message.account_keys[0], account1), + (message.account_keys[1], account2), + ]; + let tx1 = new_sanitized_tx(&[&keypair1], message, Hash::default()); + + let loaded0 = LoadedTransaction { + accounts: transaction_accounts0, + program_indices: vec![], + fee_details: FeeDetails::default(), + rollback_accounts: RollbackAccounts::default(), + compute_budget_limits: ComputeBudgetLimits::default(), + rent: 0, + rent_debits: RentDebits::default(), + loaded_accounts_data_size: 0, + }; + + let loaded1 = LoadedTransaction { + accounts: transaction_accounts1, + program_indices: vec![], + fee_details: FeeDetails::default(), + rollback_accounts: RollbackAccounts::default(), + compute_budget_limits: ComputeBudgetLimits::default(), + rent: 0, + rent_debits: RentDebits::default(), + loaded_accounts_data_size: 0, + }; + + let txs = vec![tx0.clone(), tx1.clone()]; + let mut execution_results = vec![ + new_execution_result(Ok(()), loaded0), + new_execution_result(Ok(()), loaded1), + ]; + let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &execution_results); + assert_eq!(max_collected_accounts, 2); + let (collected_accounts, transactions) = + collect_accounts_to_store(&txs, &mut execution_results, &DurableNonce::default(), 0); + assert_eq!(collected_accounts.len(), 2); + assert!(collected_accounts + .iter() + .any(|(pubkey, _account)| *pubkey == &keypair0.pubkey())); + assert!(collected_accounts + .iter() + .any(|(pubkey, _account)| *pubkey == &keypair1.pubkey())); + + assert_eq!(transactions.len(), 2); + assert!(transactions.iter().any(|txn| txn.unwrap().eq(&tx0))); + assert!(transactions.iter().any(|txn| txn.unwrap().eq(&tx1))); + } + + #[test] + fn test_nonced_failure_accounts_rollback_fee_payer_only() { + let from = keypair_from_seed(&[1; 32]).unwrap(); + let from_address = from.pubkey(); + let to_address = Pubkey::new_unique(); + let from_account_post = AccountSharedData::new(4199, 0, &Pubkey::default()); + let to_account = AccountSharedData::new(2, 0, &Pubkey::default()); + + let instructions = vec![system_instruction::transfer(&from_address, &to_address, 42)]; + let message = Message::new(&instructions, Some(&from_address)); + let blockhash = Hash::new_unique(); + let transaction_accounts = vec![ + (message.account_keys[0], from_account_post), + (message.account_keys[1], to_account), + ]; + let tx = new_sanitized_tx(&[&from], message, blockhash); + + let from_account_pre = AccountSharedData::new(4242, 0, &Pubkey::default()); + + let loaded = LoadedTransaction { + accounts: transaction_accounts, + program_indices: vec![], + fee_details: FeeDetails::default(), + rollback_accounts: RollbackAccounts::FeePayerOnly { + fee_payer_account: from_account_pre.clone(), + }, + compute_budget_limits: ComputeBudgetLimits::default(), + rent: 0, + rent_debits: 
RentDebits::default(), + loaded_accounts_data_size: 0, + }; + + let txs = vec![tx]; + let mut execution_results = vec![new_execution_result( + Err(TransactionError::InstructionError( + 1, + InstructionError::InvalidArgument, + )), + loaded, + )]; + let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &execution_results); + assert_eq!(max_collected_accounts, 1); + let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); + let (collected_accounts, _) = + collect_accounts_to_store(&txs, &mut execution_results, &durable_nonce, 0); + assert_eq!(collected_accounts.len(), 1); + assert_eq!( + collected_accounts + .iter() + .find(|(pubkey, _account)| *pubkey == &from_address) + .map(|(_pubkey, account)| *account) + .cloned() + .unwrap(), + from_account_pre, + ); + } + + #[test] + fn test_nonced_failure_accounts_rollback_separate_nonce_and_fee_payer() { + let nonce_address = Pubkey::new_unique(); + let nonce_authority = keypair_from_seed(&[0; 32]).unwrap(); + let from = keypair_from_seed(&[1; 32]).unwrap(); + let from_address = from.pubkey(); + let to_address = Pubkey::new_unique(); + let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); + let nonce_state = NonceVersions::new(NonceState::Initialized(NonceData::new( + nonce_authority.pubkey(), + durable_nonce, + 0, + ))); + let nonce_account_post = + AccountSharedData::new_data(43, &nonce_state, &system_program::id()).unwrap(); + let from_account_post = AccountSharedData::new(4199, 0, &Pubkey::default()); + let to_account = AccountSharedData::new(2, 0, &Pubkey::default()); + let nonce_authority_account = AccountSharedData::new(3, 0, &Pubkey::default()); + let recent_blockhashes_sysvar_account = AccountSharedData::new(4, 0, &Pubkey::default()); + + let instructions = vec![ + system_instruction::advance_nonce_account(&nonce_address, &nonce_authority.pubkey()), + system_instruction::transfer(&from_address, &to_address, 42), + ]; + let message = Message::new(&instructions, Some(&from_address)); + let blockhash = Hash::new_unique(); + let transaction_accounts = vec![ + (message.account_keys[0], from_account_post), + (message.account_keys[1], nonce_authority_account), + (message.account_keys[2], nonce_account_post), + (message.account_keys[3], to_account), + (message.account_keys[4], recent_blockhashes_sysvar_account), + ]; + let tx = new_sanitized_tx(&[&nonce_authority, &from], message, blockhash); + + let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); + let nonce_state = NonceVersions::new(NonceState::Initialized(NonceData::new( + nonce_authority.pubkey(), + durable_nonce, + 0, + ))); + let nonce_account_pre = + AccountSharedData::new_data(42, &nonce_state, &system_program::id()).unwrap(); + let from_account_pre = AccountSharedData::new(4242, 0, &Pubkey::default()); + + let nonce = NonceInfo::new(nonce_address, nonce_account_pre.clone()); + let loaded = LoadedTransaction { + accounts: transaction_accounts, + program_indices: vec![], + fee_details: FeeDetails::default(), + rollback_accounts: RollbackAccounts::SeparateNonceAndFeePayer { + nonce: nonce.clone(), + fee_payer_account: from_account_pre.clone(), + }, + compute_budget_limits: ComputeBudgetLimits::default(), + rent: 0, + rent_debits: RentDebits::default(), + loaded_accounts_data_size: 0, + }; + + let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); + let txs = vec![tx]; + let mut execution_results = vec![new_execution_result( + Err(TransactionError::InstructionError( + 1, + InstructionError::InvalidArgument, + )), 
+ loaded, + )]; + let max_collected_accounts = max_number_of_accounts_to_collect(&txs, &execution_results); + assert_eq!(max_collected_accounts, 2); + let (collected_accounts, _) = + collect_accounts_to_store(&txs, &mut execution_results, &durable_nonce, 0); + assert_eq!(collected_accounts.len(), 2); + assert_eq!( + collected_accounts + .iter() + .find(|(pubkey, _account)| *pubkey == &from_address) + .map(|(_pubkey, account)| *account) + .cloned() + .unwrap(), + from_account_pre, + ); + let collected_nonce_account = collected_accounts + .iter() + .find(|(pubkey, _account)| *pubkey == &nonce_address) + .map(|(_pubkey, account)| *account) + .cloned() + .unwrap(); + assert_eq!( + collected_nonce_account.lamports(), + nonce_account_pre.lamports(), + ); + assert!(nonce_account::verify_nonce_account( + &collected_nonce_account, + durable_nonce.as_hash() + ) + .is_some()); + } + + #[test] + fn test_nonced_failure_accounts_rollback_same_nonce_and_fee_payer() { + let nonce_authority = keypair_from_seed(&[0; 32]).unwrap(); + let nonce_address = nonce_authority.pubkey(); + let from = keypair_from_seed(&[1; 32]).unwrap(); + let from_address = from.pubkey(); + let to_address = Pubkey::new_unique(); + let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); + let nonce_state = NonceVersions::new(NonceState::Initialized(NonceData::new( + nonce_authority.pubkey(), + durable_nonce, + 0, + ))); + let nonce_account_post = + AccountSharedData::new_data(43, &nonce_state, &system_program::id()).unwrap(); + let from_account_post = AccountSharedData::new(4200, 0, &Pubkey::default()); + let to_account = AccountSharedData::new(2, 0, &Pubkey::default()); + let nonce_authority_account = AccountSharedData::new(3, 0, &Pubkey::default()); + let recent_blockhashes_sysvar_account = AccountSharedData::new(4, 0, &Pubkey::default()); + + let instructions = vec![ + system_instruction::advance_nonce_account(&nonce_address, &nonce_authority.pubkey()), + system_instruction::transfer(&from_address, &to_address, 42), + ]; + let message = Message::new(&instructions, Some(&nonce_address)); + let blockhash = Hash::new_unique(); + let transaction_accounts = vec![ + (message.account_keys[0], from_account_post), + (message.account_keys[1], nonce_authority_account), + (message.account_keys[2], nonce_account_post), + (message.account_keys[3], to_account), + (message.account_keys[4], recent_blockhashes_sysvar_account), + ]; + let tx = new_sanitized_tx(&[&nonce_authority, &from], message, blockhash); + + let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); + let nonce_state = NonceVersions::new(NonceState::Initialized(NonceData::new( + nonce_authority.pubkey(), + durable_nonce, + 0, + ))); + let nonce_account_pre = + AccountSharedData::new_data(42, &nonce_state, &system_program::id()).unwrap(); + + let nonce = NonceInfo::new(nonce_address, nonce_account_pre.clone()); + let loaded = LoadedTransaction { + accounts: transaction_accounts, + program_indices: vec![], + fee_details: FeeDetails::default(), + rollback_accounts: RollbackAccounts::SameNonceAndFeePayer { + nonce: nonce.clone(), + }, + compute_budget_limits: ComputeBudgetLimits::default(), + rent: 0, + rent_debits: RentDebits::default(), + loaded_accounts_data_size: 0, + }; + + let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); + let txs = vec![tx]; + let mut execution_results = vec![new_execution_result( + Err(TransactionError::InstructionError( + 1, + InstructionError::InvalidArgument, + )), + loaded, + )]; + let 
max_collected_accounts = max_number_of_accounts_to_collect(&txs, &execution_results); + assert_eq!(max_collected_accounts, 1); + let (collected_accounts, _) = + collect_accounts_to_store(&txs, &mut execution_results, &durable_nonce, 0); + assert_eq!(collected_accounts.len(), 1); + let collected_nonce_account = collected_accounts + .iter() + .find(|(pubkey, _account)| *pubkey == &nonce_address) + .map(|(_pubkey, account)| *account) + .cloned() + .unwrap(); + assert_eq!( + collected_nonce_account.lamports(), + nonce_account_pre.lamports() + ); + assert!(nonce_account::verify_nonce_account( + &collected_nonce_account, + durable_nonce.as_hash() + ) + .is_some()); + } +} diff --git a/svm/src/lib.rs b/svm/src/lib.rs index 201bf671a1be49..cbfef2305e41f9 100644 --- a/svm/src/lib.rs +++ b/svm/src/lib.rs @@ -4,16 +4,18 @@ pub mod account_loader; pub mod account_overrides; pub mod account_rent_state; +pub mod account_saver; pub mod message_processor; pub mod nonce_info; pub mod program_loader; pub mod rollback_accounts; pub mod runtime_config; pub mod transaction_account_state_info; +pub mod transaction_commit_result; pub mod transaction_error_metrics; +pub mod transaction_execution_result; pub mod transaction_processing_callback; pub mod transaction_processor; -pub mod transaction_results; #[macro_use] extern crate solana_metrics; diff --git a/svm/src/message_processor.rs b/svm/src/message_processor.rs index 95e5223b3ce53c..21348bc1ae7d95 100644 --- a/svm/src/message_processor.rs +++ b/svm/src/message_processor.rs @@ -1,18 +1,16 @@ use { solana_measure::measure::Measure, - solana_program_runtime::{ - invoke_context::InvokeContext, - timings::{ExecuteDetailsTimings, ExecuteTimings}, - }, + solana_program_runtime::invoke_context::InvokeContext, solana_sdk::{ account::WritableAccount, - message::SanitizedMessage, precompiles::is_precompile, saturating_add_assign, sysvar::instructions, transaction::TransactionError, transaction_context::{IndexOfAccount, InstructionAccount}, }, + solana_svm_transaction::svm_message::SVMMessage, + solana_timings::{ExecuteDetailsTimings, ExecuteTimings}, }; #[derive(Debug, Default, Clone, serde_derive::Deserialize, serde_derive::Serialize)] @@ -34,13 +32,13 @@ impl MessageProcessor { /// the call does not violate the bank's accounting rules. /// The accounts are committed back to the bank only if every instruction succeeds. 
pub fn process_message(
- message: &SanitizedMessage,
+ message: &impl SVMMessage,
program_indices: &[Vec<IndexOfAccount>],
invoke_context: &mut InvokeContext,
execute_timings: &mut ExecuteTimings,
accumulated_consumed_units: &mut u64,
) -> Result<(), TransactionError> {
- debug_assert_eq!(program_indices.len(), message.instructions().len());
+ debug_assert_eq!(program_indices.len(), message.num_instructions());
for (instruction_index, ((program_id, instruction), program_indices)) in message
.program_instructions_iter()
.zip(program_indices.iter())
@@ -97,7 +95,7 @@ impl MessageProcessor {
instruction_context.configure(
program_indices,
&instruction_accounts,
- &instruction.data,
+ instruction.data,
);
})
.and_then(|_| {
@@ -108,7 +106,7 @@ impl MessageProcessor {
let time = Measure::start("execute_instruction");
let mut compute_units_consumed = 0;
let result = invoke_context.process_instruction(
- &instruction.data,
+ instruction.data,
&instruction_accounts,
program_indices,
&mut compute_units_consumed,
@@ -160,7 +158,7 @@ mod tests {
feature_set::FeatureSet,
hash::Hash,
instruction::{AccountMeta, Instruction, InstructionError},
- message::{AccountKeys, Message},
+ message::{AccountKeys, Message, SanitizedMessage},
native_loader::{self, create_loadable_account_for_test},
pubkey::Pubkey,
rent::Rent,
diff --git a/svm/src/nonce_info.rs b/svm/src/nonce_info.rs
index 062b8fc221f87f..6405c5e9cbe1f4 100644
--- a/svm/src/nonce_info.rs
+++ b/svm/src/nonce_info.rs
@@ -1,38 +1,61 @@
-use solana_sdk::{account::AccountSharedData, nonce_account, pubkey::Pubkey};
-
-pub trait NonceInfo {
- fn address(&self) -> &Pubkey;
- fn account(&self) -> &AccountSharedData;
- fn lamports_per_signature(&self) -> Option<u64>;
- fn fee_payer_account(&self) -> Option<&AccountSharedData>;
-}
+use {
+ solana_sdk::{
+ account::AccountSharedData,
+ account_utils::StateMut,
+ nonce::state::{DurableNonce, State as NonceState, Versions as NonceVersions},
+ pubkey::Pubkey,
+ },
+ thiserror::Error,
+};

/// Holds limited nonce info available during transaction checks
#[derive(Clone, Debug, Default, PartialEq, Eq)]
-pub struct NoncePartial {
+pub struct NonceInfo {
address: Pubkey,
account: AccountSharedData,
}

-impl NoncePartial {
+#[derive(Error, Debug, PartialEq)]
+pub enum AdvanceNonceError {
+ #[error("Invalid account")]
+ Invalid,
+ #[error("Uninitialized nonce")]
+ Uninitialized,
+}
+
+impl NonceInfo {
pub fn new(address: Pubkey, account: AccountSharedData) -> Self {
Self { address, account }
}
-}

-impl NonceInfo for NoncePartial {
- fn address(&self) -> &Pubkey {
+ // Advance the stored blockhash to prevent fee theft by someone
+ // replaying nonce transactions that have failed with an
+ // `InstructionError`.
+ pub fn try_advance_nonce(
+ &mut self,
+ durable_nonce: DurableNonce,
+ lamports_per_signature: u64,
+ ) -> Result<(), AdvanceNonceError> {
+ let nonce_versions = StateMut::<NonceVersions>::state(&self.account)
+ .map_err(|_| AdvanceNonceError::Invalid)?;
+ if let NonceState::Initialized(ref data) = nonce_versions.state() {
+ let nonce_state =
+ NonceState::new_initialized(&data.authority, durable_nonce, lamports_per_signature);
+ let nonce_versions = NonceVersions::new(nonce_state);
+ self.account.set_state(&nonce_versions).unwrap();
+ Ok(())
+ } else {
+ Err(AdvanceNonceError::Uninitialized)
+ }
+ }
+
+ pub fn address(&self) -> &Pubkey {
&self.address
}
- fn account(&self) -> &AccountSharedData {
+
+ pub fn account(&self) -> &AccountSharedData {
&self.account
}
- fn lamports_per_signature(&self) -> Option<u64> {
- nonce_account::lamports_per_signature_of(&self.account)
- }
- fn fee_payer_account(&self) -> Option<&AccountSharedData> {
- None
- }
}

#[cfg(test)]
@@ -48,30 +71,76 @@ mod tests {
},
};

+ fn create_nonce_account(state: NonceState) -> AccountSharedData {
+ AccountSharedData::new_data(1_000_000, &NonceVersions::new(state), &system_program::id())
+ .unwrap()
+ }
+
#[test]
fn test_nonce_info() {
let nonce_address = Pubkey::new_unique();
let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique());
let lamports_per_signature = 42;
- let nonce_account = AccountSharedData::new_data(
- 43,
- &NonceVersions::new(NonceState::Initialized(NonceData::new(
- Pubkey::default(),
- durable_nonce,
- lamports_per_signature,
+ let nonce_account = create_nonce_account(NonceState::Initialized(NonceData::new(
+ Pubkey::default(),
+ durable_nonce,
+ lamports_per_signature,
+ )));
+
+ let nonce_info = NonceInfo::new(nonce_address, nonce_account.clone());
+ assert_eq!(*nonce_info.address(), nonce_address);
+ assert_eq!(*nonce_info.account(), nonce_account);
+ }
+
+ #[test]
+ fn test_try_advance_nonce_success() {
+ let authority = Pubkey::new_unique();
+ let mut nonce_info = NonceInfo::new(
+ Pubkey::new_unique(),
+ create_nonce_account(NonceState::Initialized(NonceData::new(
+ authority,
+ DurableNonce::from_blockhash(&Hash::new_unique()),
+ 42,
))),
- &system_program::id(),
- )
- .unwrap();
-
- // NoncePartial create + NonceInfo impl
- let partial = NoncePartial::new(nonce_address, nonce_account.clone());
- assert_eq!(*partial.address(), nonce_address);
- assert_eq!(*partial.account(), nonce_account);
+ );
+
+ let new_nonce = DurableNonce::from_blockhash(&Hash::new_unique());
+ let new_lamports_per_signature = 100;
+ let result = nonce_info.try_advance_nonce(new_nonce, new_lamports_per_signature);
+ assert_eq!(result, Ok(()));
+
+ let nonce_versions = StateMut::<NonceVersions>::state(&nonce_info.account).unwrap();
assert_eq!(
- partial.lamports_per_signature(),
- Some(lamports_per_signature)
+ &NonceState::Initialized(NonceData::new(
+ authority,
+ new_nonce,
+ new_lamports_per_signature
+ )),
+ nonce_versions.state()
+ );
+ }
+
+ #[test]
+ fn test_try_advance_nonce_invalid() {
+ let mut nonce_info = NonceInfo::new(
+ Pubkey::new_unique(),
+ AccountSharedData::new(1_000_000, 0, &Pubkey::default()),
);
- assert_eq!(partial.fee_payer_account(), None);
+
+ let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique());
+ let result = nonce_info.try_advance_nonce(durable_nonce, 5000);
+ assert_eq!(result, Err(AdvanceNonceError::Invalid));
+ }
+
+ #[test]
+ fn test_try_advance_nonce_uninitialized() {
+ let mut nonce_info = NonceInfo::new(
+ Pubkey::new_unique(),
+ create_nonce_account(NonceState::Uninitialized),
+ );
+
+ let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique());
+ let result = nonce_info.try_advance_nonce(durable_nonce, 5000);
+ assert_eq!(result, Err(AdvanceNonceError::Uninitialized));
}
}
diff --git a/svm/src/program_loader.rs b/svm/src/program_loader.rs
index f77780fc9fecfa..70d6b3d8108f6d 100644
--- a/svm/src/program_loader.rs
+++ b/svm/src/program_loader.rs
@@ -1,11 +1,8 @@
use {
crate::transaction_processing_callback::TransactionProcessingCallback,
- solana_program_runtime::{
- loaded_programs::{
- LoadProgramMetrics, ProgramCacheEntry, ProgramCacheEntryOwner, ProgramCacheEntryType,
- ProgramRuntimeEnvironment, ProgramRuntimeEnvironments, DELAY_VISIBILITY_SLOT_OFFSET,
- },
- timings::ExecuteDetailsTimings,
+ solana_program_runtime::loaded_programs::{
+ LoadProgramMetrics, ProgramCacheEntry, ProgramCacheEntryOwner, ProgramCacheEntryType,
+ ProgramRuntimeEnvironment, ProgramRuntimeEnvironments, DELAY_VISIBILITY_SLOT_OFFSET,
},
solana_sdk::{
account::{AccountSharedData, ReadableAccount},
@@ -18,6 +15,7 @@ use {
pubkey::Pubkey,
transaction::{self, TransactionError},
},
+ solana_timings::ExecuteDetailsTimings,
solana_type_overrides::sync::Arc,
};

diff --git a/svm/src/rollback_accounts.rs b/svm/src/rollback_accounts.rs
index 6fbd3a9c2e91e8..71b670d37c4f85 100644
--- a/svm/src/rollback_accounts.rs
+++ b/svm/src/rollback_accounts.rs
@@ -1,5 +1,5 @@
use {
- crate::nonce_info::{NonceInfo, NoncePartial},
+ crate::nonce_info::NonceInfo,
solana_sdk::{
account::{AccountSharedData, ReadableAccount, WritableAccount},
clock::Epoch,
@@ -15,10 +15,10 @@ pub enum RollbackAccounts {
fee_payer_account: AccountSharedData,
},
SameNonceAndFeePayer {
- nonce: NoncePartial,
+ nonce: NonceInfo,
},
SeparateNonceAndFeePayer {
- nonce: NoncePartial,
+ nonce: NonceInfo,
fee_payer_account: AccountSharedData,
},
}
@@ -34,7 +34,7 @@ impl Default for RollbackAccounts {

impl RollbackAccounts {
pub fn new(
- nonce: Option<NoncePartial>,
+ nonce: Option<NonceInfo>,
fee_payer_address: Pubkey,
mut fee_payer_account: AccountSharedData,
fee_payer_rent_debit: u64,
@@ -52,7 +52,7 @@ impl RollbackAccounts {
if let Some(nonce) = nonce {
if &fee_payer_address == nonce.address() {
RollbackAccounts::SameNonceAndFeePayer {
- nonce: NoncePartial::new(fee_payer_address, fee_payer_account),
+ nonce: NonceInfo::new(fee_payer_address, fee_payer_account),
}
} else {
RollbackAccounts::SeparateNonceAndFeePayer {
@@ -72,22 +72,11 @@ impl RollbackAccounts {
}
}

- pub fn nonce(&self) -> Option<&NoncePartial> {
+ /// Number of accounts tracked for rollback
+ pub fn count(&self) -> usize {
match self {
- Self::FeePayerOnly { .. } => None,
- Self::SameNonceAndFeePayer { nonce } | Self::SeparateNonceAndFeePayer { nonce, .. } => {
- Some(nonce)
- }
- }
- }
-
- pub fn fee_payer_account(&self) -> &AccountSharedData {
- match self {
- Self::FeePayerOnly { fee_payer_account }
- | Self::SeparateNonceAndFeePayer {
- fee_payer_account, ..
- } => fee_payer_account,
- Self::SameNonceAndFeePayer { nonce } => nonce.account(),
+ Self::FeePayerOnly { .. } | Self::SameNonceAndFeePayer { .. } => 1,
+ Self::SeparateNonceAndFeePayer { .. } => 2,
}
}
}
@@ -160,7 +149,7 @@ mod tests {
account
};

- let nonce = NoncePartial::new(nonce_address, rent_collected_nonce_account.clone());
+ let nonce = NonceInfo::new(nonce_address, rent_collected_nonce_account.clone());
let rollback_accounts = RollbackAccounts::new(
Some(nonce),
nonce_address,
@@ -204,7 +193,7 @@ mod tests {
account
};

- let nonce = NoncePartial::new(nonce_address, nonce_account.clone());
+ let nonce = NonceInfo::new(nonce_address, nonce_account.clone());
let rollback_accounts = RollbackAccounts::new(
Some(nonce),
fee_payer_address,
diff --git a/svm/src/transaction_commit_result.rs b/svm/src/transaction_commit_result.rs
new file mode 100644
index 00000000000000..5cc413d7b175f9
--- /dev/null
+++ b/svm/src/transaction_commit_result.rs
@@ -0,0 +1,43 @@
+use {
+ crate::transaction_execution_result::{
+ TransactionExecutionDetails, TransactionLoadedAccountsStats,
+ },
+ solana_sdk::{
+ fee::FeeDetails, rent_debits::RentDebits, transaction::Result as TransactionResult,
+ },
+};
+
+pub type TransactionCommitResult = TransactionResult<CommittedTransaction>;
+
+#[derive(Clone, Debug)]
+pub struct CommittedTransaction {
+ pub loaded_account_stats: TransactionLoadedAccountsStats,
+ pub execution_details: TransactionExecutionDetails,
+ pub fee_details: FeeDetails,
+ pub rent_debits: RentDebits,
+}
+
+pub trait TransactionCommitResultExtensions {
+ fn was_executed(&self) -> bool;
+ fn was_executed_successfully(&self) -> bool;
+ fn transaction_result(&self) -> TransactionResult<()>;
+}
+
+impl TransactionCommitResultExtensions for TransactionCommitResult {
+ fn was_executed(&self) -> bool {
+ self.is_ok()
+ }
+
+ fn was_executed_successfully(&self) -> bool {
+ match self {
+ Ok(committed_tx) => committed_tx.execution_details.status.is_ok(),
+ Err(_) => false,
+ }
+ }
+
+ fn transaction_result(&self) -> TransactionResult<()> {
+ self.as_ref()
+ .map_err(|err| err.clone())
+ .and_then(|committed_tx| committed_tx.execution_details.status.clone())
+ }
+}
diff --git a/svm/src/transaction_results.rs b/svm/src/transaction_execution_result.rs
similarity index 64%
rename from svm/src/transaction_results.rs
rename to svm/src/transaction_execution_result.rs
index 9f829a675267ed..a7c965fbed01bd 100644
--- a/svm/src/transaction_results.rs
+++ b/svm/src/transaction_execution_result.rs
@@ -5,28 +5,20 @@
)]
pub use solana_sdk::inner_instruction::{InnerInstruction, InnerInstructionsList};
use {
+ crate::account_loader::LoadedTransaction,
serde::{Deserialize, Serialize},
solana_program_runtime::loaded_programs::ProgramCacheEntry,
solana_sdk::{
- fee::FeeDetails,
pubkey::Pubkey,
- rent_debits::RentDebits,
transaction::{self, TransactionError},
transaction_context::TransactionReturnData,
},
std::{collections::HashMap, sync::Arc},
};

-pub struct TransactionResults {
- pub fee_collection_results: Vec<transaction::Result<()>>,
- pub loaded_accounts_stats: Vec<transaction::Result<TransactionLoadedAccountsStats>>,
- pub execution_results: Vec<TransactionExecutionResult>,
- pub rent_debits: Vec<RentDebits>,
-}
-
#[derive(Debug, Default, Clone)]
pub struct TransactionLoadedAccountsStats {
- pub loaded_accounts_data_size: usize,
+ pub loaded_accounts_data_size: u32,
pub loaded_accounts_count: usize,
}

@@ -42,39 +34,57 @@ pub struct TransactionLoadedAccountsStats {
/// make such checks hard to do incorrectly.
#[derive(Debug, Clone)]
pub enum TransactionExecutionResult {
- Executed {
- details: TransactionExecutionDetails,
- programs_modified_by_tx: HashMap<Pubkey, Arc<ProgramCacheEntry>>,
- },
+ Executed(Box<ExecutedTransaction>),
NotExecuted(TransactionError),
}

+#[derive(Debug, Clone)]
+pub struct ExecutedTransaction {
+ pub loaded_transaction: LoadedTransaction,
+ pub execution_details: TransactionExecutionDetails,
+ pub programs_modified_by_tx: HashMap<Pubkey, Arc<ProgramCacheEntry>>,
+}
+
+impl ExecutedTransaction {
+ pub fn was_successful(&self) -> bool {
+ self.execution_details.status.is_ok()
+ }
+}
+
impl TransactionExecutionResult {
pub fn was_executed_successfully(&self) -> bool {
- match self {
- Self::Executed { details, .. } => details.status.is_ok(),
- Self::NotExecuted { .. } => false,
- }
+ self.executed_transaction()
+ .map(|executed_tx| executed_tx.was_successful())
+ .unwrap_or(false)
}

pub fn was_executed(&self) -> bool {
+ self.executed_transaction().is_some()
+ }
+
+ pub fn details(&self) -> Option<&TransactionExecutionDetails> {
+ self.executed_transaction()
+ .map(|executed_tx| &executed_tx.execution_details)
+ }
+
+ pub fn flattened_result(&self) -> transaction::Result<()> {
match self {
- Self::Executed { .. } => true,
- Self::NotExecuted(_) => false,
+ Self::Executed(executed_tx) => executed_tx.execution_details.status.clone(),
+ Self::NotExecuted(err) => Err(err.clone()),
}
}

- pub fn details(&self) -> Option<&TransactionExecutionDetails> {
+ pub fn executed_transaction(&self) -> Option<&ExecutedTransaction> {
match self {
- Self::Executed { details, .. } => Some(details),
- Self::NotExecuted(_) => None,
+ Self::Executed(executed_tx) => Some(executed_tx.as_ref()),
+ Self::NotExecuted { .. } => None,
}
}

- pub fn flattened_result(&self) -> transaction::Result<()> {
+ pub fn executed_transaction_mut(&mut self) -> Option<&mut ExecutedTransaction> {
match self {
- Self::Executed { details, .. } => details.status.clone(),
- Self::NotExecuted(err) => Err(err.clone()),
+ Self::Executed(executed_tx) => Some(executed_tx.as_mut()),
+ Self::NotExecuted { .. } => None,
}
}
}
@@ -84,7 +94,6 @@ pub struct TransactionExecutionDetails {
pub status: transaction::Result<()>,
pub log_messages: Option<Vec<String>>,
pub inner_instructions: Option<InnerInstructionsList>,
- pub fee_details: FeeDetails,
pub return_data: Option<TransactionReturnData>,
pub executed_units: u64,
/// The change in accounts data len for this transaction.
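Note on the reshaped API above: `TransactionExecutionResult::Executed` now boxes an `ExecutedTransaction` that owns the `LoadedTransaction`, so execution details and the loaded accounts travel together instead of being correlated through a separate `loaded_transactions` vector. A minimal consumption sketch, assuming `output` is the `LoadAndExecuteSanitizedTransactionsOutput` returned by the batch processor (function and variable names here are illustrative, not part of the patch):

    // Sketch only: walking the reshaped execution results.
    use solana_svm::{
        transaction_execution_result::TransactionExecutionResult,
        transaction_processor::LoadAndExecuteSanitizedTransactionsOutput,
    };

    fn summarize(output: &LoadAndExecuteSanitizedTransactionsOutput) {
        for result in &output.execution_results {
            match result {
                TransactionExecutionResult::Executed(executed_tx) => {
                    // Status and the accounts loaded for this transaction now
                    // live in one place, inside the boxed ExecutedTransaction.
                    let ok = executed_tx.execution_details.status.is_ok();
                    let n = executed_tx.loaded_transaction.accounts.len();
                    println!("executed: success={ok}, loaded_accounts={n}");
                }
                TransactionExecutionResult::NotExecuted(err) => {
                    println!("not executed: {err}");
                }
            }
        }
    }
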
diff --git a/svm/src/transaction_processor.rs b/svm/src/transaction_processor.rs
index 791d847e689f32..c2fb997b3f522f 100644
--- a/svm/src/transaction_processor.rs
+++ b/svm/src/transaction_processor.rs
@@ -1,9 +1,11 @@
+#[cfg(feature = "dev-context-only-utils")]
+use qualifier_attr::{field_qualifiers, qualifiers};
use {
crate::{
account_loader::{
collect_rent_from_account, load_accounts, validate_fee_payer,
CheckedTransactionDetails, LoadedTransaction, TransactionCheckResult,
- TransactionLoadResult, TransactionValidationResult, ValidatedTransactionDetails,
+ TransactionValidationResult, ValidatedTransactionDetails,
},
account_overrides::AccountOverrides,
message_processor::MessageProcessor,
@@ -11,35 +13,31 @@ use {
rollback_accounts::RollbackAccounts,
transaction_account_state_info::TransactionAccountStateInfo,
transaction_error_metrics::TransactionErrorMetrics,
+ transaction_execution_result::{
+ ExecutedTransaction, TransactionExecutionDetails, TransactionExecutionResult,
+ },
transaction_processing_callback::TransactionProcessingCallback,
- transaction_results::{TransactionExecutionDetails, TransactionExecutionResult},
},
log::debug,
percentage::Percentage,
solana_bpf_loader_program::syscalls::create_program_runtime_environment_v1,
- solana_compute_budget::{
- compute_budget::ComputeBudget,
- compute_budget_processor::process_compute_budget_instructions,
- },
+ solana_compute_budget::compute_budget::ComputeBudget,
solana_loader_v4_program::create_program_runtime_environment_v2,
- solana_measure::{measure, measure::Measure},
+ solana_log_collector::LogCollector,
+ solana_measure::{measure::Measure, measure_us},
solana_program_runtime::{
invoke_context::{EnvironmentConfig, InvokeContext},
loaded_programs::{
ForkGraph, ProgramCache, ProgramCacheEntry, ProgramCacheForTxBatch,
ProgramCacheMatchCriteria,
},
- log_collector::LogCollector,
sysvar_cache::SysvarCache,
- timings::{ExecuteTimingType, ExecuteTimings},
},
+ solana_runtime_transaction::instructions_processor::process_compute_budget_instructions,
solana_sdk::{
account::{AccountSharedData, ReadableAccount, PROGRAM_OWNERS},
clock::{Epoch, Slot},
- feature_set::{
- include_loaded_accounts_data_size_in_fee_calculation,
- remove_rounding_in_fee_calculation, FeatureSet,
- },
+ feature_set::{remove_rounding_in_fee_calculation, FeatureSet},
fee::{FeeBudgetLimits, FeeStructure},
hash::Hash,
inner_instruction::{InnerInstruction, InnerInstructionsList},
@@ -51,10 +49,10 @@ use {
transaction::{self, SanitizedTransaction, TransactionError},
transaction_context::{ExecutionRecord, TransactionContext},
},
+ solana_timings::{ExecuteTimingType, ExecuteTimings},
solana_type_overrides::sync::{atomic::Ordering, Arc, RwLock, RwLockReadGuard},
solana_vote::vote_account::VoteAccountsHashMap,
std::{
- cell::RefCell,
collections::{hash_map::Entry, HashMap, HashSet},
fmt::{Debug, Formatter},
rc::Rc,
@@ -74,8 +72,6 @@ pub struct LoadAndExecuteSanitizedTransactionsOutput {
// Vector of results indicating whether a transaction was executed or could not
// be executed. Note executed transactions can still have failed!
pub execution_results: Vec<TransactionExecutionResult>,
- // Vector of loaded transactions from transactions that were processed.
- pub loaded_transactions: Vec<TransactionLoadResult>,
}

/// Configuration of the recording capabilities for transaction execution
@@ -138,6 +134,10 @@ pub struct TransactionProcessingEnvironment<'a> {
}

#[cfg_attr(feature = "frozen-abi", derive(AbiExample))]
+#[cfg_attr(
+ feature = "dev-context-only-utils",
+ field_qualifiers(slot(pub), epoch(pub))
+)]
pub struct TransactionBatchProcessor<FG: ForkGraph> {
/// Bank slot (i.e. block)
slot: Slot,
@@ -234,7 +234,7 @@ impl<FG: ForkGraph> TransactionBatchProcessor<FG> {
let mut error_metrics = TransactionErrorMetrics::default();
let mut execute_timings = ExecuteTimings::default();

- let (validation_results, validate_fees_time) = measure!(self.validate_fees(
+ let (validation_results, validate_fees_us) = measure_us!(self.validate_fees(
callbacks,
sanitized_txs,
check_results,
@@ -248,40 +248,39 @@ impl<FG: ForkGraph> TransactionBatchProcessor<FG> {
&mut error_metrics
));

- let mut program_cache_time = Measure::start("program_cache");
- let mut program_accounts_map = Self::filter_executable_program_accounts(
- callbacks,
- sanitized_txs,
- &validation_results,
- PROGRAM_OWNERS,
- );
- for builtin_program in self.builtin_program_ids.read().unwrap().iter() {
- program_accounts_map.insert(*builtin_program, 0);
- }
+ let (mut program_cache_for_tx_batch, program_cache_us) = measure_us!({
+ let mut program_accounts_map = Self::filter_executable_program_accounts(
+ callbacks,
+ sanitized_txs,
+ &validation_results,
+ PROGRAM_OWNERS,
+ );
+ for builtin_program in self.builtin_program_ids.read().unwrap().iter() {
+ program_accounts_map.insert(*builtin_program, 0);
+ }

- let program_cache_for_tx_batch = Rc::new(RefCell::new(self.replenish_program_cache(
- callbacks,
- &program_accounts_map,
- config.check_program_modification_slot,
- config.limit_to_load_programs,
- )));
-
- if program_cache_for_tx_batch.borrow().hit_max_limit {
- const ERROR: TransactionError = TransactionError::ProgramCacheHitMaxLimit;
- let loaded_transactions = vec![Err(ERROR); sanitized_txs.len()];
- let execution_results =
- vec![TransactionExecutionResult::NotExecuted(ERROR); sanitized_txs.len()];
- return LoadAndExecuteSanitizedTransactionsOutput {
- error_metrics,
- execute_timings,
- execution_results,
- loaded_transactions,
- };
- }
- program_cache_time.stop();
+ let program_cache_for_tx_batch = self.replenish_program_cache(
+ callbacks,
+ &program_accounts_map,
+ config.check_program_modification_slot,
+ config.limit_to_load_programs,
+ );
+
+ if program_cache_for_tx_batch.hit_max_limit {
+ const ERROR: TransactionError = TransactionError::ProgramCacheHitMaxLimit;
+ let execution_results =
+ vec![TransactionExecutionResult::NotExecuted(ERROR); sanitized_txs.len()];
+ return LoadAndExecuteSanitizedTransactionsOutput {
+ error_metrics,
+ execute_timings,
+ execution_results,
+ };
+ }
+
+ program_cache_for_tx_batch
+ });

- let mut load_time = Measure::start("accounts_load");
- let mut loaded_transactions = load_accounts(
+ let (loaded_transactions, load_accounts_us) = measure_us!(load_accounts(
callbacks,
sanitized_txs,
validation_results,
@@ -291,56 +290,42 @@ impl<FG: ForkGraph> TransactionBatchProcessor<FG> {
environment
.rent_collector
.unwrap_or(&RentCollector::default()),
- &program_cache_for_tx_batch.borrow(),
- );
- load_time.stop();
-
- let mut execution_time = Measure::start("execution_time");
-
- let execution_results: Vec<TransactionExecutionResult> = loaded_transactions
- .iter_mut()
- .zip(sanitized_txs.iter())
- .map(|(load_result, tx)| match load_result {
- Err(e) => TransactionExecutionResult::NotExecuted(e.clone()),
- Ok(loaded_transaction) => {
- let result = self.execute_loaded_transaction(
- tx,
- loaded_transaction,
- &mut execute_timings,
- &mut error_metrics,
- &mut program_cache_for_tx_batch.borrow_mut(),
- environment,
- config,
- );
+ &program_cache_for_tx_batch,
+ ));
+
+ let (execution_results, execution_us): (Vec<TransactionExecutionResult>, u64) =
+ measure_us!(loaded_transactions
+ .into_iter()
+ .zip(sanitized_txs.iter())
+ .map(|(load_result, tx)| match load_result {
+ Err(e) => TransactionExecutionResult::NotExecuted(e.clone()),
+ Ok(loaded_transaction) => {
+ let executed_tx = self.execute_loaded_transaction(
+ tx,
+ loaded_transaction,
+ &mut execute_timings,
+ &mut error_metrics,
+ &mut program_cache_for_tx_batch,
+ environment,
+ config,
+ );

- if let TransactionExecutionResult::Executed {
- details,
- programs_modified_by_tx,
- } = &result
- {
// Update batch specific cache of the loaded programs with the modifications
// made by the transaction, if it executed successfully.
- if details.status.is_ok() {
- program_cache_for_tx_batch
- .borrow_mut()
- .merge(programs_modified_by_tx);
+ if executed_tx.was_successful() {
+ program_cache_for_tx_batch.merge(&executed_tx.programs_modified_by_tx);
}
- }
-
- result
- }
- })
- .collect();
- execution_time.stop();
+ TransactionExecutionResult::Executed(Box::new(executed_tx))
+ }
+ })
+ .collect());

// Skip eviction when there's no chance this particular tx batch has increased the size of
// ProgramCache entries. Note that loaded_missing is deliberately defined, so that there's
// still at least one other batch, which will evict the program cache, even after the
// occurrences of cooperative loading.
- if program_cache_for_tx_batch.borrow().loaded_missing
- || program_cache_for_tx_batch.borrow().merged_modified
- {
+ if program_cache_for_tx_batch.loaded_missing || program_cache_for_tx_batch.merged_modified {
const SHRINK_LOADED_PROGRAMS_TO_PERCENTAGE: u8 = 90;
self.program_cache
.write()
@@ -353,28 +338,22 @@ impl<FG: ForkGraph> TransactionBatchProcessor<FG> {
debug!(
"load: {}us execute: {}us txs_len={}",
- load_time.as_us(),
- execution_time.as_us(),
+ load_accounts_us,
+ execution_us,
sanitized_txs.len(),
);

- execute_timings.saturating_add_in_place(
- ExecuteTimingType::ValidateFeesUs,
- validate_fees_time.as_us(),
- );
- execute_timings.saturating_add_in_place(
- ExecuteTimingType::ProgramCacheUs,
- program_cache_time.as_us(),
- );
- execute_timings.saturating_add_in_place(ExecuteTimingType::LoadUs, load_time.as_us());
execute_timings
- .saturating_add_in_place(ExecuteTimingType::ExecuteUs, execution_time.as_us());
+ .saturating_add_in_place(ExecuteTimingType::ValidateFeesUs, validate_fees_us);
+ execute_timings
+ .saturating_add_in_place(ExecuteTimingType::ProgramCacheUs, program_cache_us);
+ execute_timings.saturating_add_in_place(ExecuteTimingType::LoadUs, load_accounts_us);
+ execute_timings.saturating_add_in_place(ExecuteTimingType::ExecuteUs, execution_us);

LoadAndExecuteSanitizedTransactionsOutput {
error_metrics,
execute_timings,
execution_results,
- loaded_transactions,
}
}

@@ -451,11 +430,11 @@ impl<FG: ForkGraph> TransactionBatchProcessor<FG> {
} = checked_details;

let fee_budget_limits = FeeBudgetLimits::from(compute_budget_limits);
- let fee_details = fee_structure.calculate_fee_details(
+ let fee_details = solana_fee::calculate_fee_details(
message,
- lamports_per_signature,
- &fee_budget_limits,
- feature_set.is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()),
+ lamports_per_signature == 0,
+ fee_structure.lamports_per_signature,
+ fee_budget_limits.prioritization_fee,
feature_set.is_active(&remove_rounding_in_fee_calculation::id()),
);

@@ -521,6 +500,7 @@ impl<FG: ForkGraph> TransactionBatchProcessor<FG> {
result
}

+ #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))]
fn replenish_program_cache<CB: TransactionProcessingCallback>(
&self,
callback: &CB,
@@ -544,7 +524,7 @@ impl<FG: ForkGraph> TransactionBatchProcessor<FG> {
})
.collect();

- let mut loaded_programs_for_txs = None;
+ let mut loaded_programs_for_txs: Option<ProgramCacheForTxBatch> = None;
loop {
let (program_to_store, task_cookie, task_waiter) = {
// Lock the global cache.
@@ -585,6 +565,7 @@ impl<FG: ForkGraph> TransactionBatchProcessor<FG> {
};

if let Some((key, program)) = program_to_store {
+ loaded_programs_for_txs.as_mut().unwrap().loaded_missing = true;
let mut program_cache = self.program_cache.write().unwrap();
// Submit our last completed loading task.
if program_cache.finish_cooperative_loading_task(self.slot, key, program)
@@ -705,13 +686,13 @@ impl<FG: ForkGraph> TransactionBatchProcessor<FG> {
fn execute_loaded_transaction(
&self,
tx: &SanitizedTransaction,
- loaded_transaction: &mut LoadedTransaction,
+ mut loaded_transaction: LoadedTransaction,
execute_timings: &mut ExecuteTimings,
error_metrics: &mut TransactionErrorMetrics,
program_cache_for_tx_batch: &mut ProgramCacheForTxBatch,
environment: &TransactionProcessingEnvironment,
config: &TransactionProcessingConfig,
- ) -> TransactionExecutionResult {
+ ) -> ExecutedTransaction {
let transaction_accounts = std::mem::take(&mut loaded_transaction.accounts);

fn transaction_accounts_lamports_sum(
@@ -875,16 +856,16 @@ impl<FG: ForkGraph> TransactionBatchProcessor<FG> {
None
};

- TransactionExecutionResult::Executed {
- details: TransactionExecutionDetails {
+ ExecutedTransaction {
+ execution_details: TransactionExecutionDetails {
status,
log_messages,
inner_instructions,
- fee_details: loaded_transaction.fee_details,
return_data,
executed_units,
accounts_data_len_delta,
},
+ loaded_transaction,
programs_modified_by_tx: program_cache_for_tx_batch.drain_modified_entries(),
}
}
@@ -986,15 +967,14 @@ mod tests {
use {
super::*,
crate::{
- account_loader::ValidatedTransactionDetails, nonce_info::NoncePartial,
+ account_loader::ValidatedTransactionDetails, nonce_info::NonceInfo,
rollback_accounts::RollbackAccounts,
},
- solana_compute_budget::compute_budget_processor::ComputeBudgetLimits,
+ solana_compute_budget::compute_budget_limits::ComputeBudgetLimits,
solana_program_runtime::loaded_programs::{BlockRelation, ProgramCacheEntryType},
solana_sdk::{
account::{create_account_shared_data_for_test, WritableAccount},
bpf_loader,
- bpf_loader_upgradeable::{self, UpgradeableLoaderState},
compute_budget::ComputeBudgetInstruction,
epoch_schedule::EpochSchedule,
feature_set::FeatureSet,
@@ -1012,12 +992,6 @@ mod tests {
transaction::{SanitizedTransaction, Transaction, TransactionError},
transaction_context::TransactionContext,
},
- std::{
- env,
- fs::{self, File},
- io::Read,
- thread,
- },
};

fn new_unchecked_sanitized_message(message: Message) -> SanitizedMessage {
@@ -1145,7 +1119,7 @@ mod tests {
false,
);

- let mut loaded_transaction = LoadedTransaction {
+ let loaded_transaction = LoadedTransaction {
accounts: vec![(Pubkey::new_unique(), AccountSharedData::default())],
program_indices: vec![vec![0]],
fee_details: FeeDetails::default(),
@@ -1161,59 +1135,38 @@ mod tests {
let mut processing_config = TransactionProcessingConfig::default();
processing_config.recording_config.enable_log_recording = true;

- let result = batch_processor.execute_loaded_transaction(
+ let executed_tx = batch_processor.execute_loaded_transaction(
&sanitized_transaction,
- &mut loaded_transaction,
+
loaded_transaction.clone(), &mut ExecuteTimings::default(), &mut TransactionErrorMetrics::default(), &mut program_cache_for_tx_batch, &processing_environment, &processing_config, ); - - let TransactionExecutionResult::Executed { - details: TransactionExecutionDetails { log_messages, .. }, - .. - } = result - else { - panic!("Unexpected result") - }; - assert!(log_messages.is_some()); + assert!(executed_tx.execution_details.log_messages.is_some()); processing_config.log_messages_bytes_limit = Some(2); - let result = batch_processor.execute_loaded_transaction( + let executed_tx = batch_processor.execute_loaded_transaction( &sanitized_transaction, - &mut loaded_transaction, + loaded_transaction.clone(), &mut ExecuteTimings::default(), &mut TransactionErrorMetrics::default(), &mut program_cache_for_tx_batch, &processing_environment, &processing_config, ); - - let TransactionExecutionResult::Executed { - details: - TransactionExecutionDetails { - log_messages, - inner_instructions, - .. - }, - .. - } = result - else { - panic!("Unexpected result") - }; - assert!(log_messages.is_some()); - assert!(inner_instructions.is_none()); + assert!(executed_tx.execution_details.log_messages.is_some()); + assert!(executed_tx.execution_details.inner_instructions.is_none()); processing_config.recording_config.enable_log_recording = false; processing_config.recording_config.enable_cpi_recording = true; processing_config.log_messages_bytes_limit = None; - let result = batch_processor.execute_loaded_transaction( + let executed_tx = batch_processor.execute_loaded_transaction( &sanitized_transaction, - &mut loaded_transaction, + loaded_transaction, &mut ExecuteTimings::default(), &mut TransactionErrorMetrics::default(), &mut program_cache_for_tx_batch, @@ -1221,20 +1174,8 @@ mod tests { &processing_config, ); - let TransactionExecutionResult::Executed { - details: - TransactionExecutionDetails { - log_messages, - inner_instructions, - .. - }, - .. 
- } = result
- else {
- panic!("Unexpected result")
- };
- assert!(log_messages.is_none());
- assert!(inner_instructions.is_some());
+ assert!(executed_tx.execution_details.log_messages.is_none());
+ assert!(executed_tx.execution_details.inner_instructions.is_some());
}

#[test]
@@ -1267,7 +1208,7 @@ mod tests {
let mut account_data = AccountSharedData::default();
account_data.set_owner(bpf_loader::id());

- let mut loaded_transaction = LoadedTransaction {
+ let loaded_transaction = LoadedTransaction {
accounts: vec![
(key1, AccountSharedData::default()),
(key2, AccountSharedData::default()),
@@ -1289,7 +1230,7 @@ mod tests {

let _ = batch_processor.execute_loaded_transaction(
&sanitized_transaction,
- &mut loaded_transaction,
+ loaded_transaction,
&mut ExecuteTimings::default(),
&mut error_metrics,
&mut program_cache_for_tx_batch,
@@ -1305,8 +1246,9 @@ mod tests {
fn test_replenish_program_cache_with_nonexistent_accounts() {
let mock_bank = MockBankCallback::default();
let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
+ let fork_graph = Arc::new(RwLock::new(TestForkGraph {}));
batch_processor.program_cache.write().unwrap().fork_graph =
- Some(Arc::new(RwLock::new(TestForkGraph {})));
+ Some(Arc::downgrade(&fork_graph));

let key = Pubkey::new_unique();
let mut account_maps: HashMap<Pubkey, u64> = HashMap::new();
@@ -1319,8 +1261,9 @@ mod tests {
fn test_replenish_program_cache() {
let mock_bank = MockBankCallback::default();
let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
+ let fork_graph = Arc::new(RwLock::new(TestForkGraph {}));
batch_processor.program_cache.write().unwrap().fork_graph =
- Some(Arc::new(RwLock::new(TestForkGraph {})));
+ Some(Arc::downgrade(&fork_graph));

let key = Pubkey::new_unique();
let mut account_data = AccountSharedData::default();
@@ -1333,6 +1276,7 @@ mod tests {

let mut account_maps: HashMap<Pubkey, u64> = HashMap::new();
account_maps.insert(key, 4);
+ let mut loaded_missing = 0;

for limit_to_load_programs in [false, true] {
let result = batch_processor.replenish_program_cache(
@@ -1342,12 +1286,17 @@ mod tests {
limit_to_load_programs,
);
assert!(!result.hit_max_limit);
+ if result.loaded_missing {
+ loaded_missing += 1;
+ }
+
let program = result.find(&key).unwrap();
assert!(matches!(
program.program,
ProgramCacheEntryType::FailedVerification(_)
));
}
+ assert!(loaded_missing > 0);
}

#[test]
@@ -1800,8 +1749,9 @@ mod tests {
fn test_add_builtin() {
let mock_bank = MockBankCallback::default();
let batch_processor = TransactionBatchProcessor::<TestForkGraph>::default();
+ let fork_graph = Arc::new(RwLock::new(TestForkGraph {}));
batch_processor.program_cache.write().unwrap().fork_graph =
- Some(Arc::new(RwLock::new(TestForkGraph {})));
+ Some(Arc::downgrade(&fork_graph));

let key = Pubkey::new_unique();
let name = "a_builtin_name";
@@ -1839,111 +1789,6 @@ mod tests {
assert_eq!(entry, Arc::new(program));
}

- #[test]
- fn fast_concur_test() {
- let mut mock_bank = MockBankCallback::default();
- let batch_processor = TransactionBatchProcessor::<TestForkGraph>::new(5, 5, HashSet::new());
- batch_processor.program_cache.write().unwrap().fork_graph =
- Some(Arc::new(RwLock::new(TestForkGraph {})));
-
- let programs = vec![
- deploy_program("hello-solana".to_string(), &mut mock_bank),
- deploy_program("simple-transfer".to_string(), &mut mock_bank),
- deploy_program("clock-sysvar".to_string(), &mut mock_bank),
- ];
-
- let account_maps: HashMap<Pubkey, u64> = programs
- .iter()
- .enumerate()
- .map(|(idx, key)| (*key, idx as u64))
- .collect();
-
- for _ in 0..10 {
- let ths: Vec<_> = (0..4)
- .map(|_| {
- let
local_bank = mock_bank.clone(); - let processor = TransactionBatchProcessor::new_from( - &batch_processor, - batch_processor.slot, - batch_processor.epoch, - ); - let maps = account_maps.clone(); - let programs = programs.clone(); - thread::spawn(move || { - let result = - processor.replenish_program_cache(&local_bank, &maps, false, true); - for key in &programs { - let cache_entry = result.find(key); - assert!(matches!( - cache_entry.unwrap().program, - ProgramCacheEntryType::Loaded(_) - )); - } - }) - }) - .collect(); - - for th in ths { - th.join().unwrap(); - } - } - } - - fn deploy_program(name: String, mock_bank: &mut MockBankCallback) -> Pubkey { - let program_account = Pubkey::new_unique(); - let program_data_account = Pubkey::new_unique(); - let state = UpgradeableLoaderState::Program { - programdata_address: program_data_account, - }; - - // The program account must have funds and hold the executable binary - let mut account_data = AccountSharedData::default(); - account_data.set_data(bincode::serialize(&state).unwrap()); - account_data.set_lamports(25); - account_data.set_owner(bpf_loader_upgradeable::id()); - mock_bank - .account_shared_data - .write() - .unwrap() - .insert(program_account, account_data); - - let mut account_data = AccountSharedData::default(); - let state = UpgradeableLoaderState::ProgramData { - slot: 0, - upgrade_authority_address: None, - }; - let mut header = bincode::serialize(&state).unwrap(); - let mut complement = vec![ - 0; - std::cmp::max( - 0, - UpgradeableLoaderState::size_of_programdata_metadata().saturating_sub(header.len()) - ) - ]; - - let mut dir = env::current_dir().unwrap(); - dir.push("tests"); - dir.push("example-programs"); - dir.push(name.as_str()); - let name = name.replace('-', "_"); - dir.push(name + "_program.so"); - let mut file = File::open(dir.clone()).expect("file not found"); - let metadata = fs::metadata(dir).expect("Unable to read metadata"); - let mut buffer = vec![0; metadata.len() as usize]; - file.read_exact(&mut buffer).expect("Buffer overflow"); - - header.append(&mut complement); - header.append(&mut buffer); - account_data.set_data(header); - mock_bank - .account_shared_data - .write() - .unwrap() - .insert(program_data_account, account_data); - - program_account - } - #[test] fn test_validate_transaction_fee_payer_exact_balance() { let lamports_per_signature = 5000; @@ -2020,7 +1865,7 @@ mod tests { fee_payer_rent_epoch ), compute_budget_limits, - fee_details: FeeDetails::new_for_tests(transaction_fee, priority_fee, false), + fee_details: FeeDetails::new(transaction_fee, priority_fee, false), fee_payer_rent_debit, fee_payer_account: post_validation_fee_payer_account, }) @@ -2092,7 +1937,7 @@ mod tests { 0, // rent epoch ), compute_budget_limits, - fee_details: FeeDetails::new_for_tests(transaction_fee, 0, false), + fee_details: FeeDetails::new(transaction_fee, 0, false), fee_payer_rent_debit, fee_payer_account: post_validation_fee_payer_account, }) @@ -2298,7 +2143,7 @@ mod tests { let mut error_counters = TransactionErrorMetrics::default(); let batch_processor = TransactionBatchProcessor::::default(); - let nonce = Some(NoncePartial::new( + let nonce = Some(NonceInfo::new( *fee_payer_address, fee_payer_account.clone(), )); @@ -2333,7 +2178,7 @@ mod tests { 0, // fee_payer_rent_epoch ), compute_budget_limits, - fee_details: FeeDetails::new_for_tests(transaction_fee, priority_fee, false), + fee_details: FeeDetails::new(transaction_fee, priority_fee, false), fee_payer_rent_debit: 0, // rent due fee_payer_account: 
post_validation_fee_payer_account,
})
diff --git a/svm/tests/concurrent_tests.rs b/svm/tests/concurrent_tests.rs
new file mode 100644
index 00000000000000..cfe9f2233afceb
--- /dev/null
+++ b/svm/tests/concurrent_tests.rs
@@ -0,0 +1,99 @@
+#![cfg(feature = "shuttle-test")]
+
+use {
+ crate::mock_bank::{deploy_program, MockForkGraph},
+ mock_bank::MockBankCallback,
+ shuttle::{
+ sync::{Arc, RwLock},
+ thread, Runner,
+ },
+ solana_program_runtime::loaded_programs::ProgramCacheEntryType,
+ solana_sdk::pubkey::Pubkey,
+ solana_svm::transaction_processor::TransactionBatchProcessor,
+ std::collections::{HashMap, HashSet},
+};
+
+mod mock_bank;
+
+fn program_cache_execution(threads: usize) {
+ let mut mock_bank = MockBankCallback::default();
+ let batch_processor = TransactionBatchProcessor::<MockForkGraph>::new(5, 5, HashSet::new());
+ let fork_graph = Arc::new(RwLock::new(MockForkGraph {}));
+ batch_processor.program_cache.write().unwrap().fork_graph = Some(Arc::downgrade(&fork_graph));
+
+ let programs = vec![
+ deploy_program("hello-solana".to_string(), 0, &mut mock_bank),
+ deploy_program("simple-transfer".to_string(), 0, &mut mock_bank),
+ deploy_program("clock-sysvar".to_string(), 0, &mut mock_bank),
+ ];
+
+ let account_maps: HashMap<Pubkey, u64> = programs
+ .iter()
+ .enumerate()
+ .map(|(idx, key)| (*key, idx as u64))
+ .collect();
+
+ let ths: Vec<_> = (0..threads)
+ .map(|_| {
+ let local_bank = mock_bank.clone();
+ let processor = TransactionBatchProcessor::new_from(
+ &batch_processor,
+ batch_processor.slot,
+ batch_processor.epoch,
+ );
+ let maps = account_maps.clone();
+ let programs = programs.clone();
+ thread::spawn(move || {
+ let result = processor.replenish_program_cache(&local_bank, &maps, false, true);
+ for key in &programs {
+ let cache_entry = result.find(key);
+ assert!(matches!(
+ cache_entry.unwrap().program,
+ ProgramCacheEntryType::Loaded(_)
+ ));
+ }
+ })
+ })
+ .collect();
+
+ for th in ths {
+ th.join().unwrap();
+ }
+}
+
+// Shuttle has its own internal scheduler and the following tests change the way it operates to
+// increase the efficiency in finding problems in the program cache's concurrent code.
+
+// This test leverages the probabilistic concurrency testing algorithm
+// (https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/asplos277-pct.pdf).
+// It bounds the number of preemptions to explore (five in this test) for the four
+// threads we use. We run it for 300 iterations.
+#[test]
+fn test_program_cache_with_probabilistic_scheduler() {
+ shuttle::check_pct(
+ move || {
+ program_cache_execution(4);
+ },
+ 300,
+ 5,
+ );
+}
+
+// In this case, the scheduler is random and may preempt threads at any point and any time.
+#[test]
+fn test_program_cache_with_random_scheduler() {
+ shuttle::check_random(move || program_cache_execution(4), 300);
+}
+
+// This test explores all the possible thread scheduling patterns that might affect the program
+// cache. The run is limited to 500 iterations to avoid consuming too much CI time.
+#[test]
+fn test_program_cache_with_exhaustive_scheduler() {
+ // The DFS (shuttle::check_dfs) test is only complete when we do not generate random
+ // values in a thread.
+ // Since this is not the case for the execution of jitted programs, we can still run the
+ // test, but with decreased accuracy.
+ let scheduler = shuttle::scheduler::DfsScheduler::new(Some(500), true);
+ let runner = Runner::new(scheduler, Default::default());
+ runner.run(move || program_cache_execution(4));
+}
diff --git a/svm/tests/conformance.rs b/svm/tests/conformance.rs
index ad98dd8cb3708a..8e82cf98623837 100644
--- a/svm/tests/conformance.rs
+++ b/svm/tests/conformance.rs
@@ -7,15 +7,14 @@ use {
prost::Message,
solana_bpf_loader_program::syscalls::create_program_runtime_environment_v1,
solana_compute_budget::compute_budget::ComputeBudget,
+ solana_log_collector::LogCollector,
solana_program_runtime::{
invoke_context::{EnvironmentConfig, InvokeContext},
loaded_programs::{ProgramCacheEntry, ProgramCacheForTxBatch, ProgramRuntimeEnvironments},
- log_collector::LogCollector,
solana_rbpf::{
program::{BuiltinProgram, FunctionRegistry},
vm::Config,
},
- timings::ExecuteTimings,
},
solana_sdk::{
account::{AccountSharedData, ReadableAccount, WritableAccount},
@@ -43,6 +42,7 @@ use {
},
},
solana_svm_conformance::proto::{InstrEffects, InstrFixture},
+ solana_timings::ExecuteTimings,
std::{
collections::{HashMap, HashSet},
env,
@@ -197,7 +197,7 @@ fn run_fixture(fixture: InstrFixture, filename: OsString, execute_as_instr: bool
let mut fee_payer = Pubkey::new_unique();
let mut mock_bank = MockBankCallback::default();
{
- let mut account_data_map = mock_bank.account_shared_data.borrow_mut();
+ let mut account_data_map = mock_bank.account_shared_data.write().unwrap();
for item in input.accounts {
let pubkey = Pubkey::new_from_array(item.address.try_into().unwrap());
let mut account_data = AccountSharedData::default();
@@ -245,6 +245,7 @@ fn run_fixture(fixture: InstrFixture, filename: OsString, execute_as_instr: bool
mock_bank.override_feature_set(feature_set);
let batch_processor = TransactionBatchProcessor::<MockForkGraph>::new(42, 2, HashSet::new());
+ let fork_graph = Arc::new(RwLock::new(MockForkGraph {}));
{
let mut program_cache = batch_processor.program_cache.write().unwrap();
program_cache.environments = ProgramRuntimeEnvironments {
@@ -254,7 +255,7 @@ fn run_fixture(fixture: InstrFixture, filename: OsString, execute_as_instr: bool
FunctionRegistry::default(),
)),
};
- program_cache.fork_graph = Some(Arc::new(RwLock::new(MockForkGraph {})));
+ program_cache.fork_graph = Some(Arc::downgrade(&fork_graph.clone()));
}

batch_processor.fill_missing_sysvar_cache_entries(&mock_bank);
@@ -343,9 +344,11 @@ fn run_fixture(fixture: InstrFixture, filename: OsString, execute_as_instr: bool
return;
}

- let execution_details = result.execution_results[0].details().unwrap();
+ let executed_tx = result.execution_results[0].executed_transaction().unwrap();
+ let execution_details = &executed_tx.execution_details;
+ let loaded_accounts = &executed_tx.loaded_transaction.accounts;
verify_accounts_and_data(
- &result.loaded_transactions[0].as_ref().unwrap().accounts,
+ loaded_accounts,
output,
execution_details.executed_units,
input.cu_avail,
diff --git a/svm/tests/integration_test.rs b/svm/tests/integration_test.rs
index c47ce03af9b5a1..8d8c80b8e89422 100644
--- a/svm/tests/integration_test.rs
+++ b/svm/tests/integration_test.rs
@@ -1,7 +1,10 @@
#![cfg(test)]

use {
- crate::{mock_bank::MockBankCallback, transaction_builder::SanitizedTransactionBuilder},
+ crate::{
+ mock_bank::{deploy_program, MockBankCallback},
+ transaction_builder::SanitizedTransactionBuilder,
+ },
solana_bpf_loader_program::syscalls::{
SyscallAbort, SyscallGetClockSysvar, SyscallInvokeSignedRust, SyscallLog, SyscallMemcpy,
SyscallMemset, SyscallSetReturnData,
@@ -19,7 +22,7 @@ use {
},
solana_sdk::{
account::{AccountSharedData, ReadableAccount, WritableAccount},
- bpf_loader_upgradeable::{self, UpgradeableLoaderState},
+ bpf_loader_upgradeable::{self},
clock::{Clock, Epoch, Slot, UnixTimestamp},
hash::Hash,
instruction::AccountMeta,
@@ -30,19 +33,16 @@ use {
},
solana_svm::{
account_loader::{CheckedTransactionDetails, TransactionCheckResult},
+ transaction_execution_result::TransactionExecutionResult,
transaction_processing_callback::TransactionProcessingCallback,
transaction_processor::{
ExecutionRecordingConfig, TransactionBatchProcessor, TransactionProcessingConfig,
TransactionProcessingEnvironment,
},
- transaction_results::TransactionExecutionResult,
},
std::{
cmp::Ordering,
collections::{HashMap, HashSet},
- env,
- fs::{self, File},
- io::Read,
sync::{Arc, RwLock},
time::{SystemTime, UNIX_EPOCH},
},
@@ -94,7 +94,6 @@ fn create_custom_environment<'a>() -> BuiltinProgram<InvokeContext<'a>> {
enable_sbpf_v1: true,
enable_sbpf_v2: false,
optimize_rodata: false,
- new_elf_parser: false,
aligned_memory_mapping: true,
};

@@ -130,6 +129,7 @@ fn create_custom_environment<'a>() -> BuiltinProgram<InvokeContext<'a>> {
}

fn create_executable_environment(
+ fork_graph: Arc<RwLock<MockForkGraph>>,
mock_bank: &mut MockBankCallback,
program_cache: &mut ProgramCache<MockForkGraph>,
) {
@@ -142,7 +142,7 @@ fn create_executable_environment(
)),
};

- program_cache.fork_graph = Some(Arc::new(RwLock::new(MockForkGraph {})));
+ program_cache.fork_graph = Some(Arc::downgrade(&fork_graph));

// We must fill in the sysvar cache entries
let time_now = SystemTime::now()
@@ -161,67 +161,11 @@ fn create_executable_environment(
account_data.set_data(bincode::serialize(&clock).unwrap());
mock_bank
.account_shared_data
- .borrow_mut()
+ .write()
+ .unwrap()
.insert(Clock::id(), account_data);
}

-fn load_program(name: String) -> Vec<u8> {
- // Loading the program file
- let mut dir = env::current_dir().unwrap();
- dir.push("tests");
- dir.push("example-programs");
- dir.push(name.as_str());
- let name = name.replace('-', "_");
- dir.push(name + "_program.so");
- let mut file = File::open(dir.clone()).expect("file not found");
- let metadata = fs::metadata(dir).expect("Unable to read metadata");
- let mut buffer = vec![0; metadata.len() as usize];
- file.read_exact(&mut buffer).expect("Buffer overflow");
- buffer
-}
-
-fn deploy_program(name: String, mock_bank: &mut MockBankCallback) -> Pubkey {
- let program_account = Pubkey::new_unique();
- let program_data_account = Pubkey::new_unique();
- let state = UpgradeableLoaderState::Program {
- programdata_address: program_data_account,
- };
-
- // The program account must have funds and hold the executable binary
- let mut account_data = AccountSharedData::default();
- account_data.set_data(bincode::serialize(&state).unwrap());
- account_data.set_lamports(25);
- account_data.set_owner(bpf_loader_upgradeable::id());
- mock_bank
- .account_shared_data
- .borrow_mut()
- .insert(program_account, account_data);
-
- let mut account_data = AccountSharedData::default();
- let state = UpgradeableLoaderState::ProgramData {
- slot: DEPLOYMENT_SLOT,
- upgrade_authority_address: None,
- };
- let mut header = bincode::serialize(&state).unwrap();
- let mut complement = vec![
- 0;
- std::cmp::max(
- 0,
- UpgradeableLoaderState::size_of_programdata_metadata().saturating_sub(header.len())
- )
- ];
- let mut buffer = load_program(name);
- header.append(&mut complement);
- header.append(&mut buffer);
- account_data.set_data(header);
- mock_bank
- .account_shared_data
- .borrow_mut()
- .insert(program_data_account, account_data);
-
- program_account
program_account -} - fn register_builtins( mock_bank: &MockBankCallback, batch_processor: &TransactionBatchProcessor, @@ -261,7 +205,7 @@ fn prepare_transactions( let mut transaction_checks = Vec::new(); // A transaction that works without any account - let hello_program = deploy_program("hello-solana".to_string(), mock_bank); + let hello_program = deploy_program("hello-solana".to_string(), DEPLOYMENT_SLOT, mock_bank); let fee_payer = Pubkey::new_unique(); transaction_builder.create_instruction(hello_program, Vec::new(), HashMap::new(), Vec::new()); @@ -279,11 +223,13 @@ fn prepare_transactions( account_data.set_lamports(80000); mock_bank .account_shared_data - .borrow_mut() + .write() + .unwrap() .insert(fee_payer, account_data); // A simple funds transfer between accounts - let transfer_program_account = deploy_program("simple-transfer".to_string(), mock_bank); + let transfer_program_account = + deploy_program("simple-transfer".to_string(), DEPLOYMENT_SLOT, mock_bank); let sender = Pubkey::new_unique(); let recipient = Pubkey::new_unique(); let fee_payer = Pubkey::new_unique(); @@ -327,7 +273,8 @@ fn prepare_transactions( account_data.set_lamports(80000); mock_bank .account_shared_data - .borrow_mut() + .write() + .unwrap() .insert(fee_payer, account_data); // sender @@ -335,7 +282,8 @@ fn prepare_transactions( account_data.set_lamports(900000); mock_bank .account_shared_data - .borrow_mut() + .write() + .unwrap() .insert(sender, account_data); // recipient @@ -343,13 +291,14 @@ fn prepare_transactions( account_data.set_lamports(900000); mock_bank .account_shared_data - .borrow_mut() + .write() + .unwrap() .insert(recipient, account_data); // The system account is set in `create_executable_environment` // A program that utilizes a Sysvar - let program_account = deploy_program("clock-sysvar".to_string(), mock_bank); + let program_account = deploy_program("clock-sysvar".to_string(), DEPLOYMENT_SLOT, mock_bank); let fee_payer = Pubkey::new_unique(); transaction_builder.create_instruction(program_account, Vec::new(), HashMap::new(), Vec::new()); @@ -366,7 +315,8 @@ fn prepare_transactions( account_data.set_lamports(80000); mock_bank .account_shared_data - .borrow_mut() + .write() + .unwrap() .insert(fee_payer, account_data); // A transaction that fails @@ -411,7 +361,8 @@ fn prepare_transactions( account_data.set_lamports(80000); mock_bank .account_shared_data - .borrow_mut() + .write() + .unwrap() .insert(fee_payer, account_data); // Sender without enough funds @@ -419,7 +370,8 @@ fn prepare_transactions( account_data.set_lamports(900000); mock_bank .account_shared_data - .borrow_mut() + .write() + .unwrap() .insert(sender, account_data); // recipient @@ -427,7 +379,8 @@ fn prepare_transactions( account_data.set_lamports(900000); mock_bank .account_shared_data - .borrow_mut() + .write() + .unwrap() .insert(recipient, account_data); // A transaction whose verification has already failed @@ -447,7 +400,10 @@ fn svm_integration() { HashSet::new(), ); + let fork_graph = Arc::new(RwLock::new(MockForkGraph {})); + create_executable_environment( + fork_graph.clone(), &mut mock_bank, &mut batch_processor.program_cache.write().unwrap(), ); @@ -474,39 +430,32 @@ fn svm_integration() { ); assert_eq!(result.execution_results.len(), 5); - assert!(result.execution_results[0] - .details() - .unwrap() - .status - .is_ok()); - let logs = result.execution_results[0] - .details() - .unwrap() + + let executed_tx_0 = result.execution_results[0].executed_transaction().unwrap(); + 
assert!(executed_tx_0.was_successful()); + let logs = executed_tx_0 + .execution_details .log_messages .as_ref() .unwrap(); assert!(logs.contains(&"Program log: Hello, Solana!".to_string())); - assert!(result.execution_results[1] - .details() - .unwrap() - .status - .is_ok()); + let executed_tx_1 = result.execution_results[1].executed_transaction().unwrap(); + assert!(executed_tx_1.was_successful()); // The SVM does not commit the account changes in MockBank let recipient_key = transactions[1].message().account_keys()[2]; - let recipient_data = result.loaded_transactions[1] - .as_ref() - .unwrap() + let recipient_data = executed_tx_1 + .loaded_transaction .accounts .iter() .find(|key| key.0 == recipient_key) .unwrap(); assert_eq!(recipient_data.1.lamports(), 900010); - let return_data = result.execution_results[2] - .details() - .unwrap() + let executed_tx_2 = result.execution_results[2].executed_transaction().unwrap(); + let return_data = executed_tx_2 + .execution_details .return_data .as_ref() .unwrap(); @@ -515,14 +464,10 @@ fn svm_integration() { let clock_info: Clock = bincode::deserialize(clock_data.data()).unwrap(); assert_eq!(clock_info.unix_timestamp, time); - assert!(result.execution_results[3] - .details() - .unwrap() - .status - .is_err()); - assert!(result.execution_results[3] - .details() - .unwrap() + let executed_tx_3 = result.execution_results[3].executed_transaction().unwrap(); + assert!(executed_tx_3.execution_details.status.is_err()); + assert!(executed_tx_3 + .execution_details .log_messages .as_ref() .unwrap() diff --git a/svm/tests/mock_bank.rs b/svm/tests/mock_bank.rs index f67b63346e95b7..355f9f0ce8898a 100644 --- a/svm/tests/mock_bank.rs +++ b/svm/tests/mock_bank.rs @@ -1,7 +1,8 @@ use { solana_program_runtime::loaded_programs::{BlockRelation, ForkGraph}, solana_sdk::{ - account::{AccountSharedData, ReadableAccount}, + account::{AccountSharedData, ReadableAccount, WritableAccount}, + bpf_loader_upgradeable::{self, UpgradeableLoaderState}, clock::Epoch, feature_set::FeatureSet, native_loader, @@ -9,7 +10,14 @@ use { slot_hashes::Slot, }, solana_svm::transaction_processing_callback::TransactionProcessingCallback, - std::{cell::RefCell, cmp::Ordering, collections::HashMap, sync::Arc}, + solana_type_overrides::sync::{Arc, RwLock}, + std::{ + cmp::Ordering, + collections::HashMap, + env, + fs::{self, File}, + io::Read, + }, }; pub struct MockForkGraph {} @@ -28,15 +36,15 @@ impl ForkGraph for MockForkGraph { } } -#[derive(Default)] +#[derive(Default, Clone)] pub struct MockBankCallback { pub feature_set: Arc, - pub account_shared_data: RefCell>, + pub account_shared_data: Arc>>, } impl TransactionProcessingCallback for MockBankCallback { fn account_matches_owners(&self, account: &Pubkey, owners: &[Pubkey]) -> Option { - if let Some(data) = self.account_shared_data.borrow().get(account) { + if let Some(data) = self.account_shared_data.read().unwrap().get(account) { if data.lamports() == 0 { None } else { @@ -48,21 +56,91 @@ impl TransactionProcessingCallback for MockBankCallback { } fn get_account_shared_data(&self, pubkey: &Pubkey) -> Option { - self.account_shared_data.borrow().get(pubkey).cloned() + self.account_shared_data + .read() + .unwrap() + .get(pubkey) + .cloned() } fn add_builtin_account(&self, name: &str, program_id: &Pubkey) { let account_data = native_loader::create_loadable_account_with_fields(name, (5000, 0)); self.account_shared_data - .borrow_mut() + .write() + .unwrap() .insert(*program_id, account_data); } } impl MockBankCallback { - 
#[allow(dead_code)] + #[allow(unused)] pub fn override_feature_set(&mut self, new_set: FeatureSet) { self.feature_set = Arc::new(new_set) } } + +#[allow(unused)] +fn load_program(name: String) -> Vec { + // Loading the program file + let mut dir = env::current_dir().unwrap(); + dir.push("tests"); + dir.push("example-programs"); + dir.push(name.as_str()); + let name = name.replace('-', "_"); + dir.push(name + "_program.so"); + let mut file = File::open(dir.clone()).expect("file not found"); + let metadata = fs::metadata(dir).expect("Unable to read metadata"); + let mut buffer = vec![0; metadata.len() as usize]; + file.read_exact(&mut buffer).expect("Buffer overflow"); + buffer +} + +#[allow(unused)] +pub fn deploy_program( + name: String, + deployment_slot: Slot, + mock_bank: &mut MockBankCallback, +) -> Pubkey { + let program_account = Pubkey::new_unique(); + let program_data_account = Pubkey::new_unique(); + let state = UpgradeableLoaderState::Program { + programdata_address: program_data_account, + }; + + // The program account must have funds and hold the executable binary + let mut account_data = AccountSharedData::default(); + account_data.set_data(bincode::serialize(&state).unwrap()); + account_data.set_lamports(25); + account_data.set_owner(bpf_loader_upgradeable::id()); + mock_bank + .account_shared_data + .write() + .unwrap() + .insert(program_account, account_data); + + let mut account_data = AccountSharedData::default(); + let state = UpgradeableLoaderState::ProgramData { + slot: deployment_slot, + upgrade_authority_address: None, + }; + let mut header = bincode::serialize(&state).unwrap(); + let mut complement = vec![ + 0; + std::cmp::max( + 0, + UpgradeableLoaderState::size_of_programdata_metadata().saturating_sub(header.len()) + ) + ]; + let mut buffer = load_program(name); + header.append(&mut complement); + header.append(&mut buffer); + account_data.set_data(header); + mock_bank + .account_shared_data + .write() + .unwrap() + .insert(program_data_account, account_data); + + program_account +} diff --git a/test-validator/Cargo.toml b/test-validator/Cargo.toml index 0068ddbfb2bf0a..1d73cf10c097a9 100644 --- a/test-validator/Cargo.toml +++ b/test-validator/Cargo.toml @@ -18,7 +18,6 @@ serde_derive = { workspace = true } serde_json = { workspace = true } solana-accounts-db = { workspace = true } solana-cli-output = { workspace = true } -solana-client = { workspace = true } solana-compute-budget = { workspace = true } solana-core = { workspace = true } solana-geyser-plugin-manager = { workspace = true } @@ -29,6 +28,7 @@ solana-net-utils = { workspace = true } solana-program-test = { workspace = true } solana-rpc = { workspace = true } solana-rpc-client = { workspace = true } +solana-rpc-client-api = { workspace = true } solana-runtime = { workspace = true } solana-sdk = { workspace = true } solana-streamer = { workspace = true } diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs index 8c6fd289018113..39616c1fdd4936 100644 --- a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -9,7 +9,6 @@ use { utils::create_accounts_run_and_snapshot_dirs, }, solana_cli_output::CliAccount, - solana_client::rpc_request::MAX_MULTIPLE_ACCOUNTS, solana_compute_budget::compute_budget::ComputeBudget, solana_core::{ admin_rpc_post_init::AdminRpcRequestMetadataPostInit, @@ -32,6 +31,7 @@ use { solana_net_utils::PortRange, solana_rpc::{rpc::JsonRpcConfig, rpc_pubsub_service::PubSubConfig}, solana_rpc_client::{nonblocking, rpc_client::RpcClient}, + 
solana_rpc_client_api::request::MAX_MULTIPLE_ACCOUNTS, solana_runtime::{ bank_forks::BankForks, genesis_utils::create_genesis_config_with_leader_ex, runtime_config::RuntimeConfig, snapshot_config::SnapshotConfig, @@ -77,14 +77,6 @@ pub struct AccountInfo<'a> { pub filename: &'a str, } -#[deprecated(since = "1.16.0", note = "Please use `UpgradeableProgramInfo` instead")] -#[derive(Clone)] -pub struct ProgramInfo { - pub program_id: Pubkey, - pub loader: Pubkey, - pub program_path: PathBuf, -} - #[derive(Clone)] pub struct UpgradeableProgramInfo { pub program_id: Pubkey, @@ -126,8 +118,6 @@ pub struct TestValidatorGenesis { rpc_ports: Option<(u16, u16)>, // (JsonRpc, JsonRpcPubSub), None == random ports warp_slot: Option, accounts: HashMap, - #[allow(deprecated)] - programs: Vec, upgradeable_programs: Vec, ticks_per_slot: Option, epoch_schedule: Option, @@ -160,8 +150,6 @@ impl Default for TestValidatorGenesis { rpc_ports: Option::<(u16, u16)>::default(), warp_slot: Option::::default(), accounts: HashMap::::default(), - #[allow(deprecated)] - programs: Vec::::default(), upgradeable_programs: Vec::::default(), ticks_per_slot: Option::::default(), epoch_schedule: Option::::default(), @@ -319,11 +307,6 @@ impl TestValidatorGenesis { self } - #[deprecated(note = "Please use `compute_unit_limit` instead")] - pub fn max_compute_units(&mut self, max_compute_units: u64) -> &mut Self { - self.compute_unit_limit(max_compute_units) - } - /// Add an account to the test environment pub fn add_account(&mut self, address: Pubkey, account: AccountSharedData) -> &mut Self { self.accounts.insert(address, account); @@ -581,19 +564,6 @@ impl TestValidatorGenesis { self } - /// Add a list of programs to the test environment. - #[deprecated( - since = "1.16.0", - note = "Please use `add_upgradeable_programs_with_path()` instead" - )] - #[allow(deprecated)] - pub fn add_programs_with_path(&mut self, programs: &[ProgramInfo]) -> &mut Self { - for program in programs { - self.programs.push(program.clone()); - } - self - } - /// Add a list of upgradeable programs to the test environment. pub fn add_upgradeable_programs_with_path( &mut self, @@ -795,20 +765,6 @@ impl TestValidator { for (address, account) in solana_program_test::programs::spl_programs(&config.rent) { accounts.entry(address).or_insert(account); } - #[allow(deprecated)] - for program in &config.programs { - let data = solana_program_test::read_file(&program.program_path); - accounts.insert( - program.program_id, - AccountSharedData::from(Account { - lamports: Rent::default().minimum_balance(data.len()).max(1), - data, - owner: program.loader, - executable: true, - rent_epoch: 0, - }), - ); - } for upgradeable_program in &config.upgradeable_programs { let data = solana_program_test::read_file(&upgradeable_program.program_path); let (programdata_address, _) = Pubkey::find_program_address( diff --git a/thin-client/README.md b/thin-client/README.md index 147b287b2d62b2..43ca0825a4ab5d 100644 --- a/thin-client/README.md +++ b/thin-client/README.md @@ -1,4 +1,4 @@ # thin-client -This crate for `thin-client` is deprecated as of v1.19.0. It will receive no bugfixes or updates. +This crate for `thin-client` is deprecated as of v2.0.0. It will receive no bugfixes or updates. Please use `tpu-client` or `rpc-client`. 
\ No newline at end of file
diff --git a/thin-client/src/thin_client.rs b/thin-client/src/thin_client.rs
index 065363e1a462c0..f53ae499a8b68f 100644
--- a/thin-client/src/thin_client.rs
+++ b/thin-client/src/thin_client.rs
@@ -110,7 +110,7 @@ impl ClientOptimizer {
 }
 
 /// An object for querying and sending transactions to the network.
-#[deprecated(since = "1.19.0", note = "Use [RpcClient] or [TpuClient] instead.")]
+#[deprecated(since = "2.0.0", note = "Use [RpcClient] or [TpuClient] instead.")]
 pub struct ThinClient<
     P, // ConnectionPool
     M, // ConnectionManager
diff --git a/timings/Cargo.toml b/timings/Cargo.toml
new file mode 100644
index 00000000000000..e36975b192d725
--- /dev/null
+++ b/timings/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "solana-timings"
+description = "Solana Execute Timings"
+documentation = "https://docs.rs/solana-timings"
+version = { workspace = true }
+authors = { workspace = true }
+repository = { workspace = true }
+homepage = { workspace = true }
+license = { workspace = true }
+edition = { workspace = true }
+
+[dependencies]
+eager = { workspace = true }
+enum-iterator = { workspace = true }
+solana-sdk = { workspace = true }
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
diff --git a/program-runtime/src/timings.rs b/timings/src/lib.rs
similarity index 98%
rename from program-runtime/src/timings.rs
rename to timings/src/lib.rs
index 9ffc4702178676..46878559eb25cc 100644
--- a/program-runtime/src/timings.rs
+++ b/timings/src/lib.rs
@@ -1,3 +1,5 @@
+#[macro_use]
+extern crate eager;
 use {
     core::fmt,
     enum_iterator::Sequence,
@@ -48,6 +50,7 @@ pub enum ExecuteTimingType {
     ExecuteUs,
     StoreUs,
     UpdateStakesCacheUs,
+    UpdateExecutorsUs,
     NumExecuteBatches,
     CollectLogsUs,
     TotalBatchesLen,
@@ -148,6 +151,11 @@ eager_macro_rules! { $eager_1
             .index(ExecuteTimingType::UpdateStakesCacheUs),
             i64
         ),
+        (
+            "execute_accessories_update_executors_us",
+            *$self.metrics.index(ExecuteTimingType::UpdateExecutorsUs),
+            i64
+        ),
         (
             "total_batches_len",
             (if $is_unified_scheduler_enabled {
@@ -257,11 +265,6 @@ eager_macro_rules!
{ $eager_1 $self.execute_accessories.process_message_us, i64 ), - ( - "execute_accessories_update_executors_us", - $self.execute_accessories.update_executors_us, - i64 - ), ( "execute_accessories_process_instructions_total_us", $self @@ -349,7 +352,6 @@ pub struct ExecuteAccessoryTimings { pub feature_set_clone_us: u64, pub get_executors_us: u64, pub process_message_us: u64, - pub update_executors_us: u64, pub process_instructions: ExecuteProcessInstructionTimings, } @@ -358,7 +360,6 @@ impl ExecuteAccessoryTimings { saturating_add_assign!(self.feature_set_clone_us, other.feature_set_clone_us); saturating_add_assign!(self.get_executors_us, other.get_executors_us); saturating_add_assign!(self.process_message_us, other.process_message_us); - saturating_add_assign!(self.update_executors_us, other.update_executors_us); self.process_instructions .accumulate(&other.process_instructions); } diff --git a/tpu-client/src/nonblocking/tpu_client.rs b/tpu-client/src/nonblocking/tpu_client.rs index ce274b8245d4d5..47d443af20a4ca 100644 --- a/tpu-client/src/nonblocking/tpu_client.rs +++ b/tpu-client/src/nonblocking/tpu_client.rs @@ -17,11 +17,12 @@ use { solana_pubsub_client::nonblocking::pubsub_client::{PubsubClient, PubsubClientError}, solana_rpc_client::nonblocking::rpc_client::RpcClient, solana_rpc_client_api::{ - client_error::{Error as ClientError, Result as ClientResult}, + client_error::{Error as ClientError, ErrorKind, Result as ClientResult}, + request::RpcError, response::{RpcContactInfo, SlotUpdate}, }, solana_sdk::{ - clock::Slot, + clock::{Slot, DEFAULT_MS_PER_SLOT}, commitment_config::CommitmentConfig, epoch_info::EpochInfo, pubkey::Pubkey, @@ -722,9 +723,42 @@ impl LeaderTpuService { let recent_slots = RecentLeaderSlots::new(start_slot); let slots_in_epoch = rpc_client.get_epoch_info().await?.slots_in_epoch; - let leaders = rpc_client - .get_slot_leaders(start_slot, LeaderTpuCache::fanout(slots_in_epoch)) - .await?; + + // When a cluster is starting, we observe an invalid slot range failure that goes away after a + // retry. It seems as if the leader schedule is not available, but it should be. The logic + // below retries the RPC call in case of an invalid slot range error. + let tpu_leader_service_creation_timeout = Duration::from_secs(20); + let retry_interval = Duration::from_secs(1); + let leaders = timeout(tpu_leader_service_creation_timeout, async { + loop { + // TODO: The root cause appears to lie within the `rpc_client.get_slot_leaders()`. + // It might be worth debugging further and trying to understand why the RPC + // call fails. There may be a bug in the `get_slot_leaders()` logic or in the + // RPC implementation + match rpc_client + .get_slot_leaders(start_slot, LeaderTpuCache::fanout(slots_in_epoch)) + .await + { + Ok(leaders) => return Ok(leaders), + Err(client_error) => { + if is_invalid_slot_range_error(&client_error) { + sleep(retry_interval).await; + continue; + } else { + return Err(client_error); + } + } + } + } + }) + .await + .map_err(|_| { + TpuSenderError::Custom(format!( + "Failed to get slot leaders connecting to: {}, timeout: {:?}. 
Invalid slot range", + websocket_url, tpu_leader_service_creation_timeout + )) + })??; + let cluster_nodes = rpc_client.get_cluster_nodes().await?; let leader_tpu_cache = Arc::new(RwLock::new(LeaderTpuCache::new( start_slot, @@ -784,48 +818,27 @@ impl LeaderTpuService { pubsub_client: Option, exit: Arc, ) -> Result<()> { - let (mut notifications, unsubscribe) = if let Some(pubsub_client) = &pubsub_client { - let (notifications, unsubscribe) = pubsub_client.slot_updates_subscribe().await?; - (Some(notifications), Some(unsubscribe)) - } else { - (None, None) - }; + tokio::try_join!( + Self::run_slot_watcher(recent_slots.clone(), pubsub_client, exit.clone()), + Self::run_cache_refresher(rpc_client, recent_slots, leader_tpu_cache, exit), + )?; + + Ok(()) + } + + async fn run_cache_refresher( + rpc_client: Arc, + recent_slots: RecentLeaderSlots, + leader_tpu_cache: Arc>, + exit: Arc, + ) -> Result<()> { let mut last_cluster_refresh = Instant::now(); - let mut sleep_ms = 1000; - loop { - if exit.load(Ordering::Relaxed) { - if let Some(unsubscribe) = unsubscribe { - (unsubscribe)().await; - } - // `notifications` requires a valid reference to `pubsub_client` - // so `notifications` must be dropped before moving `pubsub_client` - drop(notifications); - if let Some(pubsub_client) = pubsub_client { - pubsub_client.shutdown().await.unwrap(); - }; - break; - } + let mut sleep_ms = DEFAULT_MS_PER_SLOT; + while !exit.load(Ordering::Relaxed) { // Sleep a slot before checking if leader cache needs to be refreshed again sleep(Duration::from_millis(sleep_ms)).await; - sleep_ms = 1000; - - if let Some(notifications) = &mut notifications { - while let Ok(Some(update)) = - timeout(Duration::from_millis(10), notifications.next()).await - { - let current_slot = match update { - // This update indicates that a full slot was received by the connected - // node so we can stop sending transactions to the leader for that slot - SlotUpdate::Completed { slot, .. } => slot.saturating_add(1), - // This update indicates that we have just received the first shred from - // the leader for this slot and they are probably still accepting transactions. - SlotUpdate::FirstShredReceived { slot, .. } => slot, - _ => continue, - }; - recent_slots.record_slot(current_slot); - } - } + sleep_ms = DEFAULT_MS_PER_SLOT; let cache_update_info = maybe_fetch_cache_info( &leader_tpu_cache, @@ -847,6 +860,53 @@ impl LeaderTpuService { } } } + + Ok(()) + } + + async fn run_slot_watcher( + recent_slots: RecentLeaderSlots, + pubsub_client: Option, + exit: Arc, + ) -> Result<()> { + let Some(pubsub_client) = pubsub_client else { + return Ok(()); + }; + + let (mut notifications, unsubscribe) = pubsub_client.slot_updates_subscribe().await?; + // Time out slot update notification polling at 10ms. + // + // Rationale is two-fold: + // 1. Notifications are an unbounded stream -- polling them will block indefinitely if not + // interrupted, and the exit condition will never be checked. 10ms ensures negligible + // CPU overhead while keeping notification checking timely. + // 2. The timeout must be strictly less than the slot time (DEFAULT_MS_PER_SLOT: 400) to + // avoid timeout never being reached. For example, if notifications are received every + // 400ms and the timeout is >= 400ms, notifications may theoretically always be available + // before the timeout is reached, resulting in the exit condition never being checked. 
+ const SLOT_UPDATE_TIMEOUT: Duration = Duration::from_millis(10); + + while !exit.load(Ordering::Relaxed) { + while let Ok(Some(update)) = timeout(SLOT_UPDATE_TIMEOUT, notifications.next()).await { + let current_slot = match update { + // This update indicates that a full slot was received by the connected + // node so we can stop sending transactions to the leader for that slot + SlotUpdate::Completed { slot, .. } => slot.saturating_add(1), + // This update indicates that we have just received the first shred from + // the leader for this slot and they are probably still accepting transactions. + SlotUpdate::FirstShredReceived { slot, .. } => slot, + _ => continue, + }; + recent_slots.record_slot(current_slot); + } + } + + // `notifications` requires a valid reference to `pubsub_client`, so `notifications` must be + // dropped before moving `pubsub_client` via `shutdown()`. + drop(notifications); + unsubscribe().await; + pubsub_client.shutdown().await?; + Ok(()) } } @@ -896,3 +956,13 @@ async fn maybe_fetch_cache_info( maybe_slot_leaders, } } + +fn is_invalid_slot_range_error(client_error: &ClientError) -> bool { + if let ErrorKind::RpcError(RpcError::RpcResponseError { code, message, .. }) = + &client_error.kind + { + return *code == -32602 + && message.contains("Invalid slot range: leader schedule for epoch"); + } + false +} diff --git a/transaction-dos/Cargo.toml b/transaction-dos/Cargo.toml index 1a112b2ff34e4a..ccf7b7094391e9 100644 --- a/transaction-dos/Cargo.toml +++ b/transaction-dos/Cargo.toml @@ -17,7 +17,6 @@ rayon = { workspace = true } solana-clap-utils = { workspace = true } solana-cli = { workspace = true } solana-client = { workspace = true } -solana-core = { workspace = true } solana-faucet = { workspace = true } solana-gossip = { workspace = true } solana-logger = { workspace = true } diff --git a/transaction-metrics-tracker/Cargo.toml b/transaction-metrics-tracker/Cargo.toml index c4882603174422..a908bee9abf30e 100644 --- a/transaction-metrics-tracker/Cargo.toml +++ b/transaction-metrics-tracker/Cargo.toml @@ -10,7 +10,6 @@ license = { workspace = true } edition = { workspace = true } [dependencies] -Inflector = { workspace = true } base64 = { workspace = true } bincode = { workspace = true } # Update this borsh dependency to the workspace version once @@ -19,6 +18,7 @@ log = { workspace = true } rand = { workspace = true } solana-perf = { workspace = true } solana-sdk = { workspace = true } +solana-short-vec = { workspace = true } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/transaction-metrics-tracker/src/lib.rs b/transaction-metrics-tracker/src/lib.rs index 2baec195de9b84..1ae0ab5da36630 100644 --- a/transaction-metrics-tracker/src/lib.rs +++ b/transaction-metrics-tracker/src/lib.rs @@ -3,7 +3,8 @@ use { log::*, rand::Rng, solana_perf::sigverify::PacketError, - solana_sdk::{packet::Packet, short_vec::decode_shortu16_len, signature::SIGNATURE_BYTES}, + solana_sdk::{packet::Packet, signature::SIGNATURE_BYTES}, + solana_short_vec::decode_shortu16_len, }; // The mask is 12 bits long (1<<12 = 4096), it means the probability of matching diff --git a/transaction-status/src/lib.rs b/transaction-status/src/lib.rs index 489a213c525a94..6a5eb5fb8d6397 100644 --- a/transaction-status/src/lib.rs +++ b/transaction-status/src/lib.rs @@ -628,6 +628,7 @@ pub struct Reward { pub type Rewards = Vec; +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct RewardsAndNumPartitions { pub rewards: Rewards, pub num_partitions: Option, 
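Editor's note, not part of the patch: the leader-fetch change in tpu_client.rs above wraps a retry loop in `tokio::time::timeout`, retrying only the transient "Invalid slot range" RPC error and surfacing everything else. A minimal standalone sketch of the same pattern, with hypothetical `fetch` and `is_transient` standing in for `rpc_client.get_slot_leaders()` and `is_invalid_slot_range_error()`:

    use std::time::Duration;
    use tokio::time::{sleep, timeout};

    // Hypothetical stand-ins for the real RPC call and error classifier.
    async fn fetch() -> Result<Vec<u64>, String> {
        Ok(vec![1, 2, 3])
    }
    fn is_transient(err: &str) -> bool {
        err.contains("Invalid slot range")
    }

    async fn fetch_with_retry() -> Result<Vec<u64>, String> {
        let overall = Duration::from_secs(20); // total budget, like tpu_leader_service_creation_timeout
        let between = Duration::from_secs(1); // pause between attempts, like retry_interval
        timeout(overall, async {
            loop {
                match fetch().await {
                    Ok(v) => return Ok(v),
                    // Retry only the known-transient failure; surface everything else.
                    Err(e) if is_transient(&e) => sleep(between).await,
                    Err(e) => return Err(e),
                }
            }
        })
        .await
        // A timeout (outer Err) becomes its own error; `?` then yields the inner Result.
        .map_err(|_| "timed out waiting for slot leaders".to_string())?
    }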
diff --git a/transaction-status/src/parse_stake.rs b/transaction-status/src/parse_stake.rs
index f1586e58694950..aff72d5bbe8320 100644
--- a/transaction-status/src/parse_stake.rs
+++ b/transaction-status/src/parse_stake.rs
@@ -284,6 +284,7 @@ pub fn parse_stake(
                 }),
             })
         }
+        #[allow(deprecated)]
         StakeInstruction::Redelegate => {
             check_num_stake_accounts(&instruction.accounts, 5)?;
             Ok(ParsedInstructionEnum {
diff --git a/transaction-status/src/parse_token.rs b/transaction-status/src/parse_token.rs
index cbdc7ccbb54ab7..bc4e57fc467af7 100644
--- a/transaction-status/src/parse_token.rs
+++ b/transaction-status/src/parse_token.rs
@@ -14,17 +14,11 @@ use {
         parse_account_data::SplTokenAdditionalData,
         parse_token::{token_amount_to_ui_amount_v2, UiAccountState},
     },
-    solana_sdk::{
-        instruction::{AccountMeta, CompiledInstruction, Instruction},
-        message::AccountKeys,
-    },
+    solana_sdk::{instruction::CompiledInstruction, message::AccountKeys},
     spl_token_2022::{
         extension::ExtensionType,
         instruction::{AuthorityType, TokenInstruction},
-        solana_program::{
-            instruction::Instruction as SplTokenInstruction, program_option::COption,
-            pubkey::Pubkey,
-        },
+        solana_program::{program_option::COption, pubkey::Pubkey},
     },
     spl_token_group_interface::instruction::TokenGroupInstruction,
     spl_token_metadata_interface::instruction::TokenMetadataInstruction,
@@ -851,23 +845,6 @@ fn check_num_token_accounts(accounts: &[u8], num: usize) -> Result<(), ParseInst
     check_num_accounts(accounts, num, ParsableProgram::SplToken)
 }
 
-#[deprecated(since = "1.16.0", note = "Instruction conversions no longer needed")]
-pub fn spl_token_instruction(instruction: SplTokenInstruction) -> Instruction {
-    Instruction {
-        program_id: instruction.program_id,
-        accounts: instruction
-            .accounts
-            .iter()
-            .map(|meta| AccountMeta {
-                pubkey: meta.pubkey,
-                is_signer: meta.is_signer,
-                is_writable: meta.is_writable,
-            })
-            .collect(),
-        data: instruction.data,
-    }
-}
-
 fn map_coption_pubkey(pubkey: COption<Pubkey>) -> Option<String> {
     match pubkey {
         COption::Some(pubkey) => Some(pubkey.to_string()),
diff --git a/transaction-view/Cargo.toml b/transaction-view/Cargo.toml
new file mode 100644
index 00000000000000..0b3f4e828c969d
--- /dev/null
+++ b/transaction-view/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+name = "agave-transaction-view"
+description = "Agave TransactionView"
+documentation = "https://docs.rs/agave-transaction-view"
+version = { workspace = true }
+authors = { workspace = true }
+repository = { workspace = true }
+homepage = { workspace = true }
+license = { workspace = true }
+edition = { workspace = true }
+
+[dependencies]
+solana-sdk = { workspace = true }
+
+[dev-dependencies]
+# See order-crates-for-publishing.py for using this unusual `path = "."`
+agave-transaction-view = { path = ".", features = ["dev-context-only-utils"] }
+bincode = { workspace = true }
+criterion = { workspace = true }
+
+[features]
+dev-context-only-utils = []
+
+[[bench]]
+name = "bytes"
+harness = false
diff --git a/transaction-view/benches/bytes.rs b/transaction-view/benches/bytes.rs
new file mode 100644
index 00000000000000..e5803cf1dec822
--- /dev/null
+++ b/transaction-view/benches/bytes.rs
@@ -0,0 +1,90 @@
+use {
+    agave_transaction_view::bytes::{optimized_read_compressed_u16, read_compressed_u16},
+    bincode::{serialize_into, DefaultOptions, Options},
+    criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput},
+    solana_sdk::{
+        packet::PACKET_DATA_SIZE,
+        short_vec::{decode_shortu16_len, ShortU16},
+    },
+};
+
+fn setup() -> Vec<(u16, usize, Vec<u8>)> {
+    let options = DefaultOptions::new().with_fixint_encoding(); // Ensure fixed-int encoding
+
+    // Create a vector of all valid u16 values serialized into 16-byte buffers.
+    let mut values = Vec::with_capacity(PACKET_DATA_SIZE);
+    for value in 0..PACKET_DATA_SIZE as u16 {
+        let short_u16 = ShortU16(value);
+        let mut buffer = vec![0u8; 16];
+        let serialized_len = options
+            .serialized_size(&short_u16)
+            .expect("Failed to get serialized size");
+        serialize_into(&mut buffer[..], &short_u16).expect("Serialization failed");
+        values.push((value, serialized_len as usize, buffer));
+    }
+
+    values
+}
+
+fn bench_u16_parsing(c: &mut Criterion) {
+    let values_serialized_lengths_and_buffers = setup();
+    let mut group = c.benchmark_group("compressed_u16_parsing");
+    group.throughput(Throughput::Elements(
+        values_serialized_lengths_and_buffers.len() as u64,
+    ));
+
+    // Benchmark the decode_shortu16_len function from `solana-sdk`
+    group.bench_function("short_u16_decode", |c| {
+        c.iter(|| {
+            decode_shortu16_len_iter(&values_serialized_lengths_and_buffers);
+        })
+    });
+
+    // Benchmark `read_compressed_u16`
+    group.bench_function("read_compressed_u16", |c| {
+        c.iter(|| {
+            read_compressed_u16_iter(&values_serialized_lengths_and_buffers);
+        })
+    });
+
+    group.bench_function("optimized_read_compressed_u16", |c| {
+        c.iter(|| {
+            optimized_read_compressed_u16_iter(&values_serialized_lengths_and_buffers);
+        })
+    });
+}
+
+fn decode_shortu16_len_iter(values_serialized_lengths_and_buffers: &[(u16, usize, Vec<u8>)]) {
+    for (value, serialized_len, buffer) in values_serialized_lengths_and_buffers.iter() {
+        let (read_value, bytes_read) = decode_shortu16_len(black_box(buffer)).unwrap();
+        assert_eq!(read_value, *value as usize, "Value mismatch for: {}", value);
+        assert_eq!(
+            bytes_read, *serialized_len,
+            "Offset mismatch for: {}",
+            value
+        );
+    }
+}
+
+fn read_compressed_u16_iter(values_serialized_lengths_and_buffers: &[(u16, usize, Vec<u8>)]) {
+    for (value, serialized_len, buffer) in values_serialized_lengths_and_buffers.iter() {
+        let mut offset = 0;
+        let read_value = read_compressed_u16(black_box(buffer), &mut offset).unwrap();
+        assert_eq!(read_value, *value, "Value mismatch for: {}", value);
+        assert_eq!(offset, *serialized_len, "Offset mismatch for: {}", value);
+    }
+}
+
+fn optimized_read_compressed_u16_iter(
+    values_serialized_lengths_and_buffers: &[(u16, usize, Vec<u8>)],
+) {
+    for (value, serialized_len, buffer) in values_serialized_lengths_and_buffers.iter() {
+        let mut offset = 0;
+        let read_value = optimized_read_compressed_u16(black_box(buffer), &mut offset).unwrap();
+        assert_eq!(read_value, *value, "Value mismatch for: {}", value);
+        assert_eq!(offset, *serialized_len, "Offset mismatch for: {}", value);
+    }
+}
+
+criterion_group!(benches, bench_u16_parsing);
+criterion_main!(benches);
diff --git a/transaction-view/src/bytes.rs b/transaction-view/src/bytes.rs
new file mode 100644
index 00000000000000..a67d8a2ddd8b35
--- /dev/null
+++ b/transaction-view/src/bytes.rs
@@ -0,0 +1,310 @@
+use crate::result::{Result, TransactionParsingError};
+
+/// Check that the buffer has at least `len` bytes remaining starting at
+/// `offset`. Returns Err if the buffer is too short.
+///
+/// Assumptions:
+/// - The current offset is not greater than `bytes.len()`.
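+///
+/// For example, `check_remaining(&[1, 2, 3], 1, 2)` succeeds (two bytes remain
+/// past offset 1), while `check_remaining(&[1, 2, 3], 1, 3)` fails.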
+#[inline(always)]
+pub fn check_remaining(bytes: &[u8], offset: usize, len: usize) -> Result<()> {
+    if len > bytes.len().wrapping_sub(offset) {
+        Err(TransactionParsingError)
+    } else {
+        Ok(())
+    }
+}
+
+/// Check that the buffer has at least 1 byte remaining starting at `offset`.
+/// Returns Err if the buffer is too short.
+#[inline(always)]
+pub fn read_byte(bytes: &[u8], offset: &mut usize) -> Result<u8> {
+    // Implicitly checks that the offset is within bounds, no need
+    // to call `check_remaining` explicitly here.
+    let value = bytes.get(*offset).copied().ok_or(TransactionParsingError);
+    *offset = offset.wrapping_add(1);
+    value
+}
+
+/// Read a compressed u16 from `bytes` starting at `offset`.
+/// If the buffer is too short or the encoding is invalid, return Err.
+/// `offset` is updated to point to the byte after the compressed u16.
+///
+/// Assumptions:
+/// - The current offset is not greater than `bytes.len()`.
+#[inline(always)]
+pub fn read_compressed_u16(bytes: &[u8], offset: &mut usize) -> Result<u16> {
+    let mut result = 0u16;
+    let mut shift = 0u16;
+
+    for i in 0..3 {
+        // Implicitly checks that the offset is within bounds, no need
+        // to call check_remaining explicitly here.
+        let byte = *bytes
+            .get(offset.wrapping_add(i))
+            .ok_or(TransactionParsingError)?;
+        // non-minimal encoding or overflow
+        if (i > 0 && byte == 0) || (i == 2 && byte > 3) {
+            return Err(TransactionParsingError);
+        }
+        result |= ((byte & 0x7F) as u16) << shift;
+        shift = shift.wrapping_add(7);
+        if byte & 0x80 == 0 {
+            *offset = offset.wrapping_add(i).wrapping_add(1);
+            return Ok(result);
+        }
+    }
+
+    // if we reach here, it means that all 3 bytes were used
+    *offset = offset.wrapping_add(3);
+    Ok(result)
+}
+
+/// Domain-specific optimization for reading a compressed u16.
+/// The compressed u16's are only used for array-lengths in our transaction
+/// format. The transaction packet has a maximum size of 1232 bytes.
+/// This means that the maximum array length within a **valid** transaction is
+/// 1232. This has a minimally encoded length of 2 bytes.
+/// Although the encoding scheme allows for more, any arrays with this length
+/// would be too large to fit in a packet. This function optimizes for this
+/// case, and reads a maximum of 2 bytes.
+/// If the buffer is too short or the encoding is invalid, return Err.
+/// `offset` is updated to point to the byte after the compressed u16.
+#[inline(always)]
+pub fn optimized_read_compressed_u16(bytes: &[u8], offset: &mut usize) -> Result<u16> {
+    let mut result = 0u16;
+
+    // First byte
+    let byte1 = *bytes.get(*offset).ok_or(TransactionParsingError)?;
+    result |= (byte1 & 0x7F) as u16;
+    if byte1 & 0x80 == 0 {
+        *offset = offset.wrapping_add(1);
+        return Ok(result);
+    }
+
+    // Second byte
+    let byte2 = *bytes
+        .get(offset.wrapping_add(1))
+        .ok_or(TransactionParsingError)?;
+    if byte2 == 0 || byte2 & 0x80 != 0 {
+        return Err(TransactionParsingError); // non-minimal encoding or overflow
+    }
+    result |= ((byte2 & 0x7F) as u16) << 7;
+    *offset = offset.wrapping_add(2);
+
+    Ok(result)
+}
+
+/// Update the `offset` to point to the byte after an array of length `len` and
+/// of type `T`. If the buffer is too short, return Err.
+///
+/// Assumptions:
+/// 1. The current offset is not greater than `bytes.len()`.
+/// 2. The size of `T` is small enough such that a usize will not overflow if
+///    given the maximum array size (u16::MAX).
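+///
+/// For example, with `T = u64` and `len = 16` this verifies that 128 bytes
+/// remain and then advances `offset` by 128.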
+#[inline(always)]
+pub fn offset_array_len<T: Sized>(bytes: &[u8], offset: &mut usize, len: u16) -> Result<()> {
+    let array_len_bytes = usize::from(len).wrapping_mul(core::mem::size_of::<T>());
+    check_remaining(bytes, *offset, array_len_bytes)?;
+    *offset = offset.wrapping_add(array_len_bytes);
+    Ok(())
+}
+
+/// Update the `offset` to point to the byte after the `T`.
+/// If the buffer is too short, return Err.
+///
+/// Assumptions:
+/// 1. The current offset is not greater than `bytes.len()`.
+/// 2. The size of `T` is small enough such that a usize will not overflow.
+#[inline(always)]
+pub fn offset_type<T: Sized>(bytes: &[u8], offset: &mut usize) -> Result<()> {
+    let type_size = core::mem::size_of::<T>();
+    check_remaining(bytes, *offset, type_size)?;
+    *offset = offset.wrapping_add(type_size);
+    Ok(())
+}
+
+#[cfg(test)]
+mod tests {
+    use {
+        super::*,
+        bincode::{serialize_into, DefaultOptions, Options},
+        solana_sdk::{packet::PACKET_DATA_SIZE, short_vec::ShortU16},
+    };
+
+    #[test]
+    fn test_check_remaining() {
+        // Empty buffer checks
+        assert!(check_remaining(&[], 0, 0).is_ok());
+        assert!(check_remaining(&[], 0, 1).is_err());
+
+        // Buffer with data checks
+        assert!(check_remaining(&[1, 2, 3], 0, 0).is_ok());
+        assert!(check_remaining(&[1, 2, 3], 0, 1).is_ok());
+        assert!(check_remaining(&[1, 2, 3], 0, 3).is_ok());
+        assert!(check_remaining(&[1, 2, 3], 0, 4).is_err());
+
+        // Non-zero offset.
+        assert!(check_remaining(&[1, 2, 3], 1, 0).is_ok());
+        assert!(check_remaining(&[1, 2, 3], 1, 1).is_ok());
+        assert!(check_remaining(&[1, 2, 3], 1, 2).is_ok());
+        assert!(check_remaining(&[1, 2, 3], 1, usize::MAX).is_err());
+    }
+
+    #[test]
+    fn test_read_byte() {
+        let bytes = [5, 6, 7];
+        let mut offset = 0;
+        assert_eq!(read_byte(&bytes, &mut offset), Ok(5));
+        assert_eq!(offset, 1);
+        assert_eq!(read_byte(&bytes, &mut offset), Ok(6));
+        assert_eq!(offset, 2);
+        assert_eq!(read_byte(&bytes, &mut offset), Ok(7));
+        assert_eq!(offset, 3);
+        assert!(read_byte(&bytes, &mut offset).is_err());
+    }
+
+    #[test]
+    fn test_read_compressed_u16() {
+        let mut buffer = [0u8; 1024];
+        let options = DefaultOptions::new().with_fixint_encoding(); // Ensure fixed-int encoding
+
+        // Test all possible u16 values
+        for value in 0..=u16::MAX {
+            let mut offset;
+            let short_u16 = ShortU16(value);
+
+            // Serialize the value into the buffer
+            serialize_into(&mut buffer[..], &short_u16).expect("Serialization failed");
+
+            // Use bincode's size calculation to determine the length of the serialized data
+            let serialized_len = options
+                .serialized_size(&short_u16)
+                .expect("Failed to get serialized size");
+
+            // Reset offset
+            offset = 0;
+
+            // Read the value back using unchecked_read_u16_compressed
+            let read_value = read_compressed_u16(&buffer, &mut offset);
+
+            // Assert that the read value matches the original value
+            assert_eq!(read_value, Ok(value), "Value mismatch for: {}", value);
+
+            // Assert that the offset matches the serialized length
+            assert_eq!(
+                offset, serialized_len as usize,
+                "Offset mismatch for: {}",
+                value
+            );
+        }
+
+        // Test bounds.
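+        // Worked example: [0xFF, 0xFF, 0x03] decodes as
+        // 0x7F | (0x7F << 7) | (0x03 << 14) = 0xFFFF = u16::MAX,
+        // while a third byte above 0x03 would overflow 16 bits.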
+        // All 0s => 0
+        assert_eq!(Ok(0), read_compressed_u16(&[0; 3], &mut 0));
+        // Overflow
+        assert!(read_compressed_u16(&[0xFF, 0xFF, 0x04], &mut 0).is_err());
+        assert_eq!(
+            read_compressed_u16(&[0xFF, 0xFF, 0x03], &mut 0),
+            Ok(u16::MAX)
+        );
+
+        // overflow errors
+        assert!(read_compressed_u16(&[u8::MAX; 1], &mut 0).is_err());
+        assert!(read_compressed_u16(&[u8::MAX; 2], &mut 0).is_err());
+
+        // Minimal encoding checks
+        assert!(read_compressed_u16(&[0x81, 0x80, 0x00], &mut 0).is_err());
+    }
+
+    #[test]
+    fn test_optimized_read_compressed_u16() {
+        let mut buffer = [0u8; 1024];
+        let options = DefaultOptions::new().with_fixint_encoding(); // Ensure fixed-int encoding
+
+        // Test all possible u16 values under the packet length
+        for value in 0..=PACKET_DATA_SIZE as u16 {
+            let mut offset;
+            let short_u16 = ShortU16(value);
+
+            // Serialize the value into the buffer
+            serialize_into(&mut buffer[..], &short_u16).expect("Serialization failed");
+
+            // Use bincode's size calculation to determine the length of the serialized data
+            let serialized_len = options
+                .serialized_size(&short_u16)
+                .expect("Failed to get serialized size");
+
+            // Reset offset
+            offset = 0;
+
+            // Read the value back using unchecked_read_u16_compressed
+            let read_value = optimized_read_compressed_u16(&buffer, &mut offset);
+
+            // Assert that the read value matches the original value
+            assert_eq!(read_value, Ok(value), "Value mismatch for: {}", value);
+
+            // Assert that the offset matches the serialized length
+            assert_eq!(
+                offset, serialized_len as usize,
+                "Offset mismatch for: {}",
+                value
+            );
+        }
+
+        // Test bounds.
+        // All 0s => 0
+        assert_eq!(Ok(0), optimized_read_compressed_u16(&[0; 3], &mut 0));
+        // Overflow
+        assert!(optimized_read_compressed_u16(&[0xFF, 0xFF, 0x04], &mut 0).is_err());
+        assert!(optimized_read_compressed_u16(&[0xFF, 0x80], &mut 0).is_err());
+
+        // overflow errors
+        assert!(optimized_read_compressed_u16(&[u8::MAX; 1], &mut 0).is_err());
+        assert!(optimized_read_compressed_u16(&[u8::MAX; 2], &mut 0).is_err());
+
+        // Minimal encoding checks
+        assert!(optimized_read_compressed_u16(&[0x81, 0x00], &mut 0).is_err());
+    }
+
+    #[test]
+    fn test_offset_array_len() {
+        #[repr(C)]
+        struct MyStruct {
+            _a: u8,
+            _b: u8,
+        }
+        const _: () = assert!(core::mem::size_of::<MyStruct>() == 2);
+
+        // Test with a buffer that is too short
+        let bytes = [0u8; 1];
+        let mut offset = 0;
+        assert!(offset_array_len::<MyStruct>(&bytes, &mut offset, 1).is_err());
+
+        // Test with a buffer that is long enough
+        let bytes = [0u8; 4];
+        let mut offset = 0;
+        assert!(offset_array_len::<MyStruct>(&bytes, &mut offset, 2).is_ok());
+        assert_eq!(offset, 4);
+    }
+
+    #[test]
+    fn test_offset_type() {
+        #[repr(C)]
+        struct MyStruct {
+            _a: u8,
+            _b: u8,
+        }
+        const _: () = assert!(core::mem::size_of::<MyStruct>() == 2);
+
+        // Test with a buffer that is too short
+        let bytes = [0u8; 1];
+        let mut offset = 0;
+        assert!(offset_type::<MyStruct>(&bytes, &mut offset).is_err());
+
+        // Test with a buffer that is long enough
+        let bytes = [0u8; 4];
+        let mut offset = 0;
+        assert!(offset_type::<MyStruct>(&bytes, &mut offset).is_ok());
+        assert_eq!(offset, 2);
+    }
+}
diff --git a/transaction-view/src/lib.rs b/transaction-view/src/lib.rs
new file mode 100644
index 00000000000000..a16187f62ccd82
--- /dev/null
+++ b/transaction-view/src/lib.rs
@@ -0,0 +1,9 @@
+// Parsing helpers only need to be public for benchmarks.
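+// With `dev-context-only-utils` enabled the module is `pub` so the benches can
+// reach it; in normal builds the second declaration below keeps it private.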
+#[cfg(feature = "dev-context-only-utils")]
+#[allow(dead_code)]
+pub mod bytes;
+#[cfg(not(feature = "dev-context-only-utils"))]
+#[allow(dead_code)]
+mod bytes;
+
+pub mod result;
diff --git a/transaction-view/src/result.rs b/transaction-view/src/result.rs
new file mode 100644
index 00000000000000..1997a784b73650
--- /dev/null
+++ b/transaction-view/src/result.rs
@@ -0,0 +1,3 @@
+#[derive(Debug, PartialEq, Eq)]
+pub struct TransactionParsingError;
+pub type Result<T> = core::result::Result<T, TransactionParsingError>; // no distinction between errors for now
diff --git a/turbine/src/broadcast_stage.rs b/turbine/src/broadcast_stage.rs
index c13d72ee46159a..cce43f4fa5aabd 100644
--- a/turbine/src/broadcast_stage.rs
+++ b/turbine/src/broadcast_stage.rs
@@ -20,7 +20,7 @@ use {
     solana_measure::measure::Measure,
     solana_metrics::{inc_new_counter_error, inc_new_counter_info},
     solana_poh::poh_recorder::WorkingBankEntry,
-    solana_runtime::bank_forks::BankForks,
+    solana_runtime::{bank::MAX_LEADER_SCHEDULE_STAKES, bank_forks::BankForks},
     solana_sdk::{
         clock::Slot,
         pubkey::Pubkey,
@@ -31,6 +31,7 @@ use {
         sendmmsg::{batch_send, SendPktsError},
         socket::SocketAddrSpace,
     },
+    static_assertions::const_assert_eq,
     std::{
         collections::{HashMap, HashSet},
         net::{SocketAddr, UdpSocket},
@@ -52,7 +53,8 @@ pub(crate) mod broadcast_utils;
 mod fail_entry_verification_broadcast_run;
 mod standard_broadcast_run;
 
-const CLUSTER_NODES_CACHE_NUM_EPOCH_CAP: usize = 8;
+const_assert_eq!(CLUSTER_NODES_CACHE_NUM_EPOCH_CAP, 5);
+const CLUSTER_NODES_CACHE_NUM_EPOCH_CAP: usize = MAX_LEADER_SCHEDULE_STAKES as usize;
 const CLUSTER_NODES_CACHE_TTL: Duration = Duration::from_secs(5);
 
 pub(crate) type RecordReceiver = Receiver<(Arc<Vec<Shred>>, Option<BroadcastShredBatchInfo>)>;
diff --git a/turbine/src/broadcast_stage/standard_broadcast_run.rs b/turbine/src/broadcast_stage/standard_broadcast_run.rs
index f108fc08226a6b..0ddbe1020f5f98 100644
--- a/turbine/src/broadcast_stage/standard_broadcast_run.rs
+++ b/turbine/src/broadcast_stage/standard_broadcast_run.rs
@@ -506,8 +506,14 @@ impl BroadcastRun for StandardBroadcastRun {
     }
 }
 
-fn should_chain_merkle_shreds(_slot: Slot, cluster_type: ClusterType) -> bool {
-    cluster_type == ClusterType::Development
+fn should_chain_merkle_shreds(slot: Slot, cluster_type: ClusterType) -> bool {
+    match cluster_type {
+        ClusterType::Development => true,
+        ClusterType::Devnet => false,
+        ClusterType::MainnetBeta => false,
+        // Roll out chained Merkle shreds to ~5% of testnet.
+        ClusterType::Testnet => slot % 19 == 1,
+    }
 }
 
 #[cfg(test)]
diff --git a/turbine/src/retransmit_stage.rs b/turbine/src/retransmit_stage.rs
index c4c7a751ab24ce..32537db6b9abe9 100644
--- a/turbine/src/retransmit_stage.rs
+++ b/turbine/src/retransmit_stage.rs
@@ -19,12 +19,16 @@ use {
     solana_rayon_threadlimit::get_thread_count,
     solana_rpc::{max_slots::MaxSlots, rpc_subscriptions::RpcSubscriptions},
     solana_rpc_client_api::response::SlotUpdate,
-    solana_runtime::{bank::Bank, bank_forks::BankForks},
+    solana_runtime::{
+        bank::{Bank, MAX_LEADER_SCHEDULE_STAKES},
+        bank_forks::BankForks,
+    },
     solana_sdk::{clock::Slot, pubkey::Pubkey, timing::timestamp},
     solana_streamer::{
         sendmmsg::{multi_target_send, SendPktsError},
         socket::SocketAddrSpace,
     },
+    static_assertions::const_assert_eq,
     std::{
         collections::HashMap,
         iter::repeat,
@@ -47,7 +51,8 @@ const DEDUPER_RESET_CYCLE: Duration = Duration::from_secs(5 * 60);
 
 // Minimum number of shreds to use rayon parallel iterators.
const PAR_ITER_MIN_NUM_SHREDS: usize = 2; -const CLUSTER_NODES_CACHE_NUM_EPOCH_CAP: usize = 8; +const_assert_eq!(CLUSTER_NODES_CACHE_NUM_EPOCH_CAP, 5); +const CLUSTER_NODES_CACHE_NUM_EPOCH_CAP: usize = MAX_LEADER_SCHEDULE_STAKES as usize; const CLUSTER_NODES_CACHE_TTL: Duration = Duration::from_secs(5); #[derive(Default)] diff --git a/unified-scheduler-pool/Cargo.toml b/unified-scheduler-pool/Cargo.toml index 8528fef348c649..46a020c661280d 100644 --- a/unified-scheduler-pool/Cargo.toml +++ b/unified-scheduler-pool/Cargo.toml @@ -18,9 +18,9 @@ log = { workspace = true } qualifier_attr = { workspace = true } scopeguard = { workspace = true } solana-ledger = { workspace = true } -solana-program-runtime = { workspace = true } solana-runtime = { workspace = true } solana-sdk = { workspace = true } +solana-timings = { workspace = true } solana-unified-scheduler-logic = { workspace = true } vec_extract_if_polyfill = { workspace = true } diff --git a/unified-scheduler-pool/src/lib.rs b/unified-scheduler-pool/src/lib.rs index c57217285f4762..bf1dfe886e2430 100644 --- a/unified-scheduler-pool/src/lib.rs +++ b/unified-scheduler-pool/src/lib.rs @@ -20,7 +20,6 @@ use { solana_ledger::blockstore_processor::{ execute_batch, TransactionBatchWithIndexes, TransactionStatusSender, }, - solana_program_runtime::timings::ExecuteTimings, solana_runtime::{ bank::Bank, installed_scheduler_pool::{ @@ -36,6 +35,7 @@ use { pubkey::Pubkey, transaction::{Result, SanitizedTransaction, TransactionError}, }, + solana_timings::ExecuteTimings, solana_unified_scheduler_logic::{SchedulingStateMachine, Task, UsageQueue}, std::{ fmt::Debug, @@ -1462,7 +1462,6 @@ mod tests { super::*, crate::sleepless_testing, assert_matches::assert_matches, - solana_program_runtime::timings::ExecuteTimingType, solana_runtime::{ bank::Bank, bank_forks::BankForks, @@ -1477,7 +1476,11 @@ mod tests { system_transaction, transaction::{SanitizedTransaction, TransactionError}, }, - std::{sync::Arc, thread::JoinHandle}, + solana_timings::ExecuteTimingType, + std::{ + sync::{Arc, RwLock}, + thread::JoinHandle, + }, }; #[derive(Debug)] @@ -1756,7 +1759,7 @@ mod tests { .. } = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); - let bank = setup_dummy_fork_graph(bank); + let (bank, _bank_forks) = setup_dummy_fork_graph(bank); let context = SchedulingContext::new(bank.clone()); @@ -1824,7 +1827,7 @@ mod tests { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); - let bank = setup_dummy_fork_graph(bank); + let (bank, _bank_forks) = setup_dummy_fork_graph(bank); let context = SchedulingContext::new(bank.clone()); @@ -1876,7 +1879,7 @@ mod tests { .. 
} = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); - let bank = setup_dummy_fork_graph(bank); + let (bank, _bank_forks) = setup_dummy_fork_graph(bank); let context = SchedulingContext::new(bank.clone()); @@ -1957,7 +1960,7 @@ mod tests { )); let bank = Bank::new_for_tests(&genesis_config); - let bank = setup_dummy_fork_graph(bank); + let (bank, _bank_forks) = setup_dummy_fork_graph(bank); let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); let pool = SchedulerPool::, _>::new( None, @@ -2050,7 +2053,7 @@ mod tests { } = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); - let bank = setup_dummy_fork_graph(bank); + let (bank, _bank_forks) = setup_dummy_fork_graph(bank); let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); let pool = SchedulerPool::, _>::new( None, @@ -2197,12 +2200,12 @@ mod tests { assert!(!child_bank.has_installed_scheduler()); } - fn setup_dummy_fork_graph(bank: Bank) -> Arc { + fn setup_dummy_fork_graph(bank: Bank) -> (Arc, Arc>) { let slot = bank.slot(); let bank_fork = BankForks::new_rw_arc(bank); let bank = bank_fork.read().unwrap().get(slot).unwrap(); - bank.set_fork_graph_in_program_cache(bank_fork); - bank + bank.set_fork_graph_in_program_cache(Arc::downgrade(&bank_fork)); + (bank, bank_fork) } #[test] @@ -2221,7 +2224,7 @@ mod tests { genesis_config.hash(), )); let bank = Bank::new_for_tests(&genesis_config); - let bank = setup_dummy_fork_graph(bank); + let (bank, _bank_forks) = setup_dummy_fork_graph(bank); let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); let pool = DefaultSchedulerPool::new_dyn(None, None, None, None, ignored_prioritization_fee_cache); @@ -2255,7 +2258,7 @@ mod tests { .. } = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); - let bank = setup_dummy_fork_graph(bank); + let (bank, _bank_forks) = setup_dummy_fork_graph(bank); let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); let pool_raw = DefaultSchedulerPool::do_new( @@ -2384,7 +2387,7 @@ mod tests { let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); - let bank = setup_dummy_fork_graph(bank); + let (bank, _bank_forks) = setup_dummy_fork_graph(bank); // Use 2 transactions with different timings to deliberately cover the two code paths of // notifying panics in the handler threads, taken conditionally depending on whether the @@ -2466,7 +2469,7 @@ mod tests { } = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); - let bank = setup_dummy_fork_graph(bank); + let (bank, _bank_forks) = setup_dummy_fork_graph(bank); let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); let pool = SchedulerPool::, _>::new( None, @@ -2558,7 +2561,7 @@ mod tests { )); let bank = Bank::new_for_tests(&genesis_config); - let bank = setup_dummy_fork_graph(bank); + let (bank, _bank_forks) = setup_dummy_fork_graph(bank); let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); let pool = SchedulerPool::, _>::new_dyn( None, @@ -2619,7 +2622,7 @@ mod tests { // Create two banks for two contexts let bank0 = Bank::new_for_tests(&genesis_config); - let bank0 = setup_dummy_fork_graph(bank0); + let bank0 = setup_dummy_fork_graph(bank0).0; let bank1 = Arc::new(Bank::new_from_parent( bank0.clone(), &Pubkey::default(), @@ -2817,7 +2820,7 @@ mod tests { slot.checked_add(1).unwrap(), ); } - let bank = setup_dummy_fork_graph(bank); + let (bank, _bank_forks) = setup_dummy_fork_graph(bank); let context = SchedulingContext::new(bank.clone()); let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); @@ -2888,7 +2891,7 @@ mod tests { .. } = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); - let bank = &setup_dummy_fork_graph(bank); + let (bank, _bank_forks) = &setup_dummy_fork_graph(bank); let mut tx = system_transaction::transfer( mint_keypair, diff --git a/validator/src/cli.rs b/validator/src/cli.rs index f32dd9f5cae8e1..e1665383e777e3 100644 --- a/validator/src/cli.rs +++ b/validator/src/cli.rs @@ -47,6 +47,7 @@ use { solana_send_transaction_service::send_transaction_service::{ self, MAX_BATCH_SEND_RATE_MS, MAX_TRANSACTION_BATCH_SIZE, }, + solana_streamer::quic::DEFAULT_QUIC_ENDPOINTS, solana_tpu_client::tpu_client::DEFAULT_TPU_CONNECTION_POOL_SIZE, solana_unified_scheduler_pool::DefaultSchedulerPool, std::{path::PathBuf, str::FromStr}, @@ -72,8 +73,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { return App::new(crate_name!()) .about(crate_description!()) .version(version) - .setting(AppSettings::VersionlessSubcommands) - .setting(AppSettings::InferSubcommands) + .global_setting(AppSettings::ColoredHelp) + .global_setting(AppSettings::InferSubcommands) + .global_setting(AppSettings::UnifiedHelpMessage) + .global_setting(AppSettings::VersionlessSubcommands) .arg( Arg::with_name(SKIP_SEED_PHRASE_VALIDATION_ARG.name) .long(SKIP_SEED_PHRASE_VALIDATION_ARG.long) @@ -499,20 +502,21 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> { .long("no-incremental-snapshots") .takes_value(false) .help("Disable incremental snapshots") - .long_help( - "Disable incremental snapshots by setting this flag. When enabled, \ - --snapshot-interval-slots will set the incremental snapshot interval. 
To set \
-                    the full snapshot interval, use --full-snapshot-interval-slots.",
-                ),
         )
         .arg(
-            Arg::with_name("incremental_snapshot_interval_slots")
-                .long("incremental-snapshot-interval-slots")
-                .alias("snapshot-interval-slots")
+            Arg::with_name("snapshot_interval_slots")
+                .long("snapshot-interval-slots")
+                .alias("incremental-snapshot-interval-slots")
                 .value_name("NUMBER")
                 .takes_value(true)
                 .default_value(&default_args.incremental_snapshot_archive_interval_slots)
-                .help("Number of slots between generating snapshots, 0 to disable snapshots"),
+                .help("Number of slots between generating snapshots")
+                .long_help(
+                    "Number of slots between generating snapshots. \
+                     If incremental snapshots are enabled, this sets the incremental snapshot interval. \
+                     If incremental snapshots are disabled, this sets the full snapshot interval. \
+                     Setting this to 0 disables all snapshots.",
+                ),
         )
         .arg(
             Arg::with_name("full_snapshot_interval_slots")
@@ -520,9 +524,10 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
                 .value_name("NUMBER")
                 .takes_value(true)
                 .default_value(&default_args.full_snapshot_archive_interval_slots)
-                .help(
+                .help("Number of slots between generating full snapshots")
+                .long_help(
                     "Number of slots between generating full snapshots. Must be a multiple of the \
-                        incremental snapshot interval.",
+                     incremental snapshot interval. Only used when incremental snapshots are enabled.",
                 ),
         )
         .arg(
@@ -901,6 +906,17 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
                 .hidden(hidden_unless_forced())
                 .help("Controls the rate of the clients connections per IpAddr per minute."),
         )
+        .arg(
+            Arg::with_name("num_quic_endpoints")
+                .long("num-quic-endpoints")
+                .takes_value(true)
+                .default_value(&default_args.num_quic_endpoints)
+                .validator(is_parsable::<usize>)
+                .hidden(hidden_unless_forced())
+                .help("The number of QUIC endpoints used for TPU and TPU-Forward. It can be increased to \
+                       increase network ingest throughput, at the expense of higher CPU and general \
+                       validator load."),
+        )
         .arg(
             Arg::with_name("staked_nodes_overrides")
                 .long("staked-nodes-overrides")
@@ -1178,6 +1194,14 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
                 .multiple(true)
                 .help("Specify the configuration file for the Geyser plugin."),
         )
+        .arg(
+            Arg::with_name("geyser_plugin_always_enabled")
+                .long("geyser-plugin-always-enabled")
+                .value_name("BOOLEAN")
+                .takes_value(true)
+                .default_value("false")
+                .help("Enable Geyser interface even if no Geyser configs are specified."),
+        )
         .arg(
             Arg::with_name("snapshot_archive_format")
                 .long("snapshot-archive-format")
@@ -1529,6 +1553,16 @@ pub fn app<'a>(version: &'a str, default_args: &'a DefaultArgs) -> App<'a, 'a> {
                 .possible_values(BlockProductionMethod::cli_names())
                 .help(BlockProductionMethod::cli_message()),
         )
+        .arg(
+            Arg::with_name("disable_block_production_forwarding")
+                .long("disable-block-production-forwarding")
+                .requires("staked_nodes_overrides")
+                .takes_value(false)
+                .help("Disable forwarding of non-vote transactions in block production. \
+                       By default, forwarding is disabled; it is enabled by setting \
+                       \"staked-nodes-overrides\".
This flag can be used to disable forwarding \ + even when \"staked-nodes-overrides\" is set."), + ) .arg( Arg::with_name("unified_scheduler_handler_threads") .long("unified-scheduler-handler-threads") @@ -1958,16 +1992,6 @@ fn deprecated_arguments() -> Vec { (@into-option $v:expr) => { Some($v) }; } - add_arg!(Arg::with_name("accounts_db_caching_enabled").long("accounts-db-caching-enabled")); - add_arg!( - Arg::with_name("accounts_db_index_hashing") - .long("accounts-db-index-hashing") - .help( - "Enables the use of the index in hash calculation in \ - AccountsHashVerifier/Accounts Background Service.", - ), - usage_warning: "The accounts hash is only calculated without using the index.", - ); add_arg!( Arg::with_name("accounts_db_skip_shrink") .long("accounts-db-skip-shrink") @@ -2037,41 +2061,10 @@ fn deprecated_arguments() -> Vec { .long("enable-quic-servers"), usage_warning: "The quic server is now enabled by default.", ); - add_arg!( - Arg::with_name("halt_on_known_validators_accounts_hash_mismatch") - .alias("halt-on-trusted-validators-accounts-hash-mismatch") - .long("halt-on-known-validators-accounts-hash-mismatch") - .requires("known_validators") - .takes_value(false) - .help( - "Abort the validator if a bank hash mismatch is detected within known validator \ - set" - ), - ); - add_arg!(Arg::with_name("incremental_snapshots") - .long("incremental-snapshots") - .takes_value(false) - .conflicts_with("no_incremental_snapshots") - .help("Enable incremental snapshots") - .long_help( - "Enable incremental snapshots by setting this flag. When enabled, \ - --snapshot-interval-slots will set the incremental snapshot interval. To set the \ - full snapshot interval, use --full-snapshot-interval-slots.", - )); add_arg!(Arg::with_name("minimal_rpc_api") .long("minimal-rpc-api") .takes_value(false) .help("Only expose the RPC methods required to serve snapshots to other nodes")); - add_arg!( - Arg::with_name("no_accounts_db_index_hashing") - .long("no-accounts-db-index-hashing") - .help( - "This is obsolete. See --accounts-db-index-hashing. 
Disables the use of the index in hash calculation in \
- AccountsHashVerifier/Accounts Background Service.",
- ),
- usage_warning: "The accounts hash is only calculated without using the index.",
- );
 add_arg!(
 Arg::with_name("no_check_vote_account")
 .long("no-check-vote-account")
@@ -2234,6 +2227,7 @@ pub struct DefaultArgs {
 pub accounts_shrink_ratio: String,
 pub tpu_connection_pool_size: String,
 pub tpu_max_connections_per_ipaddr_per_minute: String,
+ pub num_quic_endpoints: String,
 // Exit subcommand
 pub exit_min_idle_time: String,
@@ -2325,6 +2319,7 @@ impl DefaultArgs {
 tpu_connection_pool_size: DEFAULT_TPU_CONNECTION_POOL_SIZE.to_string(),
 tpu_max_connections_per_ipaddr_per_minute: DEFAULT_MAX_CONNECTIONS_PER_IPADDR_PER_MINUTE.to_string(),
+ num_quic_endpoints: DEFAULT_QUIC_ENDPOINTS.to_string(),
 rpc_max_request_body_size: MAX_REQUEST_BODY_SIZE.to_string(),
 exit_min_idle_time: "10".to_string(),
 exit_max_delinquent_stake: "5".to_string(),
diff --git a/validator/src/main.rs b/validator/src/main.rs
index e8df7beb8b42b5..45c35a43ef5392 100644
--- a/validator/src/main.rs
+++ b/validator/src/main.rs
@@ -957,10 +957,10 @@ pub fn main() {
 .value_of("staked_nodes_overrides")
 .map(str::to_string);
 let staked_nodes_overrides = Arc::new(RwLock::new(
- match staked_nodes_overrides_path {
+ match &staked_nodes_overrides_path {
 None => StakedNodesOverrides::default(),
- Some(p) => load_staked_nodes_overrides(&p).unwrap_or_else(|err| {
- error!("Failed to load stake-nodes-overrides from {}: {}", &p, err);
+ Some(p) => load_staked_nodes_overrides(p).unwrap_or_else(|err| {
+ error!("Failed to load staked-nodes-overrides from {}: {}", p, err);
 clap::Error::with_description(
 "Failed to load configuration of staked-nodes-overrides argument",
 clap::ErrorKind::InvalidValue,
@@ -1303,7 +1303,7 @@ pub fn main() {
 .collect(),
 )
 } else {
- None
+ value_t_or_exit!(matches, "geyser_plugin_always_enabled", bool).then(Vec::new)
 };
 let starting_with_geyser_plugins: bool = on_start_geyser_plugin_config_files.is_some();
@@ -1665,27 +1665,42 @@ pub fn main() {
 })
 });
- let incremental_snapshot_interval_slots =
- value_t_or_exit!(matches, "incremental_snapshot_interval_slots", u64);
- let (full_snapshot_archive_interval_slots, incremental_snapshot_archive_interval_slots) =
- if incremental_snapshot_interval_slots > 0 {
- if !matches.is_present("no_incremental_snapshots") {
- (
- value_t_or_exit!(matches, "full_snapshot_interval_slots", u64),
- incremental_snapshot_interval_slots,
- )
- } else {
- (
- incremental_snapshot_interval_slots,
- DISABLED_SNAPSHOT_ARCHIVE_INTERVAL,
- )
- }
- } else {
+ let (full_snapshot_archive_interval_slots, incremental_snapshot_archive_interval_slots) = match (
+ !matches.is_present("no_incremental_snapshots"),
+ value_t_or_exit!(matches, "snapshot_interval_slots", u64),
+ ) {
+ (_, 0) => {
+ // snapshots are disabled
 (
 DISABLED_SNAPSHOT_ARCHIVE_INTERVAL,
 DISABLED_SNAPSHOT_ARCHIVE_INTERVAL,
 )
- };
+ }
+ (true, incremental_snapshot_interval_slots) => {
+ // incremental snapshots are enabled
+ // use --snapshot-interval-slots for the incremental snapshot interval
+ (
+ value_t_or_exit!(matches, "full_snapshot_interval_slots", u64),
+ incremental_snapshot_interval_slots,
+ )
+ }
+ (false, full_snapshot_interval_slots) => {
+ // incremental snapshots are *disabled*
+ // use --snapshot-interval-slots for the *full* snapshot interval
+ // also warn if --full-snapshot-interval-slots was specified
+ if matches.occurrences_of("full_snapshot_interval_slots") > 0 {
+ warn!(
+ "Incremental 
snapshots are disabled, yet --full-snapshot-interval-slots was specified! \ + Note that --full-snapshot-interval-slots is *ignored* when incremental snapshots are disabled. \ + Use --snapshot-interval-slots instead.", + ); + } + ( + full_snapshot_interval_slots, + DISABLED_SNAPSHOT_ARCHIVE_INTERVAL, + ) + } + }; validator_config.snapshot_config = SnapshotConfig { usage: if full_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { @@ -1712,29 +1727,30 @@ pub fn main() { incremental_snapshot_archive_interval_slots, ); + info!( + "Snapshot configuration: full snapshot interval: {} slots, incremental snapshot interval: {} slots", + if full_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { + "disabled".to_string() + } else { + full_snapshot_archive_interval_slots.to_string() + }, + if incremental_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { + "disabled".to_string() + } else { + incremental_snapshot_archive_interval_slots.to_string() + }, + ); + if !is_snapshot_config_valid( &validator_config.snapshot_config, validator_config.accounts_hash_interval_slots, ) { eprintln!( "Invalid snapshot configuration provided: snapshot intervals are incompatible. \ - \n\t- full snapshot interval MUST be a multiple of incremental snapshot interval (if \ - enabled)\ - \n\t- full snapshot interval MUST be larger than incremental snapshot \ - interval (if enabled)\ - \nSnapshot configuration values:\ - \n\tfull snapshot interval: {}\ - \n\tincremental snapshot interval: {}", - if full_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { - "disabled".to_string() - } else { - full_snapshot_archive_interval_slots.to_string() - }, - if incremental_snapshot_archive_interval_slots == DISABLED_SNAPSHOT_ARCHIVE_INTERVAL { - "disabled".to_string() - } else { - incremental_snapshot_archive_interval_slots.to_string() - }, + \n\t- full snapshot interval MUST be a multiple of incremental snapshot interval \ + (if enabled) \ + \n\t- full snapshot interval MUST be larger than incremental snapshot interval \ + (if enabled)", ); exit(1); } @@ -1767,6 +1783,10 @@ pub fn main() { BlockProductionMethod ) .unwrap_or_default(); + validator_config.enable_block_production_forwarding = staked_nodes_overrides_path + .as_ref() + .map(|_| !matches.is_present("disable_block_production_forwarding")) + .unwrap_or_default(); validator_config.unified_scheduler_handler_threads = value_t!(matches, "unified_scheduler_handler_threads", usize).ok(); @@ -1926,6 +1946,7 @@ pub fn main() { }) }); + let num_quic_endpoints = value_t_or_exit!(matches, "num_quic_endpoints", NonZeroUsize); let node_config = NodeConfig { gossip_addr, port_range: dynamic_port_range, @@ -1933,6 +1954,7 @@ pub fn main() { public_tpu_addr, public_tpu_forwards_addr, num_tvu_sockets: tvu_receive_threads, + num_quic_endpoints, }; let cluster_entrypoints = entrypoint_addrs diff --git a/vote/src/vote_account.rs b/vote/src/vote_account.rs index 11df8c734d1c7b..14cc788cca13d9 100644 --- a/vote/src/vote_account.rs +++ b/vote/src/vote_account.rs @@ -11,6 +11,7 @@ use { cmp::Ordering, collections::{hash_map::Entry, HashMap}, iter::FromIterator, + mem, sync::{Arc, OnceLock}, }, thiserror::Error, @@ -81,8 +82,8 @@ impl VoteAccount { } /// VoteState.node_pubkey of this vote-account. 
- pub fn node_pubkey(&self) -> Option<Pubkey> {
- Some(self.vote_state().ok()?.node_pubkey)
+ pub fn node_pubkey(&self) -> Option<&Pubkey> {
+ self.vote_state().ok().map(|s| &s.node_pubkey)
 }
 }
@@ -103,7 +104,7 @@ impl VoteAccounts {
 .values()
 .filter(|(stake, _)| *stake != 0u64)
 .filter_map(|(stake, vote_account)| {
- Some((vote_account.node_pubkey()?, stake))
+ Some((*vote_account.node_pubkey()?, stake))
 })
 .into_grouping_map()
 .aggregate(|acc, _node_pubkey, stake| {
@@ -144,11 +145,44 @@ impl VoteAccounts {
 Some(vote_account)
 }
- pub fn insert(&mut self, pubkey: Pubkey, (stake, vote_account): (u64, VoteAccount)) {
- self.add_node_stake(stake, &vote_account);
+ pub fn insert(
+ &mut self,
+ pubkey: Pubkey,
+ new_vote_account: VoteAccount,
+ calculate_stake: impl FnOnce() -> u64,
+ ) -> Option<VoteAccount> {
 let vote_accounts = Arc::make_mut(&mut self.vote_accounts);
- if let Some((stake, vote_account)) = vote_accounts.insert(pubkey, (stake, vote_account)) {
- self.sub_node_stake(stake, &vote_account);
+ match vote_accounts.entry(pubkey) {
+ Entry::Occupied(mut entry) => {
+ // This is an upsert; we need to update the vote state and move the stake if needed.
+ let (stake, old_vote_account) = entry.get_mut();
+
+ if let Some(staked_nodes) = self.staked_nodes.get_mut() {
+ let old_node_pubkey = old_vote_account.node_pubkey();
+ let new_node_pubkey = new_vote_account.node_pubkey();
+ if new_node_pubkey != old_node_pubkey {
+ // The node pubkey has changed; move the stake from the old node to the
+ // new one
+ Self::do_sub_node_stake(staked_nodes, *stake, old_node_pubkey);
+ Self::do_add_node_stake(staked_nodes, *stake, new_node_pubkey.copied());
+ }
+ }
+
+ // Update the vote state
+ Some(mem::replace(old_vote_account, new_vote_account))
+ }
+ Entry::Vacant(entry) => {
+ // This is a new vote account. We don't know the stake yet, so we need to compute it. 
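+ // Note: taking `calculate_stake` as a closure keeps the (potentially expensive)
+ // stake lookup lazy: it only runs on this vacant-entry path; the occupied-entry
+ // path above never calls it (the tests below assert this with a panicking closure).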
+ let (stake, vote_account) = entry.insert((calculate_stake(), new_vote_account));
+ if let Some(staked_nodes) = self.staked_nodes.get_mut() {
+ Self::do_add_node_stake(
+ staked_nodes,
+ *stake,
+ vote_account.node_pubkey().copied(),
+ );
+ }
+ None
+ }
 }
 }
@@ -182,37 +216,58 @@ impl VoteAccounts {
 }
 fn add_node_stake(&mut self, stake: u64, vote_account: &VoteAccount) {
- if stake == 0u64 {
- return;
- }
 let Some(staked_nodes) = self.staked_nodes.get_mut() else {
 return;
 };
- if let Some(node_pubkey) = vote_account.node_pubkey() {
+
+ VoteAccounts::do_add_node_stake(staked_nodes, stake, vote_account.node_pubkey().copied());
+ }
+
+ fn do_add_node_stake(
+ staked_nodes: &mut Arc<HashMap<Pubkey, u64>>,
+ stake: u64,
+ node_pubkey: Option<Pubkey>,
+ ) {
+ if stake == 0u64 {
+ return;
+ }
+
+ node_pubkey.map(|node_pubkey| {
 Arc::make_mut(staked_nodes)
 .entry(node_pubkey)
 .and_modify(|s| *s += stake)
- .or_insert(stake);
- }
+ .or_insert(stake)
+ });
 }
 fn sub_node_stake(&mut self, stake: u64, vote_account: &VoteAccount) {
- if stake == 0u64 {
- return;
- }
 let Some(staked_nodes) = self.staked_nodes.get_mut() else {
 return;
 };
- if let Some(node_pubkey) = vote_account.node_pubkey() {
- let Entry::Occupied(mut entry) = Arc::make_mut(staked_nodes).entry(node_pubkey) else {
- panic!("this should not happen!");
- };
- match entry.get().cmp(&stake) {
+
+ VoteAccounts::do_sub_node_stake(staked_nodes, stake, vote_account.node_pubkey());
+ }
+
+ fn do_sub_node_stake(
+ staked_nodes: &mut Arc<HashMap<Pubkey, u64>>,
+ stake: u64,
+ node_pubkey: Option<&Pubkey>,
+ ) {
+ if stake == 0u64 {
+ return;
+ }
+
+ if let Some(node_pubkey) = node_pubkey {
+ let staked_nodes = Arc::make_mut(staked_nodes);
+ let current_stake = staked_nodes
+ .get_mut(node_pubkey)
+ .expect("this should not happen");
+ match (*current_stake).cmp(&stake) {
 Ordering::Less => panic!("subtraction value exceeds node's stake"),
 Ordering::Equal => {
- entry.remove_entry();
+ staked_nodes.remove(node_pubkey);
 }
- Ordering::Greater => *entry.get_mut() -= stake,
+ Ordering::Greater => *current_stake -= stake,
 }
 }
 }
@@ -388,7 +443,7 @@ mod tests {
 {
 if let Some(node_pubkey) = vote_account.node_pubkey() {
 staked_nodes
- .entry(node_pubkey)
+ .entry(*node_pubkey)
 .and_modify(|s| *s += *stake)
 .or_insert(*stake);
 }
@@ -488,7 +543,7 @@ mod tests {
 let mut vote_accounts = VoteAccounts::default();
 // Add vote accounts. 
for (k, (pubkey, (stake, vote_account))) in accounts.iter().enumerate() { - vote_accounts.insert(*pubkey, (*stake, vote_account.clone())); + vote_accounts.insert(*pubkey, vote_account.clone(), || *stake); if (k + 1) % 128 == 0 { assert_eq!( staked_nodes(&accounts[..k + 1]), @@ -532,6 +587,90 @@ mod tests { assert!(vote_accounts.staked_nodes.get().unwrap().is_empty()); } + #[test] + fn test_staked_nodes_update() { + let mut vote_accounts = VoteAccounts::default(); + + let mut rng = rand::thread_rng(); + let pubkey = Pubkey::new_unique(); + let node_pubkey = Pubkey::new_unique(); + let (account1, _) = new_rand_vote_account(&mut rng, Some(node_pubkey)); + let vote_account1 = VoteAccount::try_from(account1).unwrap(); + + // first insert + let ret = vote_accounts.insert(pubkey, vote_account1.clone(), || 42); + assert_eq!(ret, None); + assert_eq!(vote_accounts.get_delegated_stake(&pubkey), 42); + assert_eq!(vote_accounts.staked_nodes().get(&node_pubkey), Some(&42)); + + // update with unchanged state + let ret = vote_accounts.insert(pubkey, vote_account1.clone(), || { + panic!("should not be called") + }); + assert_eq!(ret, Some(vote_account1.clone())); + assert_eq!(vote_accounts.get(&pubkey), Some(&vote_account1)); + // stake is unchanged + assert_eq!(vote_accounts.get_delegated_stake(&pubkey), 42); + assert_eq!(vote_accounts.staked_nodes().get(&node_pubkey), Some(&42)); + + // update with changed state, same node pubkey + let (account2, _) = new_rand_vote_account(&mut rng, Some(node_pubkey)); + let vote_account2 = VoteAccount::try_from(account2).unwrap(); + let ret = vote_accounts.insert(pubkey, vote_account2.clone(), || { + panic!("should not be called") + }); + assert_eq!(ret, Some(vote_account1.clone())); + assert_eq!(vote_accounts.get(&pubkey), Some(&vote_account2)); + // stake is unchanged + assert_eq!(vote_accounts.get_delegated_stake(&pubkey), 42); + assert_eq!(vote_accounts.staked_nodes().get(&node_pubkey), Some(&42)); + + // update with new node pubkey, stake must be moved + let new_node_pubkey = Pubkey::new_unique(); + let (account3, _) = new_rand_vote_account(&mut rng, Some(new_node_pubkey)); + let vote_account3 = VoteAccount::try_from(account3).unwrap(); + let ret = vote_accounts.insert(pubkey, vote_account3.clone(), || { + panic!("should not be called") + }); + assert_eq!(ret, Some(vote_account2.clone())); + assert_eq!(vote_accounts.staked_nodes().get(&node_pubkey), None); + assert_eq!( + vote_accounts.staked_nodes().get(&new_node_pubkey), + Some(&42) + ); + } + + #[test] + fn test_staked_nodes_zero_stake() { + let mut vote_accounts = VoteAccounts::default(); + + let mut rng = rand::thread_rng(); + let pubkey = Pubkey::new_unique(); + let node_pubkey = Pubkey::new_unique(); + let (account1, _) = new_rand_vote_account(&mut rng, Some(node_pubkey)); + let vote_account1 = VoteAccount::try_from(account1).unwrap(); + + // we call this here to initialize VoteAccounts::staked_nodes which is a OnceLock + assert!(vote_accounts.staked_nodes().is_empty()); + let ret = vote_accounts.insert(pubkey, vote_account1.clone(), || 0); + assert_eq!(ret, None); + assert_eq!(vote_accounts.get_delegated_stake(&pubkey), 0); + // ensure that we didn't add a 0 stake entry to staked_nodes + assert_eq!(vote_accounts.staked_nodes().get(&node_pubkey), None); + + // update with new node pubkey, stake is 0 and should remain 0 + let new_node_pubkey = Pubkey::new_unique(); + let (account2, _) = new_rand_vote_account(&mut rng, Some(new_node_pubkey)); + let vote_account2 = VoteAccount::try_from(account2).unwrap(); + 
let ret = vote_accounts.insert(pubkey, vote_account2.clone(), || {
+ panic!("should not be called")
+ });
+ assert_eq!(ret, Some(vote_account1));
+ assert_eq!(vote_accounts.get_delegated_stake(&pubkey), 0);
+ assert_eq!(vote_accounts.staked_nodes().get(&node_pubkey), None);
+ assert_eq!(vote_accounts.staked_nodes().get(&new_node_pubkey), None);
+ }
+
 // Asserts that returned staked-nodes are copy-on-write references.
 #[test]
 fn test_staked_nodes_cow() {
@@ -540,20 +679,20 @@ mod tests {
 // Add vote accounts.
 let mut vote_accounts = VoteAccounts::default();
 for (pubkey, (stake, vote_account)) in (&mut accounts).take(1024) {
- vote_accounts.insert(pubkey, (stake, vote_account));
+ vote_accounts.insert(pubkey, vote_account, || stake);
 }
 let staked_nodes = vote_accounts.staked_nodes();
 let (pubkey, (more_stake, vote_account)) =
 accounts.find(|(_, (stake, _))| *stake != 0).unwrap();
- let node_pubkey = vote_account.node_pubkey().unwrap();
- vote_accounts.insert(pubkey, (more_stake, vote_account));
+ let node_pubkey = *vote_account.node_pubkey().unwrap();
+ vote_accounts.insert(pubkey, vote_account, || more_stake);
 assert_ne!(staked_nodes, vote_accounts.staked_nodes());
 assert_eq!(
 vote_accounts.staked_nodes()[&node_pubkey],
 more_stake + staked_nodes.get(&node_pubkey).copied().unwrap_or_default()
 );
 for (pubkey, stake) in vote_accounts.staked_nodes().iter() {
- if *pubkey != node_pubkey {
+ if pubkey != &node_pubkey {
 assert_eq!(*stake, staked_nodes[pubkey]);
 } else {
 assert_eq!(
@@ -572,7 +711,7 @@ mod tests {
 // Add vote accounts.
 let mut vote_accounts = VoteAccounts::default();
 for (pubkey, (stake, vote_account)) in (&mut accounts).take(1024) {
- vote_accounts.insert(pubkey, (stake, vote_account));
+ vote_accounts.insert(pubkey, vote_account, || stake);
 }
 let vote_accounts_hashmap = Arc::<VoteAccountsHashMap>::from(&vote_accounts);
 assert_eq!(vote_accounts_hashmap, vote_accounts.vote_accounts);
@@ -582,7 +721,7 @@ mod tests {
 ));
 let (pubkey, (more_stake, vote_account)) =
 accounts.find(|(_, (stake, _))| *stake != 0).unwrap();
- vote_accounts.insert(pubkey, (more_stake, vote_account.clone()));
+ vote_accounts.insert(pubkey, vote_account.clone(), || more_stake);
 assert!(!Arc::ptr_eq(
 &vote_accounts_hashmap,
 &vote_accounts.vote_accounts
diff --git a/wen-restart/Cargo.toml b/wen-restart/Cargo.toml
index 57f3451263cabd..25823c9d777855 100644
--- a/wen-restart/Cargo.toml
+++ b/wen-restart/Cargo.toml
@@ -19,11 +19,10 @@ rayon = { workspace = true }
 solana-entry = { workspace = true }
 solana-gossip = { workspace = true }
 solana-ledger = { workspace = true }
-solana-logger = { workspace = true }
 solana-program = { workspace = true }
-solana-program-runtime = { workspace = true }
 solana-runtime = { workspace = true }
 solana-sdk = { workspace = true }
+solana-timings = { workspace = true }
 solana-vote-program = { workspace = true }
 [dev-dependencies]
@@ -32,6 +31,7 @@ rand = { workspace = true }
 serial_test = { workspace = true }
 solana-accounts-db = { workspace = true }
 solana-entry = { workspace = true }
+solana-logger = { workspace = true }
 solana-runtime = { workspace = true, features = ["dev-context-only-utils"] }
 solana-streamer = { workspace = true }
 tempfile = { workspace = true }
diff --git a/wen-restart/proto/wen_restart.proto b/wen-restart/proto/wen_restart.proto
index ef2b7e7346ed5f..e3fc0743ef5dc8 100644
--- a/wen-restart/proto/wen_restart.proto
+++ b/wen-restart/proto/wen_restart.proto
@@ -6,8 +6,7 @@ enum State {
 LAST_VOTED_FORK_SLOTS = 1;
 HEAVIEST_FORK = 2;
 GENERATE_SNAPSHOT = 3;
- WAITING_FOR_SUPERMAJORITY 
= 4;
- DONE = 5;
+ DONE = 4;
 }
 message LastVotedForkSlotsRecord {
diff --git a/wen-restart/src/wen_restart.rs b/wen-restart/src/wen_restart.rs
index 71b8ea027b80e9..5e794ce2f43560 100644
--- a/wen-restart/src/wen_restart.rs
+++ b/wen-restart/src/wen_restart.rs
@@ -28,7 +28,6 @@ use {
 leader_schedule_cache::LeaderScheduleCache,
 },
 solana_program::{clock::Slot, hash::Hash},
- solana_program_runtime::timings::ExecuteTimings,
 solana_runtime::{
 accounts_background_service::AbsRequestSender,
 bank::Bank,
@@ -42,6 +41,7 @@ use {
 },
 },
 solana_sdk::{shred_version::compute_shred_version, timing::timestamp},
+ solana_timings::ExecuteTimings,
 solana_vote_program::vote_state::VoteTransaction,
 std::{
 collections::{HashMap, HashSet},
@@ -68,8 +68,8 @@ const REPAIR_THRESHOLD: f64 = 0.42;
 const HEAVIEST_FORK_THRESHOLD_DELTA: f64 = 0.38;
 // We allow at most 5% of the stake to disagree with us.
 const HEAVIEST_FORK_DISAGREE_THRESHOLD_PERCENT: f64 = 5.0;
-// We update HeaviestFork every 30 minutes or when we can exit.
-const HEAVIEST_REFRESH_INTERVAL_IN_SECONDS: u64 = 1800;
+// We update HeaviestFork at least every 5 minutes.
+const HEAVIEST_REFRESH_INTERVAL_IN_SECONDS: u64 = 300;
 #[derive(Debug, PartialEq)]
 pub enum WenRestartError {
@@ -82,6 +82,7 @@ pub enum WenRestartError {
 FutureSnapshotExists(Slot, Slot, String),
 GenerateSnapshotWhenOneExists(Slot, String),
 MalformedLastVotedForkSlotsProtobuf(Option<LastVotedForkSlotsRecord>),
+ MalformedProgress(RestartState, String),
 MissingLastVotedForkSlots,
 MissingFullSnapshot(String),
 NotEnoughStakeAgreeingWithUs(Slot, Hash, HashMap<(Slot, Hash), u64>),
@@ -135,6 +136,9 @@ impl std::fmt::Display for WenRestartError {
 WenRestartError::MalformedLastVotedForkSlotsProtobuf(record) => {
 write!(f, "Malformed last voted fork slots protobuf: {:?}", record)
 }
+ WenRestartError::MalformedProgress(state, missing) => {
+ write!(f, "Malformed progress: {:?} missing {}", state, missing)
+ }
 WenRestartError::MissingLastVotedForkSlots => {
 write!(f, "Missing last voted fork slots")
 }
@@ -462,7 +466,7 @@ pub(crate) fn generate_snapshot(
 // slot new_root_slot is less than the current highest full_snapshot_slot, that means the
 // locally rooted full_snapshot_slot will be rolled back. This requires human inspection.
 //
- // In even rarer cases, the selected slot might be the last full snapshot slot. We could
+ // In even rarer cases, the selected slot might be the latest full snapshot slot. We could
 // just re-generate a new snapshot to make sure the snapshot is up to date after hard fork,
 // but for now we just return an error to keep the code simple. 
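 // (Hypothetical numbers for the two cases above: with the latest local full snapshot
 // at slot 100, new_root_slot = 90 is the roll-back case and new_root_slot = 100 is
 // the "selected slot is the latest full snapshot slot" error case.)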
check_slot_smaller_than_intended_snapshot_slot(full_snapshot_slot, new_root_slot, directory)?; @@ -590,7 +594,11 @@ pub(crate) fn aggregate_restart_heaviest_fork( let epoch_stakes = root_bank.epoch_stakes(root_bank.epoch()).unwrap(); let total_stake = epoch_stakes.total_stake(); if progress.my_heaviest_fork.is_none() { - return Err(WenRestartError::UnexpectedState(RestartState::HeaviestFork).into()); + return Err(WenRestartError::MalformedProgress( + RestartState::HeaviestFork, + "my_heaviest_fork".to_string(), + ) + .into()); } let my_heaviest_fork = progress.my_heaviest_fork.clone().unwrap(); let heaviest_fork_slot = my_heaviest_fork.slot; @@ -631,17 +639,14 @@ pub(crate) fn aggregate_restart_heaviest_fork( .as_mut() .unwrap() .total_active_stake = total_active_stake; - cluster_info.push_restart_heaviest_fork( - heaviest_fork_slot, - heaviest_fork_hash, - total_active_stake, - ); let mut progress_last_sent = Instant::now(); let mut cursor = solana_gossip::crds::Cursor::default(); let mut progress_changed = false; let majority_stake_required = (total_stake as f64 / 100.0 * adjusted_threshold_percent as f64).round() as u64; + let mut total_active_stake_higher_than_supermajority = false; + let mut first_time_entering_loop = true; loop { if exit.load(Ordering::Relaxed) { return Err(WenRestartError::Exiting.into()); @@ -682,10 +687,21 @@ pub(crate) fn aggregate_restart_heaviest_fork( total_stake ); let can_exit = total_active_stake_seen_supermajority >= majority_stake_required; - // Only send out updates every 30 minutes or when we can exit. + let saw_supermajority_first_time = current_total_active_stake + >= majority_stake_required + && !total_active_stake_higher_than_supermajority + && { + total_active_stake_higher_than_supermajority = true; + true + }; + // Only send out updates every 5 minutes or when we can exit or active stake passes supermajority + // the first time. if progress_last_sent.elapsed().as_secs() >= HEAVIEST_REFRESH_INTERVAL_IN_SECONDS || can_exit + || first_time_entering_loop + || saw_supermajority_first_time { + first_time_entering_loop = false; cluster_info.push_restart_heaviest_fork( heaviest_fork_slot, heaviest_fork_hash, @@ -1086,12 +1102,28 @@ pub(crate) fn initialize( total_active_stake: result.total_active_stake, }) }) - .unwrap(), + .ok_or(WenRestartError::MalformedProgress( + RestartState::HeaviestFork, + "final_result in last_voted_fork_slots_aggregate".to_string(), + ))?, my_heaviest_fork: progress.my_heaviest_fork.clone(), }, progress, )), - _ => Err(WenRestartError::UnexpectedState(progress.state()).into()), + RestartState::GenerateSnapshot => Ok(( + WenRestartProgressInternalState::GenerateSnapshot { + new_root_slot: progress + .my_heaviest_fork + .as_ref() + .ok_or(WenRestartError::MalformedProgress( + RestartState::GenerateSnapshot, + "my_heaviest_fork".to_string(), + ))? 
+ .slot,
+ my_snapshot: progress.my_snapshot.clone(),
+ },
+ progress,
+ )),
 }
 }
@@ -1119,7 +1151,6 @@ pub(crate) fn write_wen_restart_records(
 mod tests {
 use {
 crate::wen_restart::{tests::wen_restart_proto::LastVotedForkSlotsAggregateFinal, *},
- assert_matches::assert_matches,
 solana_accounts_db::hardened_unpack::MAX_GENESIS_ARCHIVE_UNPACKED_SIZE,
 solana_entry::entry::create_ticks,
 solana_gossip::{
@@ -1160,8 +1191,10 @@ mod tests {
 const EXPECTED_SLOTS: Slot = 90;
 const TICKS_PER_SLOT: u64 = 2;
 const TOTAL_VALIDATOR_COUNT: u16 = 20;
- const MY_INDEX: usize = 0;
+ const MY_INDEX: usize = TOTAL_VALIDATOR_COUNT as usize - 1;
 const WAIT_FOR_THREAD_TIMEOUT: u64 = 10_000;
+ const WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT: u64 = 80;
+ const NON_CONFORMING_VALIDATOR_PERCENT: u64 = 5;
 fn push_restart_last_voted_fork_slots(
 cluster_info: Arc<ClusterInfo>,
@@ -1469,11 +1502,18 @@
 .unwrap();
 let mut rng = rand::thread_rng();
 let mut expected_received_last_voted_fork_slots = HashMap::new();
- // Skip the first 5 validators, because 0 is myself, we only need 15 more to reach 80%.
+ let validators_to_take: usize =
+ (WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT * TOTAL_VALIDATOR_COUNT as u64 / 100 - 1)
+ .try_into()
+ .unwrap();
 let mut last_voted_fork_slots_from_others = test_state.last_voted_fork_slots.clone();
 last_voted_fork_slots_from_others.reverse();
 last_voted_fork_slots_from_others.append(&mut expected_slots_to_repair.clone());
- for keypairs in test_state.validator_voting_keypairs.iter().skip(5) {
+ for keypairs in test_state
+ .validator_voting_keypairs
+ .iter()
+ .take(validators_to_take)
+ {
 let node_pubkey = keypairs.node_keypair.pubkey();
 let node = ContactInfo::new_rand(&mut rng, Some(node_pubkey));
 let last_vote_hash = Hash::new_unique();
@@ -1522,8 +1562,20 @@
 }
 // Now simulate receiving HeaviestFork messages.
 let mut expected_received_heaviest_fork = HashMap::new();
+ let validators_to_take: usize = ((WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT
+ - NON_CONFORMING_VALIDATOR_PERCENT)
+ * TOTAL_VALIDATOR_COUNT as u64
+ / 100
+ - 1)
+ .try_into()
+ .unwrap();
 // HeaviestFork only requires 75% vs 80% required for LastVotedForkSlots. We have 5% stake, so we need 70%. 
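+ // As a worked example with the test constants above, assuming the test's equal
+ // 100-stake validators: validators_to_take = (80 - 5) * 20 / 100 - 1 = 14, and
+ // together with our own node that is 15 validators, i.e. 1500 of the 2000 total
+ // stake = 75%.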
- for keypairs in test_state.validator_voting_keypairs.iter().skip(6) {
+ let total_active_stake_during_heaviest_fork = (validators_to_take + 1) as u64 * 100;
+ for keypairs in test_state
+ .validator_voting_keypairs
+ .iter()
+ .take(validators_to_take)
+ {
 let node_pubkey = keypairs.node_keypair.pubkey();
 let node = ContactInfo::new_rand(&mut rng, Some(node_pubkey));
 let now = timestamp();
@@ -1532,7 +1584,7 @@
 &node,
 expected_heaviest_fork_slot,
 &expected_heaviest_fork_bankhash,
- 1500,
+ total_active_stake_during_heaviest_fork,
 &keypairs.node_keypair,
 now,
 );
@@ -1541,7 +1593,7 @@
 HeaviestForkRecord {
 slot: expected_heaviest_fork_slot,
 bankhash: expected_heaviest_fork_bankhash.to_string(),
- total_active_stake: 1500,
+ total_active_stake: total_active_stake_during_heaviest_fork,
 shred_version: SHRED_VERSION as u32,
 wallclock: now,
 },
@@ -1559,9 +1611,18 @@
 let mut expected_slots_stake_map: HashMap<Slot, u64> = test_state
 .last_voted_fork_slots
 .iter()
- .map(|slot| (*slot, 1600))
+ .map(|slot| {
+ (
+ *slot,
+ WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT * TOTAL_VALIDATOR_COUNT as u64,
+ )
+ })
 .collect();
- expected_slots_stake_map.extend(expected_slots_to_repair.iter().map(|slot| (*slot, 1500)));
+ expected_slots_stake_map.extend(
+ expected_slots_to_repair
+ .iter()
+ .map(|slot| (*slot, total_active_stake_during_heaviest_fork)),
+ );
 assert_eq!(
 progress,
 WenRestartProgress {
@@ -1576,7 +1637,8 @@
 received: expected_received_last_voted_fork_slots,
 final_result: Some(LastVotedForkSlotsAggregateFinal {
 slots_stake_map: expected_slots_stake_map,
- total_active_stake: 1600,
+ // We simulate 5% of the stake joining LastVotedForkSlots but not HeaviestFork.
+ total_active_stake: total_active_stake_during_heaviest_fork + 100,
 }),
 }),
 my_heaviest_fork: Some(HeaviestForkRecord {
@@ -1587,16 +1649,17 @@
 .unwrap()
 .bankhash
 .to_string(),
- total_active_stake: 1500,
+ total_active_stake: total_active_stake_during_heaviest_fork,
 shred_version: SHRED_VERSION as u32,
 wallclock: 0,
 }),
 heaviest_fork_aggregate: Some(HeaviestForkAggregateRecord {
 received: expected_received_heaviest_fork,
 final_result: Some(HeaviestForkAggregateFinal {
- total_active_stake: 1500,
- total_active_stake_seen_supermajority: 1500,
- total_active_stake_agreed_with_me: 1500,
+ total_active_stake: total_active_stake_during_heaviest_fork,
+ total_active_stake_seen_supermajority:
+ total_active_stake_during_heaviest_fork,
+ total_active_stake_agreed_with_me: total_active_stake_during_heaviest_fork,
 }),
 }),
 my_snapshot: Some(GenerateSnapshotRecord {
@@ -1618,7 +1681,7 @@
 }
 #[test]
- fn test_wen_restart_initialize_failures() {
+ fn test_wen_restart_initialize() {
 solana_logger::setup();
 let ledger_path = get_tmp_ledger_path_auto_delete!();
 let test_state = wen_restart_test_init(&ledger_path);
@@ -1638,14 +1701,8 @@
 .unwrap(),
 prost::DecodeError::new("invalid wire type value: 7")
 );
- remove_file(&test_state.wen_restart_proto_path).unwrap();
- let last_vote = VoteTransaction::from(TowerSync::from(vec![(0, 8), (1, 1)]));
- assert!(initialize(
- &test_state.wen_restart_proto_path,
- last_vote.clone(),
- test_state.blockstore.clone()
- )
- .is_ok());
+ assert!(remove_file(&test_state.wen_restart_proto_path).is_ok());
+
 let last_vote_bankhash = Hash::new_unique();
 let empty_last_vote = VoteTransaction::from(Vote::new(vec![], last_vote_bankhash));
 assert_eq!(
 initialize(
@@ -1658,19 +1715,6 @@
 .unwrap(),
 WenRestartError::MissingLastVotedForkSlots,
 );
- // Test 
the case where the file is not found. - let _ = remove_file(&test_state.wen_restart_proto_path); - assert_matches!( - initialize(&test_state.wen_restart_proto_path, VoteTransaction::from(Vote::new(last_voted_fork_slots.clone(), last_vote_bankhash)), test_state.blockstore.clone()), - Ok((WenRestartProgressInternalState::Init { last_voted_fork_slots, last_vote_bankhash: bankhash }, progress)) => { - assert_eq!(last_voted_fork_slots, test_state.last_voted_fork_slots); - assert_eq!(bankhash, last_vote_bankhash); - assert_eq!(progress, WenRestartProgress { - state: RestartState::Init.into(), - ..Default::default() - }); - } - ); assert!(write_wen_restart_records( &test_state.wen_restart_proto_path, &WenRestartProgress { @@ -1690,24 +1734,218 @@ mod tests { .to_string(), "Malformed last voted fork slots protobuf: None" ); + let progress_missing_heaviest_fork_aggregate = WenRestartProgress { + state: RestartState::HeaviestFork.into(), + my_heaviest_fork: Some(HeaviestForkRecord { + slot: 0, + bankhash: Hash::new_unique().to_string(), + total_active_stake: 0, + shred_version: SHRED_VERSION as u32, + wallclock: 0, + }), + ..Default::default() + }; assert!(write_wen_restart_records( &test_state.wen_restart_proto_path, - &WenRestartProgress { - state: RestartState::WaitingForSupermajority.into(), - ..Default::default() - }, + &progress_missing_heaviest_fork_aggregate, ) .is_ok()); assert_eq!( initialize( &test_state.wen_restart_proto_path, - VoteTransaction::from(Vote::new(last_voted_fork_slots, last_vote_bankhash)), + VoteTransaction::from(Vote::new(last_voted_fork_slots.clone(), last_vote_bankhash)), + test_state.blockstore.clone() + ).err() + .unwrap() + .to_string(), + "Malformed progress: HeaviestFork missing final_result in last_voted_fork_slots_aggregate", + ); + let progress_missing_my_heaviestfork = WenRestartProgress { + state: RestartState::GenerateSnapshot.into(), + my_snapshot: Some(GenerateSnapshotRecord { + slot: 0, + bankhash: Hash::new_unique().to_string(), + shred_version: SHRED_VERSION as u32, + path: "/path/to/snapshot".to_string(), + }), + ..Default::default() + }; + assert!(write_wen_restart_records( + &test_state.wen_restart_proto_path, + &progress_missing_my_heaviestfork, + ) + .is_ok()); + assert_eq!( + initialize( + &test_state.wen_restart_proto_path, + VoteTransaction::from(Vote::new(last_voted_fork_slots.clone(), last_vote_bankhash)), test_state.blockstore.clone() ) .err() .unwrap() .to_string(), - "Unexpected state: WaitingForSupermajority" + "Malformed progress: GenerateSnapshot missing my_heaviest_fork", + ); + + // Now test successful initialization. + assert!(remove_file(&test_state.wen_restart_proto_path).is_ok()); + // Test the case where the file is not found. 
+ let mut vote = TowerSync::from(vec![(test_state.last_voted_fork_slots[0], 1)]); + vote.hash = last_vote_bankhash; + let last_vote = VoteTransaction::from(vote); + assert_eq!( + initialize( + &test_state.wen_restart_proto_path, + last_vote.clone(), + test_state.blockstore.clone() + ) + .unwrap(), + ( + WenRestartProgressInternalState::Init { + last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), + last_vote_bankhash + }, + WenRestartProgress { + state: RestartState::Init.into(), + ..Default::default() + } + ) + ); + let progress = WenRestartProgress { + state: RestartState::Init.into(), + my_last_voted_fork_slots: Some(LastVotedForkSlotsRecord { + last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), + last_vote_bankhash: last_vote_bankhash.to_string(), + shred_version: SHRED_VERSION as u32, + wallclock: 0, + }), + ..Default::default() + }; + assert!(write_wen_restart_records(&test_state.wen_restart_proto_path, &progress,).is_ok()); + assert_eq!( + initialize( + &test_state.wen_restart_proto_path, + last_vote.clone(), + test_state.blockstore.clone() + ) + .unwrap(), + ( + WenRestartProgressInternalState::Init { + last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), + last_vote_bankhash, + }, + progress + ) + ); + let progress = WenRestartProgress { + state: RestartState::LastVotedForkSlots.into(), + my_last_voted_fork_slots: Some(LastVotedForkSlotsRecord { + last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), + last_vote_bankhash: last_vote_bankhash.to_string(), + shred_version: SHRED_VERSION as u32, + wallclock: 0, + }), + ..Default::default() + }; + assert!(write_wen_restart_records(&test_state.wen_restart_proto_path, &progress,).is_ok()); + assert_eq!( + initialize( + &test_state.wen_restart_proto_path, + last_vote.clone(), + test_state.blockstore.clone() + ) + .unwrap(), + ( + WenRestartProgressInternalState::LastVotedForkSlots { + last_voted_fork_slots: test_state.last_voted_fork_slots.clone(), + aggregate_final_result: None, + }, + progress + ) + ); + let progress = WenRestartProgress { + state: RestartState::HeaviestFork.into(), + my_heaviest_fork: Some(HeaviestForkRecord { + slot: 0, + bankhash: Hash::new_unique().to_string(), + total_active_stake: 0, + shred_version: SHRED_VERSION as u32, + wallclock: 0, + }), + last_voted_fork_slots_aggregate: Some(LastVotedForkSlotsAggregateRecord { + received: HashMap::new(), + final_result: Some(LastVotedForkSlotsAggregateFinal { + slots_stake_map: HashMap::new(), + total_active_stake: 1000, + }), + }), + ..Default::default() + }; + assert!(write_wen_restart_records(&test_state.wen_restart_proto_path, &progress,).is_ok()); + assert_eq!( + initialize( + &test_state.wen_restart_proto_path, + last_vote.clone(), + test_state.blockstore.clone() + ) + .unwrap(), + ( + WenRestartProgressInternalState::FindHeaviestFork { + aggregate_final_result: LastVotedForkSlotsFinalResult { + slots_stake_map: HashMap::new(), + total_active_stake: 1000, + }, + my_heaviest_fork: progress.my_heaviest_fork.clone(), + }, + progress + ) + ); + let progress = WenRestartProgress { + state: RestartState::GenerateSnapshot.into(), + my_heaviest_fork: Some(HeaviestForkRecord { + slot: 0, + bankhash: Hash::new_unique().to_string(), + total_active_stake: 0, + shred_version: SHRED_VERSION as u32, + wallclock: 0, + }), + my_snapshot: Some(GenerateSnapshotRecord { + slot: 0, + bankhash: Hash::new_unique().to_string(), + shred_version: SHRED_VERSION as u32, + path: "/path/to/snapshot".to_string(), + }), + ..Default::default() + }; + 
assert!(write_wen_restart_records(&test_state.wen_restart_proto_path, &progress,).is_ok());
+ assert_eq!(
+ initialize(
+ &test_state.wen_restart_proto_path,
+ VoteTransaction::from(Vote::new(last_voted_fork_slots.clone(), last_vote_bankhash)),
+ test_state.blockstore.clone()
+ )
+ .unwrap(),
+ (
+ WenRestartProgressInternalState::GenerateSnapshot {
+ new_root_slot: 0,
+ my_snapshot: progress.my_snapshot.clone(),
+ },
+ progress,
+ )
+ );
+ let progress = WenRestartProgress {
+ state: RestartState::Done.into(),
+ ..Default::default()
+ };
+ assert!(write_wen_restart_records(&test_state.wen_restart_proto_path, &progress,).is_ok());
+ assert_eq!(
+ initialize(
+ &test_state.wen_restart_proto_path,
+ VoteTransaction::from(Vote::new(last_voted_fork_slots, last_vote_bankhash)),
+ test_state.blockstore.clone()
+ )
+ .unwrap(),
+ (WenRestartProgressInternalState::Done, progress)
 );
 }
@@ -1828,7 +2066,6 @@
 let mut last_voted_fork_slots_from_others = test_state.last_voted_fork_slots.clone();
 last_voted_fork_slots_from_others.reverse();
 last_voted_fork_slots_from_others.append(&mut expected_slots_to_repair.clone());
- // Skip the first 5 validators, because 0 is myself, we need 15 so it hits 80%.
 let progress = WenRestartProgress {
 state: RestartState::LastVotedForkSlots.into(),
 my_last_voted_fork_slots: Some(LastVotedForkSlotsRecord {
@@ -1839,7 +2076,15 @@
 }),
 ..Default::default()
 };
- for keypairs in test_state.validator_voting_keypairs.iter().skip(5) {
+ let validators_to_take: usize =
+ (WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT * TOTAL_VALIDATOR_COUNT as u64 / 100 - 1)
+ .try_into()
+ .unwrap();
+ for keypairs in test_state
+ .validator_voting_keypairs
+ .iter()
+ .take(validators_to_take)
+ {
 let wen_restart_proto_path_clone = test_state.wen_restart_proto_path.clone();
 let cluster_info_clone = test_state.cluster_info.clone();
 let bank_forks_clone = test_state.bank_forks.clone();
@@ -1853,7 +2098,7 @@
 .spawn(move || {
 assert!(aggregate_restart_last_voted_fork_slots(
 &wen_restart_proto_path_clone,
- 80,
+ WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT,
 cluster_info_clone,
 &last_voted_fork_slots,
 bank_forks_clone,
@@ -2287,25 +2532,157 @@
 );
 }
+ fn start_aggregate_heaviest_fork_thread(
+ test_state: &WenRestartTestInitResult,
+ heaviest_fork_slot: Slot,
+ heaviest_fork_bankhash: Hash,
+ exit: Arc<AtomicBool>,
+ expected_error: Option<WenRestartError>,
+ ) -> std::thread::JoinHandle<()> {
+ let progress = wen_restart_proto::WenRestartProgress {
+ state: RestartState::HeaviestFork.into(),
+ my_heaviest_fork: Some(HeaviestForkRecord {
+ slot: heaviest_fork_slot,
+ bankhash: heaviest_fork_bankhash.to_string(),
+ total_active_stake: WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT
+ .saturating_mul(TOTAL_VALIDATOR_COUNT as u64),
+ shred_version: SHRED_VERSION as u32,
+ wallclock: 0,
+ }),
+ ..Default::default()
+ };
+ let wen_restart_path = test_state.wen_restart_proto_path.clone();
+ let cluster_info = test_state.cluster_info.clone();
+ let bank_forks = test_state.bank_forks.clone();
+ Builder::new()
+ .name("solana-wen-restart-aggregate-heaviest-fork".to_string())
+ .spawn(move || {
+ let result = aggregate_restart_heaviest_fork(
+ &wen_restart_path,
+ WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT,
+ cluster_info,
+ bank_forks,
+ exit,
+ &mut progress.clone(),
+ );
+ if let Some(expected_error) = expected_error {
+ assert_eq!(
+ result.unwrap_err().downcast::<WenRestartError>().unwrap(),
+ expected_error
+ );
+ } else {
+ assert!(result.is_ok());
+ }
+ })
+ .unwrap()
+ }
+
+ #[test]
+ fn 
test_aggregate_heaviest_fork_send_gossip_early() {
+ let ledger_path = get_tmp_ledger_path_auto_delete!();
+ let test_state = wen_restart_test_init(&ledger_path);
+ let heaviest_fork_slot = test_state.last_voted_fork_slots[0] + 3;
+ let heaviest_fork_bankhash = Hash::new_unique();
+
+ let mut cursor = solana_gossip::crds::Cursor::default();
+ // Clear the heaviest fork queue so we can be sure a new HeaviestFork is sent out later.
+ let _ = test_state
+ .cluster_info
+ .get_restart_heaviest_fork(&mut cursor);
+
+ let exit = Arc::new(AtomicBool::new(false));
+ let thread = start_aggregate_heaviest_fork_thread(
+ &test_state,
+ heaviest_fork_slot,
+ heaviest_fork_bankhash,
+ exit.clone(),
+ Some(WenRestartError::Exiting),
+ );
+ // Simulate everyone sending out their first RestartHeaviestFork message. Gossip propagation
+ // takes time, so the observed_stake is probably smaller than the actual active stake. We
+ // should still send out a HeaviestFork message showing that our active stake exceeds the
+ // supermajority.
+ let validators_to_take: usize = ((WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT
+ - NON_CONFORMING_VALIDATOR_PERCENT)
+ * TOTAL_VALIDATOR_COUNT as u64
+ / 100
+ - 1)
+ .try_into()
+ .unwrap();
+ for keypair in test_state
+ .validator_voting_keypairs
+ .iter()
+ .take(validators_to_take)
+ {
+ let node_pubkey = keypair.node_keypair.pubkey();
+ let node = ContactInfo::new_rand(&mut rand::thread_rng(), Some(node_pubkey));
+ let now = timestamp();
+ push_restart_heaviest_fork(
+ test_state.cluster_info.clone(),
+ &node,
+ heaviest_fork_slot,
+ &heaviest_fork_bankhash,
+ 100,
+ &keypair.node_keypair,
+ now,
+ );
+ }
+ let my_pubkey = test_state.cluster_info.id();
+ let mut found_myself = false;
+ let expected_active_stake = (WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT
+ - NON_CONFORMING_VALIDATOR_PERCENT)
+ * TOTAL_VALIDATOR_COUNT as u64;
+ while !found_myself {
+ sleep(Duration::from_millis(100));
+ test_state.cluster_info.flush_push_queue();
+ for gossip_record in test_state
+ .cluster_info
+ .get_restart_heaviest_fork(&mut cursor)
+ {
+ if gossip_record.from == my_pubkey
+ && gossip_record.observed_stake == expected_active_stake
+ {
+ found_myself = true;
+ break;
+ }
+ }
+ }
+ exit.store(true, Ordering::Relaxed);
+ assert!(thread.join().is_ok());
+ }
+
 #[test]
 fn test_aggregate_heaviest_fork() {
 let ledger_path = get_tmp_ledger_path_auto_delete!();
 let test_state = wen_restart_test_init(&ledger_path);
 let heaviest_fork_slot = test_state.last_voted_fork_slots[0] + 3;
 let heaviest_fork_bankhash = Hash::new_unique();
+ let expected_active_stake = (WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT
+ - NON_CONFORMING_VALIDATOR_PERCENT)
+ * TOTAL_VALIDATOR_COUNT as u64;
 let progress = wen_restart_proto::WenRestartProgress {
 state: RestartState::HeaviestFork.into(),
 my_heaviest_fork: Some(HeaviestForkRecord {
 slot: heaviest_fork_slot,
 bankhash: heaviest_fork_bankhash.to_string(),
- total_active_stake: 1500,
+ total_active_stake: expected_active_stake,
 shred_version: SHRED_VERSION as u32,
 wallclock: 0,
 }),
 ..Default::default()
 };
 let different_bankhash = Hash::new_unique();
- for keypair in test_state.validator_voting_keypairs.iter().skip(6) {
+ let validators_to_take: usize = ((WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT
+ - NON_CONFORMING_VALIDATOR_PERCENT)
+ * TOTAL_VALIDATOR_COUNT as u64
+ / 100
+ - 1)
+ .try_into()
+ .unwrap();
+ for keypair in test_state
+ .validator_voting_keypairs
+ .iter()
+ .take(validators_to_take)
+ {
+ let node_pubkey = keypair.node_keypair.pubkey();
+ let node = ContactInfo::new_rand(&mut 
rand::thread_rng(), Some(node_pubkey));
 let now = timestamp();
@@ -2314,7 +2691,7 @@
 &node,
 heaviest_fork_slot,
 &different_bankhash,
- 1500,
+ expected_active_stake,
 &keypair.node_keypair,
 now,
 );
@@ -2325,7 +2702,7 @@
 assert_eq!(
 aggregate_restart_heaviest_fork(
 &test_state.wen_restart_proto_path,
- 80,
+ WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT,
 test_state.cluster_info.clone(),
 test_state.bank_forks.clone(),
 Arc::new(AtomicBool::new(false)),
@@ -2341,7 +2718,15 @@
 ),
 );
 // If we have enough stake agreeing with us, we should be able to aggregate the heaviest fork.
- for keypair in test_state.validator_voting_keypairs.iter().skip(6) {
+ let validators_to_take: usize =
+ (WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT * TOTAL_VALIDATOR_COUNT as u64 / 100 - 1)
+ .try_into()
+ .unwrap();
+ for keypair in test_state
+ .validator_voting_keypairs
+ .iter()
+ .take(validators_to_take)
+ {
 let node_pubkey = keypair.node_keypair.pubkey();
 let node = ContactInfo::new_rand(&mut rand::thread_rng(), Some(node_pubkey));
 let now = timestamp();
@@ -2350,14 +2735,14 @@
 &node,
 heaviest_fork_slot,
 &heaviest_fork_bankhash,
- 1500,
+ expected_active_stake,
 &keypair.node_keypair,
 now,
 );
 }
 assert!(aggregate_restart_heaviest_fork(
 &test_state.wen_restart_proto_path,
- 80,
+ WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT,
 test_state.cluster_info.clone(),
 test_state.bank_forks.clone(),
 Arc::new(AtomicBool::new(false)),
diff --git a/zk-keygen/Cargo.toml b/zk-keygen/Cargo.toml
index 0a1602045e6b01..cdda986d0c0153 100644
--- a/zk-keygen/Cargo.toml
+++ b/zk-keygen/Cargo.toml
@@ -19,9 +19,7 @@ edition = { workspace = true }
 bs58 = { workspace = true }
 clap = { version = "3.1.5", features = ["cargo", "derive"] }
 dirs-next = { workspace = true }
-num_cpus = { workspace = true }
 solana-clap-v3-utils = { workspace = true }
-solana-cli-config = { workspace = true }
 solana-remote-wallet = { workspace = true, features = ["default"] }
 solana-sdk = { workspace = true }
 solana-version = { workspace = true }
diff --git a/zk-sdk/src/zk_elgamal_proof_program/proof_data/mod.rs b/zk-sdk/src/zk_elgamal_proof_program/proof_data/mod.rs
index 7bce5f5f66a1f9..3d061bdd36e848 100644
--- a/zk-sdk/src/zk_elgamal_proof_program/proof_data/mod.rs
+++ b/zk-sdk/src/zk_elgamal_proof_program/proof_data/mod.rs
@@ -57,8 +57,8 @@ pub enum ProofType {
 BatchedRangeProofU128,
 BatchedRangeProofU256,
 GroupedCiphertext2HandlesValidity,
- GroupedCiphertext3HandlesValidity,
 BatchedGroupedCiphertext2HandlesValidity,
+ GroupedCiphertext3HandlesValidity,
 BatchedGroupedCiphertext3HandlesValidity,
 }
diff --git a/zk-token-sdk/src/encryption/elgamal.rs b/zk-token-sdk/src/encryption/elgamal.rs
index 7f0a48820a6f35..130aacef669545 100644
--- a/zk-token-sdk/src/encryption/elgamal.rs
+++ b/zk-token-sdk/src/encryption/elgamal.rs
@@ -221,7 +221,7 @@ impl ElGamalKeypair {
 &self.secret
 }
- #[deprecated(note = "please use `into()` instead")]
+ #[deprecated(since = "2.0.0", note = "please use `into()` instead")]
 #[allow(deprecated)]
 pub fn to_bytes(&self) -> [u8; ELGAMAL_KEYPAIR_LEN] {
 let mut bytes = [0u8; ELGAMAL_KEYPAIR_LEN];
@@ -230,7 +230,7 @@ impl ElGamalKeypair {
 bytes
 }
- #[deprecated(note = "please use `try_from()` instead")]
+ #[deprecated(since = "2.0.0", note = "please use `try_from()` instead")]
 #[allow(deprecated)]
 pub fn from_bytes(bytes: &[u8]) -> Option<Self> {
 if bytes.len() != ELGAMAL_KEYPAIR_LEN {
 return None;
 }
@@ -367,12 +367,12 @@ impl ElGamalPubkey {
 &self.0
 }
- #[deprecated(note = "please use `into()` instead")]
+ #[deprecated(since = 
"2.0.0", note = "please use `into()` instead")] pub fn to_bytes(&self) -> [u8; ELGAMAL_PUBKEY_LEN] { self.0.compress().to_bytes() } - #[deprecated(note = "please use `try_from()` instead")] + #[deprecated(since = "2.0.0", note = "please use `try_from()` instead")] pub fn from_bytes(bytes: &[u8]) -> Option { if bytes.len() != ELGAMAL_PUBKEY_LEN { return None; @@ -544,12 +544,12 @@ impl ElGamalSecretKey { self.0.as_bytes() } - #[deprecated(note = "please use `into()` instead")] + #[deprecated(since = "2.0.0", note = "please use `into()` instead")] pub fn to_bytes(&self) -> [u8; ELGAMAL_SECRET_KEY_LEN] { self.0.to_bytes() } - #[deprecated(note = "please use `try_from()` instead")] + #[deprecated(since = "2.0.0", note = "please use `try_from()` instead")] pub fn from_bytes(bytes: &[u8]) -> Option { match bytes.try_into() { Ok(bytes) => Scalar::from_canonical_bytes(bytes).map(ElGamalSecretKey),