diff --git a/.clang-tidy b/.clang-tidy index 86d494a10..9cb779fe1 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -8,6 +8,7 @@ Checks: '-*, bugprone-chained-comparison, bugprone-compare-pointer-to-member-virtual-function, bugprone-copy-constructor-init, + bugprone-crtp-constructor-accessibility, bugprone-dangling-handle, bugprone-dynamic-static-initializers, bugprone-empty-catch, @@ -33,9 +34,11 @@ Checks: '-*, bugprone-non-zero-enum-to-bool-conversion, bugprone-optional-value-conversion, bugprone-parent-virtual-call, + bugprone-pointer-arithmetic-on-polymorphic-object, bugprone-posix-return, bugprone-redundant-branch-condition, bugprone-reserved-identifier, + bugprone-return-const-ref-from-parameter, bugprone-shared-ptr-array-mismatch, bugprone-signal-handler, bugprone-signed-char-misuse, @@ -55,6 +58,7 @@ Checks: '-*, bugprone-suspicious-realloc-usage, bugprone-suspicious-semicolon, bugprone-suspicious-string-compare, + bugprone-suspicious-stringview-data-usage, bugprone-swapped-arguments, bugprone-switch-missing-default-case, bugprone-terminating-continue, @@ -91,16 +95,19 @@ Checks: '-*, misc-throw-by-value-catch-by-reference, misc-unused-alias-decls, misc-unused-using-decls, + misc-use-internal-linkage, modernize-concat-nested-namespaces, modernize-deprecated-headers, modernize-make-shared, modernize-make-unique, modernize-pass-by-value, modernize-type-traits, + modernize-use-designated-initializers, modernize-use-emplace, modernize-use-equals-default, modernize-use-equals-delete, modernize-use-override, + modernize-use-ranges, modernize-use-starts-ends-with, modernize-use-std-numbers, modernize-use-using, @@ -121,9 +128,11 @@ Checks: '-*, readability-convert-member-functions-to-static, readability-duplicate-include, readability-else-after-return, + readability-enum-initial-value, readability-implicit-bool-conversion, readability-inconsistent-declaration-parameter-name, readability-make-member-function-const, + readability-math-missing-parentheses, readability-misleading-indentation, readability-non-const-parameter, readability-redundant-casting, @@ -135,7 +144,8 @@ Checks: '-*, readability-simplify-boolean-expr, readability-static-accessed-through-instance, readability-static-definition-in-anonymous-namespace, - readability-suspicious-call-argument + readability-suspicious-call-argument, + readability-use-std-min-max ' CheckOptions: diff --git a/benchmarks/util/async/ExecutionContextBenchmarks.cpp b/benchmarks/util/async/ExecutionContextBenchmarks.cpp index ce78751d2..47158a09d 100644 --- a/benchmarks/util/async/ExecutionContextBenchmarks.cpp +++ b/benchmarks/util/async/ExecutionContextBenchmarks.cpp @@ -208,7 +208,7 @@ benchmarkThreads(benchmark::State& state) } template -void +static void benchmarkExecutionContextBatched(benchmark::State& state) { auto data = generateData(); @@ -219,7 +219,7 @@ benchmarkExecutionContextBatched(benchmark::State& state) } template -void +static void benchmarkAnyExecutionContextBatched(benchmark::State& state) { auto data = generateData(); diff --git a/src/data/BackendInterface.cpp b/src/data/BackendInterface.cpp index 2406e5ea0..3c43f2bd3 100644 --- a/src/data/BackendInterface.cpp +++ b/src/data/BackendInterface.cpp @@ -176,9 +176,9 @@ BackendInterface::fetchSuccessorObject( if (succ) { auto obj = fetchLedgerObject(*succ, ledgerSequence, yield); if (!obj) - return {{*succ, {}}}; + return {{.key = *succ, .blob = {}}}; - return {{*succ, *obj}}; + return {{.key = *succ, .blob = *obj}}; } return {}; } @@ -283,7 +283,7 @@ 
BackendInterface::updateRange(uint32_t newMax) ); if (!range) { - range = {newMax, newMax}; + range = {.minSequence = newMax, .maxSequence = newMax}; } else { range->maxSequence = newMax; } @@ -299,7 +299,7 @@ BackendInterface::setRange(uint32_t min, uint32_t max, bool force) ASSERT(not range.has_value(), "Range was already set"); } - range = {min, max}; + range = {.minSequence = min, .maxSequence = max}; } LedgerPage diff --git a/src/data/CassandraBackend.hpp b/src/data/CassandraBackend.hpp index 0c9591616..fd304aef5 100644 --- a/src/data/CassandraBackend.hpp +++ b/src/data/CassandraBackend.hpp @@ -128,7 +128,7 @@ class BasicCassandraBackend : public BackendInterface { { auto rng = fetchLedgerRange(); if (!rng) - return {{}, {}}; + return {.txns = {}, .cursor = {}}; Statement const statement = [this, forward, &account]() { if (forward) @@ -399,7 +399,7 @@ class BasicCassandraBackend : public BackendInterface { { auto rng = fetchLedgerRange(); if (!rng) - return {{}, {}}; + return {.txns = {}, .cursor = {}}; Statement const statement = [this, forward, &tokenID]() { if (forward) diff --git a/src/data/LedgerCache.cpp b/src/data/LedgerCache.cpp index ef3bc821a..11f0b8162 100644 --- a/src/data/LedgerCache.cpp +++ b/src/data/LedgerCache.cpp @@ -75,7 +75,7 @@ LedgerCache::update(std::vector const& objs, uint32_t seq, bool is auto& e = map_[obj.key]; if (seq > e.seq) { - e = {seq, obj.blob}; + e = {.seq = seq, .blob = obj.blob}; } } else { map_.erase(obj.key); @@ -101,7 +101,7 @@ LedgerCache::getSuccessor(ripple::uint256 const& key, uint32_t seq) const if (e == map_.end()) return {}; ++successorHitCounter_.get(); - return {{e->first, e->second.blob}}; + return {{.key = e->first, .blob = e->second.blob}}; } std::optional @@ -117,7 +117,7 @@ LedgerCache::getPredecessor(ripple::uint256 const& key, uint32_t seq) const if (e == map_.begin()) return {}; --e; - return {{e->first, e->second.blob}}; + return {{.key = e->first, .blob = e->second.blob}}; } std::optional diff --git a/src/data/cassandra/Handle.cpp b/src/data/cassandra/Handle.cpp index e4f406f53..d01e055f3 100644 --- a/src/data/cassandra/Handle.cpp +++ b/src/data/cassandra/Handle.cpp @@ -60,7 +60,7 @@ Handle::connect() const Handle::FutureType Handle::asyncConnect(std::string_view keyspace) const { - return cass_session_connect_keyspace(session_, cluster_, keyspace.data()); + return cass_session_connect_keyspace_n(session_, cluster_, keyspace.data(), keyspace.size()); } Handle::MaybeErrorType @@ -155,7 +155,7 @@ Handle::asyncExecute(std::vector const& statements, std::function Handle::PreparedStatementType Handle::prepare(std::string_view query) const { - Handle::FutureType const future = cass_session_prepare(session_, query.data()); + Handle::FutureType const future = cass_session_prepare_n(session_, query.data(), query.size()); auto const rc = future.await(); if (rc) return cass_future_get_prepared(future); diff --git a/src/data/cassandra/SettingsProvider.cpp b/src/data/cassandra/SettingsProvider.cpp index e29c31848..38cca8cdc 100644 --- a/src/data/cassandra/SettingsProvider.cpp +++ b/src/data/cassandra/SettingsProvider.cpp @@ -43,7 +43,7 @@ namespace data::cassandra { namespace impl { -inline Settings::ContactPoints +inline static Settings::ContactPoints tag_invoke(boost::json::value_to_tag, boost::json::value const& value) { if (not value.is_object()) { @@ -59,7 +59,7 @@ tag_invoke(boost::json::value_to_tag, boost::json::valu return out; } -inline Settings::SecureConnectionBundle +inline static Settings::SecureConnectionBundle 
tag_invoke(boost::json::value_to_tag, boost::json::value const& value) { if (not value.is_string()) diff --git a/src/data/cassandra/impl/Statement.hpp b/src/data/cassandra/impl/Statement.hpp index 8aa5ae253..b3a223cde 100644 --- a/src/data/cassandra/impl/Statement.hpp +++ b/src/data/cassandra/impl/Statement.hpp @@ -54,7 +54,7 @@ class Statement : public ManagedObject { */ template explicit Statement(std::string_view query, Args&&... args) - : ManagedObject{cass_statement_new(query.data(), sizeof...(args)), deleter} + : ManagedObject{cass_statement_new_n(query.data(), query.size(), sizeof...(args)), deleter} { cass_statement_set_consistency(*this, CASS_CONSISTENCY_QUORUM); cass_statement_set_is_idempotent(*this, cass_true); diff --git a/src/etl/MPTHelpers.cpp b/src/etl/MPTHelpers.cpp index ba17d2b18..9b3912bca 100644 --- a/src/etl/MPTHelpers.cpp +++ b/src/etl/MPTHelpers.cpp @@ -27,6 +27,7 @@ #include #include #include +#include #include #include @@ -41,7 +42,8 @@ namespace etl { * @param txMeta Transaction metadata * @return MPT and holder account pair */ -static std::optional +std::optional +// NOLINTNEXTLINE(misc-use-internal-linkage) getMPTokenAuthorize(ripple::TxMeta const& txMeta) { for (ripple::STObject const& node : txMeta.getNodes()) { @@ -50,13 +52,16 @@ getMPTokenAuthorize(ripple::TxMeta const& txMeta) if (node.getFName() == ripple::sfCreatedNode) { auto const& newMPT = node.peekAtField(ripple::sfNewFields).downcast(); - return MPTHolderData{newMPT[ripple::sfMPTokenIssuanceID], newMPT.getAccountID(ripple::sfAccount)}; + return MPTHolderData{ + .mptID = newMPT[ripple::sfMPTokenIssuanceID], .holder = newMPT.getAccountID(ripple::sfAccount) + }; } } return {}; } std::optional +// NOLINTNEXTLINE(misc-use-internal-linkage) getMPTHolderFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) { if (txMeta.getResultTER() != ripple::tesSUCCESS || sttx.getTxnType() != ripple::TxType::ttMPTOKEN_AUTHORIZE) @@ -66,6 +71,7 @@ getMPTHolderFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) } std::optional +// NOLINTNEXTLINE(misc-use-internal-linkage) getMPTHolderFromObj(std::string const& key, std::string const& blob) { ripple::STLedgerEntry const sle = @@ -77,7 +83,7 @@ getMPTHolderFromObj(std::string const& key, std::string const& blob) auto const mptIssuanceID = sle[ripple::sfMPTokenIssuanceID]; auto const holder = sle.getAccountID(ripple::sfAccount); - return MPTHolderData{mptIssuanceID, holder}; + return MPTHolderData{.mptID = mptIssuanceID, .holder = holder}; } } // namespace etl diff --git a/src/etl/NFTHelpers.cpp b/src/etl/NFTHelpers.cpp index 9b70086cd..f58a64e2f 100644 --- a/src/etl/NFTHelpers.cpp +++ b/src/etl/NFTHelpers.cpp @@ -48,6 +48,7 @@ namespace etl { std::pair, std::optional> +// NOLINTNEXTLINE(misc-use-internal-linkage) getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) { // To find the minted token ID, we put all tokenIDs referenced in the @@ -73,9 +74,9 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) if (node.getFName() == ripple::sfCreatedNode) { ripple::STArray const& toAddNFTs = node.peekAtField(ripple::sfNewFields).downcast().getFieldArray(ripple::sfNFTokens); - std::transform( - toAddNFTs.begin(), - toAddNFTs.end(), + std::ranges::transform( + toAddNFTs, + std::back_inserter(finalIDs), [](ripple::STObject const& nft) { return nft.getFieldH256(ripple::sfNFTokenID); } ); @@ -98,18 +99,18 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) continue; ripple::STArray const& 
toAddNFTs = previousFields.getFieldArray(ripple::sfNFTokens); - std::transform( - toAddNFTs.begin(), - toAddNFTs.end(), + std::ranges::transform( + toAddNFTs, + std::back_inserter(prevIDs), [](ripple::STObject const& nft) { return nft.getFieldH256(ripple::sfNFTokenID); } ); ripple::STArray const& toAddFinalNFTs = node.peekAtField(ripple::sfFinalFields).downcast().getFieldArray(ripple::sfNFTokens); - std::transform( - toAddFinalNFTs.begin(), - toAddFinalNFTs.end(), + std::ranges::transform( + toAddFinalNFTs, + std::back_inserter(finalIDs), [](ripple::STObject const& nft) { return nft.getFieldH256(ripple::sfNFTokenID); } ); @@ -121,6 +122,7 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) // Find the first NFT ID that doesn't match. We're looking for an // added NFT, so the one we want will be the mismatch in finalIDs. + // NOLINTNEXTLINE(modernize-use-ranges) auto const diff = std::mismatch(finalIDs.begin(), finalIDs.end(), prevIDs.begin(), prevIDs.end()); // There should always be a difference so the returned finalIDs @@ -137,6 +139,7 @@ getNFTokenMintData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) } std::pair, std::optional> +// NOLINTNEXTLINE(misc-use-internal-linkage) getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) { ripple::uint256 const tokenID = sttx.getFieldH256(ripple::sfNFTokenID); @@ -193,6 +196,7 @@ getNFTokenBurnData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) } std::pair, std::optional> +// NOLINTNEXTLINE(misc-use-internal-linkage) getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) { // If we have the buy offer from this tx, we can determine the owner @@ -261,7 +265,7 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx .getFieldArray(ripple::sfNFTokens); }(); - auto const nft = std::find_if(nfts.begin(), nfts.end(), [&tokenID](ripple::STObject const& candidate) { + auto const nft = std::ranges::find_if(nfts, [&tokenID](ripple::STObject const& candidate) { return candidate.getFieldH256(ripple::sfNFTokenID) == tokenID; }); if (nft != nfts.end()) { @@ -282,6 +286,7 @@ getNFTokenAcceptOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx // transaction using this feature. This transaction also never returns an // NFTsData because it does not change the state of an NFT itself. std::pair, std::optional> +// NOLINTNEXTLINE(misc-use-internal-linkage) getNFTokenCancelOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) { std::vector txs; @@ -298,22 +303,24 @@ getNFTokenCancelOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx std::ranges::sort(txs, [](NFTTransactionsData const& a, NFTTransactionsData const& b) { return a.tokenID < b.tokenID; }); - auto last = std::unique(txs.begin(), txs.end(), [](NFTTransactionsData const& a, NFTTransactionsData const& b) { + auto [last, end] = std::ranges::unique(txs, [](NFTTransactionsData const& a, NFTTransactionsData const& b) { return a.tokenID == b.tokenID; }); - txs.erase(last, txs.end()); + txs.erase(last, end); return {txs, {}}; } // This transaction never returns an NFTokensData because it does not // change the state of an NFT itself. 
std::pair, std::optional> +// NOLINTNEXTLINE(misc-use-internal-linkage) getNFTokenCreateOfferData(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) { return {{NFTTransactionsData(sttx.getFieldH256(ripple::sfNFTokenID), txMeta, sttx.getTransactionID())}, {}}; } std::pair, std::optional> +// NOLINTNEXTLINE(misc-use-internal-linkage) getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) { if (txMeta.getResultTER() != ripple::tesSUCCESS) @@ -341,6 +348,7 @@ getNFTDataFromTx(ripple::TxMeta const& txMeta, ripple::STTx const& sttx) } std::vector +// NOLINTNEXTLINE(misc-use-internal-linkage) getNFTDataFromObj(std::uint32_t const seq, std::string const& key, std::string const& blob) { std::vector nfts; @@ -358,6 +366,7 @@ getNFTDataFromObj(std::uint32_t const seq, std::string const& key, std::string c } std::vector +// NOLINTNEXTLINE(misc-use-internal-linkage) getUniqueNFTsDatas(std::vector const& nfts) { std::vector results = nfts; @@ -366,10 +375,9 @@ getUniqueNFTsDatas(std::vector const& nfts) return a.tokenID == b.tokenID ? a.transactionIndex > b.transactionIndex : a.tokenID > b.tokenID; }); - auto const last = std::unique(results.begin(), results.end(), [](NFTsData const& a, NFTsData const& b) { - return a.tokenID == b.tokenID; - }); - results.erase(last, results.end()); + auto const [last, end] = + std::ranges::unique(results, [](NFTsData const& a, NFTsData const& b) { return a.tokenID == b.tokenID; }); + results.erase(last, end); return results; } diff --git a/src/etl/impl/CursorFromFixDiffNumProvider.hpp b/src/etl/impl/CursorFromFixDiffNumProvider.hpp index 8f410a683..e79246204 100644 --- a/src/etl/impl/CursorFromFixDiffNumProvider.hpp +++ b/src/etl/impl/CursorFromFixDiffNumProvider.hpp @@ -70,12 +70,9 @@ class CursorFromFixDiffNumProvider : public BaseCursorProvider { return a.key < b.key or (a.key == b.key and std::size(a.blob) < std::size(b.blob)); }); - diffs.erase( - std::unique( - std::begin(diffs), std::end(diffs), [](auto const& a, auto const& b) { return a.key == b.key; } - ), - std::end(diffs) - ); + auto const [removalCursor, last] = + rg::unique(diffs, [](auto const& a, auto const& b) { return a.key == b.key; }); + diffs.erase(removalCursor, last); std::vector cursors{data::firstKey}; rg::copy( diff --git a/src/etl/impl/SubscriptionSource.cpp b/src/etl/impl/SubscriptionSource.cpp index a6494c798..3d177e513 100644 --- a/src/etl/impl/SubscriptionSource.cpp +++ b/src/etl/impl/SubscriptionSource.cpp @@ -353,7 +353,7 @@ SubscriptionSource::setValidatedRange(std::string range) pairs.emplace_back(min, max); } } - std::sort(pairs.begin(), pairs.end(), [](auto left, auto right) { return left.first < right.first; }); + std::ranges::sort(pairs, [](auto left, auto right) { return left.first < right.first; }); auto dataLock = validatedLedgersData_.lock(); dataLock->validatedLedgers = std::move(pairs); diff --git a/src/etl/impl/Transformer.hpp b/src/etl/impl/Transformer.hpp index b018caddd..a9c31cad5 100644 --- a/src/etl/impl/Transformer.hpp +++ b/src/etl/impl/Transformer.hpp @@ -308,11 +308,11 @@ class Transformer { auto lb = backend_->cache().getPredecessor(obj.key, lgrInfo.seq); if (!lb) - lb = {data::firstKey, {}}; + lb = {.key = data::firstKey, .blob = {}}; auto ub = backend_->cache().getSuccessor(obj.key, lgrInfo.seq); if (!ub) - ub = {data::lastKey, {}}; + ub = {.key = data::lastKey, .blob = {}}; if (obj.blob.empty()) { LOG(log_.debug()) << "writing successor for deleted object " << ripple::strHex(obj.key) << " - " diff --git a/src/rpc/CredentialHelpers.cpp 
b/src/rpc/CredentialHelpers.cpp index bbea85ca7..306b97096 100644 --- a/src/rpc/CredentialHelpers.cpp +++ b/src/rpc/CredentialHelpers.cpp @@ -53,6 +53,7 @@ namespace rpc::credentials { bool +// NOLINTNEXTLINE(misc-use-internal-linkage) checkExpired(ripple::SLE const& sleCred, ripple::LedgerHeader const& ledger) { if (sleCred.isFieldPresent(ripple::sfExpiration)) { @@ -64,6 +65,7 @@ checkExpired(ripple::SLE const& sleCred, ripple::LedgerHeader const& ledger) } std::set> +// NOLINTNEXTLINE(misc-use-internal-linkage) createAuthCredentials(ripple::STArray const& in) { std::set> out; @@ -74,6 +76,7 @@ createAuthCredentials(ripple::STArray const& in) } ripple::STArray +// NOLINTNEXTLINE(misc-use-internal-linkage) parseAuthorizeCredentials(boost::json::array const& jv) { ripple::STArray arr; @@ -108,6 +111,7 @@ parseAuthorizeCredentials(boost::json::array const& jv) } std::expected +// NOLINTNEXTLINE(misc-use-internal-linkage) fetchCredentialArray( std::optional const& credID, ripple::AccountID const& srcAcc, diff --git a/src/rpc/Errors.cpp b/src/rpc/Errors.cpp index b63d77b43..f7585903c 100644 --- a/src/rpc/Errors.cpp +++ b/src/rpc/Errors.cpp @@ -55,7 +55,7 @@ getWarningInfo(WarningCode code) }; auto matchByCode = [code](auto const& info) { return info.code == code; }; - if (auto it = find_if(begin(infos), end(infos), matchByCode); it != end(infos)) + if (auto it = std::ranges::find_if(infos, matchByCode); it != end(infos)) return *it; throw(out_of_range("Invalid WarningCode")); @@ -75,32 +75,43 @@ ClioErrorInfo const& getErrorInfo(ClioError code) { constexpr static ClioErrorInfo infos[]{ - {ClioError::rpcMALFORMED_CURRENCY, "malformedCurrency", "Malformed currency."}, - {ClioError::rpcMALFORMED_REQUEST, "malformedRequest", "Malformed request."}, - {ClioError::rpcMALFORMED_OWNER, "malformedOwner", "Malformed owner."}, - {ClioError::rpcMALFORMED_ADDRESS, "malformedAddress", "Malformed address."}, - {ClioError::rpcINVALID_HOT_WALLET, "invalidHotWallet", "Invalid hot wallet."}, - {ClioError::rpcUNKNOWN_OPTION, "unknownOption", "Unknown option."}, - {ClioError::rpcFIELD_NOT_FOUND_TRANSACTION, "fieldNotFoundTransaction", "Missing field."}, - {ClioError::rpcMALFORMED_ORACLE_DOCUMENT_ID, "malformedDocumentID", "Malformed oracle_document_id."}, - {ClioError::rpcMALFORMED_AUTHORIZED_CREDENTIALS, - "malformedAuthorizedCredentials", - "Malformed authorized credentials."}, + {.code = ClioError::rpcMALFORMED_CURRENCY, .error = "malformedCurrency", .message = "Malformed currency."}, + {.code = ClioError::rpcMALFORMED_REQUEST, .error = "malformedRequest", .message = "Malformed request."}, + {.code = ClioError::rpcMALFORMED_OWNER, .error = "malformedOwner", .message = "Malformed owner."}, + {.code = ClioError::rpcMALFORMED_ADDRESS, .error = "malformedAddress", .message = "Malformed address."}, + {.code = ClioError::rpcINVALID_HOT_WALLET, .error = "invalidHotWallet", .message = "Invalid hot wallet."}, + {.code = ClioError::rpcUNKNOWN_OPTION, .error = "unknownOption", .message = "Unknown option."}, + {.code = ClioError::rpcFIELD_NOT_FOUND_TRANSACTION, + .error = "fieldNotFoundTransaction", + .message = "Missing field."}, + {.code = ClioError::rpcMALFORMED_ORACLE_DOCUMENT_ID, + .error = "malformedDocumentID", + .message = "Malformed oracle_document_id."}, + {.code = ClioError::rpcMALFORMED_AUTHORIZED_CREDENTIALS, + .error = "malformedAuthorizedCredentials", + .message = "Malformed authorized credentials."}, // special system errors - {ClioError::rpcINVALID_API_VERSION, JS(invalid_API_version), "Invalid API 
version."}, - {ClioError::rpcCOMMAND_IS_MISSING, JS(missingCommand), "Method is not specified or is not a string."}, - {ClioError::rpcCOMMAND_NOT_STRING, "commandNotString", "Method is not a string."}, - {ClioError::rpcCOMMAND_IS_EMPTY, "emptyCommand", "Method is an empty string."}, - {ClioError::rpcPARAMS_UNPARSEABLE, "paramsUnparseable", "Params must be an array holding exactly one object."}, + {.code = ClioError::rpcINVALID_API_VERSION, .error = JS(invalid_API_version), .message = "Invalid API version." + }, + {.code = ClioError::rpcCOMMAND_IS_MISSING, + .error = JS(missingCommand), + .message = "Method is not specified or is not a string."}, + {.code = ClioError::rpcCOMMAND_NOT_STRING, .error = "commandNotString", .message = "Method is not a string."}, + {.code = ClioError::rpcCOMMAND_IS_EMPTY, .error = "emptyCommand", .message = "Method is an empty string."}, + {.code = ClioError::rpcPARAMS_UNPARSEABLE, + .error = "paramsUnparseable", + .message = "Params must be an array holding exactly one object."}, // etl related errors - {ClioError::etlCONNECTION_ERROR, "connectionError", "Couldn't connect to rippled."}, - {ClioError::etlREQUEST_ERROR, "requestError", "Error sending request to rippled."}, - {ClioError::etlREQUEST_TIMEOUT, "timeout", "Request to rippled timed out."}, - {ClioError::etlINVALID_RESPONSE, "invalidResponse", "Rippled returned an invalid response."} + {.code = ClioError::etlCONNECTION_ERROR, .error = "connectionError", .message = "Couldn't connect to rippled."}, + {.code = ClioError::etlREQUEST_ERROR, .error = "requestError", .message = "Error sending request to rippled."}, + {.code = ClioError::etlREQUEST_TIMEOUT, .error = "timeout", .message = "Request to rippled timed out."}, + {.code = ClioError::etlINVALID_RESPONSE, + .error = "invalidResponse", + .message = "Rippled returned an invalid response."} }; auto matchByCode = [code](auto const& info) { return info.code == code; }; - if (auto it = find_if(begin(infos), end(infos), matchByCode); it != end(infos)) + if (auto it = std::ranges::find_if(infos, matchByCode); it != end(infos)) return *it; throw(out_of_range("Invalid error code")); diff --git a/src/rpc/RPCEngine.hpp b/src/rpc/RPCEngine.hpp index 127878a8c..2194b22f9 100644 --- a/src/rpc/RPCEngine.hpp +++ b/src/rpc/RPCEngine.hpp @@ -174,7 +174,13 @@ class RPCEngine { try { LOG(perfLog_.debug()) << ctx.tag() << " start executing rpc `" << ctx.method << '`'; - auto const context = Context{ctx.yield, ctx.session, ctx.isAdmin, ctx.clientIp, ctx.apiVersion}; + auto const context = Context{ + .yield = ctx.yield, + .session = ctx.session, + .isAdmin = ctx.isAdmin, + .clientIp = ctx.clientIp, + .apiVersion = ctx.apiVersion + }; auto v = (*method).process(ctx.params, context); LOG(perfLog_.debug()) << ctx.tag() << " finish executing rpc `" << ctx.method << '`'; diff --git a/src/rpc/RPCHelpers.cpp b/src/rpc/RPCHelpers.cpp index 1db828f12..99d3bd610 100644 --- a/src/rpc/RPCHelpers.cpp +++ b/src/rpc/RPCHelpers.cpp @@ -113,7 +113,7 @@ parseAccountCursor(std::optional jsonCursor) std::uint64_t startHint = 0; if (!jsonCursor) - return AccountCursor({cursorIndex, startHint}); + return AccountCursor({.index = cursorIndex, .hint = startHint}); // Cursor is composed of a comma separated index and start hint. The // former will be read as hex, and the latter using boost lexical cast. 
@@ -134,7 +134,7 @@ parseAccountCursor(std::optional jsonCursor) return {}; } - return AccountCursor({cursorIndex, startHint}); + return AccountCursor({.index = cursorIndex, .hint = startHint}); } std::optional @@ -220,8 +220,8 @@ deserializeTxPlusMeta(data::TransactionAndMetadata const& blobs) } catch (std::exception const& e) { std::stringstream txn; std::stringstream meta; - std::copy(blobs.transaction.begin(), blobs.transaction.end(), std::ostream_iterator(txn)); - std::copy(blobs.metadata.begin(), blobs.metadata.end(), std::ostream_iterator(meta)); + std::ranges::copy(blobs.transaction, std::ostream_iterator(txn)); + std::ranges::copy(blobs.metadata, std::ostream_iterator(meta)); LOG(gLog.error()) << "Failed to deserialize transaction. txn = " << txn.str() << " - meta = " << meta.str() << " txn length = " << std::to_string(blobs.transaction.size()) << " meta length = " << std::to_string(blobs.metadata.size()); @@ -609,7 +609,7 @@ traverseNFTObjects( if (!page) { if (nextPage == beast::zero) { // no nft objects in lastNFTPage - return AccountCursor{beast::zero, 0}; + return AccountCursor{.index = beast::zero, .hint = 0}; } // marker is in the right range, but still invalid return Status{RippledError::rpcINVALID_PARAMS, "Invalid marker."}; @@ -626,13 +626,13 @@ traverseNFTObjects( count++; if (count == limit or nftPreviousPage == beast::zero) - return AccountCursor{nftPreviousPage, count}; + return AccountCursor{.index = nftPreviousPage, .hint = count}; page = backend.fetchLedgerObject(nftPreviousPage, sequence, yield); pageSLE = ripple::SLE{ripple::SerialIter{page->data(), page->size()}, nftPreviousPage}; } - return AccountCursor{beast::zero, 0}; + return AccountCursor{.index = beast::zero, .hint = 0}; } std::variant @@ -671,7 +671,7 @@ traverseOwnedNodes( // if limit reach , we return the next page and max as marker if (nftsCount >= limit) - return AccountCursor{nextNFTPage, std::numeric_limits::max()}; + return AccountCursor{.index = nextNFTPage, .hint = std::numeric_limits::max()}; // adjust limit ,continue traversing owned nodes limit -= nftsCount; @@ -700,7 +700,7 @@ traverseOwnedNodes( std::function atOwnedNode ) { - auto cursor = AccountCursor({beast::zero, 0}); + auto cursor = AccountCursor({.index = beast::zero, .hint = 0}); auto const rootIndex = owner; auto currentIndex = rootIndex; @@ -728,7 +728,7 @@ traverseOwnedNodes( ripple::SLE const hintDirSle{hintDirIt, hintIndex.key}; if (auto const& indexes = hintDirSle.getFieldV256(ripple::sfIndexes); - std::find(std::begin(indexes), std::end(indexes), hexMarker) == std::end(indexes)) { + std::ranges::find(indexes, hexMarker) == std::end(indexes)) { // the index specified by marker is not in the page specified by marker return Status(ripple::rpcINVALID_PARAMS, "Invalid marker."); } @@ -758,7 +758,7 @@ traverseOwnedNodes( } if (limit == 0) { - cursor = AccountCursor({keys.back(), currentPage}); + cursor = AccountCursor({.index = keys.back(), .hint = currentPage}); break; } // the next page @@ -787,7 +787,7 @@ traverseOwnedNodes( } if (limit == 0) { - cursor = AccountCursor({keys.back(), currentPage}); + cursor = AccountCursor({.index = keys.back(), .hint = currentPage}); break; } @@ -819,7 +819,7 @@ traverseOwnedNodes( if (limit == 0) return cursor; - return AccountCursor({beast::zero, 0}); + return AccountCursor({.index = beast::zero, .hint = 0}); } std::shared_ptr diff --git a/src/rpc/common/MetaProcessors.cpp b/src/rpc/common/MetaProcessors.cpp index 8cf28c799..37efc6a48 100644 --- a/src/rpc/common/MetaProcessors.cpp +++ 
b/src/rpc/common/MetaProcessors.cpp @@ -31,10 +31,10 @@ namespace rpc::meta { [[nodiscard]] MaybeError Section::verify(boost::json::value& value, std::string_view key) const { - if (not value.is_object() or not value.as_object().contains(key.data())) + if (not value.is_object() or not value.as_object().contains(key)) return {}; // ignore. field does not exist, let 'required' fail instead - auto& res = value.as_object().at(key.data()); + auto& res = value.as_object().at(key); // if it is not a json object, let other validators fail if (!res.is_object()) @@ -51,13 +51,13 @@ Section::verify(boost::json::value& value, std::string_view key) const [[nodiscard]] MaybeError ValidateArrayAt::verify(boost::json::value& value, std::string_view key) const { - if (not value.is_object() or not value.as_object().contains(key.data())) + if (not value.is_object() or not value.as_object().contains(key)) return {}; // ignore. field does not exist, let 'required' fail instead - if (not value.as_object().at(key.data()).is_array()) + if (not value.as_object().at(key).is_array()) return Error{Status{RippledError::rpcINVALID_PARAMS}}; - auto& arr = value.as_object().at(key.data()).as_array(); + auto& arr = value.as_object().at(key).as_array(); if (idx_ >= arr.size()) return Error{Status{RippledError::rpcINVALID_PARAMS}}; diff --git a/src/rpc/common/Modifiers.hpp b/src/rpc/common/Modifiers.hpp index 7c4819d1e..35a6b0d3f 100644 --- a/src/rpc/common/Modifiers.hpp +++ b/src/rpc/common/Modifiers.hpp @@ -27,6 +27,7 @@ #include #include +#include #include #include #include @@ -65,12 +66,12 @@ class Clamp final { { using boost::json::value_to; - if (not value.is_object() or not value.as_object().contains(key.data())) + if (not value.is_object() or not value.as_object().contains(key)) return {}; // ignore. field does not exist, let 'required' fail instead // clamp to min_ and max_ - auto const oldValue = value_to(value.as_object().at(key.data())); - value.as_object()[key.data()] = std::clamp(oldValue, min_, max_); + auto const oldValue = value_to(value.as_object().at(key)); + value.as_object()[key] = std::clamp(oldValue, min_, max_); return {}; } @@ -92,14 +93,13 @@ struct ToLower final { [[nodiscard]] static MaybeError modify(boost::json::value& value, std::string_view key) { - if (not value.is_object() or not value.as_object().contains(key.data())) + if (not value.is_object() or not value.as_object().contains(key)) return {}; // ignore. field does not exist, let 'required' fail instead - if (not value.as_object().at(key.data()).is_string()) + if (not value.as_object().at(key).is_string()) return {}; // ignore for non-string types - value.as_object()[key.data()] = - util::toLower(boost::json::value_to(value.as_object().at(key.data()))); + value.as_object()[key] = util::toLower(boost::json::value_to(value.as_object().at(key))); return {}; } }; @@ -131,7 +131,7 @@ struct ToNumber final { return Error{Status{RippledError::rpcINVALID_PARAMS}}; // maybe a float try { - value.as_object()[key.data()] = std::stoi(strInt); + value.as_object()[key] = std::stoi(strInt); } catch (std::exception& e) { return Error{Status{RippledError::rpcINVALID_PARAMS}}; } @@ -171,7 +171,7 @@ class CustomModifier final { if (not value.is_object() or not value.as_object().contains(key)) return {}; // ignore. 
field does not exist, let 'required' fail instead - return modifier_(value.as_object().at(key.data()), key); + return modifier_(value.as_object().at(key), key); }; }; diff --git a/src/rpc/common/Validators.hpp b/src/rpc/common/Validators.hpp index 36e499ada..6fcf37c3c 100644 --- a/src/rpc/common/Validators.hpp +++ b/src/rpc/common/Validators.hpp @@ -92,9 +92,9 @@ class NotSupported final { [[nodiscard]] MaybeError verify(boost::json::value const& value, std::string_view key) const { - if (value.is_object() and value.as_object().contains(key.data())) { + if (value.is_object() and value.as_object().contains(key)) { using boost::json::value_to; - auto const res = value_to(value.as_object().at(key.data())); + auto const res = value_to(value.as_object().at(key)); if (value_ == res) { return Error{Status{ RippledError::rpcNOT_SUPPORTED, @@ -122,7 +122,7 @@ class NotSupported<> final { [[nodiscard]] static MaybeError verify(boost::json::value const& value, std::string_view key) { - if (value.is_object() and value.as_object().contains(key.data())) + if (value.is_object() and value.as_object().contains(key)) return Error{Status{RippledError::rpcNOT_SUPPORTED, "Not supported field '" + std::string{key} + '\''}}; return {}; @@ -150,10 +150,10 @@ struct Type final { [[nodiscard]] MaybeError verify(boost::json::value const& value, std::string_view key) const { - if (not value.is_object() or not value.as_object().contains(key.data())) + if (not value.is_object() or not value.as_object().contains(key)) return {}; // ignore. If field is supposed to exist, let 'required' fail instead - auto const& res = value.as_object().at(key.data()); + auto const& res = value.as_object().at(key); auto const convertible = (checkType(res) || ...); if (not convertible) @@ -194,10 +194,10 @@ class Between final { { using boost::json::value_to; - if (not value.is_object() or not value.as_object().contains(key.data())) + if (not value.is_object() or not value.as_object().contains(key)) return {}; // ignore. field does not exist, let 'required' fail instead - auto const res = value_to(value.as_object().at(key.data())); + auto const res = value_to(value.as_object().at(key)); // TODO: may want a way to make this code more generic (e.g. use a free // function that can be overridden for this comparison) @@ -237,10 +237,10 @@ class Min final { { using boost::json::value_to; - if (not value.is_object() or not value.as_object().contains(key.data())) + if (not value.is_object() or not value.as_object().contains(key)) return {}; // ignore. field does not exist, let 'required' fail instead - auto const res = value_to(value.as_object().at(key.data())); + auto const res = value_to(value.as_object().at(key)); if (res < min_) return Error{Status{RippledError::rpcINVALID_PARAMS}}; @@ -278,10 +278,10 @@ class Max final { { using boost::json::value_to; - if (not value.is_object() or not value.as_object().contains(key.data())) + if (not value.is_object() or not value.as_object().contains(key)) return {}; // ignore. field does not exist, let 'required' fail instead - auto const res = value_to(value.as_object().at(key.data())); + auto const res = value_to(value.as_object().at(key)); if (res > max_) return Error{Status{RippledError::rpcINVALID_PARAMS}}; @@ -346,10 +346,10 @@ class EqualTo final { { using boost::json::value_to; - if (not value.is_object() or not value.as_object().contains(key.data())) + if (not value.is_object() or not value.as_object().contains(key)) return {}; // ignore. 
field does not exist, let 'required' fail instead - auto const res = value_to(value.as_object().at(key.data())); + auto const res = value_to(value.as_object().at(key)); if (res != original_) return Error{Status{RippledError::rpcINVALID_PARAMS}}; @@ -401,10 +401,10 @@ class OneOf final { { using boost::json::value_to; - if (not value.is_object() or not value.as_object().contains(key.data())) + if (not value.is_object() or not value.as_object().contains(key)) return {}; // ignore. field does not exist, let 'required' fail instead - auto const res = value_to(value.as_object().at(key.data())); + auto const res = value_to(value.as_object().at(key)); if (std::find(std::begin(options_), std::end(options_), res) == std::end(options_)) return Error{Status{RippledError::rpcINVALID_PARAMS, fmt::format("Invalid field '{}'.", key)}}; @@ -588,10 +588,10 @@ struct Hex256ItemType final { [[nodiscard]] static MaybeError verify(boost::json::value const& value, std::string_view key) { - if (not value.is_object() or not value.as_object().contains(key.data())) + if (not value.is_object() or not value.as_object().contains(key)) return {}; // ignore. If field is supposed to exist, let 'required' fail instead - auto const& res = value.as_object().at(key.data()); + auto const& res = value.as_object().at(key); // loop through each item in the array and make sure it is uint256 hex string for (auto const& elem : res.as_array()) { diff --git a/src/rpc/common/impl/HandlerProvider.cpp b/src/rpc/common/impl/HandlerProvider.cpp index a1be78015..1b905e560 100644 --- a/src/rpc/common/impl/HandlerProvider.cpp +++ b/src/rpc/common/impl/HandlerProvider.cpp @@ -78,41 +78,41 @@ ProductionHandlerProvider::ProductionHandlerProvider( Counters const& counters ) : handlerMap_{ - {"account_channels", {AccountChannelsHandler{backend}}}, - {"account_currencies", {AccountCurrenciesHandler{backend}}}, - {"account_info", {AccountInfoHandler{backend, amendmentCenter}}}, - {"account_lines", {AccountLinesHandler{backend}}}, - {"account_nfts", {AccountNFTsHandler{backend}}}, - {"account_objects", {AccountObjectsHandler{backend}}}, - {"account_offers", {AccountOffersHandler{backend}}}, - {"account_tx", {AccountTxHandler{backend}}}, - {"amm_info", {AMMInfoHandler{backend}}}, - {"book_changes", {BookChangesHandler{backend}}}, - {"book_offers", {BookOffersHandler{backend}}}, - {"deposit_authorized", {DepositAuthorizedHandler{backend}}}, - {"feature", {FeatureHandler{backend, amendmentCenter}}}, - {"gateway_balances", {GatewayBalancesHandler{backend}}}, - {"get_aggregate_price", {GetAggregatePriceHandler{backend}}}, - {"ledger", {LedgerHandler{backend}}}, - {"ledger_data", {LedgerDataHandler{backend}}}, - {"ledger_entry", {LedgerEntryHandler{backend}}}, - {"ledger_index", {LedgerIndexHandler{backend}, true}}, // clio only - {"ledger_range", {LedgerRangeHandler{backend}}}, - {"mpt_holders", {MPTHoldersHandler{backend}, true}}, // clio only - {"nfts_by_issuer", {NFTsByIssuerHandler{backend}, true}}, // clio only - {"nft_history", {NFTHistoryHandler{backend}, true}}, // clio only - {"nft_buy_offers", {NFTBuyOffersHandler{backend}}}, - {"nft_info", {NFTInfoHandler{backend}, true}}, // clio only - {"nft_sell_offers", {NFTSellOffersHandler{backend}}}, - {"noripple_check", {NoRippleCheckHandler{backend}}}, - {"ping", {PingHandler{}}}, - {"random", {RandomHandler{}}}, - {"server_info", {ServerInfoHandler{backend, subscriptionManager, balancer, etl, counters}}}, - {"transaction_entry", {TransactionEntryHandler{backend}}}, - {"tx", {TxHandler{backend, 
etl}}}, - {"subscribe", {SubscribeHandler{backend, subscriptionManager}}}, - {"unsubscribe", {UnsubscribeHandler{subscriptionManager}}}, - {"version", {VersionHandler{config}}}, + {"account_channels", {.handler = AccountChannelsHandler{backend}}}, + {"account_currencies", {.handler = AccountCurrenciesHandler{backend}}}, + {"account_info", {.handler = AccountInfoHandler{backend, amendmentCenter}}}, + {"account_lines", {.handler = AccountLinesHandler{backend}}}, + {"account_nfts", {.handler = AccountNFTsHandler{backend}}}, + {"account_objects", {.handler = AccountObjectsHandler{backend}}}, + {"account_offers", {.handler = AccountOffersHandler{backend}}}, + {"account_tx", {.handler = AccountTxHandler{backend}}}, + {"amm_info", {.handler = AMMInfoHandler{backend}}}, + {"book_changes", {.handler = BookChangesHandler{backend}}}, + {"book_offers", {.handler = BookOffersHandler{backend}}}, + {"deposit_authorized", {.handler = DepositAuthorizedHandler{backend}}}, + {"feature", {.handler = FeatureHandler{backend, amendmentCenter}}}, + {"gateway_balances", {.handler = GatewayBalancesHandler{backend}}}, + {"get_aggregate_price", {.handler = GetAggregatePriceHandler{backend}}}, + {"ledger", {.handler = LedgerHandler{backend}}}, + {"ledger_data", {.handler = LedgerDataHandler{backend}}}, + {"ledger_entry", {.handler = LedgerEntryHandler{backend}}}, + {"ledger_index", {.handler = LedgerIndexHandler{backend}, .isClioOnly = true}}, // clio only + {"ledger_range", {.handler = LedgerRangeHandler{backend}}}, + {"mpt_holders", {.handler = MPTHoldersHandler{backend}, .isClioOnly = true}}, // clio only + {"nfts_by_issuer", {.handler = NFTsByIssuerHandler{backend}, .isClioOnly = true}}, // clio only + {"nft_history", {.handler = NFTHistoryHandler{backend}, .isClioOnly = true}}, // clio only + {"nft_buy_offers", {.handler = NFTBuyOffersHandler{backend}}}, + {"nft_info", {.handler = NFTInfoHandler{backend}, .isClioOnly = true}}, // clio only + {"nft_sell_offers", {.handler = NFTSellOffersHandler{backend}}}, + {"noripple_check", {.handler = NoRippleCheckHandler{backend}}}, + {"ping", {.handler = PingHandler{}}}, + {"random", {.handler = RandomHandler{}}}, + {"server_info", {.handler = ServerInfoHandler{backend, subscriptionManager, balancer, etl, counters}}}, + {"transaction_entry", {.handler = TransactionEntryHandler{backend}}}, + {"tx", {.handler = TxHandler{backend, etl}}}, + {"subscribe", {.handler = SubscribeHandler{backend, subscriptionManager}}}, + {"unsubscribe", {.handler = UnsubscribeHandler{subscriptionManager}}}, + {"version", {.handler = VersionHandler{config}}}, } { } diff --git a/src/rpc/handlers/AccountInfo.cpp b/src/rpc/handlers/AccountInfo.cpp index 1b53a17a0..b9ee7c364 100644 --- a/src/rpc/handlers/AccountInfo.cpp +++ b/src/rpc/handlers/AccountInfo.cpp @@ -166,7 +166,7 @@ tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountInfoHandl boost::json::object acctFlags; for (auto const& lsf : lsFlags) - acctFlags[lsf.first.data()] = output.accountData.isFlag(lsf.second); + acctFlags[lsf.first] = output.accountData.isFlag(lsf.second); jv.as_object()[JS(account_flags)] = std::move(acctFlags); diff --git a/src/rpc/handlers/AccountObjects.cpp b/src/rpc/handlers/AccountObjects.cpp index 0de8ab4b0..e6d601277 100644 --- a/src/rpc/handlers/AccountObjects.cpp +++ b/src/rpc/handlers/AccountObjects.cpp @@ -121,9 +121,9 @@ void tag_invoke(boost::json::value_from_tag, boost::json::value& jv, AccountObjectsHandler::Output const& output) { auto objects = boost::json::array{}; - std::transform( - 
std::cbegin(output.accountObjects), - std::cend(output.accountObjects), + std::ranges::transform( + output.accountObjects, + std::back_inserter(objects), [](auto const& sle) { return toJson(sle); } ); diff --git a/src/rpc/handlers/AccountTx.cpp b/src/rpc/handlers/AccountTx.cpp index 9955d48f5..5339cb49a 100644 --- a/src/rpc/handlers/AccountTx.cpp +++ b/src/rpc/handlers/AccountTx.cpp @@ -132,7 +132,7 @@ AccountTxHandler::process(AccountTxHandler::Input input, Context const& ctx) con Output response; if (retCursor) - response.marker = {retCursor->ledgerSequence, retCursor->transactionIndex}; + response.marker = {.ledger = retCursor->ledgerSequence, .seq = retCursor->transactionIndex}; for (auto const& txnPlusMeta : blobs) { // over the range @@ -265,8 +265,8 @@ tag_invoke(boost::json::value_to_tag, boost::json::valu if (jsonObject.contains(JS(marker))) { input.marker = AccountTxHandler::Marker{ - jsonObject.at(JS(marker)).as_object().at(JS(ledger)).as_int64(), - jsonObject.at(JS(marker)).as_object().at(JS(seq)).as_int64() + .ledger = jsonObject.at(JS(marker)).as_object().at(JS(ledger)).as_int64(), + .seq = jsonObject.at(JS(marker)).as_object().at(JS(seq)).as_int64() }; } diff --git a/src/rpc/handlers/GatewayBalances.cpp b/src/rpc/handlers/GatewayBalances.cpp index adfba6448..3695d061c 100644 --- a/src/rpc/handlers/GatewayBalances.cpp +++ b/src/rpc/handlers/GatewayBalances.cpp @@ -146,7 +146,7 @@ GatewayBalancesHandler::process(GatewayBalancesHandler::Input input, Context con return Error{*status}; auto inHotbalances = [&](auto const& hw) { return output.hotBalances.contains(hw); }; - if (not std::all_of(input.hotWallets.begin(), input.hotWallets.end(), inHotbalances)) + if (not std::ranges::all_of(input.hotWallets, inHotbalances)) return Error{Status{ClioError::rpcINVALID_HOT_WALLET}}; output.accountID = input.account; @@ -233,9 +233,9 @@ tag_invoke(boost::json::value_to_tag, boost::json input.hotWallets.insert(*accountFromStringStrict(boost::json::value_to(jv.at(JS(hotwallet))))); } else { auto const& hotWallets = jv.at(JS(hotwallet)).as_array(); - std::transform( - hotWallets.begin(), - hotWallets.end(), + std::ranges::transform( + hotWallets, + std::inserter(input.hotWallets, input.hotWallets.begin()), [](auto const& hotWallet) { return *accountFromStringStrict(boost::json::value_to(hotWallet)); diff --git a/src/rpc/handlers/GetAggregatePrice.cpp b/src/rpc/handlers/GetAggregatePrice.cpp index 23de6eca9..525bdec6a 100644 --- a/src/rpc/handlers/GetAggregatePrice.cpp +++ b/src/rpc/handlers/GetAggregatePrice.cpp @@ -147,11 +147,11 @@ GetAggregatePriceHandler::process(GetAggregatePriceHandler::Input input, Context avg = divide(avg, ripple::STAmount{ripple::noIssue(), size, 0}, ripple::noIssue()); if (size > 1) { sd = std::accumulate(begin, end, sd, [&](ripple::Number const& acc, auto const& it) { - return acc + (it.first - avg) * (it.first - avg); + return acc + ((it.first - avg) * (it.first - avg)); }); sd = root2(sd / (size - 1)); } - return {avg, sd, size}; + return {.avg = avg, .sd = sd, .size = size}; }; out.extireStats = getStats(timestampPricesBiMap.right.begin(), timestampPricesBiMap.right.end()); diff --git a/src/rpc/handlers/LedgerEntry.cpp b/src/rpc/handlers/LedgerEntry.cpp index cdbb70260..a39470af3 100644 --- a/src/rpc/handlers/LedgerEntry.cpp +++ b/src/rpc/handlers/LedgerEntry.cpp @@ -348,11 +348,10 @@ tag_invoke(boost::json::value_to_tag, boost::json::va return ripple::keylet::credential(*subject, *issuer, ripple::Slice(credType->data(), credType->size())).key; }; - auto const 
indexFieldType = - std::find_if(indexFieldTypeMap.begin(), indexFieldTypeMap.end(), [&jsonObject](auto const& pair) { - auto const& [field, _] = pair; - return jsonObject.contains(field) && jsonObject.at(field).is_string(); - }); + auto const indexFieldType = std::ranges::find_if(indexFieldTypeMap, [&jsonObject](auto const& pair) { + auto const& [field, _] = pair; + return jsonObject.contains(field) && jsonObject.at(field).is_string(); + }); if (indexFieldType != indexFieldTypeMap.end()) { input.index = boost::json::value_to(jv.at(indexFieldType->first)); diff --git a/src/rpc/handlers/NFTHistory.cpp b/src/rpc/handlers/NFTHistory.cpp index ae4a66c1c..40a94ef4d 100644 --- a/src/rpc/handlers/NFTHistory.cpp +++ b/src/rpc/handlers/NFTHistory.cpp @@ -115,7 +115,7 @@ NFTHistoryHandler::process(NFTHistoryHandler::Input input, Context const& ctx) c auto const [blobs, retCursor] = txnsAndCursor; if (retCursor) - response.marker = {retCursor->ledgerSequence, retCursor->transactionIndex}; + response.marker = {.ledger = retCursor->ledgerSequence, .seq = retCursor->transactionIndex}; for (auto const& txnPlusMeta : blobs) { // over the range @@ -232,8 +232,8 @@ tag_invoke(boost::json::value_to_tag, boost::json::val if (jsonObject.contains(JS(marker))) { input.marker = NFTHistoryHandler::Marker{ - jsonObject.at(JS(marker)).as_object().at(JS(ledger)).as_int64(), - jsonObject.at(JS(marker)).as_object().at(JS(seq)).as_int64() + .ledger = jsonObject.at(JS(marker)).as_object().at(JS(ledger)).as_int64(), + .seq = jsonObject.at(JS(marker)).as_object().at(JS(seq)).as_int64() }; } diff --git a/src/rpc/handlers/NFTOffersCommon.cpp b/src/rpc/handlers/NFTOffersCommon.cpp index 3945916a5..e0ff6e8bb 100644 --- a/src/rpc/handlers/NFTOffersCommon.cpp +++ b/src/rpc/handlers/NFTOffersCommon.cpp @@ -57,7 +57,7 @@ using namespace ::rpc; namespace ripple { // TODO: move to some common serialization impl place -inline void +inline static void tag_invoke(boost::json::value_from_tag, boost::json::value& jv, SLE const& offer) { auto amount = ::toBoostJson(offer.getFieldAmount(sfAmount).getJson(JsonOptions::none)); @@ -165,7 +165,7 @@ NFTOffersHandlerBase::iterateOfferDirectory( offers.pop_back(); } - std::move(std::begin(offers), std::end(offers), std::back_inserter(output.offers)); + std::ranges::move(offers, std::back_inserter(output.offers)); return output; } diff --git a/src/util/JsonUtils.hpp b/src/util/JsonUtils.hpp index 76a079746..216f13090 100644 --- a/src/util/JsonUtils.hpp +++ b/src/util/JsonUtils.hpp @@ -40,7 +40,7 @@ namespace util { inline std::string toLower(std::string str) { - std::transform(std::begin(str), std::end(str), std::begin(str), [](unsigned char c) { return std::tolower(c); }); + std::ranges::transform(str, std::begin(str), [](unsigned char c) { return std::tolower(c); }); return str; } @@ -53,7 +53,7 @@ toLower(std::string str) inline std::string toUpper(std::string str) { - std::transform(std::begin(str), std::end(str), std::begin(str), [](unsigned char c) { return std::toupper(c); }); + std::ranges::transform(str, std::begin(str), [](unsigned char c) { return std::toupper(c); }); return str; } diff --git a/src/util/LedgerUtils.cpp b/src/util/LedgerUtils.cpp index e309964b5..7739af658 100644 --- a/src/util/LedgerUtils.cpp +++ b/src/util/LedgerUtils.cpp @@ -32,9 +32,7 @@ LedgerTypes::GetLedgerEntryTypeFromStr(std::string const& entryName) { static std::unordered_map typeMap = []() { std::unordered_map map; - std::for_each(std::begin(LEDGER_TYPES), std::end(LEDGER_TYPES), [&map](auto const& item) { - 
map[item.name] = item.type; - }); + std::ranges::for_each(LEDGER_TYPES, [&map](auto const& item) { map[item.name] = item.type; }); return map; }(); diff --git a/src/util/LedgerUtils.hpp b/src/util/LedgerUtils.hpp index 1a4b3ff04..002c4011b 100644 --- a/src/util/LedgerUtils.hpp +++ b/src/util/LedgerUtils.hpp @@ -127,9 +127,7 @@ class LedgerTypes { GetLedgerEntryTypeStrList() { std::array res{}; - std::transform(std::begin(LEDGER_TYPES), std::end(LEDGER_TYPES), std::begin(res), [](auto const& item) { - return item.name; - }); + std::ranges::transform(LEDGER_TYPES, std::begin(res), [](auto const& item) { return item.name; }); return res; } @@ -148,7 +146,7 @@ class LedgerTypes { auto constexpr accountOwnedCount = std::count_if(std::begin(LEDGER_TYPES), std::end(LEDGER_TYPES), filter); std::array res{}; auto it = std::begin(res); - std::for_each(std::begin(LEDGER_TYPES), std::end(LEDGER_TYPES), [&](auto const& item) { + std::ranges::for_each(LEDGER_TYPES, [&](auto const& item) { if (filter(item)) { *it = item.name; ++it; @@ -172,7 +170,7 @@ class LedgerTypes { auto constexpr deletionBlockersCount = std::count_if(std::begin(LEDGER_TYPES), std::end(LEDGER_TYPES), filter); std::array res{}; auto it = std::begin(res); - std::for_each(std::begin(LEDGER_TYPES), std::end(LEDGER_TYPES), [&](auto const& item) { + std::ranges::for_each(LEDGER_TYPES, [&](auto const& item) { if (filter(item)) { *it = item.type; ++it; diff --git a/src/util/TxUtils.cpp b/src/util/TxUtils.cpp index 6ae6e29d3..d0a7f61e2 100644 --- a/src/util/TxUtils.cpp +++ b/src/util/TxUtils.cpp @@ -34,6 +34,7 @@ namespace util { * @return The transaction types in lowercase */ [[nodiscard]] std::unordered_set const& +// NOLINTNEXTLINE(misc-use-internal-linkage) getTxTypesInLowercase() { static std::unordered_set const typesKeysInLowercase = []() { diff --git a/src/util/config/Config.cpp b/src/util/config/Config.cpp index a54b94e3b..f88ce89f5 100644 --- a/src/util/config/Config.cpp +++ b/src/util/config/Config.cpp @@ -110,7 +110,7 @@ Config::maybeArray(KeyType key) const ArrayType out; out.reserve(arr.size()); - std::transform(std::begin(arr), std::end(arr), std::back_inserter(out), [](auto&& element) { + std::ranges::transform(arr, std::back_inserter(out), [](auto&& element) { return Config{std::forward(element)}; }); return std::make_optional(std::move(out)); @@ -144,7 +144,7 @@ Config::arrayOrThrow(KeyType key, std::string_view err) const try { return maybeArray(key).value(); } catch (std::exception const&) { - throw std::runtime_error(err.data()); + throw std::runtime_error(std::string{err}); } } @@ -176,9 +176,7 @@ Config::array() const auto const& arr = store_.as_array(); out.reserve(arr.size()); - std::transform(std::cbegin(arr), std::cend(arr), std::back_inserter(out), [](auto const& element) { - return Config{element}; - }); + std::ranges::transform(arr, std::back_inserter(out), [](auto const& element) { return Config{element}; }); return out; } diff --git a/src/util/config/Config.hpp b/src/util/config/Config.hpp index 64403f5cd..85c17e8e6 100644 --- a/src/util/config/Config.hpp +++ b/src/util/config/Config.hpp @@ -187,7 +187,7 @@ class Config final { try { return maybeValue(key).value(); } catch (std::exception const&) { - throw std::runtime_error(err.data()); + throw std::runtime_error(std::string{err}); } } @@ -349,7 +349,7 @@ class Config final { try { return maybeValue().value(); } catch (std::exception const&) { - throw std::runtime_error(err.data()); + throw std::runtime_error(std::string{err}); } } diff --git 
a/src/util/newconfig/ConfigDescription.hpp b/src/util/newconfig/ConfigDescription.hpp index 1fbb5406c..094a8850e 100644 --- a/src/util/newconfig/ConfigDescription.hpp +++ b/src/util/newconfig/ConfigDescription.hpp @@ -63,64 +63,71 @@ struct ClioConfigDescription { private: static constexpr auto configDescription = std::array{ - KV{"database.type", "Type of database to use."}, - KV{"database.cassandra.contact_points", "Comma-separated list of contact points for Cassandra nodes."}, - KV{"database.cassandra.port", "Port number to connect to Cassandra."}, - KV{"database.cassandra.keyspace", "Keyspace to use in Cassandra."}, - KV{"database.cassandra.replication_factor", "Number of replicated nodes for Scylladb."}, - KV{"database.cassandra.table_prefix", "Prefix for Cassandra table names."}, - KV{"database.cassandra.max_write_requests_outstanding", "Maximum number of outstanding write requests."}, - KV{"database.cassandra.max_read_requests_outstanding", "Maximum number of outstanding read requests."}, - KV{"database.cassandra.threads", "Number of threads for Cassandra operations."}, - KV{"database.cassandra.core_connections_per_host", "Number of core connections per host for Cassandra."}, - KV{"database.cassandra.queue_size_io", "Queue size for I/O operations in Cassandra."}, - KV{"database.cassandra.write_batch_size", "Batch size for write operations in Cassandra."}, - KV{"etl_source.[].ip", "IP address of the ETL source."}, - KV{"etl_source.[].ws_port", "WebSocket port of the ETL source."}, - KV{"etl_source.[].grpc_port", "gRPC port of the ETL source."}, - KV{"forwarding.cache_timeout", "Timeout duration for the forwarding cache used in Rippled communication."}, - KV{"forwarding.request_timeout", "Timeout duration for the forwarding request used in Rippled communication."}, - KV{"dos_guard.[].whitelist", "List of IP addresses to whitelist for DOS protection."}, - KV{"dos_guard.max_fetches", "Maximum number of fetch operations allowed by DOS guard."}, - KV{"dos_guard.max_connections", "Maximum number of concurrent connections allowed by DOS guard."}, - KV{"dos_guard.max_requests", "Maximum number of requests allowed by DOS guard."}, - KV{"dos_guard.sweep_interval", "Interval in seconds for DOS guard to sweep/clear its state."}, - KV{"cache.peers.[].ip", "IP address of peer nodes to cache."}, - KV{"cache.peers.[].port", "Port number of peer nodes to cache."}, - KV{"server.ip", "IP address of the Clio HTTP server."}, - KV{"server.port", "Port number of the Clio HTTP server."}, - KV{"server.max_queue_size", "Maximum size of the server's request queue."}, - KV{"server.workers", "Maximum number of threads for server to run with."}, - KV{"server.local_admin", "Indicates if the server should run with admin privileges."}, - KV{"server.admin_password", "Password for Clio admin-only APIs."}, - KV{"prometheus.enabled", "Enable or disable Prometheus metrics."}, - KV{"prometheus.compress_reply", "Enable or disable compression of Prometheus responses."}, - KV{"io_threads", "Number of I/O threads."}, - KV{"cache.num_diffs", "Number of diffs to cache."}, - KV{"cache.num_markers", "Number of markers to cache."}, - KV{"cache.num_cursors_from_diff", "Num of cursors that are different."}, - KV{"cache.num_cursors_from_account", "Number of cursors from an account."}, - KV{"cache.page_fetch_size", "Page fetch size for cache operations."}, - KV{"cache.load", "Cache loading strategy ('sync' or 'async')."}, - KV{"log_channels.[].channel", "Name of the log channel."}, - KV{"log_channels.[].log_level", "Log level for the 
log channel."}, - KV{"log_level", "General logging level of Clio."}, - KV{"log_format", "Format string for log messages."}, - KV{"log_to_console", "Enable or disable logging to console."}, - KV{"log_directory", "Directory path for log files."}, - KV{"log_rotation_size", "Log rotation size in megabytes."}, - KV{"log_directory_max_size", "Maximum size of the log directory in megabytes."}, - KV{"log_rotation_hour_interval", "Interval in hours for log rotation."}, - KV{"log_tag_style", "Style for log tags."}, - KV{"extractor_threads", "Number of extractor threads."}, - KV{"read_only", "Indicates if the server should have read-only privileges."}, - KV{"txn_threshold", "Transaction threshold value."}, - KV{"start_sequence", "Starting ledger index."}, - KV{"finish_sequence", "Ending ledger index."}, - KV{"ssl_cert_file", "Path to the SSL certificate file."}, - KV{"ssl_key_file", "Path to the SSL key file."}, - KV{"api_version.min", "Minimum API version."}, - KV{"api_version.max", "Maximum API version."} + KV{.key = "database.type", .value = "Type of database to use."}, + KV{.key = "database.cassandra.contact_points", + .value = "Comma-separated list of contact points for Cassandra nodes."}, + KV{.key = "database.cassandra.port", .value = "Port number to connect to Cassandra."}, + KV{.key = "database.cassandra.keyspace", .value = "Keyspace to use in Cassandra."}, + KV{.key = "database.cassandra.replication_factor", .value = "Number of replicated nodes for Scylladb."}, + KV{.key = "database.cassandra.table_prefix", .value = "Prefix for Cassandra table names."}, + KV{.key = "database.cassandra.max_write_requests_outstanding", + .value = "Maximum number of outstanding write requests."}, + KV{.key = "database.cassandra.max_read_requests_outstanding", + .value = "Maximum number of outstanding read requests."}, + KV{.key = "database.cassandra.threads", .value = "Number of threads for Cassandra operations."}, + KV{.key = "database.cassandra.core_connections_per_host", + .value = "Number of core connections per host for Cassandra."}, + KV{.key = "database.cassandra.queue_size_io", .value = "Queue size for I/O operations in Cassandra."}, + KV{.key = "database.cassandra.write_batch_size", .value = "Batch size for write operations in Cassandra."}, + KV{.key = "etl_source.[].ip", .value = "IP address of the ETL source."}, + KV{.key = "etl_source.[].ws_port", .value = "WebSocket port of the ETL source."}, + KV{.key = "etl_source.[].grpc_port", .value = "gRPC port of the ETL source."}, + KV{.key = "forwarding.cache_timeout", + .value = "Timeout duration for the forwarding cache used in Rippled communication."}, + KV{.key = "forwarding.request_timeout", + .value = "Timeout duration for the forwarding request used in Rippled communication."}, + KV{.key = "dos_guard.[].whitelist", .value = "List of IP addresses to whitelist for DOS protection."}, + KV{.key = "dos_guard.max_fetches", .value = "Maximum number of fetch operations allowed by DOS guard."}, + KV{.key = "dos_guard.max_connections", .value = "Maximum number of concurrent connections allowed by DOS guard." 
+ }, + KV{.key = "dos_guard.max_requests", .value = "Maximum number of requests allowed by DOS guard."}, + KV{.key = "dos_guard.sweep_interval", .value = "Interval in seconds for DOS guard to sweep/clear its state."}, + KV{.key = "cache.peers.[].ip", .value = "IP address of peer nodes to cache."}, + KV{.key = "cache.peers.[].port", .value = "Port number of peer nodes to cache."}, + KV{.key = "server.ip", .value = "IP address of the Clio HTTP server."}, + KV{.key = "server.port", .value = "Port number of the Clio HTTP server."}, + KV{.key = "server.max_queue_size", .value = "Maximum size of the server's request queue."}, + KV{.key = "server.workers", .value = "Maximum number of threads for server to run with."}, + KV{.key = "server.local_admin", .value = "Indicates if the server should run with admin privileges."}, + KV{.key = "server.admin_password", .value = "Password for Clio admin-only APIs."}, + KV{.key = "prometheus.enabled", .value = "Enable or disable Prometheus metrics."}, + KV{.key = "prometheus.compress_reply", .value = "Enable or disable compression of Prometheus responses."}, + KV{.key = "io_threads", .value = "Number of I/O threads."}, + KV{.key = "cache.num_diffs", .value = "Number of diffs to cache."}, + KV{.key = "cache.num_markers", .value = "Number of markers to cache."}, + KV{.key = "cache.num_cursors_from_diff", .value = "Num of cursors that are different."}, + KV{.key = "cache.num_cursors_from_account", .value = "Number of cursors from an account."}, + KV{.key = "cache.page_fetch_size", .value = "Page fetch size for cache operations."}, + KV{.key = "cache.load", .value = "Cache loading strategy ('sync' or 'async')."}, + KV{.key = "log_channels.[].channel", .value = "Name of the log channel."}, + KV{.key = "log_channels.[].log_level", .value = "Log level for the log channel."}, + KV{.key = "log_level", .value = "General logging level of Clio."}, + KV{.key = "log_format", .value = "Format string for log messages."}, + KV{.key = "log_to_console", .value = "Enable or disable logging to console."}, + KV{.key = "log_directory", .value = "Directory path for log files."}, + KV{.key = "log_rotation_size", .value = "Log rotation size in megabytes."}, + KV{.key = "log_directory_max_size", .value = "Maximum size of the log directory in megabytes."}, + KV{.key = "log_rotation_hour_interval", .value = "Interval in hours for log rotation."}, + KV{.key = "log_tag_style", .value = "Style for log tags."}, + KV{.key = "extractor_threads", .value = "Number of extractor threads."}, + KV{.key = "read_only", .value = "Indicates if the server should have read-only privileges."}, + KV{.key = "txn_threshold", .value = "Transaction threshold value."}, + KV{.key = "start_sequence", .value = "Starting ledger index."}, + KV{.key = "finish_sequence", .value = "Ending ledger index."}, + KV{.key = "ssl_cert_file", .value = "Path to the SSL certificate file."}, + KV{.key = "ssl_key_file", .value = "Path to the SSL key file."}, + KV{.key = "api_version.min", .value = "Minimum API version."}, + KV{.key = "api_version.max", .value = "Maximum API version."} }; }; diff --git a/src/util/prometheus/Label.cpp b/src/util/prometheus/Label.cpp index 176fd1df4..60be4bfee 100644 --- a/src/util/prometheus/Label.cpp +++ b/src/util/prometheus/Label.cpp @@ -23,7 +23,6 @@ #include #include -#include #include #include @@ -33,18 +32,6 @@ Label::Label(std::string name, std::string value) : name_(std::move(name)), valu { } -bool -Label::operator<(Label const& rhs) const -{ - return std::tie(name_, value_) < 
std::tie(rhs.name_, rhs.value_); -} - -bool -Label::operator==(Label const& rhs) const -{ - return std::tie(name_, value_) == std::tie(rhs.name_, rhs.value_); -} - std::string Label::serialize() const { @@ -73,7 +60,7 @@ Label::serialize() const Labels::Labels(std::vector
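A recurring pattern in this diff comes from modernize-use-ranges: std::unique is replaced by std::ranges::unique, which returns a subrange over the leftover tail rather than a single iterator, so both bounds are taken by structured binding before the erase (see getNFTokenCancelOfferData, getUniqueNFTsDatas, and CursorFromFixDiffNumProvider above). A minimal sketch of that idiom on a plain std::vector<int>, not the real NFTTransactionsData type:

    #include <algorithm>
    #include <iostream>
    #include <ranges>
    #include <vector>

    int main()
    {
        std::vector<int> ids{3, 1, 3, 2, 1};

        std::ranges::sort(ids);

        // ranges::unique returns the subrange left over after de-duplication;
        // bind both ends and hand them straight to erase.
        auto const [first, last] = std::ranges::unique(ids);
        ids.erase(first, last);

        for (int id : ids)
            std::cout << id << ' ';  // prints: 1 2 3
    }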