From 0ee6f15b3501b089ad68ad862248a364663c575c Mon Sep 17 00:00:00 2001 From: RichardAH Date: Sat, 9 Jul 2022 19:37:13 +0200 Subject: [PATCH 001/508] Create `CONTRIBUTING.md`, list maintainers and outline repo policies (#4214) --- CONTRIBUTING.md | 67 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000000..44530b4f7d7 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# Contributing +The XRP Ledger has many and diverse stakeholders, and everyone deserves a chance to contribute meaningful changes to the code that runs the XRPL. +To contribute, please: +1. Fork the repository under your own user. +2. Create a new branch on which to write your changes. Please note that changes which alter transaction processing must be composed via and guarded using [Amendments](https://xrpl.org/amendments.html). Changes which are _read only_ i.e. RPC, or changes which are only refactors and maintain the existing behaviour do not need to be made through an Amendment. +3. Write and test your code. +4. Ensure that your code compiles with the provided build engine and update the provided build engine as part of your PR where needed and where appropriate. +5. Write test cases for your code and include those in `src/test` such that they are runnable from the command line using `./rippled -u`. (Some changes will not be able to be tested this way.) +6. Ensure your code passes automated checks (e.g. clang-format and levelization.) +7. Squash your commits (i.e. rebase) into as few commits as is reasonable to describe your changes at a high level (typically a single commit for a small change.) +8. Open a PR to the main repository onto the _develop_ branch, and follow the provided template. + +# Major Changes +If your code change is a major feature, a breaking change or in some other way makes a significant alteration to the way the XRPL will operate, then you must first write an XLS document (XRP Ledger Standard) describing your change. +To do this: +1. Go to [XLS Standards](https://github.com/XRPLF/XRPL-Standards/discussions). +2. Choose the next available standard number. +3. Open a discussion with the appropriate title to propose your draft standard. +4. Link your XLS in your PR. + +# Style guide +This is a non-exhaustive list of recommended style guidelines. These are not always strictly enforced and serve as a way to keep the codebase coherent rather than a set of _thou shalt not_ commandments. + +## Formatting +All code must conform to `clang-format` version 10, unless the result would be unreasonably difficult to read or maintain. +To change your code to conform use `clang-format -i `. + +## Avoid +1. Proliferation of nearly identical code. +2. Proliferation of new files and classes. +3. Complex inheritance and complex OOP patterns. +4. Unmanaged memory allocation and raw pointers. +5. Macros and non-trivial templates (unless they add significant value.) +6. Lambda patterns (unless these add significant value.) +7. CPU or architecture-specific code unless there is a good reason to include it, and where it is used guard it with macros and provide explanatory comments. +8. Importing new libraries unless there is a very good reason to do so. + +## Seek to +9. Extend functionality of existing code rather than creating new code. +10. Prefer readability over terseness where important logic is concerned. +11. 
Inline functions that are not used or are not likely to be used elsewhere in the codebase. +12. Use clear and self-explanatory names for functions, variables, structs and classes. +13. Use TitleCase for classes, structs and filenames, camelCase for function and variable names, lower case for namespaces and folders. +14. Provide as many comments as you feel that a competent programmer would need to understand what your code does. + +# Maintainers +Maintainers are ecosystem participants with elevated access to the repository. They are able to push new code, make decisions on when a release should be made, etc. + +## Code Review +New contributors' PRs must be reviewed by at least two of the maintainers. Well established prior contributors can be reviewed by a single maintainer. + +## Adding and Removing +New maintainers can be proposed by two existing maintainers, subject to a vote by a quorum of the existing maintainers. A minimum of 50% support and a 50% participation is required. In the event of a tie vote, the addition of the new maintainer will be rejected. + +Existing maintainers can resign, or be subject to a vote for removal at the behest of two existing maintainers. A minimum of 60% agreement and 50% participation are required. The XRP Ledger Foundation will have the ability, for cause, to remove an existing maintainer without a vote. + +## Existing Maintainers +* [JoelKatz](https://github.com/JoelKatz) (Ripple) +* [Manojsdoshi](https://github.com/manojsdoshi) (Ripple) +* [N3tc4t](https://github.com/n3tc4t) (XRPL Labs) +* [Nikolaos D Bougalis](https://github.com/nbougalis) (Ripple) +* [Nixer89](https://github.com/nixer89) (XRP Ledger Foundation) +* [RichardAH](https://github.com/RichardAH) (XRPL Labs + XRP Ledger Foundation) +* [Seelabs](https://github.com/seelabs) (Ripple) +* [Silkjaer](https://github.com/Silkjaer) (XRP Ledger Foundation) +* [WietseWind](https://github.com/WietseWind) (XRPL Labs + XRP Ledger Foundation) +* [Ximinez](https://github.com/ximinez) (Ripple) From 610436d737a03925febde7e6aab7e81add06b3ab Mon Sep 17 00:00:00 2001 From: RichardAH Date: Mon, 11 Jul 2022 09:29:31 +0200 Subject: [PATCH 002/508] Add book_changes rpc (#4212) --- src/ripple/app/main/Main.cpp | 1 + src/ripple/app/misc/NetworkOPs.cpp | 47 ++++- src/ripple/net/InfoSub.h | 5 + src/ripple/net/impl/RPCCall.cpp | 1 + src/ripple/protocol/jss.h | 8 + src/ripple/rpc/BookChanges.h | 213 ++++++++++++++++++++++ src/ripple/rpc/handlers/BookOffers.cpp | 13 ++ src/ripple/rpc/handlers/Handlers.h | 2 + src/ripple/rpc/handlers/LedgerRequest.cpp | 123 +------------ src/ripple/rpc/handlers/Subscribe.cpp | 4 + src/ripple/rpc/impl/Handler.cpp | 1 + src/ripple/rpc/impl/RPCHandler.cpp | 5 + src/ripple/rpc/impl/RPCHelpers.cpp | 116 ++++++++++++ src/ripple/rpc/impl/RPCHelpers.h | 6 + 14 files changed, 430 insertions(+), 115 deletions(-) create mode 100644 src/ripple/rpc/BookChanges.h diff --git a/src/ripple/app/main/Main.cpp b/src/ripple/app/main/Main.cpp index c08836e79e7..64b6464a4c6 100644 --- a/src/ripple/app/main/Main.cpp +++ b/src/ripple/app/main/Main.cpp @@ -135,6 +135,7 @@ printHelp(const po::options_description& desc) "[strict]\n" " account_tx accountID [ledger_min [ledger_max [limit " "[offset]]]] [binary] [count] [descending]\n" + " book_changes []\n" " book_offers [ " "[ [ []]]]]\n" " can_delete [||now|always|never]\n" diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index 9203a83ad41..6ff644ba846 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ 
b/src/ripple/app/misc/NetworkOPs.cpp @@ -62,6 +62,7 @@ #include #include #include +#include #include #include #include @@ -502,6 +503,11 @@ class NetworkOPsImp final : public NetworkOPs bool unsubLedger(std::uint64_t uListener) override; + bool + subBookChanges(InfoSub::ref ispListener) override; + bool + unsubBookChanges(std::uint64_t uListener) override; + bool subServer(InfoSub::ref ispListener, Json::Value& jvResult, bool admin) override; @@ -743,9 +749,10 @@ class NetworkOPsImp final : public NetworkOPs sValidations, // Received validations. sPeerStatus, // Peer status changes. sConsensusPhase, // Consensus phase + sBookChanges, // Per-ledger order book changes - sLastEntry = sConsensusPhase // as this name implies, any new entry - // must be ADDED ABOVE this one + sLastEntry = sBookChanges // as this name implies, any new entry + // must be ADDED ABOVE this one }; std::array mStreamMaps; @@ -2898,6 +2905,24 @@ NetworkOPsImp::pubLedger(std::shared_ptr const& lpAccepted) } } + if (!mStreamMaps[sBookChanges].empty()) + { + Json::Value jvObj = ripple::RPC::computeBookChanges(lpAccepted); + + auto it = mStreamMaps[sBookChanges].begin(); + while (it != mStreamMaps[sBookChanges].end()) + { + InfoSub::pointer p = it->second.lock(); + if (p) + { + p->send(jvObj, true); + ++it; + } + else + it = mStreamMaps[sBookChanges].erase(it); + } + } + { static bool firstTime = true; if (firstTime) @@ -3876,6 +3901,16 @@ NetworkOPsImp::subLedger(InfoSub::ref isrListener, Json::Value& jvResult) .second; } +// <-- bool: true=added, false=already there +bool +NetworkOPsImp::subBookChanges(InfoSub::ref isrListener) +{ + std::lock_guard sl(mSubLock); + return mStreamMaps[sBookChanges] + .emplace(isrListener->getSeq(), isrListener) + .second; +} + // <-- bool: true=erased, false=was not there bool NetworkOPsImp::unsubLedger(std::uint64_t uSeq) @@ -3884,6 +3919,14 @@ NetworkOPsImp::unsubLedger(std::uint64_t uSeq) return mStreamMaps[sLedger].erase(uSeq); } +// <-- bool: true=erased, false=was not there +bool +NetworkOPsImp::unsubBookChanges(std::uint64_t uSeq) +{ + std::lock_guard sl(mSubLock); + return mStreamMaps[sBookChanges].erase(uSeq); +} + // <-- bool: true=added, false=already there bool NetworkOPsImp::subManifests(InfoSub::ref isrListener) diff --git a/src/ripple/net/InfoSub.h b/src/ripple/net/InfoSub.h index 3c170669bab..fb44e23b720 100644 --- a/src/ripple/net/InfoSub.h +++ b/src/ripple/net/InfoSub.h @@ -128,6 +128,11 @@ class InfoSub : public CountedObject virtual bool unsubLedger(std::uint64_t uListener) = 0; + virtual bool + subBookChanges(ref ispListener) = 0; + virtual bool + unsubBookChanges(std::uint64_t uListener) = 0; + virtual bool subManifests(ref ispListener) = 0; virtual bool diff --git a/src/ripple/net/impl/RPCCall.cpp b/src/ripple/net/impl/RPCCall.cpp index 820f25ddfc2..334ca8693e3 100644 --- a/src/ripple/net/impl/RPCCall.cpp +++ b/src/ripple/net/impl/RPCCall.cpp @@ -1242,6 +1242,7 @@ class RPCParser {"account_objects", &RPCParser::parseAccountItems, 1, 5}, {"account_offers", &RPCParser::parseAccountItems, 1, 4}, {"account_tx", &RPCParser::parseAccountTransactions, 1, 8}, + {"book_changes", &RPCParser::parseLedgerId, 1, 1}, {"book_offers", &RPCParser::parseBookOffers, 2, 7}, {"can_delete", &RPCParser::parseCanDelete, 0, 1}, {"channel_authorize", &RPCParser::parseChannelAuthorize, 3, 4}, diff --git a/src/ripple/protocol/jss.h b/src/ripple/protocol/jss.h index 0dc413e6d20..1c5bf8463b0 100644 --- a/src/ripple/protocol/jss.h +++ b/src/ripple/protocol/jss.h @@ -166,11 +166,13 @@ 
JSS(build_path); // in: TransactionSign JSS(build_version); // out: NetworkOPs JSS(cancel_after); // out: AccountChannels JSS(can_delete); // out: CanDelete +JSS(changes); // out: BookChanges JSS(channel_id); // out: AccountChannels JSS(channels); // out: AccountChannels JSS(check); // in: AccountObjects JSS(check_nodes); // in: LedgerCleaner JSS(clear); // in/out: FetchInfo +JSS(close); // out: BookChanges JSS(close_flags); // out: LedgerToJson JSS(close_time); // in: Application, out: NetworkOPs, // RCLCxPeerPos, LedgerToJson @@ -193,6 +195,8 @@ JSS(converge_time_s); // out: NetworkOPs JSS(cookie); // out: NetworkOPs JSS(count); // in: AccountTx*, ValidatorList JSS(counters); // in/out: retrieve counters +JSS(currency_a); // out: BookChanges +JSS(currency_b); // out: BookChanges JSS(currentShard); // out: NodeToShardStatus JSS(currentShardIndex); // out: NodeToShardStatus JSS(currency); // in: paths/PathRequest, STAmount @@ -282,6 +286,7 @@ JSS(hashes); // in: AccountObjects JSS(have_header); // out: InboundLedger JSS(have_state); // out: InboundLedger JSS(have_transactions); // out: InboundLedger +JSS(high); // out: BookChanges JSS(highest_sequence); // out: AccountInfo JSS(highest_ticket); // out: AccountInfo JSS(historical_perminute); // historical_perminute. @@ -363,6 +368,7 @@ JSS(load_fee); // out: LoadFeeTrackImp, NetworkOPs JSS(local); // out: resource/Logic.h JSS(local_txs); // out: GetCounts JSS(local_static_keys); // out: ValidatorList +JSS(low); // out: BookChanges JSS(lowest_sequence); // out: AccountInfo JSS(lowest_ticket); // out: AccountInfo JSS(majority); // out: RPC feature @@ -639,6 +645,8 @@ JSS(validator_sites); // out: ValidatorSites JSS(value); // out: STAmount JSS(version); // out: RPCVersion JSS(vetoed); // out: AmendmentTableImpl +JSS(volume_a); // out: BookChanges +JSS(volume_b); // out: BookChanges JSS(vote); // in: Feature JSS(warning); // rpc: JSS(warnings); // out: server_info, server_state diff --git a/src/ripple/rpc/BookChanges.h b/src/ripple/rpc/BookChanges.h new file mode 100644 index 00000000000..11f8e704127 --- /dev/null +++ b/src/ripple/rpc/BookChanges.h @@ -0,0 +1,213 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2019 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#ifndef RIPPLE_RPC_BOOKCHANGES_H_INCLUDED +#define RIPPLE_RPC_BOOKCAHNGES_H_INCLUDED + +namespace Json { +class Value; +} + +namespace ripple { + +class ReadView; +class Transaction; +class TxMeta; +class STTx; + +namespace RPC { + +template +Json::Value +computeBookChanges(std::shared_ptr const& lpAccepted) +{ + std::map< + std::string, + std::tuple< + STAmount, // side A volume + STAmount, // side B volume + STAmount, // high rate + STAmount, // low rate + STAmount, // open rate + STAmount // close rate + >> + tally; + + for (auto& tx : lpAccepted->txs) + { + if (!tx.first || !tx.second || + !tx.first->isFieldPresent(sfTransactionType)) + continue; + + std::optional offerCancel; + uint16_t tt = tx.first->getFieldU16(sfTransactionType); + switch (tt) + { + case ttOFFER_CANCEL: + case ttOFFER_CREATE: { + if (tx.first->isFieldPresent(sfOfferSequence)) + offerCancel = tx.first->getFieldU32(sfOfferSequence); + break; + } + // in future if any other ways emerge to cancel an offer + // this switch makes them easy to add + default: + break; + } + + for (auto const& node : tx.second->getFieldArray(sfAffectedNodes)) + { + SField const& metaType = node.getFName(); + uint16_t nodeType = node.getFieldU16(sfLedgerEntryType); + + // we only care about ltOFFER objects being modified or + // deleted + if (nodeType != ltOFFER || metaType == sfCreatedNode) + continue; + + // if either FF or PF are missing we can't compute + // but generally these are cancelled rather than crossed + // so skipping them is consistent + if (!node.isFieldPresent(sfFinalFields) || + !node.isFieldPresent(sfPreviousFields)) + continue; + + STObject& finalFields = (const_cast(node)) + .getField(sfFinalFields) + .downcast(); + + STObject& previousFields = (const_cast(node)) + .getField(sfPreviousFields) + .downcast(); + + // defensive case that should never be hit + if (!finalFields.isFieldPresent(sfTakerGets) || + !finalFields.isFieldPresent(sfTakerPays) || + !previousFields.isFieldPresent(sfTakerGets) || + !previousFields.isFieldPresent(sfTakerPays)) + continue; + + // filter out any offers deleted by explicit offer cancels + if (metaType == sfDeletedNode && offerCancel && + finalFields.getFieldU32(sfSequence) == *offerCancel) + continue; + + // compute the difference in gets and pays actually + // affected onto the offer + STAmount deltaGets = finalFields.getFieldAmount(sfTakerGets) - + previousFields.getFieldAmount(sfTakerGets); + STAmount deltaPays = finalFields.getFieldAmount(sfTakerPays) - + previousFields.getFieldAmount(sfTakerPays); + + std::string g{to_string(deltaGets.issue())}; + std::string p{to_string(deltaPays.issue())}; + + bool const noswap = + isXRP(deltaGets) ? true : (isXRP(deltaPays) ? false : (g < p)); + + STAmount first = noswap ? deltaGets : deltaPays; + STAmount second = noswap ? 
deltaPays : deltaGets; + + // defensively programmed, should (probably) never happen + if (second == beast::zero) + continue; + + STAmount rate = divide(first, second, noIssue()); + + if (first < beast::zero) + first = -first; + + if (second < beast::zero) + second = -second; + + std::stringstream ss; + if (noswap) + ss << g << "|" << p; + else + ss << p << "|" << g; + + std::string key{ss.str()}; + + if (tally.find(key) == tally.end()) + tally[key] = { + first, // side A vol + second, // side B vol + rate, // high + rate, // low + rate, // open + rate // close + }; + else + { + // increment volume + auto& entry = tally[key]; + + std::get<0>(entry) += first; // side A vol + std::get<1>(entry) += second; // side B vol + + if (std::get<2>(entry) < rate) // high + std::get<2>(entry) = rate; + + if (std::get<3>(entry) > rate) // low + std::get<3>(entry) = rate; + + std::get<5>(entry) = rate; // close + } + } + } + + Json::Value jvObj(Json::objectValue); + jvObj[jss::type] = "bookChanges"; + jvObj[jss::ledger_index] = lpAccepted->info().seq; + jvObj[jss::ledger_hash] = to_string(lpAccepted->info().hash); + jvObj[jss::ledger_time] = Json::Value::UInt( + lpAccepted->info().closeTime.time_since_epoch().count()); + + jvObj[jss::changes] = Json::arrayValue; + + for (auto const& entry : tally) + { + Json::Value& inner = jvObj[jss::changes].append(Json::objectValue); + + STAmount volA = std::get<0>(entry.second); + STAmount volB = std::get<1>(entry.second); + + inner[jss::currency_a] = + (isXRP(volA) ? "XRP_drops" : to_string(volA.issue())); + inner[jss::currency_b] = + (isXRP(volB) ? "XRP_drops" : to_string(volB.issue())); + + inner[jss::volume_a] = + (isXRP(volA) ? to_string(volA.xrp()) : to_string(volA.iou())); + inner[jss::volume_b] = + (isXRP(volB) ? to_string(volB.xrp()) : to_string(volB.iou())); + + inner[jss::high] = to_string(std::get<2>(entry.second).iou()); + inner[jss::low] = to_string(std::get<3>(entry.second).iou()); + inner[jss::open] = to_string(std::get<4>(entry.second).iou()); + inner[jss::close] = to_string(std::get<5>(entry.second).iou()); + } + + return jvObj; +} + +} // namespace RPC +} // namespace ripple + +#endif diff --git a/src/ripple/rpc/handlers/BookOffers.cpp b/src/ripple/rpc/handlers/BookOffers.cpp index a42a0de9924..e85b6029ba6 100644 --- a/src/ripple/rpc/handlers/BookOffers.cpp +++ b/src/ripple/rpc/handlers/BookOffers.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -200,4 +201,16 @@ doBookOffers(RPC::JsonContext& context) return jvResult; } +Json::Value +doBookChanges(RPC::JsonContext& context) +{ + auto res = RPC::getLedgerByContext(context); + + if (std::holds_alternative(res)) + return std::get(res); + + return RPC::computeBookChanges( + std::get>(res)); +} + } // namespace ripple diff --git a/src/ripple/rpc/handlers/Handlers.h b/src/ripple/rpc/handlers/Handlers.h index 1bb3be05654..3c00899d734 100644 --- a/src/ripple/rpc/handlers/Handlers.h +++ b/src/ripple/rpc/handlers/Handlers.h @@ -43,6 +43,8 @@ doAccountTxJson(RPC::JsonContext&); Json::Value doBookOffers(RPC::JsonContext&); Json::Value +doBookChanges(RPC::JsonContext&); +Json::Value doBlackList(RPC::JsonContext&); Json::Value doCanDelete(RPC::JsonContext&); diff --git a/src/ripple/rpc/handlers/LedgerRequest.cpp b/src/ripple/rpc/handlers/LedgerRequest.cpp index 3fe238e9212..88d26176ddc 100644 --- a/src/ripple/rpc/handlers/LedgerRequest.cpp +++ b/src/ripple/rpc/handlers/LedgerRequest.cpp @@ -26,7 +26,9 @@ #include #include #include +#include #include +#include namespace ripple { @@ 
-37,122 +39,17 @@ namespace ripple { Json::Value doLedgerRequest(RPC::JsonContext& context) { - if (context.app.config().reporting()) - return rpcError(rpcREPORTING_UNSUPPORTED); + auto res = getLedgerByContext(context); - auto const hasHash = context.params.isMember(jss::ledger_hash); - auto const hasIndex = context.params.isMember(jss::ledger_index); - std::uint32_t ledgerIndex = 0; + if (std::holds_alternative(res)) + return std::get(res); - auto& ledgerMaster = context.app.getLedgerMaster(); - LedgerHash ledgerHash; + auto const& ledger = std::get>(res); - if ((hasHash && hasIndex) || !(hasHash || hasIndex)) - { - return RPC::make_param_error( - "Exactly one of ledger_hash and ledger_index can be set."); - } - - context.loadType = Resource::feeHighBurdenRPC; - - if (hasHash) - { - auto const& jsonHash = context.params[jss::ledger_hash]; - if (!jsonHash.isString() || !ledgerHash.parseHex(jsonHash.asString())) - return RPC::invalid_field_error(jss::ledger_hash); - } - else - { - auto const& jsonIndex = context.params[jss::ledger_index]; - if (!jsonIndex.isInt()) - return RPC::invalid_field_error(jss::ledger_index); - - // We need a validated ledger to get the hash from the sequence - if (ledgerMaster.getValidatedLedgerAge() > - RPC::Tuning::maxValidatedLedgerAge) - { - if (context.apiVersion == 1) - return rpcError(rpcNO_CURRENT); - return rpcError(rpcNOT_SYNCED); - } - - ledgerIndex = jsonIndex.asInt(); - auto ledger = ledgerMaster.getValidatedLedger(); - - if (ledgerIndex >= ledger->info().seq) - return RPC::make_param_error("Ledger index too large"); - if (ledgerIndex <= 0) - return RPC::make_param_error("Ledger index too small"); - - auto const j = context.app.journal("RPCHandler"); - // Try to get the hash of the desired ledger from the validated ledger - auto neededHash = hashOfSeq(*ledger, ledgerIndex, j); - if (!neededHash) - { - // Find a ledger more likely to have the hash of the desired ledger - auto const refIndex = getCandidateLedger(ledgerIndex); - auto refHash = hashOfSeq(*ledger, refIndex, j); - assert(refHash); - - ledger = ledgerMaster.getLedgerByHash(*refHash); - if (!ledger) - { - // We don't have the ledger we need to figure out which ledger - // they want. Try to get it. - - if (auto il = context.app.getInboundLedgers().acquire( - *refHash, refIndex, InboundLedger::Reason::GENERIC)) - { - Json::Value jvResult = RPC::make_error( - rpcLGR_NOT_FOUND, - "acquiring ledger containing requested index"); - jvResult[jss::acquiring] = - getJson(LedgerFill(*il, &context)); - return jvResult; - } - - if (auto il = context.app.getInboundLedgers().find(*refHash)) - { - Json::Value jvResult = RPC::make_error( - rpcLGR_NOT_FOUND, - "acquiring ledger containing requested index"); - jvResult[jss::acquiring] = il->getJson(0); - return jvResult; - } - - // Likely the app is shutting down - return Json::Value(); - } - - neededHash = hashOfSeq(*ledger, ledgerIndex, j); - } - assert(neededHash); - ledgerHash = neededHash ? 
*neededHash : beast::zero; // kludge - } - - // Try to get the desired ledger - // Verify all nodes even if we think we have it - auto ledger = context.app.getInboundLedgers().acquire( - ledgerHash, ledgerIndex, InboundLedger::Reason::GENERIC); - - // In standalone mode, accept the ledger from the ledger cache - if (!ledger && context.app.config().standalone()) - ledger = ledgerMaster.getLedgerByHash(ledgerHash); - - if (ledger) - { - // We already had the entire ledger verified/acquired - Json::Value jvResult; - jvResult[jss::ledger_index] = ledger->info().seq; - addJson(jvResult, {*ledger, &context, 0}); - return jvResult; - } - - if (auto il = context.app.getInboundLedgers().find(ledgerHash)) - return il->getJson(0); - - return RPC::make_error( - rpcNOT_READY, "findCreate failed to return an inbound ledger"); + Json::Value jvResult; + jvResult[jss::ledger_index] = ledger->info().seq; + addJson(jvResult, {*ledger, &context, 0}); + return jvResult; } } // namespace ripple diff --git a/src/ripple/rpc/handlers/Subscribe.cpp b/src/ripple/rpc/handlers/Subscribe.cpp index a3c1d1e1c21..f17aa62b626 100644 --- a/src/ripple/rpc/handlers/Subscribe.cpp +++ b/src/ripple/rpc/handlers/Subscribe.cpp @@ -136,6 +136,10 @@ doSubscribe(RPC::JsonContext& context) { context.netOps.subLedger(ispSub, jvResult); } + else if (streamName == "book_changes") + { + context.netOps.subBookChanges(ispSub); + } else if (streamName == "manifests") { context.netOps.subManifests(ispSub); diff --git a/src/ripple/rpc/impl/Handler.cpp b/src/ripple/rpc/impl/Handler.cpp index 15f2ea8f856..17a15eed31b 100644 --- a/src/ripple/rpc/impl/Handler.cpp +++ b/src/ripple/rpc/impl/Handler.cpp @@ -72,6 +72,7 @@ Handler const handlerArray[]{ {"account_offers", byRef(&doAccountOffers), Role::USER, NO_CONDITION}, {"account_tx", byRef(&doAccountTxJson), Role::USER, NO_CONDITION}, {"blacklist", byRef(&doBlackList), Role::ADMIN, NO_CONDITION}, + {"book_changes", byRef(&doBookChanges), Role::USER, NO_CONDITION}, {"book_offers", byRef(&doBookOffers), Role::USER, NO_CONDITION}, {"can_delete", byRef(&doCanDelete), Role::ADMIN, NO_CONDITION}, {"channel_authorize", byRef(&doChannelAuthorize), Role::USER, NO_CONDITION}, diff --git a/src/ripple/rpc/impl/RPCHandler.cpp b/src/ripple/rpc/impl/RPCHandler.cpp index b04a6f0ed2a..c7984f8309c 100644 --- a/src/ripple/rpc/impl/RPCHandler.cpp +++ b/src/ripple/rpc/impl/RPCHandler.cpp @@ -17,7 +17,9 @@ */ //============================================================================== +#include #include +#include #include #include #include @@ -30,14 +32,17 @@ #include #include #include +#include #include #include +#include #include #include #include #include #include #include +#include namespace ripple { namespace RPC { diff --git a/src/ripple/rpc/impl/RPCHelpers.cpp b/src/ripple/rpc/impl/RPCHelpers.cpp index 6958ce9d9bb..77255a5e6f6 100644 --- a/src/ripple/rpc/impl/RPCHelpers.cpp +++ b/src/ripple/rpc/impl/RPCHelpers.cpp @@ -18,6 +18,7 @@ //============================================================================== #include +#include #include #include #include @@ -31,6 +32,7 @@ #include #include +#include #include namespace ripple { @@ -956,5 +958,119 @@ getAPIVersionNumber(Json::Value const& jv, bool betaEnabled) return requestedVersion.asUInt(); } +std::variant, Json::Value> +getLedgerByContext(RPC::JsonContext& context) +{ + if (context.app.config().reporting()) + return rpcError(rpcREPORTING_UNSUPPORTED); + + auto const hasHash = context.params.isMember(jss::ledger_hash); + auto const hasIndex = 
context.params.isMember(jss::ledger_index); + std::uint32_t ledgerIndex = 0; + + auto& ledgerMaster = context.app.getLedgerMaster(); + LedgerHash ledgerHash; + + if ((hasHash && hasIndex) || !(hasHash || hasIndex)) + { + return RPC::make_param_error( + "Exactly one of ledger_hash and ledger_index can be set."); + } + + context.loadType = Resource::feeHighBurdenRPC; + + if (hasHash) + { + auto const& jsonHash = context.params[jss::ledger_hash]; + if (!jsonHash.isString() || !ledgerHash.parseHex(jsonHash.asString())) + return RPC::invalid_field_error(jss::ledger_hash); + } + else + { + auto const& jsonIndex = context.params[jss::ledger_index]; + if (!jsonIndex.isInt()) + return RPC::invalid_field_error(jss::ledger_index); + + // We need a validated ledger to get the hash from the sequence + if (ledgerMaster.getValidatedLedgerAge() > + RPC::Tuning::maxValidatedLedgerAge) + { + if (context.apiVersion == 1) + return rpcError(rpcNO_CURRENT); + return rpcError(rpcNOT_SYNCED); + } + + ledgerIndex = jsonIndex.asInt(); + auto ledger = ledgerMaster.getValidatedLedger(); + + if (ledgerIndex >= ledger->info().seq) + return RPC::make_param_error("Ledger index too large"); + if (ledgerIndex <= 0) + return RPC::make_param_error("Ledger index too small"); + + auto const j = context.app.journal("RPCHandler"); + // Try to get the hash of the desired ledger from the validated ledger + auto neededHash = hashOfSeq(*ledger, ledgerIndex, j); + if (!neededHash) + { + // Find a ledger more likely to have the hash of the desired ledger + auto const refIndex = getCandidateLedger(ledgerIndex); + auto refHash = hashOfSeq(*ledger, refIndex, j); + assert(refHash); + + ledger = ledgerMaster.getLedgerByHash(*refHash); + if (!ledger) + { + // We don't have the ledger we need to figure out which ledger + // they want. Try to get it. + + if (auto il = context.app.getInboundLedgers().acquire( + *refHash, refIndex, InboundLedger::Reason::GENERIC)) + { + Json::Value jvResult = RPC::make_error( + rpcLGR_NOT_FOUND, + "acquiring ledger containing requested index"); + jvResult[jss::acquiring] = + getJson(LedgerFill(*il, &context)); + return jvResult; + } + + if (auto il = context.app.getInboundLedgers().find(*refHash)) + { + Json::Value jvResult = RPC::make_error( + rpcLGR_NOT_FOUND, + "acquiring ledger containing requested index"); + jvResult[jss::acquiring] = il->getJson(0); + return jvResult; + } + + // Likely the app is shutting down + return Json::Value(); + } + + neededHash = hashOfSeq(*ledger, ledgerIndex, j); + } + assert(neededHash); + ledgerHash = neededHash ? 
*neededHash : beast::zero; // kludge + } + + // Try to get the desired ledger + // Verify all nodes even if we think we have it + auto ledger = context.app.getInboundLedgers().acquire( + ledgerHash, ledgerIndex, InboundLedger::Reason::GENERIC); + + // In standalone mode, accept the ledger from the ledger cache + if (!ledger && context.app.config().standalone()) + ledger = ledgerMaster.getLedgerByHash(ledgerHash); + + if (ledger) + return ledger; + + if (auto il = context.app.getInboundLedgers().find(ledgerHash)) + return il->getJson(0); + + return RPC::make_error( + rpcNOT_READY, "findCreate failed to return an inbound ledger"); +} } // namespace RPC } // namespace ripple diff --git a/src/ripple/rpc/impl/RPCHelpers.h b/src/ripple/rpc/impl/RPCHelpers.h index e3d44c2e730..2aa62f3474a 100644 --- a/src/ripple/rpc/impl/RPCHelpers.h +++ b/src/ripple/rpc/impl/RPCHelpers.h @@ -31,6 +31,7 @@ #include #include #include +#include namespace Json { class Value; @@ -287,6 +288,11 @@ chooseLedgerEntryType(Json::Value const& params); unsigned int getAPIVersionNumber(const Json::Value& value, bool betaEnabled); +/** Return a ledger based on ledger_hash or ledger_index, + or an RPC error */ +std::variant, Json::Value> +getLedgerByContext(RPC::JsonContext& context); + } // namespace RPC } // namespace ripple From 4e724794c57cddcd8b2f0754a4b32592b1ef3342 Mon Sep 17 00:00:00 2001 From: Ikko Ashimine Date: Mon, 11 Jul 2022 21:03:26 +0900 Subject: [PATCH 003/508] Fix typo in Consensus.h (#4179) whenver -> whenever --- src/ripple/consensus/Consensus.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/consensus/Consensus.h b/src/ripple/consensus/Consensus.h index a86144c3893..1d02e0f13b8 100644 --- a/src/ripple/consensus/Consensus.h +++ b/src/ripple/consensus/Consensus.h @@ -295,7 +295,7 @@ class Consensus using Result = ConsensusResult; - // Helper class to ensure adaptor is notified whenver the ConsensusMode + // Helper class to ensure adaptor is notified whenever the ConsensusMode // changes class MonitoredMode { From 3172a816fa5b1803a72ae591ace03be31a35ffec Mon Sep 17 00:00:00 2001 From: Chenna Keshava B S Date: Thu, 7 Jul 2022 13:58:11 -0700 Subject: [PATCH 004/508] Describe resolution for common SOCI-related build errors --- Builds/linux/README.md | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/Builds/linux/README.md b/Builds/linux/README.md index bb5bc761827..001a3705c73 100644 --- a/Builds/linux/README.md +++ b/Builds/linux/README.md @@ -239,3 +239,32 @@ change the `/opt/local` module path above to match your chosen installation pref `rippled` builds a set of unit tests into the server executable. To run these unit tests after building, pass the `--unittest` option to the compiled `rippled` executable. The executable will exit with summary info after running the unit tests. + +## Workaround for a compile error in soci + +Compilation errors have been observed with Apple Clang 13.1.6+ and soci v4.x. soci compiles with the `-Werror` flag which causes warnings to be treated as errors. These warnings pertain to style (not correctness). However, they cause the cmake process to fail. + +Here's an example of how this looks: +``` +.../rippled/.nih_c/unix_makefiles/AppleClang_13.1.6.13160021/Debug/src/soci/src/core/session.cpp:450:66: note: in instantiation of function template specialization 'soci::use' requested here + return prepare << backEnd_->get_column_descriptions_query(), use(table_name, "t"); + ^ +1 error generated. 
+``` + +Please apply the below patch (courtesy of Scott Determan) to remove these errors. `.nih_c/unix_makefiles/AppleClang_13.1.6.13160021/Debug/src/soci/cmake/SociConfig.cmake` file needs to be edited. This file is an example for Mac OS and it might be slightly different for other OS/Architectures. + +``` +diff --git a/cmake/SociConfig.cmake b/cmake/SociConfig.cmake +index 97d907e4..11bcd1f3 100644 +--- a/cmake/SociConfig.cmake ++++ b/cmake/SociConfig.cmake +@@ -58,8 +58,8 @@ if (MSVC) + + else() + +- set(SOCI_GCC_CLANG_COMMON_FLAGS +- "-pedantic -Werror -Wno-error=parentheses -Wall -Wextra -Wpointer-arith -Wcast-align -Wcast-qual -Wfloat-equal -Woverloaded-virtual -Wredundant-decls -Wno-long-long") ++ set(SOCI_GCC_CLANG_COMMON_FLAGS "") ++ # "-pedantic -Werror -Wno-error=parentheses -Wall -Wextra -Wpointer-arith -Wcast-align -Wcast-qual -Wfloat-equal -Woverloaded-virtual -Wredundant-decls -Wno-long-long") +``` From d632f9f6c85e7177bb835fa9903201464629ee05 Mon Sep 17 00:00:00 2001 From: Chenna Keshava B S Date: Fri, 17 Jun 2022 14:33:28 -0700 Subject: [PATCH 005/508] Properly handle incorrect port numbers in parseURL (fixes #4200) --- src/ripple/basics/impl/StringUtilities.cpp | 7 +++++++ src/test/basics/StringUtilities_test.cpp | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/src/ripple/basics/impl/StringUtilities.cpp b/src/ripple/basics/impl/StringUtilities.cpp index 8036cc3bfb0..bebbe1ef80b 100644 --- a/src/ripple/basics/impl/StringUtilities.cpp +++ b/src/ripple/basics/impl/StringUtilities.cpp @@ -90,6 +90,13 @@ parseUrl(parsedURL& pUrl, std::string const& strUrl) if (!port.empty()) { pUrl.port = beast::lexicalCast(port); + + // For inputs larger than 2^16-1 (65535), lexicalCast returns 0. + // parseUrl returns false for such inputs. + if (pUrl.port == 0) + { + return false; + } } pUrl.path = smMatch[6]; diff --git a/src/test/basics/StringUtilities_test.cpp b/src/test/basics/StringUtilities_test.cpp index fc6d54c63ce..6146a3dcd41 100644 --- a/src/test/basics/StringUtilities_test.cpp +++ b/src/test/basics/StringUtilities_test.cpp @@ -289,6 +289,13 @@ class StringUtilities_test : public beast::unit_test::suite BEAST_EXPECT(!parseUrl(pUrl, "nonsense")); BEAST_EXPECT(!parseUrl(pUrl, "://")); BEAST_EXPECT(!parseUrl(pUrl, ":///")); + BEAST_EXPECT( + !parseUrl(pUrl, "scheme://user:pass@domain:65536/abc:321")); + BEAST_EXPECT(!parseUrl(pUrl, "UPPER://domain:23498765/")); + BEAST_EXPECT(!parseUrl(pUrl, "UPPER://domain:0/")); + BEAST_EXPECT(!parseUrl(pUrl, "UPPER://domain:+7/")); + BEAST_EXPECT(!parseUrl(pUrl, "UPPER://domain:-7234/")); + BEAST_EXPECT(!parseUrl(pUrl, "UPPER://domain:@#$56!/")); } { From e46d2bcf276368e5751003351d4e9ccddd232eaa Mon Sep 17 00:00:00 2001 From: Chenna Keshava B S Date: Thu, 14 Jul 2022 16:37:11 -0700 Subject: [PATCH 006/508] Correctly use the configured `network_id` parameter: The existing code properly parses the network_id parameter from the configuration file, but it does not wire the parsed value through to the code that uses it. As a result, the configured `network_id` is ignored.
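Illustration only (not part of this change, and the value below is hypothetical): an operator pinning a server to a custom network would have a stanza such as

```
[network_id]
1025
```

in `rippled.cfg`. With this fix the parsed value is carried on the overlay `Setup` and surfaces in the admin `server_info` response as a `network_id` field (see `info[jss::network_id]` in the diff below), instead of being silently dropped.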
--- src/ripple/app/misc/NetworkOPs.cpp | 7 +++---- src/ripple/overlay/impl/OverlayImpl.h | 4 +--- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index 6ff644ba846..bdc266422e5 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -2323,10 +2323,6 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) if (!app_.config().SERVER_DOMAIN.empty()) info[jss::server_domain] = app_.config().SERVER_DOMAIN; - if (!app_.config().reporting()) - if (auto const netid = app_.overlay().networkID()) - info[jss::network_id] = static_cast(*netid); - info[jss::build_version] = BuildInfo::getVersionString(); info[jss::server_state] = strOperatingMode(admin); @@ -2469,6 +2465,9 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) if (!app_.config().reporting()) { + if (auto const netid = app_.overlay().networkID()) + info[jss::network_id] = static_cast(*netid); + auto const escalationMetrics = app_.getTxQ().getMetrics(*app_.openLedger().current()); diff --git a/src/ripple/overlay/impl/OverlayImpl.h b/src/ripple/overlay/impl/OverlayImpl.h index 5f23b9150ed..2ba7999cbe0 100644 --- a/src/ripple/overlay/impl/OverlayImpl.h +++ b/src/ripple/overlay/impl/OverlayImpl.h @@ -125,8 +125,6 @@ class OverlayImpl : public Overlay, public reduce_relay::SquelchHandler // Peer IDs expecting to receive a last link notification std::set csIDs_; - std::optional networkID_; - reduce_relay::Slots slots_; // Transaction reduce-relay metrics @@ -391,7 +389,7 @@ class OverlayImpl : public Overlay, public reduce_relay::SquelchHandler std::optional networkID() const override { - return networkID_; + return setup_.networkID; } Json::Value From f55913dcee5ed61a2640852467c1e67444e0844e Mon Sep 17 00:00:00 2001 From: seelabs Date: Wed, 1 Jun 2022 15:06:48 -0400 Subject: [PATCH 007/508] Add support for clang's ThreadSafetyAnalysis --- Builds/CMake/RippledCore.cmake | 1 + CMakeLists.txt | 6 +++ src/ripple/basics/ThreadSafetyAnalysis.h | 63 ++++++++++++++++++++++++ 3 files changed, 70 insertions(+) create mode 100644 src/ripple/basics/ThreadSafetyAnalysis.h diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index f8477b68471..3930788071f 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -157,6 +157,7 @@ install ( src/ripple/basics/safe_cast.h src/ripple/basics/Slice.h src/ripple/basics/StringUtilities.h + src/ripple/basics/ThreadSafetyAnalysis.h src/ripple/basics/ToString.h src/ripple/basics/UnorderedContainers.h src/ripple/basics/XRPAmount.h diff --git a/CMakeLists.txt b/CMakeLists.txt index c5e750d7b41..c0c0fff9444 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -21,6 +21,12 @@ if(Git_FOUND) endif() endif() #git +if (thread_safety_analysis) + add_compile_options(-Wthread-safety -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS -DRIPPLE_ENABLE_THREAD_SAFETY_ANNOTATIONS) + add_compile_options("-stdlib=libc++") + add_link_options("-stdlib=libc++") +endif() + list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake") list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/deps") diff --git a/src/ripple/basics/ThreadSafetyAnalysis.h b/src/ripple/basics/ThreadSafetyAnalysis.h new file mode 100644 index 00000000000..b1889d5b4c6 --- /dev/null +++ b/src/ripple/basics/ThreadSafetyAnalysis.h @@ -0,0 +1,63 @@ +#ifndef RIPPLE_BASICS_THREAD_SAFTY_ANALYSIS_H_INCLUDED +#define 
RIPPLE_BASICS_THREAD_SAFTY_ANALYSIS_H_INCLUDED + +#ifdef RIPPLE_ENABLE_THREAD_SAFETY_ANNOTATIONS +#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) +#else +#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op +#endif + +#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x)) + +#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) + +#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) + +#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x)) + +#define ACQUIRED_BEFORE(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) + +#define ACQUIRED_AFTER(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) + +#define REQUIRES(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__)) + +#define REQUIRES_SHARED(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__)) + +#define ACQUIRE(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__)) + +#define ACQUIRE_SHARED(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__)) + +#define RELEASE(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__)) + +#define RELEASE_SHARED(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__)) + +#define RELEASE_GENERIC(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(release_generic_capability(__VA_ARGS__)) + +#define TRY_ACQUIRE(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__)) + +#define TRY_ACQUIRE_SHARED(...) \ + THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__)) + +#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) + +#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x)) + +#define ASSERT_SHARED_CAPABILITY(x) \ + THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x)) + +#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) + +#define NO_THREAD_SAFETY_ANALYSIS \ + THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) + +#endif From 8e6a0d418ce306b62b757dfa7b646d183c1830a4 Mon Sep 17 00:00:00 2001 From: seelabs Date: Wed, 1 Jun 2022 11:58:27 -0400 Subject: [PATCH 008/508] Fix race conditions in shard: ThreadSafetyAnalysis was used to identify race conditions in this file. This analysis was motivated by a (rare) crash while running unit tests.
Add locks to Shard flagged by ThreadSafetyAnalysis --- src/ripple/nodestore/impl/Shard.cpp | 20 ++++++++++++++++---- src/ripple/nodestore/impl/Shard.h | 26 +++++++++++++++----------- 2 files changed, 31 insertions(+), 15 deletions(-) diff --git a/src/ripple/nodestore/impl/Shard.cpp b/src/ripple/nodestore/impl/Shard.cpp index 911eedef6b5..030fbf4aa12 100644 --- a/src/ripple/nodestore/impl/Shard.cpp +++ b/src/ripple/nodestore/impl/Shard.cpp @@ -223,6 +223,7 @@ Shard::storeNodeObject(std::shared_ptr const& nodeObject) try { + std::lock_guard lock(mutex_); backend_->store(nodeObject); } catch (std::exception const& e) @@ -249,6 +250,7 @@ Shard::fetchNodeObject(uint256 const& hash, FetchReport& fetchReport) Status status; try { + std::lock_guard lock(mutex_); status = backend_->fetch(hash.data(), &nodeObject); } catch (std::exception const& e) @@ -331,6 +333,7 @@ Shard::storeLedger( try { + std::lock_guard lock(mutex_); backend_->storeBatch(batch); } catch (std::exception const& e) @@ -538,6 +541,7 @@ Shard::getWriteLoad() auto const scopedCount{makeBackendCount()}; if (!scopedCount) return 0; + std::lock_guard lock(mutex_); return backend_->getWriteLoad(); } @@ -572,6 +576,8 @@ Shard::finalize(bool writeSQLite, std::optional const& referenceHash) try { + std::lock_guard lock(mutex_); + state_ = ShardState::finalizing; progress_ = 0; @@ -759,8 +765,11 @@ Shard::finalize(bool writeSQLite, std::optional const& referenceHash) try { - // Store final key's value, may already be stored - backend_->store(nodeObject); + { + // Store final key's value, may already be stored + std::lock_guard lock(mutex_); + backend_->store(nodeObject); + } // Do not allow all other threads work with the shard busy_ = true; @@ -819,7 +828,7 @@ Shard::open(std::lock_guard const& lock) using namespace boost::filesystem; Config const& config{app_.config()}; auto preexist{false}; - auto fail = [this, &preexist](std::string const& msg) { + auto fail = [this, &preexist](std::string const& msg) REQUIRES(mutex_) { backend_->close(); lgrSQLiteDB_.reset(); txSQLiteDB_.reset(); @@ -837,7 +846,7 @@ Shard::open(std::lock_guard const& lock) } return false; }; - auto createAcquireInfo = [this, &config]() { + auto createAcquireInfo = [this, &config]() REQUIRES(mutex_) { DatabaseCon::Setup setup; setup.startUp = config.standalone() ? 
config.LOAD : config.START_UP; setup.standAlone = config.standalone(); @@ -1024,6 +1033,8 @@ Shard::storeSQLite(std::shared_ptr const& ledger) try { + std::lock_guard lock(mutex_); + auto res = updateLedgerDBs( *txSQLiteDB_->checkoutDb(), *lgrSQLiteDB_->checkoutDb(), @@ -1186,6 +1197,7 @@ Shard::verifyFetch(uint256 const& hash) const try { + std::lock_guard lock(mutex_); switch (backend_->fetch(hash.data(), &nodeObject)) { case ok: diff --git a/src/ripple/nodestore/impl/Shard.h b/src/ripple/nodestore/impl/Shard.h index b7516e5f1e6..210bdd54a60 100644 --- a/src/ripple/nodestore/impl/Shard.h +++ b/src/ripple/nodestore/impl/Shard.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -210,6 +211,7 @@ class Shard final std::string getStoredSeqs() { + std::lock_guard lock(mutex_); if (!acquireInfo_) return ""; @@ -316,29 +318,30 @@ class Shard final boost::filesystem::path const dir_; // Storage space utilized by the shard - std::uint64_t fileSz_{0}; + GUARDED_BY(mutex_) std::uint64_t fileSz_{0}; // Number of file descriptors required by the shard - std::uint32_t fdRequired_{0}; + GUARDED_BY(mutex_) std::uint32_t fdRequired_{0}; // NuDB key/value store for node objects - std::unique_ptr backend_; + std::unique_ptr backend_ GUARDED_BY(mutex_); std::atomic backendCount_{0}; // Ledger SQLite database used for indexes - std::unique_ptr lgrSQLiteDB_; + std::unique_ptr lgrSQLiteDB_ GUARDED_BY(mutex_); // Transaction SQLite database used for indexes - std::unique_ptr txSQLiteDB_; + std::unique_ptr txSQLiteDB_ GUARDED_BY(mutex_); // Tracking information used only when acquiring a shard from the network. // If the shard is finalized, this member will be null. - std::unique_ptr acquireInfo_; + std::unique_ptr acquireInfo_ GUARDED_BY(mutex_); + ; // Older shard without an acquire database or final key // Eventually there will be no need for this and should be removed - bool legacy_{false}; + GUARDED_BY(mutex_) bool legacy_{false}; // Determines if the shard needs to stop processing for shutdown std::atomic stop_{false}; @@ -356,16 +359,17 @@ class Shard final std::atomic removeOnDestroy_{false}; // The time of the last access of a shard with a finalized state - std::chrono::steady_clock::time_point lastAccess_; + std::chrono::steady_clock::time_point lastAccess_ GUARDED_BY(mutex_); + ; // Open shard databases [[nodiscard]] bool - open(std::lock_guard const& lock); + open(std::lock_guard const& lock) REQUIRES(mutex_); // Open/Create SQLite databases // Lock over mutex_ required [[nodiscard]] bool - initSQLite(std::lock_guard const&); + initSQLite(std::lock_guard const&) REQUIRES(mutex_); // Write SQLite entries for this ledger [[nodiscard]] bool @@ -374,7 +378,7 @@ class Shard final // Set storage and file descriptor usage stats // Lock over mutex_ required void - setFileStats(std::lock_guard const&); + setFileStats(std::lock_guard const&) REQUIRES(mutex_); // Verify this ledger by walking its SHAMaps and verifying its Merkle trees // Every node object verified will be stored in the deterministic shard From 723733a7788268f46b49d8bb4bde1093535f93db Mon Sep 17 00:00:00 2001 From: seelabs Date: Fri, 27 May 2022 12:28:57 -0400 Subject: [PATCH 009/508] Catch missing node error when rotating database: While there should never be a missing node when copying the SHAMap, rippled should not terminate when there's an error rotating the database. This patch aborts the database rotation rather than aborting rippled. 
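As a rough sketch of the intended control flow (illustration only; `waitForNextValidatedLedger`, `copyStateMap`, `rotateBackends`, and `log` are hypothetical stand-ins, not rippled APIs):

```cpp
// Sketch only: a SHAMapMissingNode thrown while copying the state map skips
// the current online_delete rotation pass; the server keeps running and a
// later pass will try again.
while (auto const ledger = waitForNextValidatedLedger())
{
    try
    {
        copyStateMap(*ledger);  // stands in for the visitNodes() walk below
    }
    catch (SHAMapMissingNode const& e)
    {
        log.error() << "Missing node while copying ledger before rotate: "
                    << e.what();
        continue;  // abort only this rotation attempt, not the server
    }
    rotateBackends();  // stands in for the actual backend rotation
}
```

The actual change below follows this shape, using `continue` inside `SHAMapStoreImp::run()`.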
--- src/ripple/app/misc/SHAMapStoreImp.cpp | 30 +++++++++++++++++++------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/src/ripple/app/misc/SHAMapStoreImp.cpp b/src/ripple/app/misc/SHAMapStoreImp.cpp index 70519fc92ce..af568d0274e 100644 --- a/src/ripple/app/misc/SHAMapStoreImp.cpp +++ b/src/ripple/app/misc/SHAMapStoreImp.cpp @@ -17,17 +17,18 @@ */ //============================================================================== +#include + #include #include -#include #include #include #include #include #include -#include - #include +#include +#include #include @@ -363,11 +364,24 @@ SHAMapStoreImp::run() JLOG(journal_.debug()) << "copying ledger " << validatedSeq; std::uint64_t nodeCount = 0; - validatedLedger->stateMap().snapShot(false)->visitNodes(std::bind( - &SHAMapStoreImp::copyNode, - this, - std::ref(nodeCount), - std::placeholders::_1)); + + try + { + validatedLedger->stateMap().snapShot(false)->visitNodes( + std::bind( + &SHAMapStoreImp::copyNode, + this, + std::ref(nodeCount), + std::placeholders::_1)); + } + catch (SHAMapMissingNode const& e) + { + JLOG(journal_.error()) + << "Missing node while copying ledger before rotate: " + << e.what(); + continue; + } + if (stopping()) return; // Only log if we completed without a "health" abort From 1f75ba23ee256a81d190e7fa47582e6f1ea2400f Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Fri, 27 May 2022 20:12:11 -0400 Subject: [PATCH 010/508] Fix bitwise or on boolean operands warning / error --- src/ripple/app/tx/impl/CreateOffer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/app/tx/impl/CreateOffer.cpp b/src/ripple/app/tx/impl/CreateOffer.cpp index 4ec41f2b358..9bb7532852f 100644 --- a/src/ripple/app/tx/impl/CreateOffer.cpp +++ b/src/ripple/app/tx/impl/CreateOffer.cpp @@ -728,7 +728,7 @@ CreateOffer::flowCross( // additional path with XRP as the intermediate between two books. // This second path we have to build ourselves. STPathSet paths; - if (!takerAmount.in.native() & !takerAmount.out.native()) + if (!takerAmount.in.native() && !takerAmount.out.native()) { STPath path; path.emplace_back(std::nullopt, xrpCurrency(), std::nullopt); From 18d437284e695d0e37dbf3938704ab3c1bf7fe31 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Wed, 25 May 2022 18:51:44 -0400 Subject: [PATCH 011/508] Lower the message level for missing optional doc components --- Builds/CMake/RippledDocs.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Builds/CMake/RippledDocs.cmake b/Builds/CMake/RippledDocs.cmake index 7d9ef90fd66..6cb8f730dd8 100644 --- a/Builds/CMake/RippledDocs.cmake +++ b/Builds/CMake/RippledDocs.cmake @@ -31,7 +31,7 @@ if (tests) # find_path sets a CACHE variable, so don't try using a "local" variable. 
find_path (${variable} "${name}" ${ARGN}) if (NOT ${variable}) - message (WARNING "could not find ${name}") + message (NOTICE "could not find ${name}") else () message (STATUS "found ${name}: ${${variable}}/${name}") endif () From ee60b16b3a62fda2557be522d84d915ffd272437 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Tue, 24 May 2022 17:44:21 -0400 Subject: [PATCH 012/508] Lower log level of "addPathsForType" log message (fixes #4177) --- src/ripple/app/paths/Pathfinder.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ripple/app/paths/Pathfinder.cpp b/src/ripple/app/paths/Pathfinder.cpp index 71a4afa7563..556622ee7bf 100644 --- a/src/ripple/app/paths/Pathfinder.cpp +++ b/src/ripple/app/paths/Pathfinder.cpp @@ -796,8 +796,8 @@ Pathfinder::addPathsForType( PathType const& pathType, std::function const& continueCallback) { - JLOG(j_.warn()) << "addPathsForType " - << CollectionAndDelimiter(pathType, ", "); + JLOG(j_.debug()) << "addPathsForType " + << CollectionAndDelimiter(pathType, ", "); // See if the set of paths for this type already exists. auto it = mPaths.find(pathType); if (it != mPaths.end()) From 0839a202c91af50a444c1ef348adc90f8afa1dd2 Mon Sep 17 00:00:00 2001 From: Scott Schurr Date: Fri, 13 May 2022 17:06:14 -0700 Subject: [PATCH 013/508] Reduce console noise coming from unit tests: A few unit tests have historically generated a lot of noise to the console from log writes. This noise was not useful and made it harder to locate actual test failures. By changing the log level of these tests from - severities::kError to - severities::kDisabled it was possible to remove that noise coming from the logs. --- src/test/app/LedgerLoad_test.cpp | 39 +++++++++++++------ src/test/app/LedgerReplay_test.cpp | 7 +++- src/test/app/NFTokenDir_test.cpp | 7 +++- src/test/app/ValidatorKeys_test.cpp | 12 +++++- src/test/app/ValidatorList_test.cpp | 6 ++- src/test/app/ValidatorSite_test.cpp | 8 +--- src/test/basics/PerfLog_test.cpp | 9 ++++- .../beast/beast_io_latency_probe_test.cpp | 3 -- src/test/core/ClosureCounter_test.cpp | 14 +++++-- src/test/core/Config_test.cpp | 5 +-- src/test/jtx/Env_test.cpp | 12 ++++-- src/test/ledger/View_test.cpp | 32 +++++++++++++-- src/test/overlay/short_read_test.cpp | 2 - src/test/rpc/LedgerRPC_test.cpp | 3 -- src/test/rpc/NodeToShardRPC_test.cpp | 3 +- src/test/rpc/ShardArchiveHandler_test.cpp | 9 +++-- src/test/server/Server_test.cpp | 1 - src/test/shamap/SHAMapSync_test.cpp | 1 - src/test/unit_test/FileDirGuard.h | 3 -- 19 files changed, 116 insertions(+), 60 deletions(-) diff --git a/src/test/app/LedgerLoad_test.cpp b/src/test/app/LedgerLoad_test.cpp index 7175df34580..d78d25ea05a 100644 --- a/src/test/app/LedgerLoad_test.cpp +++ b/src/test/app/LedgerLoad_test.cpp @@ -21,11 +21,12 @@ #include #include #include +#include +#include + #include #include #include -#include -#include namespace ripple { @@ -111,7 +112,9 @@ class LedgerLoad_test : public beast::unit_test::suite Env env( *this, envconfig( - ledgerConfig, sd.dbPath, sd.ledgerFile, Config::LOAD_FILE)); + ledgerConfig, sd.dbPath, sd.ledgerFile, Config::LOAD_FILE), + nullptr, + beast::severities::kDisabled); auto jrb = env.rpc("ledger", "current", "full")[jss::result]; BEAST_EXPECT( sd.ledger[jss::ledger][jss::accountState].size() == @@ -129,7 +132,9 @@ class LedgerLoad_test : public beast::unit_test::suite except([&] { Env env( *this, - envconfig(ledgerConfig, sd.dbPath, "", Config::LOAD_FILE)); + envconfig(ledgerConfig, sd.dbPath, "", Config::LOAD_FILE), + nullptr, + 
beast::severities::kDisabled); }); // file does not exist @@ -137,10 +142,9 @@ class LedgerLoad_test : public beast::unit_test::suite Env env( *this, envconfig( - ledgerConfig, - sd.dbPath, - "badfile.json", - Config::LOAD_FILE)); + ledgerConfig, sd.dbPath, "badfile.json", Config::LOAD_FILE), + nullptr, + beast::severities::kDisabled); }); // make a corrupted version of the ledger file (last 10 bytes removed). @@ -168,7 +172,9 @@ class LedgerLoad_test : public beast::unit_test::suite ledgerConfig, sd.dbPath, ledgerFileCorrupt.string(), - Config::LOAD_FILE)); + Config::LOAD_FILE), + nullptr, + beast::severities::kDisabled); }); } @@ -183,7 +189,9 @@ class LedgerLoad_test : public beast::unit_test::suite boost::erase_all(ledgerHash, "\""); Env env( *this, - envconfig(ledgerConfig, sd.dbPath, ledgerHash, Config::LOAD)); + envconfig(ledgerConfig, sd.dbPath, ledgerHash, Config::LOAD), + nullptr, + beast::severities::kDisabled); auto jrb = env.rpc("ledger", "current", "full")[jss::result]; BEAST_EXPECT(jrb[jss::ledger][jss::accountState].size() == 97); BEAST_EXPECT( @@ -199,7 +207,10 @@ class LedgerLoad_test : public beast::unit_test::suite // create a new env with the ledger "latest" specified for startup Env env( - *this, envconfig(ledgerConfig, sd.dbPath, "latest", Config::LOAD)); + *this, + envconfig(ledgerConfig, sd.dbPath, "latest", Config::LOAD), + nullptr, + beast::severities::kDisabled); auto jrb = env.rpc("ledger", "current", "full")[jss::result]; BEAST_EXPECT( sd.ledger[jss::ledger][jss::accountState].size() == @@ -213,7 +224,11 @@ class LedgerLoad_test : public beast::unit_test::suite using namespace test::jtx; // create a new env with specific ledger index at startup - Env env(*this, envconfig(ledgerConfig, sd.dbPath, "43", Config::LOAD)); + Env env( + *this, + envconfig(ledgerConfig, sd.dbPath, "43", Config::LOAD), + nullptr, + beast::severities::kDisabled); auto jrb = env.rpc("ledger", "current", "full")[jss::result]; BEAST_EXPECT( sd.ledger[jss::ledger][jss::accountState].size() == diff --git a/src/test/app/LedgerReplay_test.cpp b/src/test/app/LedgerReplay_test.cpp index 549495d40b3..cff94ee045b 100644 --- a/src/test/app/LedgerReplay_test.cpp +++ b/src/test/app/LedgerReplay_test.cpp @@ -465,7 +465,7 @@ struct LedgerServer assert(param.initLedgers > 0); createAccounts(param.initAccounts); createLedgerHistory(); - app.logs().threshold(beast::severities::Severity::kWarning); + app.logs().threshold(beast::severities::kWarning); } /** @@ -567,7 +567,10 @@ class LedgerReplayClient PeerSetBehavior behavior = PeerSetBehavior::Good, InboundLedgersBehavior inboundBhvr = InboundLedgersBehavior::Good, PeerFeature peerFeature = PeerFeature::LedgerReplayEnabled) - : env(suite, jtx::envconfig(jtx::port_increment, 3)) + : env(suite, + jtx::envconfig(jtx::port_increment, 3), + nullptr, + beast::severities::kDisabled) , app(env.app()) , ledgerMaster(env.app().getLedgerMaster()) , inboundLedgers( diff --git a/src/test/app/NFTokenDir_test.cpp b/src/test/app/NFTokenDir_test.cpp index ae7eeeaf603..8f8d0f581f0 100644 --- a/src/test/app/NFTokenDir_test.cpp +++ b/src/test/app/NFTokenDir_test.cpp @@ -388,7 +388,12 @@ class NFTokenDir_test : public beast::unit_test::suite auto exerciseFixNFTokenDirV1 = [this, &features](std::initializer_list seeds) { - Env env{*this, features}; + Env env{ + *this, + envconfig(), + features, + nullptr, + beast::severities::kDisabled}; // Eventually all of the NFTokens will be owned by buyer. 
Account const buyer{"buyer"}; diff --git a/src/test/app/ValidatorKeys_test.cpp b/src/test/app/ValidatorKeys_test.cpp index 18061c7e4aa..3943fd85881 100644 --- a/src/test/app/ValidatorKeys_test.cpp +++ b/src/test/app/ValidatorKeys_test.cpp @@ -23,8 +23,9 @@ #include #include #include +#include + #include -#include namespace ripple { namespace test { @@ -75,7 +76,14 @@ class ValidatorKeys_test : public beast::unit_test::suite void run() override { - SuiteJournal journal("ValidatorKeys_test", *this); + // We're only using Env for its Journal. That Journal gives better + // coverage in unit tests. + test::jtx::Env env{ + *this, + test::jtx::envconfig(), + nullptr, + beast::severities::kDisabled}; + beast::Journal journal{env.app().journal("ValidatorKeys_test")}; // Keys/ID when using [validation_seed] SecretKey const seedSecretKey = diff --git a/src/test/app/ValidatorList_test.cpp b/src/test/app/ValidatorList_test.cpp index 860b8fc1701..fead5563f21 100644 --- a/src/test/app/ValidatorList_test.cpp +++ b/src/test/app/ValidatorList_test.cpp @@ -29,9 +29,10 @@ #include #include #include -#include #include +#include + namespace ripple { namespace test { @@ -217,7 +218,8 @@ class ValidatorList_test : public beast::unit_test::suite { testcase("Config Load"); - jtx::Env env(*this); + jtx::Env env( + *this, jtx::envconfig(), nullptr, beast::severities::kDisabled); auto& app = env.app(); PublicKey emptyLocalKey; std::vector const emptyCfgKeys; diff --git a/src/test/app/ValidatorSite_test.cpp b/src/test/app/ValidatorSite_test.cpp index db9f5c9776f..8ec86feadce 100644 --- a/src/test/app/ValidatorSite_test.cpp +++ b/src/test/app/ValidatorSite_test.cpp @@ -69,7 +69,7 @@ class ValidatorSite_test : public beast::unit_test::suite using namespace jtx; - Env env(*this); + Env env(*this, envconfig(), nullptr, beast::severities::kDisabled); auto trustedSites = std::make_unique(env.app(), env.journal); @@ -282,9 +282,6 @@ class ValidatorSite_test : public beast::unit_test::suite if (u.cfg.failFetch) { using namespace std::chrono; - log << " -- Msg: " - << myStatus[jss::last_refresh_message].asString() - << std::endl; std::stringstream nextRefreshStr{ myStatus[jss::next_refresh_time].asString()}; system_clock::time_point nextRefresh; @@ -357,9 +354,6 @@ class ValidatorSite_test : public beast::unit_test::suite sink.messages().str().find(u.expectMsg) != std::string::npos, sink.messages().str()); - log << " -- Msg: " - << myStatus[jss::last_refresh_message].asString() - << std::endl; } } } diff --git a/src/test/basics/PerfLog_test.cpp b/src/test/basics/PerfLog_test.cpp index a79dded90e6..79944e0ed71 100644 --- a/src/test/basics/PerfLog_test.cpp +++ b/src/test/basics/PerfLog_test.cpp @@ -24,12 +24,13 @@ #include #include #include +#include + #include #include #include #include #include -#include #include //------------------------------------------------------------------------------ @@ -44,7 +45,11 @@ class PerfLog_test : public beast::unit_test::suite // We're only using Env for its Journal. That Journal gives better // coverage in unit tests. 
- test::jtx::Env env_{*this}; + test::jtx::Env env_{ + *this, + test::jtx::envconfig(), + nullptr, + beast::severities::kDisabled}; beast::Journal j_{env_.app().journal("PerfLog_test")}; struct Fixture diff --git a/src/test/beast/beast_io_latency_probe_test.cpp b/src/test/beast/beast_io_latency_probe_test.cpp index 9ae6a1341c1..b2bf67b10b9 100644 --- a/src/test/beast/beast_io_latency_probe_test.cpp +++ b/src/test/beast/beast_io_latency_probe_test.cpp @@ -200,9 +200,6 @@ class io_latency_probe_test : public beast::unit_test::suite, duration_cast(probe_duration).count()) / static_cast(tt.getMean()); #endif - log << "expected_probe_count_min: " << expected_probe_count_min << "\n"; - log << "expected_probe_count_max: " << expected_probe_count_max << "\n"; - test_sampler io_probe{interval, get_io_service()}; io_probe.start(); MyTimer timer{get_io_service(), probe_duration}; diff --git a/src/test/core/ClosureCounter_test.cpp b/src/test/core/ClosureCounter_test.cpp index 478816a8958..c4199a0b06e 100644 --- a/src/test/core/ClosureCounter_test.cpp +++ b/src/test/core/ClosureCounter_test.cpp @@ -19,9 +19,10 @@ #include #include +#include + #include #include -#include #include namespace ripple { @@ -31,9 +32,14 @@ namespace test { class ClosureCounter_test : public beast::unit_test::suite { - // We're only using Env for its Journal. - jtx::Env env{*this}; - beast::Journal j{env.app().journal("ClosureCounter_test")}; + // We're only using Env for its Journal. That Journal gives better + // coverage in unit tests. + test::jtx::Env env_{ + *this, + jtx::envconfig(), + nullptr, + beast::severities::kDisabled}; + beast::Journal j{env_.app().journal("ClosureCounter_test")}; void testConstruction() diff --git a/src/test/core/Config_test.cpp b/src/test/core/Config_test.cpp index 45afaf6cb31..da29fafaca2 100644 --- a/src/test/core/Config_test.cpp +++ b/src/test/core/Config_test.cpp @@ -154,7 +154,7 @@ class RippledCfgGuard : public ripple::test::detail::FileDirGuard rmDataDir_ = !exists(dataDir_); config_.setup( file_.string(), - /*bQuiet*/ true, + /* bQuiet */ true, /* bSilent */ false, /* bStandalone */ false); } @@ -190,9 +190,6 @@ class RippledCfgGuard : public ripple::test::detail::FileDirGuard using namespace boost::filesystem; if (rmDataDir_) rmDir(dataDir_); - else - test_.log << "Skipping rm dir: " << dataDir_.string() - << std::endl; } catch (std::exception& e) { diff --git a/src/test/jtx/Env_test.cpp b/src/test/jtx/Env_test.cpp index b1a1a81253c..6f09f49ed5d 100644 --- a/src/test/jtx/Env_test.cpp +++ b/src/test/jtx/Env_test.cpp @@ -888,10 +888,14 @@ class Env_test : public beast::unit_test::suite testExceptionalShutdown() { except([this] { - jtx::Env env{*this, jtx::envconfig([](std::unique_ptr cfg) { - (*cfg).deprecatedClearSection("port_rpc"); - return cfg; - })}; + jtx::Env env{ + *this, + jtx::envconfig([](std::unique_ptr cfg) { + (*cfg).deprecatedClearSection("port_rpc"); + return cfg; + }), + nullptr, + beast::severities::kDisabled}; }); pass(); } diff --git a/src/test/ledger/View_test.cpp b/src/test/ledger/View_test.cpp index 45c8f007e0c..bbb1eec8fa5 100644 --- a/src/test/ledger/View_test.cpp +++ b/src/test/ledger/View_test.cpp @@ -130,6 +130,8 @@ class View_test : public beast::unit_test::suite void testLedger() { + testcase("Ledger"); + using namespace jtx; Env env(*this); Config config; @@ -165,6 +167,8 @@ class View_test : public beast::unit_test::suite void testMeta() { + testcase("Meta"); + using namespace jtx; Env env(*this); wipe(env.app().openLedger()); @@ -196,6 +200,8 @@ class 
View_test : public beast::unit_test::suite void testMetaSucc() { + testcase("Meta succ"); + using namespace jtx; Env env(*this); wipe(env.app().openLedger()); @@ -260,6 +266,8 @@ class View_test : public beast::unit_test::suite void testStacked() { + testcase("Stacked"); + using namespace jtx; Env env(*this); wipe(env.app().openLedger()); @@ -325,6 +333,8 @@ class View_test : public beast::unit_test::suite void testContext() { + testcase("Context"); + using namespace jtx; using namespace std::chrono; { @@ -387,6 +397,8 @@ class View_test : public beast::unit_test::suite void testUpperAndLowerBound() { + testcase("Upper and lower bound"); + using namespace jtx; Env env(*this); Config config; @@ -654,6 +666,8 @@ class View_test : public beast::unit_test::suite void testSles() { + testcase("Sles"); + using namespace jtx; Env env(*this); Config config; @@ -786,6 +800,8 @@ class View_test : public beast::unit_test::suite void testFlags() { + testcase("Flags"); + using namespace jtx; Env env(*this); @@ -949,6 +965,8 @@ class View_test : public beast::unit_test::suite void testTransferRate() { + testcase("Transfer rate"); + using namespace jtx; Env env(*this); @@ -975,12 +993,14 @@ class View_test : public beast::unit_test::suite // construct and manage two different Env instances at the same // time. So we can use two Env instances to produce mutually // incompatible ledgers. + testcase("Are compatible"); + using namespace jtx; auto const alice = Account("alice"); auto const bob = Account("bob"); // The first Env. - Env eA(*this); + Env eA(*this, envconfig(), nullptr, beast::severities::kDisabled); eA.fund(XRP(10000), alice); eA.close(); @@ -990,9 +1010,13 @@ class View_test : public beast::unit_test::suite eA.close(); auto const rdViewA4 = eA.closed(); - // The two Env's can't share the same ports, so modifiy the config + // The two Env's can't share the same ports, so modify the config // of the second Env to use higher port numbers - Env eB{*this, envconfig(port_increment, 3)}; + Env eB{ + *this, + envconfig(port_increment, 3), + nullptr, + beast::severities::kDisabled}; // Make ledgers that are incompatible with the first ledgers. Note // that bob is funded before alice. 
@@ -1029,6 +1053,8 @@ class View_test : public beast::unit_test::suite void testRegressions() { + testcase("Regressions"); + using namespace jtx; // Create a ledger with 1 item, put a diff --git a/src/test/overlay/short_read_test.cpp b/src/test/overlay/short_read_test.cpp index dd649bfd152..434b4100852 100644 --- a/src/test/overlay/short_read_test.cpp +++ b/src/test/overlay/short_read_test.cpp @@ -195,8 +195,6 @@ class short_read_test : public beast::unit_test::suite { acceptor_.listen(); server_.endpoint_ = acceptor_.local_endpoint(); - test_.log << "[server] up on port: " << server_.endpoint_.port() - << std::endl; } void diff --git a/src/test/rpc/LedgerRPC_test.cpp b/src/test/rpc/LedgerRPC_test.cpp index 1692b980673..2580c4bfef1 100644 --- a/src/test/rpc/LedgerRPC_test.cpp +++ b/src/test/rpc/LedgerRPC_test.cpp @@ -1250,13 +1250,10 @@ class LedgerRPC_test : public beast::unit_test::suite // no amendments env.fund(XRP(10000), "alice"); env.close(); - log << env.closed()->info().hash; env.fund(XRP(10000), "bob"); env.close(); - log << env.closed()->info().hash; env.fund(XRP(10000), "jim"); env.close(); - log << env.closed()->info().hash; env.fund(XRP(10000), "jill"); { diff --git a/src/test/rpc/NodeToShardRPC_test.cpp b/src/test/rpc/NodeToShardRPC_test.cpp index edfaf6c2073..07d8d8953dd 100644 --- a/src/test/rpc/NodeToShardRPC_test.cpp +++ b/src/test/rpc/NodeToShardRPC_test.cpp @@ -276,7 +276,8 @@ class NodeToShardRPC_test : public beast::unit_test::suite sectionNode.set("ledgers_per_shard", "256"); c->setupControl(true, true, true); - return jtx::Env(*this, std::move(c)); + return jtx::Env( + *this, std::move(c), nullptr, beast::severities::kDisabled); }(); std::uint8_t const numberOfShards = 10; diff --git a/src/test/rpc/ShardArchiveHandler_test.cpp b/src/test/rpc/ShardArchiveHandler_test.cpp index 37c9d016843..ee0bec1eadf 100644 --- a/src/test/rpc/ShardArchiveHandler_test.cpp +++ b/src/test/rpc/ShardArchiveHandler_test.cpp @@ -173,7 +173,8 @@ class ShardArchiveHandler_test : public beast::unit_test::suite } c->setupControl(true, true, true); - jtx::Env env(*this, std::move(c)); + jtx::Env env( + *this, std::move(c), nullptr, beast::severities::kDisabled); std::uint8_t const numberOfDownloads = 10; @@ -276,7 +277,8 @@ class ShardArchiveHandler_test : public beast::unit_test::suite } c->setupControl(true, true, true); - jtx::Env env(*this, std::move(c)); + jtx::Env env( + *this, std::move(c), nullptr, beast::severities::kDisabled); std::uint8_t const numberOfDownloads = 10; @@ -380,7 +382,8 @@ class ShardArchiveHandler_test : public beast::unit_test::suite } c->setupControl(true, true, true); - jtx::Env env(*this, std::move(c)); + jtx::Env env( + *this, std::move(c), nullptr, beast::severities::kDisabled); std::uint8_t const numberOfDownloads = 10; // Create some ledgers so that the ShardArchiveHandler diff --git a/src/test/server/Server_test.cpp b/src/test/server/Server_test.cpp index 35f9149cadc..b5eb71f361c 100644 --- a/src/test/server/Server_test.cpp +++ b/src/test/server/Server_test.cpp @@ -299,7 +299,6 @@ class Server_test : public beast::unit_test::suite serverPort.back().port = 0; serverPort.back().protocol.insert("http"); auto eps = s->ports(serverPort); - log << "server listening on port " << eps[0].port() << std::endl; test_request(eps[0]); test_keepalive(eps[0]); // s->close(); diff --git a/src/test/shamap/SHAMapSync_test.cpp b/src/test/shamap/SHAMapSync_test.cpp index ba32f6e80dc..6b2648a96c9 100644 --- a/src/test/shamap/SHAMapSync_test.cpp +++ 
b/src/test/shamap/SHAMapSync_test.cpp @@ -184,7 +184,6 @@ class SHAMapSync_test : public beast::unit_test::suite BEAST_EXPECT(source.deepCompare(destination)); - log << "Checking destination invariants..." << std::endl; destination.invariants(); } }; diff --git a/src/test/unit_test/FileDirGuard.h b/src/test/unit_test/FileDirGuard.h index 6337365f007..3c79fb11b8e 100644 --- a/src/test/unit_test/FileDirGuard.h +++ b/src/test/unit_test/FileDirGuard.h @@ -86,9 +86,6 @@ class DirGuard if (rmSubDir_) rmDir(subDir_); - else - test_.log << "Skipping rm dir: " << subDir_.string() - << std::endl; } catch (std::exception& e) { From 8266d9d598d19f05e1155956b30ca443c27e119e Mon Sep 17 00:00:00 2001 From: Scott Schurr Date: Wed, 22 Jun 2022 17:15:23 -0700 Subject: [PATCH 014/508] Correct a technical flaw with NFT offers: The existing code would, incorrectly, allow negative amounts in offers for non-fungible tokens. Such offers would be handled very differently depending on the context: a direct offer would fail with an error code indicating an internal processing error, whereas brokered offers would improperly succeed. This commit introduces the `fixNFTokenNegOffer` amendment that detects such offers during creation and returns an appropriate error code. The commit also extends the existing code to allow for buy offers that contain a `Destination` field, so that a specific broker can be set in the offer. --- src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp | 23 ++ src/ripple/app/tx/impl/NFTokenCreateOffer.cpp | 17 +- src/ripple/protocol/Feature.h | 3 +- src/ripple/protocol/impl/Feature.cpp | 3 +- src/test/app/NFToken_test.cpp | 350 +++++++++++++++++- 5 files changed, 371 insertions(+), 25 deletions(-) diff --git a/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp b/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp index 7c78f175f63..fb5f51c7251 100644 --- a/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp +++ b/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp @@ -75,6 +75,12 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) if (hasExpired(ctx.view, (*offerSLE)[~sfExpiration])) return {nullptr, tecEXPIRED}; + // The initial implementation had a bug that allowed a negative + // amount. The fixNFTokenNegOffer amendment fixes that. + if ((*offerSLE)[sfAmount].negative() && + ctx.view.rules().enabled(fixNFTokenNegOffer)) + return {nullptr, temBAD_OFFER}; + return {std::move(offerSLE), tesSUCCESS}; } return {nullptr, tesSUCCESS}; @@ -103,6 +109,14 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) if ((*so)[sfAmount] > (*bo)[sfAmount]) return tecINSUFFICIENT_PAYMENT; + // If the buyer specified a destination, that destination must be + // the seller or the broker. + if (auto const dest = bo->at(~sfDestination)) + { + if (*dest != so->at(sfOwner) && *dest != ctx.tx[sfAccount]) + return tecNFTOKEN_BUY_SELL_MISMATCH; + } + // If the seller specified a destination, that destination must be // the buyer or the broker. if (auto const dest = so->at(~sfDestination)) @@ -142,6 +156,15 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) !nft::findToken(ctx.view, ctx.tx[sfAccount], (*bo)[sfNFTokenID])) return tecNO_PERMISSION; + // If not in bridged mode... + if (!so) + { + // If the offer has a Destination field, the acceptor must be the + // Destination. 
+ if (auto const dest = bo->at(~sfDestination); + dest.has_value() && *dest != ctx.tx[sfAccount]) + return tecNO_PERMISSION; + } // The account offering to buy must have funds: auto const needed = bo->at(sfAmount); diff --git a/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp b/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp index bf92472e2ce..80e4c3964a7 100644 --- a/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp +++ b/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp @@ -46,7 +46,11 @@ NFTokenCreateOffer::preflight(PreflightContext const& ctx) auto const nftFlags = nft::getFlags(ctx.tx[sfNFTokenID]); { - auto const amount = ctx.tx[sfAmount]; + STAmount const amount = ctx.tx[sfAmount]; + + if (amount.negative() && ctx.rules.enabled(fixNFTokenNegOffer)) + // An offer for a negative amount makes no sense. + return temBAD_AMOUNT; if (!isXRP(amount)) { @@ -78,9 +82,14 @@ NFTokenCreateOffer::preflight(PreflightContext const& ctx) if (auto dest = ctx.tx[~sfDestination]) { - // The destination field is only valid on a sell offer; it makes no - // sense in a buy offer. - if (!isSellOffer) + // Some folks think it makes sense for a buy offer to specify a + // specific broker using the Destination field. This change doesn't + // deserve it's own amendment, so we're piggy-backing on + // fixNFTokenNegOffer. + // + // Prior to fixNFTokenNegOffer any use of the Destination field on + // a buy offer was malformed. + if (!isSellOffer && !ctx.rules.enabled(fixNFTokenNegOffer)) return temMALFORMED; // The destination can't be the account executing the transaction. diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index b3ecb099bcc..7164cd46e4e 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -74,7 +74,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. A LogicError on startup will verify this. -static constexpr std::size_t numFeatures = 48; +static constexpr std::size_t numFeatures = 49; /** Amendments that this server supports and the default voting behavior. 
Whether they are enabled depends on the Rules defined in the validated @@ -336,6 +336,7 @@ extern uint256 const featureCheckCashMakesTrustLine; extern uint256 const featureNonFungibleTokensV1; extern uint256 const featureExpandedSignerList; extern uint256 const fixNFTokenDirV1; +extern uint256 const fixNFTokenNegOffer; } // namespace ripple diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index f6f67c003bc..1fcca56e884 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -431,7 +431,7 @@ REGISTER_FEATURE(RequireFullyCanonicalSig, Supported::yes, DefaultVote::yes REGISTER_FIX (fix1781, Supported::yes, DefaultVote::yes); REGISTER_FEATURE(HardenedValidations, Supported::yes, DefaultVote::yes); REGISTER_FIX (fixAmendmentMajorityCalc, Supported::yes, DefaultVote::yes); -REGISTER_FEATURE(NegativeUNL, Supported::yes, DefaultVote::no); +REGISTER_FEATURE(NegativeUNL, Supported::yes, DefaultVote::yes); REGISTER_FEATURE(TicketBatch, Supported::yes, DefaultVote::yes); REGISTER_FEATURE(FlowSortStrands, Supported::yes, DefaultVote::yes); REGISTER_FIX (fixSTAmountCanonicalize, Supported::yes, DefaultVote::yes); @@ -440,6 +440,7 @@ REGISTER_FEATURE(CheckCashMakesTrustLine, Supported::yes, DefaultVote::no) REGISTER_FEATURE(NonFungibleTokensV1, Supported::yes, DefaultVote::no); REGISTER_FEATURE(ExpandedSignerList, Supported::yes, DefaultVote::no); REGISTER_FIX (fixNFTokenDirV1, Supported::yes, DefaultVote::no); +REGISTER_FIX (fixNFTokenNegOffer, Supported::yes, DefaultVote::no); // The following amendments have been active for at least two years. Their // pre-amendment code has been removed and the identifiers are deprecated. diff --git a/src/test/app/NFToken_test.cpp b/src/test/app/NFToken_test.cpp index 7dd4a781286..5a25b3670b8 100644 --- a/src/test/app/NFToken_test.cpp +++ b/src/test/app/NFToken_test.cpp @@ -2666,7 +2666,7 @@ class NFToken_test : public beast::unit_test::suite env.close(); // Test how adding a Destination field to an offer affects permissions - // for cancelling offers. + // for canceling offers. { uint256 const offerMinterToIssuer = keylet::nftoffer(minter, env.seq(minter)).key; @@ -2680,14 +2680,20 @@ class NFToken_test : public beast::unit_test::suite token::destination(buyer), txflags(tfSellNFToken)); - // buy offers cannot contain a Destination, so this attempt fails. 
+ uint256 const offerIssuerToMinter = + keylet::nftoffer(issuer, env.seq(issuer)).key; env(token::createOffer(issuer, nftokenID, drops(1)), token::owner(minter), - token::destination(minter), - ter(temMALFORMED)); + token::destination(minter)); + + uint256 const offerIssuerToBuyer = + keylet::nftoffer(issuer, env.seq(issuer)).key; + env(token::createOffer(issuer, nftokenID, drops(1)), + token::owner(minter), + token::destination(buyer)); env.close(); - BEAST_EXPECT(ownerCount(env, issuer) == 0); + BEAST_EXPECT(ownerCount(env, issuer) == 2); BEAST_EXPECT(ownerCount(env, minter) == 3); BEAST_EXPECT(ownerCount(env, buyer) == 0); @@ -2702,8 +2708,12 @@ class NFToken_test : public beast::unit_test::suite ter(tecNO_PERMISSION)); env(token::cancelOffer(buyer, {offerMinterToIssuer}), ter(tecNO_PERMISSION)); + env(token::cancelOffer(buyer, {offerIssuerToMinter}), + ter(tecNO_PERMISSION)); + env(token::cancelOffer(minter, {offerIssuerToBuyer}), + ter(tecNO_PERMISSION)); env.close(); - BEAST_EXPECT(ownerCount(env, issuer) == 0); + BEAST_EXPECT(ownerCount(env, issuer) == 2); BEAST_EXPECT(ownerCount(env, minter) == 3); BEAST_EXPECT(ownerCount(env, buyer) == 0); @@ -2711,6 +2721,8 @@ class NFToken_test : public beast::unit_test::suite // cancel the offers. env(token::cancelOffer(buyer, {offerMinterToBuyer})); env(token::cancelOffer(minter, {offerMinterToIssuer})); + env(token::cancelOffer(buyer, {offerIssuerToBuyer})); + env(token::cancelOffer(issuer, {offerIssuerToMinter})); env.close(); BEAST_EXPECT(ownerCount(env, issuer) == 0); BEAST_EXPECT(ownerCount(env, minter) == 1); @@ -2720,7 +2732,7 @@ class NFToken_test : public beast::unit_test::suite // Test how adding a Destination field to a sell offer affects // accepting that offer. { - uint256 const offerMinterToBuyer = + uint256 const offerMinterSellsToBuyer = keylet::nftoffer(minter, env.seq(minter)).key; env(token::createOffer(minter, nftokenID, drops(1)), token::destination(buyer), @@ -2732,7 +2744,7 @@ class NFToken_test : public beast::unit_test::suite // issuer cannot accept a sell offer where they are not the // destination. - env(token::acceptSellOffer(issuer, offerMinterToBuyer), + env(token::acceptSellOffer(issuer, offerMinterSellsToBuyer), ter(tecNO_PERMISSION)); env.close(); BEAST_EXPECT(ownerCount(env, issuer) == 0); @@ -2740,36 +2752,61 @@ class NFToken_test : public beast::unit_test::suite BEAST_EXPECT(ownerCount(env, buyer) == 0); // However buyer can accept the sell offer. - env(token::acceptSellOffer(buyer, offerMinterToBuyer)); + env(token::acceptSellOffer(buyer, offerMinterSellsToBuyer)); env.close(); BEAST_EXPECT(ownerCount(env, issuer) == 0); BEAST_EXPECT(ownerCount(env, minter) == 0); BEAST_EXPECT(ownerCount(env, buyer) == 1); } - // You can't add a Destination field to a buy offer. + // Test how adding a Destination field to a buy offer affects + // accepting that offer. { + uint256 const offerMinterBuysFromBuyer = + keylet::nftoffer(minter, env.seq(minter)).key; env(token::createOffer(minter, nftokenID, drops(1)), token::owner(buyer), - token::destination(buyer), - ter(temMALFORMED)); + token::destination(buyer)); env.close(); BEAST_EXPECT(ownerCount(env, issuer) == 0); - BEAST_EXPECT(ownerCount(env, minter) == 0); + BEAST_EXPECT(ownerCount(env, minter) == 1); BEAST_EXPECT(ownerCount(env, buyer) == 1); - // However without the Destination the buy offer works fine. 
- uint256 const offerMinterToBuyer = - keylet::nftoffer(minter, env.seq(minter)).key; - env(token::createOffer(minter, nftokenID, drops(1)), - token::owner(buyer)); + // issuer cannot accept a buy offer where they are the + // destination. + env(token::acceptBuyOffer(issuer, offerMinterBuysFromBuyer), + ter(tecNO_PERMISSION)); env.close(); BEAST_EXPECT(ownerCount(env, issuer) == 0); BEAST_EXPECT(ownerCount(env, minter) == 1); BEAST_EXPECT(ownerCount(env, buyer) == 1); // Buyer accepts minter's offer. - env(token::acceptBuyOffer(buyer, offerMinterToBuyer)); + env(token::acceptBuyOffer(buyer, offerMinterBuysFromBuyer)); + env.close(); + BEAST_EXPECT(ownerCount(env, issuer) == 0); + BEAST_EXPECT(ownerCount(env, minter) == 1); + BEAST_EXPECT(ownerCount(env, buyer) == 0); + + // If a destination other than the NFToken owner is set, that + // destination must act as a broker. The NFToken owner may not + // simply accept the offer. + uint256 const offerBuyerBuysFromMinter = + keylet::nftoffer(buyer, env.seq(buyer)).key; + env(token::createOffer(buyer, nftokenID, drops(1)), + token::owner(minter), + token::destination(broker)); + env.close(); + BEAST_EXPECT(ownerCount(env, issuer) == 0); + BEAST_EXPECT(ownerCount(env, minter) == 1); + BEAST_EXPECT(ownerCount(env, buyer) == 1); + + env(token::acceptBuyOffer(minter, offerBuyerBuysFromMinter), + ter(tecNO_PERMISSION)); + env.close(); + + // Clean up the unused offer. + env(token::cancelOffer(buyer, {offerBuyerBuysFromMinter})); env.close(); BEAST_EXPECT(ownerCount(env, issuer) == 0); BEAST_EXPECT(ownerCount(env, minter) == 1); @@ -2856,6 +2893,47 @@ class NFToken_test : public beast::unit_test::suite BEAST_EXPECT(ownerCount(env, issuer) == 1); BEAST_EXPECT(ownerCount(env, minter) == 1); BEAST_EXPECT(ownerCount(env, buyer) == 0); + + // Clean out the unconsumed offer. + env(token::cancelOffer(issuer, {offerIssuerToBuyer})); + env.close(); + BEAST_EXPECT(ownerCount(env, issuer) == 0); + BEAST_EXPECT(ownerCount(env, minter) == 1); + BEAST_EXPECT(ownerCount(env, buyer) == 0); + } + + // Show that if a buy and a sell offer both have the same destination, + // then that destination can broker the offers. + { + uint256 const offerMinterToBroker = + keylet::nftoffer(minter, env.seq(minter)).key; + env(token::createOffer(minter, nftokenID, drops(1)), + token::destination(broker), + txflags(tfSellNFToken)); + + uint256 const offerBuyerToBroker = + keylet::nftoffer(buyer, env.seq(buyer)).key; + env(token::createOffer(buyer, nftokenID, drops(1)), + token::owner(minter), + token::destination(broker)); + + // Cannot broker offers when the sell destination is not the buyer + // or the broker. + env(token::brokerOffers( + issuer, offerBuyerToBroker, offerMinterToBroker), + ter(tecNFTOKEN_BUY_SELL_MISMATCH)); + env.close(); + BEAST_EXPECT(ownerCount(env, issuer) == 0); + BEAST_EXPECT(ownerCount(env, minter) == 2); + BEAST_EXPECT(ownerCount(env, buyer) == 1); + + // Broker is successful if they are the destination of both offers. + env(token::brokerOffers( + broker, offerBuyerToBroker, offerMinterToBroker)); + env.close(); + BEAST_EXPECT(ownerCount(env, issuer) == 0); + BEAST_EXPECT(ownerCount(env, minter) == 0); + BEAST_EXPECT(ownerCount(env, buyer) == 1); } } @@ -4557,6 +4635,239 @@ class NFToken_test : public beast::unit_test::suite checkOffers("nft_buy_offers", 501, 2, __LINE__); } + void + testFixNFTokenNegOffer(FeatureBitset features) + { + // Exercise changes introduced by fixNFTokenNegOffer. 
+ using namespace test::jtx; + + testcase("fixNFTokenNegOffer"); + + Account const issuer{"issuer"}; + Account const buyer{"buyer"}; + Account const gw{"gw"}; + IOU const gwXAU(gw["XAU"]); + + // Test both with and without fixNFTokenNegOffer + for (auto const& tweakedFeatures : + {features - fixNFTokenNegOffer, features | fixNFTokenNegOffer}) + { + // There was a bug in the initial NFT implementation that + // allowed offers to be placed with negative amounts. Verify + // that fixNFTokenNegOffer addresses the problem. + Env env{*this, tweakedFeatures}; + + env.fund(XRP(1000000), issuer, buyer, gw); + env.close(); + + env(trust(issuer, gwXAU(2000))); + env(trust(buyer, gwXAU(2000))); + env.close(); + + env(pay(gw, issuer, gwXAU(1000))); + env(pay(gw, buyer, gwXAU(1000))); + env.close(); + + // Create an NFT that we'll make XRP offers for. + uint256 const nftID0{ + token::getNextID(env, issuer, 0u, tfTransferable)}; + env(token::mint(issuer, 0), txflags(tfTransferable)); + env.close(); + + // Create an NFT that we'll make IOU offers for. + uint256 const nftID1{ + token::getNextID(env, issuer, 1u, tfTransferable)}; + env(token::mint(issuer, 1), txflags(tfTransferable)); + env.close(); + + TER const offerCreateTER = tweakedFeatures[fixNFTokenNegOffer] + ? static_cast(temBAD_AMOUNT) + : static_cast(tesSUCCESS); + + // Make offers with negative amounts for the NFTs + uint256 const sellNegXrpOfferIndex = + keylet::nftoffer(issuer, env.seq(issuer)).key; + env(token::createOffer(issuer, nftID0, XRP(-2)), + txflags(tfSellNFToken), + ter(offerCreateTER)); + env.close(); + + uint256 const sellNegIouOfferIndex = + keylet::nftoffer(issuer, env.seq(issuer)).key; + env(token::createOffer(issuer, nftID1, gwXAU(-2)), + txflags(tfSellNFToken), + ter(offerCreateTER)); + env.close(); + + uint256 const buyNegXrpOfferIndex = + keylet::nftoffer(buyer, env.seq(buyer)).key; + env(token::createOffer(buyer, nftID0, XRP(-1)), + token::owner(issuer), + ter(offerCreateTER)); + env.close(); + + uint256 const buyNegIouOfferIndex = + keylet::nftoffer(buyer, env.seq(buyer)).key; + env(token::createOffer(buyer, nftID1, gwXAU(-1)), + token::owner(issuer), + ter(offerCreateTER)); + env.close(); + + { + // Now try to accept the offers. + // 1. If fixNFTokenNegOffer is NOT enabled get tecINTERNAL. + // 2. If fixNFTokenNegOffer IS enabled get tecOBJECT_NOT_FOUND. + TER const offerAcceptTER = tweakedFeatures[fixNFTokenNegOffer] + ? static_cast(tecOBJECT_NOT_FOUND) + : static_cast(tecINTERNAL); + + // Sell offers. + env(token::acceptSellOffer(buyer, sellNegXrpOfferIndex), + ter(offerAcceptTER)); + env.close(); + env(token::acceptSellOffer(buyer, sellNegIouOfferIndex), + ter(offerAcceptTER)); + env.close(); + + // Buy offers. + env(token::acceptBuyOffer(issuer, buyNegXrpOfferIndex), + ter(offerAcceptTER)); + env.close(); + env(token::acceptBuyOffer(issuer, buyNegIouOfferIndex), + ter(offerAcceptTER)); + env.close(); + } + { + // 1. If fixNFTokenNegOffer is NOT enabled get tecSUCCESS. + // 2. If fixNFTokenNegOffer IS enabled get tecOBJECT_NOT_FOUND. + TER const offerAcceptTER = tweakedFeatures[fixNFTokenNegOffer] + ? static_cast(tecOBJECT_NOT_FOUND) + : static_cast(tesSUCCESS); + + // Brokered offers. 
+ env(token::brokerOffers( + gw, buyNegXrpOfferIndex, sellNegXrpOfferIndex), + ter(offerAcceptTER)); + env.close(); + env(token::brokerOffers( + gw, buyNegIouOfferIndex, sellNegIouOfferIndex), + ter(offerAcceptTER)); + env.close(); + } + } + + // Test what happens if NFTokenOffers are created with negative amounts + // and then fixNFTokenNegOffer goes live. What does an acceptOffer do? + { + Env env{*this, features - fixNFTokenNegOffer}; + + env.fund(XRP(1000000), issuer, buyer, gw); + env.close(); + + env(trust(issuer, gwXAU(2000))); + env(trust(buyer, gwXAU(2000))); + env.close(); + + env(pay(gw, issuer, gwXAU(1000))); + env(pay(gw, buyer, gwXAU(1000))); + env.close(); + + // Create an NFT that we'll make XRP offers for. + uint256 const nftID0{ + token::getNextID(env, issuer, 0u, tfTransferable)}; + env(token::mint(issuer, 0), txflags(tfTransferable)); + env.close(); + + // Create an NFT that we'll make IOU offers for. + uint256 const nftID1{ + token::getNextID(env, issuer, 1u, tfTransferable)}; + env(token::mint(issuer, 1), txflags(tfTransferable)); + env.close(); + + // Make offers with negative amounts for the NFTs + uint256 const sellNegXrpOfferIndex = + keylet::nftoffer(issuer, env.seq(issuer)).key; + env(token::createOffer(issuer, nftID0, XRP(-2)), + txflags(tfSellNFToken)); + env.close(); + + uint256 const sellNegIouOfferIndex = + keylet::nftoffer(issuer, env.seq(issuer)).key; + env(token::createOffer(issuer, nftID1, gwXAU(-2)), + txflags(tfSellNFToken)); + env.close(); + + uint256 const buyNegXrpOfferIndex = + keylet::nftoffer(buyer, env.seq(buyer)).key; + env(token::createOffer(buyer, nftID0, XRP(-1)), + token::owner(issuer)); + env.close(); + + uint256 const buyNegIouOfferIndex = + keylet::nftoffer(buyer, env.seq(buyer)).key; + env(token::createOffer(buyer, nftID1, gwXAU(-1)), + token::owner(issuer)); + env.close(); + + // Now the amendment passes. + env.enableFeature(fixNFTokenNegOffer); + env.close(); + + // All attempts to accept the offers with negative amounts + // should fail with temBAD_OFFER. + env(token::acceptSellOffer(buyer, sellNegXrpOfferIndex), + ter(temBAD_OFFER)); + env.close(); + env(token::acceptSellOffer(buyer, sellNegIouOfferIndex), + ter(temBAD_OFFER)); + env.close(); + + // Buy offers. + env(token::acceptBuyOffer(issuer, buyNegXrpOfferIndex), + ter(temBAD_OFFER)); + env.close(); + env(token::acceptBuyOffer(issuer, buyNegIouOfferIndex), + ter(temBAD_OFFER)); + env.close(); + + // Brokered offers. + env(token::brokerOffers( + gw, buyNegXrpOfferIndex, sellNegXrpOfferIndex), + ter(temBAD_OFFER)); + env.close(); + env(token::brokerOffers( + gw, buyNegIouOfferIndex, sellNegIouOfferIndex), + ter(temBAD_OFFER)); + env.close(); + } + + // Test buy offers with a destination with and without + // fixNFTokenNegOffer. + for (auto const& tweakedFeatures : + {features - fixNFTokenNegOffer, features | fixNFTokenNegOffer}) + { + Env env{*this, tweakedFeatures}; + + env.fund(XRP(1000000), issuer, buyer); + + // Create an NFT that we'll make offers for. + uint256 const nftID{ + token::getNextID(env, issuer, 0u, tfTransferable)}; + env(token::mint(issuer, 0), txflags(tfTransferable)); + env.close(); + + TER const offerCreateTER = tweakedFeatures[fixNFTokenNegOffer] + ? 
static_cast(tesSUCCESS) + : static_cast(temMALFORMED); + + env(token::createOffer(buyer, nftID, drops(1)), + token::owner(issuer), + token::destination(issuer), + ter(offerCreateTER)); + env.close(); + } + } + void testWithFeats(FeatureBitset features) { @@ -4584,6 +4895,7 @@ class NFToken_test : public beast::unit_test::suite testNFTokenWithTickets(features); testNFTokenDeleteAccount(features); testNftXxxOffers(features); + testFixNFTokenNegOffer(features); } public: From b0b44d32bd1460b23a9291a747a731f6883aa8e2 Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Tue, 5 Jul 2022 16:56:54 -0400 Subject: [PATCH 015/508] Fix amendment voting persistence: An incorrect SQL query could cause the server to improperly configure its voting state after a restart; typically, this would manifest as an apparent failure to store a vote which the administrator of the server had configured. This commit fixes the broken SQL and ensures that amendment votes are properly reloaded post-restart and closes #4220. --- src/ripple/app/rdb/impl/Wallet.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/ripple/app/rdb/impl/Wallet.cpp b/src/ripple/app/rdb/impl/Wallet.cpp index c6040964bde..24715404ce5 100644 --- a/src/ripple/app/rdb/impl/Wallet.cpp +++ b/src/ripple/app/rdb/impl/Wallet.cpp @@ -254,7 +254,10 @@ readAmendments( soci::transaction tr(session); std::string sql = - "SELECT AmendmentHash, AmendmentName, Veto FROM FeatureVotes"; + "SELECT AmendmentHash, AmendmentName, Veto FROM " + "( SELECT AmendmentHash, AmendmentName, Veto, RANK() OVER " + "( PARTITION BY AmendmentHash ORDER BY ROWID DESC ) " + "as rnk FROM FeatureVotes ) WHERE rnk = 1"; // SOCI requires boost::optional (not std::optional) as parameters. boost::optional amendment_hash; boost::optional amendment_name; From d458e9972bee8f458a4c6b87705288ddf0a1daa1 Mon Sep 17 00:00:00 2001 From: Nathan Nichols Date: Wed, 13 Jul 2022 20:35:31 -0700 Subject: [PATCH 016/508] Improve JSON sanitization in reporting mode --- src/ripple/app/reporting/P2pProxy.cpp | 2 +- src/ripple/rpc/impl/ServerHandlerImp.cpp | 16 +++++++++++++++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/src/ripple/app/reporting/P2pProxy.cpp b/src/ripple/app/reporting/P2pProxy.cpp index 8e4fc3a97aa..ee04b68e6b5 100644 --- a/src/ripple/app/reporting/P2pProxy.cpp +++ b/src/ripple/app/reporting/P2pProxy.cpp @@ -72,7 +72,7 @@ shouldForwardToP2p(RPC::JsonContext& context) if (params.isMember(jss::ledger_index)) { auto indexValue = params[jss::ledger_index]; - if (!indexValue.isNumeric()) + if (indexValue.isString()) { auto index = indexValue.asString(); return index == "current" || index == "closed"; diff --git a/src/ripple/rpc/impl/ServerHandlerImp.cpp b/src/ripple/rpc/impl/ServerHandlerImp.cpp index 3ac5f04a9d6..cb70fdcab4f 100644 --- a/src/ripple/rpc/impl/ServerHandlerImp.cpp +++ b/src/ripple/rpc/impl/ServerHandlerImp.cpp @@ -873,9 +873,23 @@ ServerHandlerImp::processRequest( params, {user, forwardedFor}}; Json::Value result; + auto start = std::chrono::system_clock::now(); - RPC::doCommand(context, result); + + try + { + RPC::doCommand(context, result); + } + catch (std::exception const& ex) + { + result = RPC::make_error(rpcINTERNAL); + JLOG(m_journal.error()) << "Internal error : " << ex.what() + << " when processing request: " + << Json::Compact{Json::Value{params}}; + } + auto end = std::chrono::system_clock::now(); + logDuration(params, end - start, m_journal); usage.charge(loadType); From 5e6728dccde1217fb47da6a5f1a594bf4937843d Mon Sep 17 
00:00:00 2001 From: Nik Bougalis Date: Wed, 18 May 2022 13:14:37 -0700 Subject: [PATCH 017/508] Set cluster timer only when in a cluster --- src/ripple/app/misc/NetworkOPs.cpp | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index bdc266422e5..8dff1af7b2b 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -919,7 +919,10 @@ void NetworkOPsImp::setStateTimer() { setHeartbeatTimer(); - setClusterTimer(); + + // Only do this work if a cluster is configured + if (app_.cluster().size() != 0) + setClusterTimer(); } void @@ -972,6 +975,7 @@ void NetworkOPsImp::setClusterTimer() { using namespace std::chrono_literals; + setTimer( clusterTimer_, 10s, @@ -1057,7 +1061,11 @@ NetworkOPsImp::processHeartbeatTimer() void NetworkOPsImp::processClusterTimer() { + if (app_.cluster().size() == 0) + return; + using namespace std::chrono_literals; + bool const update = app_.cluster().update( app_.nodeIdentity().first, "", From 47ccd0b579f90777a395e48236f13f676ca18618 Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Thu, 9 Jun 2022 13:41:38 -0700 Subject: [PATCH 018/508] Limit how often endpoint messages can be processed: The peer discovery protocol depends on peers exchanging messages listing IP addresses for other peers. Under normal circumstances, these messages should not be sent frequently; the existing code would track the earliest time a new message should be processed, but did not actually enforce that limit. --- src/ripple/peerfinder/impl/Logic.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/ripple/peerfinder/impl/Logic.h b/src/ripple/peerfinder/impl/Logic.h index 4e69c8bd38d..ca14a5111fc 100644 --- a/src/ripple/peerfinder/impl/Logic.h +++ b/src/ripple/peerfinder/impl/Logic.h @@ -782,10 +782,14 @@ class Logic // Must be handshaked! assert(slot->state() == Slot::active); - preprocess(slot, list); - clock_type::time_point const now(m_clock.now()); + // Limit how often we accept new endpoints + if (slot->whenAcceptEndpoints > now) + return; + + preprocess(slot, list); + for (auto const& ep : list) { assert(ep.hops != 0); From 9eb303f8e8f1a623da99b69e9998a633b8605a40 Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Sat, 11 Jun 2022 22:01:35 -0700 Subject: [PATCH 019/508] Improve STVector256 deserialization --- src/ripple/basics/Slice.h | 7 ++++--- src/ripple/basics/base_uint.h | 6 ++++++ src/ripple/protocol/impl/STVector256.cpp | 26 ++++++++++++------------ 3 files changed, 23 insertions(+), 16 deletions(-) diff --git a/src/ripple/basics/Slice.h b/src/ripple/basics/Slice.h index 67c954bb723..0ba6a94b62b 100644 --- a/src/ripple/basics/Slice.h +++ b/src/ripple/basics/Slice.h @@ -48,7 +48,8 @@ class Slice std::size_t size_ = 0; public: - using const_iterator = std::uint8_t const*; + using value_type = std::uint8_t; + using const_iterator = value_type const*; /** Default constructed Slice has length 0. */ Slice() noexcept = default; @@ -75,13 +76,13 @@ class Slice This may be zero for an empty range. 
*/ /** @{ */ - std::size_t + [[nodiscard]] std::size_t size() const noexcept { return size_; } - std::size_t + [[nodiscard]] std::size_t length() const noexcept { return size_; diff --git a/src/ripple/basics/base_uint.h b/src/ripple/basics/base_uint.h index ccbb24a13a6..8f277c3003c 100644 --- a/src/ripple/basics/base_uint.h +++ b/src/ripple/basics/base_uint.h @@ -26,6 +26,7 @@ #define RIPPLE_BASICS_BASE_UINT_H_INCLUDED #include +#include #include #include #include @@ -56,6 +57,11 @@ struct is_contiguous_container< { }; +template <> +struct is_contiguous_container : std::true_type +{ +}; + } // namespace detail /** Integers of any length that is a multiple of 32-bits diff --git a/src/ripple/protocol/impl/STVector256.cpp b/src/ripple/protocol/impl/STVector256.cpp index f74670ac000..0ef1295b16a 100644 --- a/src/ripple/protocol/impl/STVector256.cpp +++ b/src/ripple/protocol/impl/STVector256.cpp @@ -26,19 +26,19 @@ namespace ripple { STVector256::STVector256(SerialIter& sit, SField const& name) : STBase(name) { - Blob data = sit.getVL(); - auto const count = data.size() / (256 / 8); - mValue.reserve(count); - Blob::iterator begin = data.begin(); - unsigned int uStart = 0; - for (unsigned int i = 0; i != count; i++) - { - unsigned int uEnd = uStart + (256 / 8); - // This next line could be optimized to construct a default - // uint256 in the vector and then copy into it - mValue.push_back(uint256(Blob(begin + uStart, begin + uEnd))); - uStart = uEnd; - } + auto const slice = sit.getSlice(sit.getVLDataLength()); + + if (slice.size() % uint256::size() != 0) + Throw( + "Bad serialization for STVector256: " + + std::to_string(slice.size())); + + auto const cnt = slice.size() / uint256::size(); + + mValue.reserve(cnt); + + for (std::size_t i = 0; i != cnt; ++i) + mValue.emplace_back(slice.substr(i * uint256::size(), uint256::size())); } STBase* From 59326bbbc552287e44b3a0d7b8afbb1ddddb3e3b Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Wed, 6 Jul 2022 12:49:00 -0700 Subject: [PATCH 020/508] Introduce the `NonFungibleTokensV1_1` amendment: The XLS-20 implementation contained two bugs that would require the introduction of amendments. This complicates the adoption of XLS-20 by requiring a staggered amendment activation, first of the two fix amendments, followed by the `NonFungibleTokensV1` amendment. After consideration, the consensus among node operators is that the process should be simplified by the introduction of a new amendment that, if enabled, would behaves as if the `NonFungibleTokensV1` and the two fix amendments (`fixNFTokenDirV1` and `fixNFTokenNegOffer`) were activated at once. This commit implements this proposal; it does not introduce any new functionality or additional features, above and beyond that offered by the existing amendments. --- src/ripple/protocol/Feature.h | 3 ++- src/ripple/protocol/impl/Feature.cpp | 1 + src/ripple/protocol/impl/Rules.cpp | 18 ++++++++++++++---- src/test/app/NFTokenDir_test.cpp | 3 ++- src/test/app/NFToken_test.cpp | 15 +++++++++++---- 5 files changed, 30 insertions(+), 10 deletions(-) diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index 7164cd46e4e..f0d0c8efbb5 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -74,7 +74,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. A LogicError on startup will verify this. 
-static constexpr std::size_t numFeatures = 49; +static constexpr std::size_t numFeatures = 50; /** Amendments that this server supports and the default voting behavior. Whether they are enabled depends on the Rules defined in the validated @@ -337,6 +337,7 @@ extern uint256 const featureNonFungibleTokensV1; extern uint256 const featureExpandedSignerList; extern uint256 const fixNFTokenDirV1; extern uint256 const fixNFTokenNegOffer; +extern uint256 const featureNonFungibleTokensV1_1; } // namespace ripple diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index 1fcca56e884..4060067e30a 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -441,6 +441,7 @@ REGISTER_FEATURE(NonFungibleTokensV1, Supported::yes, DefaultVote::no) REGISTER_FEATURE(ExpandedSignerList, Supported::yes, DefaultVote::no); REGISTER_FIX (fixNFTokenDirV1, Supported::yes, DefaultVote::no); REGISTER_FIX (fixNFTokenNegOffer, Supported::yes, DefaultVote::no); +REGISTER_FEATURE(NonFungibleTokensV1_1, Supported::yes, DefaultVote::no); // The following amendments have been active for at least two years. Their // pre-amendment code has been removed and the identifiers are deprecated. diff --git a/src/ripple/protocol/impl/Rules.cpp b/src/ripple/protocol/impl/Rules.cpp index 3736764fcf9..baea81d6c99 100644 --- a/src/ripple/protocol/impl/Rules.cpp +++ b/src/ripple/protocol/impl/Rules.cpp @@ -17,10 +17,9 @@ */ //============================================================================== +#include #include -#include - namespace ripple { class Rules::Impl @@ -40,9 +39,8 @@ class Rules::Impl std::unordered_set> const& presets, std::optional const& digest, STVector256 const& amendments) - : presets_(presets) + : presets_(presets), digest_(digest) { - digest_ = digest; set_.reserve(amendments.size()); set_.insert(amendments.begin(), amendments.end()); } @@ -83,6 +81,18 @@ bool Rules::enabled(uint256 const& feature) const { assert(impl_); + + // The functionality of the "NonFungibleTokensV1_1" amendment is + // precisely the functionality of the following three amendments + // so if their status is ever queried individually, we inject an + // extra check here to simplify the checking elsewhere. + if (feature == featureNonFungibleTokensV1 || + feature == fixNFTokenNegOffer || feature == fixNFTokenDirV1) + { + if (impl_->enabled(featureNonFungibleTokensV1_1)) + return true; + } + return impl_->enabled(feature); } diff --git a/src/test/app/NFTokenDir_test.cpp b/src/test/app/NFTokenDir_test.cpp index 8f8d0f581f0..d50bd1584d6 100644 --- a/src/test/app/NFTokenDir_test.cpp +++ b/src/test/app/NFTokenDir_test.cpp @@ -1075,7 +1075,8 @@ class NFTokenDir_test : public beast::unit_test::suite { using namespace test::jtx; FeatureBitset const all{supported_amendments()}; - FeatureBitset const fixNFTDir{fixNFTokenDirV1}; + FeatureBitset const fixNFTDir{ + fixNFTokenDirV1, featureNonFungibleTokensV1_1}; testWithFeats(all - fixNFTDir); testWithFeats(all); diff --git a/src/test/app/NFToken_test.cpp b/src/test/app/NFToken_test.cpp index 5a25b3670b8..2db31afe0db 100644 --- a/src/test/app/NFToken_test.cpp +++ b/src/test/app/NFToken_test.cpp @@ -96,7 +96,10 @@ class NFToken_test : public beast::unit_test::suite { // If the NFT amendment is not enabled, you should not be able // to create or burn NFTs. 
- Env env{*this, features - featureNonFungibleTokensV1}; + Env env{ + *this, + features - featureNonFungibleTokensV1 - + featureNonFungibleTokensV1_1}; Account const& master = env.master; BEAST_EXPECT(ownerCount(env, master) == 0); @@ -4650,7 +4653,8 @@ class NFToken_test : public beast::unit_test::suite // Test both with and without fixNFTokenNegOffer for (auto const& tweakedFeatures : - {features - fixNFTokenNegOffer, features | fixNFTokenNegOffer}) + {features - fixNFTokenNegOffer - featureNonFungibleTokensV1_1, + features | fixNFTokenNegOffer}) { // There was a bug in the initial NFT implementation that // allowed offers to be placed with negative amounts. Verify @@ -4759,7 +4763,9 @@ class NFToken_test : public beast::unit_test::suite // Test what happens if NFTokenOffers are created with negative amounts // and then fixNFTokenNegOffer goes live. What does an acceptOffer do? { - Env env{*this, features - fixNFTokenNegOffer}; + Env env{ + *this, + features - fixNFTokenNegOffer - featureNonFungibleTokensV1_1}; env.fund(XRP(1000000), issuer, buyer, gw); env.close(); @@ -4844,7 +4850,8 @@ class NFToken_test : public beast::unit_test::suite // Test buy offers with a destination with and without // fixNFTokenNegOffer. for (auto const& tweakedFeatures : - {features - fixNFTokenNegOffer, features | fixNFTokenNegOffer}) + {features - fixNFTokenNegOffer - featureNonFungibleTokensV1_1, + features | fixNFTokenNegOffer}) { Env env{*this, tweakedFeatures}; From 7e46f5342b7f1a574c2468ac05a834227898da7a Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Wed, 8 Jun 2022 12:18:04 -0700 Subject: [PATCH 021/508] Correct a technical flaw with the spinlock locking: The existing spinlock code, used to protect SHAMapInnerNode child lists, has a mistake that can allow the same child to be repeatedly locked under some circumstances. The bug was in the `SpinBitLock::lock` loop condition check and would result in the loop terminating early. This commit fixes this and further simplifies the lock loop making the correctness of the code easier to verify without sacrificing performance. It also promotes the spinlock class from an implementation detail to a more general purpose, easier to use lock class with clearer semantics. Two different lock types now allow developers to easily grab either a single spinlock from an a group of spinlocks (packed in an unsigned integer) or to grab all of the spinlocks at once. While this commit makes spinlocks more widely available to developers, they are rarely the best tool for the job. Use them judiciously and only after careful consideration. 
--- Builds/CMake/RippledCore.cmake | 1 + src/ripple/basics/spinlock.h | 223 +++++++++++++++++++++ src/ripple/shamap/impl/SHAMapInnerNode.cpp | 93 +-------- 3 files changed, 229 insertions(+), 88 deletions(-) create mode 100644 src/ripple/basics/spinlock.h diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 3930788071f..ec9e23663fd 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -156,6 +156,7 @@ install ( src/ripple/basics/MathUtilities.h src/ripple/basics/safe_cast.h src/ripple/basics/Slice.h + src/ripple/basics/spinlock.h src/ripple/basics/StringUtilities.h src/ripple/basics/ThreadSafetyAnalysis.h src/ripple/basics/ToString.h diff --git a/src/ripple/basics/spinlock.h b/src/ripple/basics/spinlock.h new file mode 100644 index 00000000000..85a2ac41d51 --- /dev/null +++ b/src/ripple/basics/spinlock.h @@ -0,0 +1,223 @@ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright 2022, Nikolaos D. Bougalis + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ + +#ifndef RIPPLE_BASICS_SPINLOCK_H_INCLUDED +#define RIPPLE_BASICS_SPINLOCK_H_INCLUDED + +#include +#include +#include +#include + +#ifndef __aarch64__ +#include +#endif + +namespace ripple { + +namespace detail { +/** Inform the processor that we are in a tight spin-wait loop. + + Spinlocks caught in tight loops can result in the processor's pipeline + filling up with comparison operations, resulting in a misprediction at + the time the lock is finally acquired, necessitating pipeline flushing + which is ridiculously expensive and results in very high latency. + + This function instructs the processor to "pause" for some architecture + specific amount of time, to prevent this. + */ +inline void +spin_pause() noexcept +{ +#ifdef __aarch64__ + asm volatile("yield"); +#else + _mm_pause(); +#endif +} + +} // namespace detail + +/** @{ */ +/** Classes to handle arrays of spinlocks packed into a single atomic integer: + + Packed spinlocks allow for tremendously space-efficient lock-sharding + but they come at a cost. + + First, the implementation is necessarily low-level and uses advanced + features like memory ordering and highly platform-specific tricks to + maximize performance. This imposes a significant and ongoing cost to + developers. + + Second, and perhaps most important, is that the packing of multiple + locks into a single integer which, albeit space-efficient, also has + performance implications stemming from data dependencies, increased + cache-coherency traffic between processors and heavier loads on the + processor's load/store units. + + To be sure, these locks can have advantages but they are definitely + not general purpose locks and should not be thought of or used that + way. 
The use cases for them are likely few and far between; without + a compelling reason to use them, backed by profiling data, it might + be best to use one of the standard locking primitives instead. Note + that in most common platforms, `std::mutex` is so heavily optimized + that it can, usually, outperform spinlocks. + + @tparam T An unsigned integral type (e.g. std::uint16_t) + */ + +/** A class that grabs a single packed spinlock from an atomic integer. + + This class meets the requirements of Lockable: + https://en.cppreference.com/w/cpp/named_req/Lockable + */ +template +class packed_spinlock +{ + // clang-format off + static_assert(std::is_unsigned_v); + static_assert(std::atomic::is_always_lock_free); + static_assert( + std::is_same_v&>().fetch_or(0)), T> && + std::is_same_v&>().fetch_and(0)), T>, + "std::atomic::fetch_and(T) and std::atomic::fetch_and(T) are required by packed_spinlock"); + // clang-format on + +private: + std::atomic& bits_; + T const mask_; + +public: + packed_spinlock(packed_spinlock const&) = delete; + packed_spinlock& + operator=(packed_spinlock const&) = delete; + + /** A single spinlock packed inside the specified atomic + + @param lock The atomic integer inside which the spinlock is packed. + @param index The index of the spinlock this object acquires. + + @note For performance reasons, you should strive to have `lock` be + on a cacheline by itself. + */ + packed_spinlock(std::atomic& lock, int index) + : bits_(lock), mask_(static_cast(1) << index) + { + assert(index >= 0 && (mask_ != 0)); + } + + [[nodiscard]] bool + try_lock() + { + return (bits_.fetch_or(mask_, std::memory_order_acquire) & mask_) == 0; + } + + void + lock() + { + while (!try_lock()) + { + // The use of relaxed memory ordering here is intentional and + // serves to help reduce cache coherency traffic during times + // of contention by avoiding writes that would definitely not + // result in the lock being acquired. + while ((bits_.load(std::memory_order_relaxed) & mask_) != 0) + detail::spin_pause(); + } + } + + void + unlock() + { + bits_.fetch_and(~mask_, std::memory_order_release); + } +}; + +/** A spinlock implemented on top of an atomic integer. + + @note Using `packed_spinlock` and `spinlock` against the same underlying + atomic integer can result in `spinlock` not being able to actually + acquire the lock during periods of high contention, because of how + the two locks operate: `spinlock` will spin trying to grab all the + bits at once, whereas any given `packed_spinlock` will only try to + grab one bit at a time. Caveat emptor. + + This class meets the requirements of Lockable: + https://en.cppreference.com/w/cpp/named_req/Lockable + */ +template +class spinlock +{ + static_assert(std::is_unsigned_v); + static_assert(std::atomic::is_always_lock_free); + +private: + std::atomic& lock_; + +public: + spinlock(spinlock const&) = delete; + spinlock& + operator=(spinlock const&) = delete; + + /** Grabs the + + @param lock The atomic integer to spin against. + + @note For performance reasons, you should strive to have `lock` be + on a cacheline by itself. 
+ */ + spinlock(std::atomic& lock) : lock_(lock) + { + } + + [[nodiscard]] bool + try_lock() + { + T expected = 0; + + return lock_.compare_exchange_weak( + expected, + std::numeric_limits::max(), + std::memory_order_acquire, + std::memory_order_relaxed); + } + + void + lock() + { + while (!try_lock()) + { + // The use of relaxed memory ordering here is intentional and + // serves to help reduce cache coherency traffic during times + // of contention by avoiding writes that would definitely not + // result in the lock being acquired. + while (lock_.load(std::memory_order_relaxed) != 0) + detail::spin_pause(); + } + } + + void + unlock() + { + lock_.store(0, std::memory_order_release); + } +}; +/** @} */ + +} // namespace ripple + +#endif diff --git a/src/ripple/shamap/impl/SHAMapInnerNode.cpp b/src/ripple/shamap/impl/SHAMapInnerNode.cpp index eb00f8587f2..d408fe195f1 100644 --- a/src/ripple/shamap/impl/SHAMapInnerNode.cpp +++ b/src/ripple/shamap/impl/SHAMapInnerNode.cpp @@ -22,102 +22,19 @@ #include #include #include +#include #include #include #include #include #include -#include - #include #include #include -#ifndef __aarch64__ -// This is used for the _mm_pause instruction: -#include -#endif - namespace ripple { -/** A specialized 16-way spinlock used to protect inner node branches. - - This class packs 16 separate spinlocks into a single 16-bit value. It makes - it possible to lock any one lock at once or, alternatively, all together. - - The implementation tries to use portable constructs but has to be low-level - for performance. - */ -class SpinBitlock -{ -private: - std::atomic& bits_; - std::uint16_t mask_; - -public: - SpinBitlock(std::atomic& lock) : bits_(lock), mask_(0xFFFF) - { - } - - SpinBitlock(std::atomic& lock, int index) - : bits_(lock), mask_(1 << index) - { - assert(index >= 0 && index < 16); - } - - [[nodiscard]] bool - try_lock() - { - // If we want to grab all the individual bitlocks at once we cannot - // use `fetch_or`! To see why, imagine that `lock_ == 0x0020` which - // means that the `fetch_or` would return `0x0020` but all the bits - // would already be (incorrectly!) set. Oops! - std::uint16_t expected = 0; - - if (mask_ != 0xFFFF) - return (bits_.fetch_or(mask_, std::memory_order_acquire) & mask_) == - expected; - - return bits_.compare_exchange_weak( - expected, - mask_, - std::memory_order_acquire, - std::memory_order_relaxed); - } - - void - lock() - { - // Testing suggests that 99.9999% of the time this will succeed, so - // we try to optimize the fast path. 
- if (try_lock()) - return; - - do - { - // We try to spin for a few times: - for (int i = 0; i != 100; ++i) - { - if (try_lock()) - return; - -#ifndef __aarch64__ - _mm_pause(); -#endif - } - - std::this_thread::yield(); - } while ((bits_.load(std::memory_order_relaxed) & mask_) == 0); - } - - void - unlock() - { - bits_.fetch_and(~mask_, std::memory_order_release); - } -}; - SHAMapInnerNode::SHAMapInnerNode( std::uint32_t cowid, std::uint8_t numAllocatedChildren) @@ -185,7 +102,7 @@ SHAMapInnerNode::clone(std::uint32_t cowid) const }); } - SpinBitlock sl(lock_); + spinlock sl(lock_); std::lock_guard lock(sl); if (thisIsSparse) @@ -422,7 +339,7 @@ SHAMapInnerNode::getChildPointer(int branch) auto const index = *getChildIndex(branch); - SpinBitlock sl(lock_, index); + packed_spinlock sl(lock_, index); std::lock_guard lock(sl); return hashesAndChildren_.getChildren()[index].get(); } @@ -435,7 +352,7 @@ SHAMapInnerNode::getChild(int branch) auto const index = *getChildIndex(branch); - SpinBitlock sl(lock_, index); + packed_spinlock sl(lock_, index); std::lock_guard lock(sl); return hashesAndChildren_.getChildren()[index]; } @@ -462,7 +379,7 @@ SHAMapInnerNode::canonicalizeChild( auto [_, hashes, children] = hashesAndChildren_.getHashesAndChildren(); assert(node->getHash() == hashes[childIndex]); - SpinBitlock sl(lock_, childIndex); + packed_spinlock sl(lock_, childIndex); std::lock_guard lock(sl); if (children[childIndex]) From b95ca989651ebbb5242f8101a552c3523a92455e Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Tue, 12 Jul 2022 10:15:09 -0700 Subject: [PATCH 022/508] Fix #4231 (wrong include guard) and an out-of-order construction warning --- src/ripple/protocol/impl/Rules.cpp | 2 +- src/ripple/rpc/BookChanges.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ripple/protocol/impl/Rules.cpp b/src/ripple/protocol/impl/Rules.cpp index baea81d6c99..35a09b85658 100644 --- a/src/ripple/protocol/impl/Rules.cpp +++ b/src/ripple/protocol/impl/Rules.cpp @@ -39,7 +39,7 @@ class Rules::Impl std::unordered_set> const& presets, std::optional const& digest, STVector256 const& amendments) - : presets_(presets), digest_(digest) + : digest_(digest), presets_(presets) { set_.reserve(amendments.size()); set_.insert(amendments.begin(), amendments.end()); diff --git a/src/ripple/rpc/BookChanges.h b/src/ripple/rpc/BookChanges.h index 11f8e704127..5aef1c693dd 100644 --- a/src/ripple/rpc/BookChanges.h +++ b/src/ripple/rpc/BookChanges.h @@ -18,7 +18,7 @@ //============================================================================== #ifndef RIPPLE_RPC_BOOKCHANGES_H_INCLUDED -#define RIPPLE_RPC_BOOKCAHNGES_H_INCLUDED +#define RIPPLE_RPC_BOOKCHANGES_H_INCLUDED namespace Json { class Value; From 22b4de2e4445fa56e892ec40db9aa834c1f7511a Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Sat, 16 Jul 2022 14:57:49 -0700 Subject: [PATCH 023/508] Fix a race condition during shutdown --- src/ripple/nodestore/impl/Database.cpp | 46 ++++++++++++++++++++------ 1 file changed, 36 insertions(+), 10 deletions(-) diff --git a/src/ripple/nodestore/impl/Database.cpp b/src/ripple/nodestore/impl/Database.cpp index 15aad0a02a3..70416c873d5 100644 --- a/src/ripple/nodestore/impl/Database.cpp +++ b/src/ripple/nodestore/impl/Database.cpp @@ -68,11 +68,14 @@ Database::Database( decltype(read_) read; - while (!isStopping()) + while (true) { { std::unique_lock lock(readLock_); + if (isStopping()) + break; + if (read_.empty()) { runningThreads_--; @@ -81,10 +84,10 @@ Database::Database( } if (isStopping()) - 
continue; + break; - // If configured, extract multiple object at a time to - // minimize the overhead of acquiring the mutex. + // extract multiple object at a time to minimize the + // overhead of acquiring the mutex. for (int cnt = 0; !read_.empty() && cnt != requestBundle_; ++cnt) @@ -120,6 +123,7 @@ Database::Database( read.clear(); } + --runningThreads_; --readThreads_; }, i); @@ -160,15 +164,34 @@ Database::maxLedgers(std::uint32_t shardIndex) const noexcept void Database::stop() { - if (!readStopping_.exchange(true, std::memory_order_relaxed)) { std::lock_guard lock(readLock_); - read_.clear(); - readCondVar_.notify_all(); + + if (!readStopping_.exchange(true, std::memory_order_relaxed)) + { + JLOG(j_.debug()) << "Clearing read queue because of stop request"; + read_.clear(); + readCondVar_.notify_all(); + } } + JLOG(j_.debug()) << "Waiting for stop request to complete..."; + + using namespace std::chrono; + + auto const start = steady_clock::now(); + while (readThreads_.load() != 0) + { + assert(steady_clock::now() - start < 30s); std::this_thread::yield(); + } + + JLOG(j_.debug()) << "Stop request completed in " + << duration_cast( + steady_clock::now() - start) + .count() + << " millseconds"; } void @@ -177,10 +200,13 @@ Database::asyncFetch( std::uint32_t ledgerSeq, std::function const&)>&& cb) { - // Post a read std::lock_guard lock(readLock_); - read_[hash].emplace_back(ledgerSeq, std::move(cb)); - readCondVar_.notify_one(); + + if (!isStopping()) + { + read_[hash].emplace_back(ledgerSeq, std::move(cb)); + readCondVar_.notify_one(); + } } void From 83faf43140e27e5d6d6779eaa0ffb75c33d98029 Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Tue, 12 Jul 2022 10:15:37 -0700 Subject: [PATCH 024/508] Set version to 1.9.2-rc1 --- src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index 5fd82220388..62973550aa3 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.9.1" +char const* const versionString = "1.9.2-rc1" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From e5275b857752c2d6078cb8774edbb7e60f179d72 Mon Sep 17 00:00:00 2001 From: manojsdoshi Date: Mon, 25 Jul 2022 13:30:46 -0700 Subject: [PATCH 025/508] Set version to 1.9.2 --- RELEASENOTES.md | 65 +++++++++++++++++++++++++- src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 2 files changed, 65 insertions(+), 2 deletions(-) diff --git a/RELEASENOTES.md b/RELEASENOTES.md index 9412e13ef73..61ac9163105 100644 --- a/RELEASENOTES.md +++ b/RELEASENOTES.md @@ -5,7 +5,70 @@ This document contains the release notes for `rippled`, the reference server implementation of the XRP Ledger protocol. To learn more about how to build, run or update a `rippled` server, visit https://xrpl.org/install-rippled.html -Have new ideas? Need help with setting up your node? Come visit us [here](https://github.com/ripple/rippled/issues/new/choose) +Have new ideas? Need help with setting up your node? Come visit us [here](https://github.com/xrplf/rippled/issues/new/choose) + +# Introducing XRP Ledger version 1.9.2 + +Version 1.9.2 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. 
This release includes several fixes and improvements, including a second new fix amendment to correct a bug in Non-Fungible Tokens (NFTs) code, a new API method for order book changes, less noisy logging, and other small fixes. + + + + +## Action Required + +This release introduces a two new amendments to the XRP Ledger protocol. The first, **fixNFTokenNegOffer**, fixes a bug in code associated with the **NonFungibleTokensV1** amendment, originally introduced in [version 1.9.0](https://xrpl.org/blog/2022/rippled-1.9.0.html). The second, **NonFungibleTokensV1_1**, is a "roll-up" amendment that enables the **NonFungibleTokensV1** feature plus the two fix amendments associated with it, **fixNFTokenDirV1** and **fixNFTokenNegOffer**. + +If you want to enable NFT code on the XRP Ledger Mainnet, you can vote in favor of only the **NonFungibleTokensV1_1** amendment to support enabling the feature and fixes together, without risk that the unfixed NFT code may become enabled first. + +These amendments are now open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. + +If you operate an XRP Ledger server, then you should upgrade to version 1.9.2 within two weeks, to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. + +For more information about NFTs on the XRP Ledger, see [NFT Conceptual Overview](https://xrpl.org/nft-conceptual-overview.html). + +## Install / Upgrade + +On supported platforms, see the [instructions on installing or updating `rippled`](https://xrpl.org/install-rippled.html). + +## Changelog + +This release contains the following features and improvements. + +- **Introduce fixNFTokenNegOffer amendment.** This amendment fixes a bug in the Non-Fungible Tokens (NFTs) functionality provided by the NonFungibleTokensV1 amendment (not currently enabled on Mainnet). The bug allowed users to place offers to buy tokens for negative amounts of money when using Brokered Mode. Anyone who accepted such an offer would transfer the token _and_ pay money. This amendment explicitly disallows offers to buy or sell NFTs for negative amounts of money, and returns an appropriate error code. This also corrects the error code returned when placing offers to buy or sell NFTs for negative amounts in Direct Mode. ([8266d9d](https://github.com/XRPLF/rippled/commit/8266d9d598d19f05e1155956b30ca443c27e119e)) +- **Introduce `NonFungibleTokensV1_1` amendment.** This amendment encompasses three NFT-related amendments: the original NonFungibleTokensV1 amendment (from version 1.9.0), the fixNFTokenDirV1 amendment (from version 1.9.1), and the new fixNFTokenNegOffer amendment from this release. This amendment contains no changes other than enabling those three amendments together; this allows validators to vote in favor of _only_ enabling the feature and fixes at the same time. ([59326bb](https://github.com/XRPLF/rippled/commit/59326bbbc552287e44b3a0d7b8afbb1ddddb3e3b)) +- **Handle invalid port numbers.** If the user specifies a URL with an invalid port number, the server would silently attempt to use port 0 instead. Now it raises an error instead. This affects admin API methods and config file parameters for downloading history shards and specifying validator list sites. 
([#4213](https://github.com/XRPLF/rippled/pull/4213)) +- **Reduce log noisiness.** Decreased the severity of benign log messages in several places: "addPathsForType" messages during regular operation, expected errors during unit tests, and missing optional documentation components when compiling from source. ([#4178](https://github.com/XRPLF/rippled/pull/4178), [#4166](https://github.com/XRPLF/rippled/pull/4166), [#4180](https://github.com/XRPLF/rippled/pull/4180)) +- **Fix race condition in history shard implementation and support clang's ThreadSafetyAnalysis tool.** Added build settings so that developers can use this feature of the clang compiler to analyze the code for correctness, and fix an error found by this tool, which was the source of rare crashes in unit tests. ([#4188](https://github.com/XRPLF/rippled/pull/4188)) +- **Prevent crash when rotating a database with missing data.** When rotating databases, a missing entry could cause the server to crash. While there should never be a missing database entry, this change keeps the server running by aborting database rotation. ([#4182](https://github.com/XRPLF/rippled/pull/4182)) +- **Fix bitwise comparison in OfferCreate.** Fixed an expression that incorrectly used a bitwise comparison for two boolean values rather than a true boolean comparison. The outcome of the two comparisons is equivalent, so this is not a transaction processing change, but the bitwise comparison relied on compilers to implicitly fix the expression. ([#4183](https://github.com/XRPLF/rippled/pull/4183)) +- **Disable cluster timer when not in a cluster.** Disabled a timer that was unused on servers not running in clustered mode. The functionality of clustered servers is unchanged. ([#4173](https://github.com/XRPLF/rippled/pull/4173)) +- **Limit how often to process peer discovery messages.** In the peer-to-peer network, servers periodically share IP addresses of their peers with each other to facilitate peer discovery. It is not necessary to process these types of messages too often; previously, the code tracked whether it needed to process new messages of this type but always processed them anyway. With this change, the server no longer processes peer discovery messages if it has done so recently. ([#4202](https://github.com/XRPLF/rippled/pull/4202)) +- **Improve STVector256 deserialization.** Optimized the processing of this data type in protocol messages. This data type is used in several types of ledger entry that are important for bookkeeping, including directory pages that track other ledger types, amendments tracking, and the ledger hashes history. ([#4204](https://github.com/XRPLF/rippled/pull/4204)) +- **Fix and refactor spinlock code.** The spinlock code, which protects the `SHAMapInnerNode` child lists, had a mistake that allowed the same child to be repeatedly locked under some circumstances. Fixed this bug and improved the spinlock code to make it easier to use correctly and easier to verify that the code works correctly. ([#4201](https://github.com/XRPLF/rippled/pull/4201)) +- **Improve comments and contributor documentation.** Various minor documentation changes including some to reflect the fact that the source code repository is now owned by the XRP Ledger Foundation. 
([#4214](https://github.com/XRPLF/rippled/pull/4214), [#4179](https://github.com/XRPLF/rippled/pull/4179), [#4222](https://github.com/XRPLF/rippled/pull/4222)) +- **Introduces a new API book_changes to provide information in a format that is useful for building charts that highlight DEX activity at a per-ledger level.** ([#4212](https://github.com/XRPLF/rippled/pull/4212)) + +## Contributions + +### GitHub + +The public source code repository for `rippled` is hosted on GitHub at . + +We welcome contributions, big and small, and invite everyone to join the community of XRP Ledger developers and help us build the Internet of Value. + +### Credits + +The following people contributed directly to this release: + +- Chenna Keshava B S +- Ed Hennis +- Ikko Ashimine +- Nik Bougalis +- Richard Holland +- Scott Schurr +- Scott Determan + +For a real-time view of all lifetime contributors, including links to the commits made by each, please visit the "Contributors" section of the GitHub repository: . # Introducing XRP Ledger version 1.9.1 diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index 62973550aa3..c51e0d595d1 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.9.2-rc1" +char const* const versionString = "1.9.2" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 21a3f4a5b5115bb344e85917be43722e66c76ce2 Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Wed, 27 Jul 2022 12:32:56 -0400 Subject: [PATCH 026/508] Change by-value to by-reference to persist vote --- src/ripple/app/misc/impl/AmendmentTable.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/app/misc/impl/AmendmentTable.cpp b/src/ripple/app/misc/impl/AmendmentTable.cpp index 5f75d29540a..93113af800e 100644 --- a/src/ripple/app/misc/impl/AmendmentTable.cpp +++ b/src/ripple/app/misc/impl/AmendmentTable.cpp @@ -425,7 +425,7 @@ AmendmentTableImpl::AmendmentTableImpl( } else // up-vote { - auto s = add(amend_hash, lock); + AmendmentState& s = add(amend_hash, lock); JLOG(j_.debug()) << "Amendment {" << *amendment_name << ", " << amend_hash << "} is upvoted."; From 6a9c270776e7a71fb76a52d040f1cd28b257eec7 Mon Sep 17 00:00:00 2001 From: Crypto Brad Garlinghouse Date: Tue, 26 Jul 2022 04:41:01 +0000 Subject: [PATCH 027/508] Properly handle self-assignment of PublicKey --- src/ripple/protocol/impl/PublicKey.cpp | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/ripple/protocol/impl/PublicKey.cpp b/src/ripple/protocol/impl/PublicKey.cpp index 9fed78088df..ac86634f1ef 100644 --- a/src/ripple/protocol/impl/PublicKey.cpp +++ b/src/ripple/protocol/impl/PublicKey.cpp @@ -24,7 +24,6 @@ #include #include #include -#include namespace ripple { @@ -186,14 +185,18 @@ PublicKey::PublicKey(PublicKey const& other) : size_(other.size_) { if (size_) std::memcpy(buf_, other.buf_, size_); -}; +} PublicKey& PublicKey::operator=(PublicKey const& other) { - size_ = other.size_; - if (size_) - std::memcpy(buf_, other.buf_, size_); + if (this != &other) + { + size_ = other.size_; + if (size_) + std::memcpy(buf_, other.buf_, size_); + } + return *this; } From cd3a6bf530563b0b739fd02fb9d0eee1b5d70264 Mon Sep 17 00:00:00 2001 From: Wo Jake 
<87929946+wojake@users.noreply.github.com> Date: Tue, 2 Aug 2022 08:36:12 +0000 Subject: [PATCH 028/508] Document the "DefaultVote::no" policy in the code --- src/ripple/protocol/impl/Feature.cpp | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index 4060067e30a..fcd774ce9fd 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -52,7 +52,7 @@ enum class Supported : bool { no = false, yes }; // enabled using run-time conditionals based on the state of the amendment. // There is value in retaining that conditional code for some time after // the amendment is enabled to make it simple to replay old transactions. -// However, once an Amendment has been enabled for, say, more than two years +// However, once an amendment has been enabled for, say, more than two years // then retaining that conditional code has less value since it is // uncommon to replay such old transactions. // @@ -61,10 +61,15 @@ enum class Supported : bool { no = false, yes }; // 2018 needs to happen on an older version of the server code. There's // a log message in Application.cpp that warns about replaying old ledgers. // -// At some point in the future someone may wish to remove Amendment -// conditional code for Amendments that were enabled after January 2018. +// At some point in the future someone may wish to remove amendment +// conditional code for amendments that were enabled after January 2018. // When that happens then the log message in Application.cpp should be // updated. +// +// Generally, amendments which introduce new features should be set as +// "DefaultVote::no" whereas in rare cases, amendments that fix critical +// bugs should be set as "DefaultVote::yes", if off-chain consensus is +// reached amongst reviewers, validator operators, and other participants. class FeatureCollections { From 47dec467ea659c1b64c7b5f4eb8a1bfa9759ff91 Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Fri, 19 Aug 2022 14:40:53 -0700 Subject: [PATCH 029/508] Set version to 1.9.3 --- RELEASENOTES.md | 32 ++++++++++++++++++++++++++ src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/RELEASENOTES.md b/RELEASENOTES.md index 61ac9163105..5f5810fc615 100644 --- a/RELEASENOTES.md +++ b/RELEASENOTES.md @@ -7,6 +7,38 @@ This document contains the release notes for `rippled`, the reference server imp Have new ideas? Need help with setting up your node? Come visit us [here](https://github.com/xrplf/rippled/issues/new/choose) +# Introducing XRP Ledger version 1.9.3 + +Version 1.9.3 of `rippled`, the reference server implementation of the XRP Ledger protocol is now available. This release corrects minor technical flaws with the code that loads configured amendment votes after a startup and the copy constructor of `PublicKey`. + +## Install / Upgrade + +On supported platforms, see the [instructions on installing or updating `rippled`](https://xrpl.org/install-rippled.html). + +## Changelog + +## Contributions + +This releases contains the following bug fixes: + +- **Change by-value to by-reference to persist vote**: A minor technical flaw, caused by use of a copy instead of a reference, resulted in operator-configured "yes" votes to not be properly loaded after a restart. 
([#4256](https://github.com/XRPLF/rippled/pull/4256)) +- **Properly handle self-assignment of PublicKey**: The `PublicKey` copy assignment operator mishandled the case where a `PublicKey` would be assigned to itself, and could result in undefined behavior. + +### GitHub + +The public source code repository for `rippled` is hosted on GitHub at . + +We welcome contributions, big and small, and invite everyone to join the community of XRP Ledger developers and help us build the Internet of Value. + +### Credits + +The following people contributed directly to this release: + +- Howard Hinnant +- Crypto Brad Garlinghouse +- Wo Jake <87929946+wojake@users.noreply.github.com> + + # Introducing XRP Ledger version 1.9.2 Version 1.9.2 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release includes several fixes and improvements, including a second new fix amendment to correct a bug in Non-Fungible Tokens (NFTs) code, a new API method for order book changes, less noisy logging, and other small fixes. diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index c51e0d595d1..734dc11cb38 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.9.2" +char const* const versionString = "1.9.3" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 92d35e54c7de6bbe44ff6c7c52cc0765b3f78258 Mon Sep 17 00:00:00 2001 From: seelabs Date: Tue, 16 Nov 2021 22:31:34 -0500 Subject: [PATCH 030/508] Switch from C++17 to C++20 --- CMakeLists.txt | 2 +- src/ripple/app/consensus/RCLConsensus.cpp | 4 +- src/ripple/app/paths/impl/BookStep.cpp | 8 +-- src/ripple/basics/TaggedCache.h | 2 +- src/ripple/beast/utility/PropertyStream.h | 12 ---- .../utility/src/beast_PropertyStream.cpp | 36 ---------- src/ripple/consensus/Validations.h | 4 +- src/ripple/ledger/OpenView.h | 2 +- src/ripple/ledger/detail/RawStateTable.h | 2 +- .../nodestore/backend/RocksDBFactory.cpp | 5 +- src/ripple/overlay/Slot.h | 39 +++++------ src/ripple/overlay/impl/PeerImp.cpp | 2 +- src/ripple/protocol/Feature.h | 1 - src/ripple/protocol/Serializer.h | 8 +-- src/ripple/rpc/impl/ShardArchiveHandler.cpp | 67 ++++++++++--------- src/test/app/Path_test.cpp | 2 +- src/test/core/Coroutine_test.cpp | 2 +- src/test/csf/Peer.h | 2 +- 18 files changed, 77 insertions(+), 123 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c0c0fff9444..d3b494c103a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -6,7 +6,7 @@ endif () project (rippled) set(CMAKE_CXX_EXTENSIONS OFF) -set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED ON) # make GIT_COMMIT_HASH define available to all sources diff --git a/src/ripple/app/consensus/RCLConsensus.cpp b/src/ripple/app/consensus/RCLConsensus.cpp index 79d41581ae3..a4a4e0989a7 100644 --- a/src/ripple/app/consensus/RCLConsensus.cpp +++ b/src/ripple/app/consensus/RCLConsensus.cpp @@ -421,7 +421,9 @@ RCLConsensus::Adaptor::onAccept( Json::Value&& consensusJson) { app_.getJobQueue().addJob( - jtACCEPT, "acceptLedger", [=, cj = std::move(consensusJson)]() mutable { + jtACCEPT, + "acceptLedger", + [=, this, cj = std::move(consensusJson)]() mutable { // Note that no lock is held or acquired during this job. 
// This is because generic Consensus guarantees that once a ledger // is accepted, the consensus results and capture by reference state diff --git a/src/ripple/app/paths/impl/BookStep.cpp b/src/ripple/app/paths/impl/BookStep.cpp index a80ee13f8a9..a6b2c59611e 100644 --- a/src/ripple/app/paths/impl/BookStep.cpp +++ b/src/ripple/app/paths/impl/BookStep.cpp @@ -1125,10 +1125,10 @@ bookStepEqual(Step const& step, ripple::Book const& book) bool const inXRP = isXRP(book.in.currency); bool const outXRP = isXRP(book.out.currency); if (inXRP && outXRP) - return equalHelper< - XRPAmount, - XRPAmount, - BookPaymentStep>(step, book); + { + assert(0); + return false; // no such thing as xrp/xrp book step + } if (inXRP && !outXRP) return equalHelper< XRPAmount, diff --git a/src/ripple/basics/TaggedCache.h b/src/ripple/basics/TaggedCache.h index 548d21dc78e..d9a1b542ed9 100644 --- a/src/ripple/basics/TaggedCache.h +++ b/src/ripple/basics/TaggedCache.h @@ -684,7 +684,7 @@ class TaggedCache { // strong, expired ++cacheRemovals; - if (cit->second.ptr.unique()) + if (cit->second.ptr.use_count() == 1) { stuffToSweep.push_back(cit->second.ptr); ++mapRemovals; diff --git a/src/ripple/beast/utility/PropertyStream.h b/src/ripple/beast/utility/PropertyStream.h index bfedb39ecd9..dbcc8a2d793 100644 --- a/src/ripple/beast/utility/PropertyStream.h +++ b/src/ripple/beast/utility/PropertyStream.h @@ -77,12 +77,6 @@ class PropertyStream add(std::string const& key, signed char value); virtual void add(std::string const& key, unsigned char value); - virtual void - add(std::string const& key, wchar_t value); -#if 0 - virtual void add (std::string const& key, char16_t value); - virtual void add (std::string const& key, char32_t value); -#endif virtual void add(std::string const& key, short value); virtual void @@ -139,12 +133,6 @@ class PropertyStream add(signed char value); virtual void add(unsigned char value); - virtual void - add(wchar_t value); -#if 0 - virtual void add (char16_t value); - virtual void add (char32_t value); -#endif virtual void add(short value); virtual void diff --git a/src/ripple/beast/utility/src/beast_PropertyStream.cpp b/src/ripple/beast/utility/src/beast_PropertyStream.cpp index 70c5ab9a809..ecd707e9554 100644 --- a/src/ripple/beast/utility/src/beast_PropertyStream.cpp +++ b/src/ripple/beast/utility/src/beast_PropertyStream.cpp @@ -414,24 +414,6 @@ PropertyStream::add(std::string const& key, unsigned char value) lexical_add(key, value); } -void -PropertyStream::add(std::string const& key, wchar_t value) -{ - lexical_add(key, value); -} - -#if 0 -void PropertyStream::add (std::string const& key, char16_t value) -{ - lexical_add (key, value); -} - -void PropertyStream::add (std::string const& key, char32_t value) -{ - lexical_add (key, value); -} -#endif - void PropertyStream::add(std::string const& key, short value) { @@ -525,24 +507,6 @@ PropertyStream::add(unsigned char value) lexical_add(value); } -void -PropertyStream::add(wchar_t value) -{ - lexical_add(value); -} - -#if 0 -void PropertyStream::add (char16_t value) -{ - lexical_add (value); -} - -void PropertyStream::add (char32_t value) -{ - lexical_add (value); -} -#endif - void PropertyStream::add(short value) { diff --git a/src/ripple/consensus/Validations.h b/src/ripple/consensus/Validations.h index 9200ac883c7..46bf4322a5a 100644 --- a/src/ripple/consensus/Validations.h +++ b/src/ripple/consensus/Validations.h @@ -27,8 +27,10 @@ #include #include #include + #include #include +#include #include #include @@ -294,7 +296,7 @@ class 
Validations using NodeKey = typename Validation::NodeKey; using WrappedValidationType = std::decay_t< - std::result_of_t>; + std::invoke_result_t>; // Manages concurrent access to members mutable Mutex mutex_; diff --git a/src/ripple/ledger/OpenView.h b/src/ripple/ledger/OpenView.h index 8467e4abcc1..98b783e3a48 100644 --- a/src/ripple/ledger/OpenView.h +++ b/src/ripple/ledger/OpenView.h @@ -77,7 +77,7 @@ class OpenView final : public ReadView, public TxsRawView }; // List of tx, key order - // Use the boost pmr functionality instead of the c++-17 standard pmr + // Use boost::pmr functionality instead of std::pmr // functions b/c clang does not support pmr yet (as-of 9/2020) using txs_map = std::map< key_type, diff --git a/src/ripple/ledger/detail/RawStateTable.h b/src/ripple/ledger/detail/RawStateTable.h index 2bee9e2a948..2bb38dc49d8 100644 --- a/src/ripple/ledger/detail/RawStateTable.h +++ b/src/ripple/ledger/detail/RawStateTable.h @@ -119,7 +119,7 @@ class RawStateTable } }; - // Use the boost pmr functionality instead of the c++-17 standard pmr + // Use boost::pmr functionality instead of the std::pmr // functions b/c clang does not support pmr yet (as-of 9/2020) using items_t = std::map< key_type, diff --git a/src/ripple/nodestore/backend/RocksDBFactory.cpp b/src/ripple/nodestore/backend/RocksDBFactory.cpp index e17dc55de7d..1a9e529e103 100644 --- a/src/ripple/nodestore/backend/RocksDBFactory.cpp +++ b/src/ripple/nodestore/backend/RocksDBFactory.cpp @@ -23,6 +23,7 @@ #include #include +#include #include #include // VFALCO Bad dependency #include @@ -30,6 +31,7 @@ #include #include #include + #include #include @@ -310,7 +312,8 @@ class RocksDBBackend : public Backend, public BatchWriter::Callback } else { - status = Status(customCode + getStatus.code()); + status = + Status(customCode + unsafe_cast(getStatus.code())); JLOG(m_journal.error()) << getStatus.ToString(); } diff --git a/src/ripple/overlay/Slot.h b/src/ripple/overlay/Slot.h index b7a2129ed82..1197eff56ef 100644 --- a/src/ripple/overlay/Slot.h +++ b/src/ripple/overlay/Slot.h @@ -494,16 +494,11 @@ template std::set Slot::getSelected() const { - std::set init; - return std::accumulate( - peers_.begin(), peers_.end(), init, [](auto& init, auto const& it) { - if (it.second.state == PeerState::Selected) - { - init.insert(it.first); - return init; - } - return init; - }); + std::set r; + for (auto const& [id, info] : peers_) + if (info.state == PeerState::Selected) + r.insert(id); + return r; } template @@ -513,20 +508,20 @@ std::unordered_map< Slot::getPeers() const { using namespace std::chrono; - auto init = std::unordered_map< + auto r = std::unordered_map< id_t, std::tuple>(); - return std::accumulate( - peers_.begin(), peers_.end(), init, [](auto& init, auto const& it) { - init.emplace(std::make_pair( - it.first, - std::move(std::make_tuple( - it.second.state, - it.second.count, - epoch(it.second.expire).count(), - epoch(it.second.lastMessage).count())))); - return init; - }); + + for (auto const& [id, info] : peers_) + r.emplace(std::make_pair( + id, + std::move(std::make_tuple( + info.state, + info.count, + epoch(info.expire).count(), + epoch(info.lastMessage).count())))); + + return r; } /** Slots is a container for validator's Slot and handles Slot update diff --git a/src/ripple/overlay/impl/PeerImp.cpp b/src/ripple/overlay/impl/PeerImp.cpp index 60870c90a15..5962ab0dfcd 100644 --- a/src/ripple/overlay/impl/PeerImp.cpp +++ b/src/ripple/overlay/impl/PeerImp.cpp @@ -2119,7 +2119,7 @@ PeerImp::onMessage(std::shared_ptr 
const& m) m->ledgerseq(), app_.getLedgerMaster().getValidLedgerIndex()); } - app_.getOPs().pubPeerStatus([=]() -> Json::Value { + app_.getOPs().pubPeerStatus([=, this]() -> Json::Value { Json::Value j = Json::objectValue; if (m->has_newstatus()) diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index f0d0c8efbb5..b46e4d588ed 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -126,7 +126,6 @@ class FeatureBitset : private std::bitset public: using base::bitset; using base::operator==; - using base::operator!=; using base::all; using base::any; diff --git a/src/ripple/protocol/Serializer.h b/src/ripple/protocol/Serializer.h index 37058f5622f..7c3ccf9580b 100644 --- a/src/ripple/protocol/Serializer.h +++ b/src/ripple/protocol/Serializer.h @@ -251,22 +251,22 @@ class Serializer } bool - operator==(Blob const& v) + operator==(Blob const& v) const { return v == mData; } bool - operator!=(Blob const& v) + operator!=(Blob const& v) const { return v != mData; } bool - operator==(const Serializer& v) + operator==(const Serializer& v) const { return v.mData == mData; } bool - operator!=(const Serializer& v) + operator!=(const Serializer& v) const { return v.mData != mData; } diff --git a/src/ripple/rpc/impl/ShardArchiveHandler.cpp b/src/ripple/rpc/impl/ShardArchiveHandler.cpp index 5e5635475e1..2284780c2b6 100644 --- a/src/ripple/rpc/impl/ShardArchiveHandler.cpp +++ b/src/ripple/rpc/impl/ShardArchiveHandler.cpp @@ -416,41 +416,42 @@ ShardArchiveHandler::complete(path dstPath) } // Make lambdas mutable captured vars can be moved from - auto wrapper = jobCounter_.wrap([=, - dstPath = std::move(dstPath)]() mutable { - if (stopping_) - return; + auto wrapper = + jobCounter_.wrap([=, this, dstPath = std::move(dstPath)]() mutable { + if (stopping_) + return; - // If not synced then defer and retry - auto const mode{app_.getOPs().getOperatingMode()}; - if (mode != OperatingMode::FULL) - { - std::lock_guard lock(m_); - timer_.expires_from_now(static_cast( - (static_cast(OperatingMode::FULL) - - static_cast(mode)) * - 10)); - - auto wrapper = timerCounter_.wrap( - [=, dstPath = std::move(dstPath)]( - boost::system::error_code const& ec) mutable { - if (ec != boost::asio::error::operation_aborted) - complete(std::move(dstPath)); - }); - - if (!wrapper) - onClosureFailed( - "failed to wrap closure for operating mode timer", lock); + // If not synced then defer and retry + auto const mode{app_.getOPs().getOperatingMode()}; + if (mode != OperatingMode::FULL) + { + std::lock_guard lock(m_); + timer_.expires_from_now(static_cast( + (static_cast(OperatingMode::FULL) - + static_cast(mode)) * + 10)); + + auto wrapper = timerCounter_.wrap( + [=, this, dstPath = std::move(dstPath)]( + boost::system::error_code const& ec) mutable { + if (ec != boost::asio::error::operation_aborted) + complete(std::move(dstPath)); + }); + + if (!wrapper) + onClosureFailed( + "failed to wrap closure for operating mode timer", + lock); + else + timer_.async_wait(*wrapper); + } else - timer_.async_wait(*wrapper); - } - else - { - process(dstPath); - std::lock_guard lock(m_); - removeAndProceed(lock); - } - }); + { + process(dstPath); + std::lock_guard lock(m_); + removeAndProceed(lock); + } + }); if (!wrapper) { diff --git a/src/test/app/Path_test.cpp b/src/test/app/Path_test.cpp index 05d23e82976..ef290393dab 100644 --- a/src/test/app/Path_test.cpp +++ b/src/test/app/Path_test.cpp @@ -203,7 +203,7 @@ class Path_test : public beast::unit_test::suite wait_for(std::chrono::duration const& 
rel_time) { std::unique_lock lk(mutex_); - auto b = cv_.wait_for(lk, rel_time, [=] { return signaled_; }); + auto b = cv_.wait_for(lk, rel_time, [this] { return signaled_; }); signaled_ = false; return b; } diff --git a/src/test/core/Coroutine_test.cpp b/src/test/core/Coroutine_test.cpp index 8937255a75b..6d1e5e33304 100644 --- a/src/test/core/Coroutine_test.cpp +++ b/src/test/core/Coroutine_test.cpp @@ -44,7 +44,7 @@ class Coroutine_test : public beast::unit_test::suite wait_for(std::chrono::duration const& rel_time) { std::unique_lock lk(mutex_); - auto b = cv_.wait_for(lk, rel_time, [=] { return signaled_; }); + auto b = cv_.wait_for(lk, rel_time, [this] { return signaled_; }); signaled_ = false; return b; } diff --git a/src/test/csf/Peer.h b/src/test/csf/Peer.h index 3a61b853c27..6d3008f7348 100644 --- a/src/test/csf/Peer.h +++ b/src/test/csf/Peer.h @@ -538,7 +538,7 @@ struct Peer ConsensusMode const& mode, Json::Value&& consensusJson) { - schedule(delays.ledgerAccept, [=]() { + schedule(delays.ledgerAccept, [=, this]() { const bool proposing = mode == ConsensusMode::proposing; const bool consensusFail = result.state == ConsensusState::MovedOn; From 69bb2be446e3cc24c694c0835b48bd2ecd3d119e Mon Sep 17 00:00:00 2001 From: Crypto Brad Garlinghouse Date: Tue, 19 Jul 2022 18:51:31 +0000 Subject: [PATCH 031/508] Introduce amendment to handle trustlines to self: Trustlines must be between two different accounts but two trustlines exist where an account extends trust to itself. They were created in the early days, likely because of bugs that have been fixed. The new fixTrustLinesToSelf amendment will remove those trustlines when it activates. --- src/ripple/app/tx/impl/Change.cpp | 87 ++++++++++++++++++++++++++++ src/ripple/app/tx/impl/Change.h | 3 + src/ripple/app/tx/impl/SetTrust.cpp | 53 ++++++++++------- src/ripple/protocol/Feature.h | 3 +- src/ripple/protocol/impl/Feature.cpp | 1 + 5 files changed, 124 insertions(+), 23 deletions(-) diff --git a/src/ripple/app/tx/impl/Change.cpp b/src/ripple/app/tx/impl/Change.cpp index bd66d7d5863..93ed1a04f92 100644 --- a/src/ripple/app/tx/impl/Change.cpp +++ b/src/ripple/app/tx/impl/Change.cpp @@ -23,9 +23,11 @@ #include #include #include +#include #include #include #include +#include namespace ripple { @@ -120,6 +122,88 @@ Change::preCompute() assert(account_ == beast::zero); } +void +Change::activateTrustLinesToSelfFix() +{ + JLOG(j_.warn()) << "fixTrustLinesToSelf amendment activation code starting"; + + auto removeTrustLineToSelf = [this](Sandbox& sb, uint256 id) { + auto tl = sb.peek(keylet::child(id)); + + if (tl == nullptr) + { + JLOG(j_.warn()) << id << ": Unable to locate trustline"; + return true; + } + + if (tl->getType() != ltRIPPLE_STATE) + { + JLOG(j_.warn()) << id << ": Unexpected type " + << static_cast(tl->getType()); + return true; + } + + auto const& lo = tl->getFieldAmount(sfLowLimit); + auto const& hi = tl->getFieldAmount(sfHighLimit); + + if (lo != hi) + { + JLOG(j_.warn()) << id << ": Trustline doesn't meet requirements"; + return true; + } + + if (auto const page = tl->getFieldU64(sfLowNode); !sb.dirRemove( + keylet::ownerDir(lo.getIssuer()), page, tl->key(), false)) + { + JLOG(j_.error()) << id << ": failed to remove low entry from " + << toBase58(lo.getIssuer()) << ":" << page + << " owner directory"; + return false; + } + + if (auto const page = tl->getFieldU64(sfHighNode); !sb.dirRemove( + keylet::ownerDir(hi.getIssuer()), page, tl->key(), false)) + { + JLOG(j_.error()) << id << ": failed to remove high entry from " + << 
toBase58(hi.getIssuer()) << ":" << page + << " owner directory"; + return false; + } + + if (tl->getFlags() & lsfLowReserve) + adjustOwnerCount( + sb, sb.peek(keylet::account(lo.getIssuer())), -1, j_); + + if (tl->getFlags() & lsfHighReserve) + adjustOwnerCount( + sb, sb.peek(keylet::account(hi.getIssuer())), -1, j_); + + sb.erase(tl); + + JLOG(j_.warn()) << "Successfully deleted trustline " << id; + + return true; + }; + + using namespace std::literals; + + Sandbox sb(&view()); + + if (removeTrustLineToSelf( + sb, + uint256{ + "2F8F21EFCAFD7ACFB07D5BB04F0D2E18587820C7611305BB674A64EAB0FA71E1"sv}) && + removeTrustLineToSelf( + sb, + uint256{ + "326035D5C0560A9DA8636545DD5A1B0DFCFF63E68D491B5522B767BB00564B1A"sv})) + { + JLOG(j_.warn()) << "fixTrustLinesToSelf amendment activation code " + "executed successfully"; + sb.apply(ctx_.rawView()); + } +} + TER Change::applyAmendment() { @@ -196,6 +280,9 @@ Change::applyAmendment() amendments.push_back(amendment); amendmentObject->setFieldV256(sfAmendments, amendments); + if (amendment == fixTrustLinesToSelf) + activateTrustLinesToSelfFix(); + ctx_.app.getAmendmentTable().enable(amendment); if (!ctx_.app.getAmendmentTable().isSupported(amendment)) diff --git a/src/ripple/app/tx/impl/Change.h b/src/ripple/app/tx/impl/Change.h index acd21837e69..0ee7067b323 100644 --- a/src/ripple/app/tx/impl/Change.h +++ b/src/ripple/app/tx/impl/Change.h @@ -56,6 +56,9 @@ class Change : public Transactor preclaim(PreclaimContext const& ctx); private: + void + activateTrustLinesToSelfFix(); + TER applyAmendment(); diff --git a/src/ripple/app/tx/impl/SetTrust.cpp b/src/ripple/app/tx/impl/SetTrust.cpp index 5f268f2c26b..23af19c7b15 100644 --- a/src/ripple/app/tx/impl/SetTrust.cpp +++ b/src/ripple/app/tx/impl/SetTrust.cpp @@ -104,19 +104,27 @@ SetTrust::preclaim(PreclaimContext const& ctx) auto const currency = saLimitAmount.getCurrency(); auto const uDstAccountID = saLimitAmount.getIssuer(); - if (id == uDstAccountID) + if (ctx.view.rules().enabled(fixTrustLinesToSelf)) { - // Prevent trustline to self from being created, - // unless one has somehow already been created - // (in which case doApply will clean it up). - auto const sleDelete = - ctx.view.read(keylet::line(id, uDstAccountID, currency)); - - if (!sleDelete) - { - JLOG(ctx.j.trace()) - << "Malformed transaction: Can not extend credit to self."; + if (id == uDstAccountID) return temDST_IS_SRC; + } + else + { + if (id == uDstAccountID) + { + // Prevent trustline to self from being created, + // unless one has somehow already been created + // (in which case doApply will clean it up). + auto const sleDelete = + ctx.view.read(keylet::line(id, uDstAccountID, currency)); + + if (!sleDelete) + { + JLOG(ctx.j.trace()) + << "Malformed transaction: Can not extend credit to self."; + return temDST_IS_SRC; + } } } @@ -183,18 +191,19 @@ SetTrust::doApply() auto viewJ = ctx_.app.journal("View"); - if (account_ == uDstAccountID) + // Trust lines to self are impossible but because of the old bug there are + // two on 19-02-2022. This code was here to allow those trust lines to be + // deleted. The fixTrustLinesToSelf fix amendment will remove them when it + // enables so this code will no longer be needed. + if (!view().rules().enabled(fixTrustLinesToSelf) && + account_ == uDstAccountID) { - // The only purpose here is to allow a mistakenly created - // trust line to oneself to be deleted. If no such trust - // lines exist now, why not remove this code and simply - // return an error? 
- SLE::pointer sleDelete = - view().peek(keylet::line(account_, uDstAccountID, currency)); - - JLOG(j_.warn()) << "Clearing redundant line."; - - return trustDelete(view(), sleDelete, account_, uDstAccountID, viewJ); + return trustDelete( + view(), + view().peek(keylet::line(account_, uDstAccountID, currency)), + account_, + uDstAccountID, + viewJ); } SLE::pointer sleDst = view().peek(keylet::account(uDstAccountID)); diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index b46e4d588ed..fc3256d1699 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -74,7 +74,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. A LogicError on startup will verify this. -static constexpr std::size_t numFeatures = 50; +static constexpr std::size_t numFeatures = 51; /** Amendments that this server supports and the default voting behavior. Whether they are enabled depends on the Rules defined in the validated @@ -337,6 +337,7 @@ extern uint256 const featureExpandedSignerList; extern uint256 const fixNFTokenDirV1; extern uint256 const fixNFTokenNegOffer; extern uint256 const featureNonFungibleTokensV1_1; +extern uint256 const fixTrustLinesToSelf; } // namespace ripple diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index fcd774ce9fd..e33b9efeb75 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -447,6 +447,7 @@ REGISTER_FEATURE(ExpandedSignerList, Supported::yes, DefaultVote::no) REGISTER_FIX (fixNFTokenDirV1, Supported::yes, DefaultVote::no); REGISTER_FIX (fixNFTokenNegOffer, Supported::yes, DefaultVote::no); REGISTER_FEATURE(NonFungibleTokensV1_1, Supported::yes, DefaultVote::no); +REGISTER_FIX (fixTrustLinesToSelf, Supported::yes, DefaultVote::no); // The following amendments have been active for at least two years. Their // pre-amendment code has been removed and the identifiers are deprecated. 
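Note: the SetTrust change above follows the usual amendment-gating pattern — the
stricter rule only takes effect once `fixTrustLinesToSelf` is enabled in the
ledger's rules, so transactions in historical ledgers still replay with the
pre-amendment logic. A minimal sketch of that pattern, modeled on the preclaim
hook in this patch (the function name is illustrative and assumes the usual
rippled headers; it is not part of the patch itself):

    // Sketch only: gate the new validation behind the amendment.
    TER
    exampleGatedCheck(
        PreclaimContext const& ctx,
        AccountID const& account,
        AccountID const& issuer)
    {
        if (ctx.view.rules().enabled(fixTrustLinesToSelf))
        {
            // Post-amendment: a trust line to self is always malformed.
            if (account == issuer)
                return temDST_IS_SRC;
        }

        // Pre-amendment behavior is left unchanged so that historical
        // ledgers replay exactly as they were originally applied.
        return tesSUCCESS;
    }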
From e67f90588a9050162881389d7e7d1d0fb31066b0 Mon Sep 17 00:00:00 2001 From: Crypto Brad Garlinghouse Date: Sun, 24 Jul 2022 19:59:51 +0000 Subject: [PATCH 032/508] Use constexpr to check memo validity --- src/ripple/protocol/impl/STTx.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/ripple/protocol/impl/STTx.cpp b/src/ripple/protocol/impl/STTx.cpp index 66d20f3167a..1ce4ddb64b7 100644 --- a/src/ripple/protocol/impl/STTx.cpp +++ b/src/ripple/protocol/impl/STTx.cpp @@ -481,11 +481,10 @@ isMemoOkay(STObject const& st, std::string& reason) // The only allowed characters for MemoType and MemoFormat are the // characters allowed in URLs per RFC 3986: alphanumerics and the // following symbols: -._~:/?#[]@!$&'()*+,;=% - static std::array const allowedSymbols = [] { - std::array a; - a.fill(0); + static constexpr std::array const allowedSymbols = []() { + std::array a{}; - std::string symbols( + std::string_view symbols( "0123456789" "-._~:/?#[]@!$&'()*+,;=%" "ABCDEFGHIJKLMNOPQRSTUVWXYZ" From 83ac141f656b1a95b5661853951ebd95b3ffba99 Mon Sep 17 00:00:00 2001 From: Crypto Brad Garlinghouse Date: Tue, 9 Aug 2022 19:05:24 +0000 Subject: [PATCH 033/508] Remove charUnHex --- Builds/CMake/RippledCore.cmake | 1 - src/ripple/basics/StringUtilities.h | 28 +++++++++++++---- src/ripple/basics/impl/strHex.cpp | 49 ----------------------------- src/ripple/basics/strHex.h | 15 --------- 4 files changed, 22 insertions(+), 71 deletions(-) delete mode 100644 src/ripple/basics/impl/strHex.cpp diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index ec9e23663fd..10af3b3109b 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -50,7 +50,6 @@ target_sources (xrpl_core PRIVATE src/ripple/basics/impl/FileUtilities.cpp src/ripple/basics/impl/IOUAmount.cpp src/ripple/basics/impl/Log.cpp - src/ripple/basics/impl/strHex.cpp src/ripple/basics/impl/StringUtilities.cpp #[===============================[ main sources: diff --git a/src/ripple/basics/StringUtilities.h b/src/ripple/basics/StringUtilities.h index 81e9ae82659..48de772ca41 100644 --- a/src/ripple/basics/StringUtilities.h +++ b/src/ripple/basics/StringUtilities.h @@ -25,6 +25,7 @@ #include #include +#include #include #include #include @@ -48,6 +49,24 @@ template std::optional strUnHex(std::size_t strSize, Iterator begin, Iterator end) { + static constexpr std::array const unxtab = []() { + std::array t{}; + + for (auto& x : t) + x = -1; + + for (int i = 0; i < 10; ++i) + t['0' + i] = i; + + for (int i = 0; i < 6; ++i) + { + t['A' + i] = 10 + i; + t['a' + i] = 10 + i; + } + + return t; + }(); + Blob out; out.reserve((strSize + 1) / 2); @@ -56,25 +75,22 @@ strUnHex(std::size_t strSize, Iterator begin, Iterator end) if (strSize & 1) { - int c = charUnHex(*iter); + int c = unxtab[*iter++]; if (c < 0) return {}; out.push_back(c); - ++iter; } while (iter != end) { - int cHigh = charUnHex(*iter); - ++iter; + int cHigh = unxtab[*iter++]; if (cHigh < 0) return {}; - int cLow = charUnHex(*iter); - ++iter; + int cLow = unxtab[*iter++]; if (cLow < 0) return {}; diff --git a/src/ripple/basics/impl/strHex.cpp b/src/ripple/basics/impl/strHex.cpp deleted file mode 100644 index 084493af53a..00000000000 --- a/src/ripple/basics/impl/strHex.cpp +++ /dev/null @@ -1,49 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. 
- - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include - -namespace ripple { - -int -charUnHex(unsigned char c) -{ - static constexpr std::array const xtab = []() { - std::array t{}; - - for (auto& x : t) - x = -1; - - for (int i = 0; i < 10; ++i) - t['0' + i] = i; - - for (int i = 0; i < 6; ++i) - { - t['A' + i] = 10 + i; - t['a' + i] = 10 + i; - } - - return t; - }(); - - return xtab[c]; -} - -} // namespace ripple diff --git a/src/ripple/basics/strHex.h b/src/ripple/basics/strHex.h index e48ea921557..257fb540b3c 100644 --- a/src/ripple/basics/strHex.h +++ b/src/ripple/basics/strHex.h @@ -25,21 +25,6 @@ namespace ripple { -/** @{ */ -/** Converts a hex digit to the corresponding integer - @param cDigit one of '0'-'9', 'A'-'F' or 'a'-'f' - @return an integer from 0 to 15 on success; -1 on failure. -*/ -int -charUnHex(unsigned char c); - -inline int -charUnHex(char c) -{ - return charUnHex(static_cast(c)); -} -/** @} */ - template std::string strHex(FwdIt begin, FwdIt end) From 3d0c14f3e3108347e85889843cb90912700dacf0 Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Thu, 18 Aug 2022 20:29:36 -0400 Subject: [PATCH 034/508] Remove use of deprecated std::iterator --- .../detail/aged_container_iterator.h | 59 ++++++++----------- .../container/detail/aged_ordered_container.h | 40 ++++++------- .../detail/aged_unordered_container.h | 42 ++++++------- src/ripple/beast/core/List.h | 3 +- src/ripple/beast/core/LockFreeStack.h | 15 +---- 5 files changed, 69 insertions(+), 90 deletions(-) diff --git a/src/ripple/beast/container/detail/aged_container_iterator.h b/src/ripple/beast/container/detail/aged_container_iterator.h index cd8677ce17f..7467ad33c7d 100644 --- a/src/ripple/beast/container/detail/aged_container_iterator.h +++ b/src/ripple/beast/container/detail/aged_container_iterator.h @@ -30,23 +30,21 @@ class aged_ordered_container; namespace detail { -// Idea for Base template argument to prevent having to repeat -// the base class declaration comes from newbiz on ##c++/Freenode -// // If Iterator is SCARY then this iterator will be as well. 
-template < - bool is_const, - class Iterator, - class Base = std::iterator< - typename std::iterator_traits::iterator_category, - typename std::conditional< - is_const, - typename Iterator::value_type::stashed::value_type const, - typename Iterator::value_type::stashed::value_type>::type, - typename std::iterator_traits::difference_type>> -class aged_container_iterator : public Base +template +class aged_container_iterator { public: + using iterator_category = + typename std::iterator_traits::iterator_category; + using value_type = typename std::conditional< + is_const, + typename Iterator::value_type::stashed::value_type const, + typename Iterator::value_type::stashed::value_type>::type; + using difference_type = + typename std::iterator_traits::difference_type; + using pointer = value_type*; + using reference = value_type&; using time_point = typename Iterator::value_type::stashed::time_point; aged_container_iterator() = default; @@ -56,13 +54,11 @@ class aged_container_iterator : public Base template < bool other_is_const, class OtherIterator, - class OtherBase, class = typename std::enable_if< (other_is_const == false || is_const == true) && std::is_same::value == false>::type> explicit aged_container_iterator( - aged_container_iterator const& - other) + aged_container_iterator const& other) : m_iter(other.m_iter) { } @@ -70,22 +66,19 @@ class aged_container_iterator : public Base // Disable constructing a const_iterator from a non-const_iterator. template < bool other_is_const, - class OtherBase, class = typename std::enable_if< other_is_const == false || is_const == true>::type> aged_container_iterator( - aged_container_iterator const& - other) + aged_container_iterator const& other) : m_iter(other.m_iter) { } // Disable assigning a const_iterator to a non-const iterator - template + template auto operator=( - aged_container_iterator const& - other) -> + aged_container_iterator const& other) -> typename std::enable_if< other_is_const == false || is_const == true, aged_container_iterator&>::type @@ -94,20 +87,18 @@ class aged_container_iterator : public Base return *this; } - template + template bool - operator==( - aged_container_iterator const& - other) const + operator==(aged_container_iterator const& + other) const { return m_iter == other.m_iter; } - template + template bool - operator!=( - aged_container_iterator const& - other) const + operator!=(aged_container_iterator const& + other) const { return m_iter != other.m_iter; } @@ -142,13 +133,13 @@ class aged_container_iterator : public Base return prev; } - typename Base::reference + reference operator*() const { return m_iter->value; } - typename Base::pointer + pointer operator->() const { return &m_iter->value; @@ -167,7 +158,7 @@ class aged_container_iterator : public Base template friend class aged_unordered_container; - template + template friend class aged_container_iterator; template diff --git a/src/ripple/beast/container/detail/aged_ordered_container.h b/src/ripple/beast/container/detail/aged_ordered_container.h index ed6585dd501..9da5f20a0a4 100644 --- a/src/ripple/beast/container/detail/aged_ordered_container.h +++ b/src/ripple/beast/container/detail/aged_ordered_container.h @@ -989,22 +989,20 @@ class aged_ordered_container template < bool is_const, class Iterator, - class Base, class = std::enable_if_t::value>> - beast::detail::aged_container_iterator - erase(beast::detail::aged_container_iterator pos); + beast::detail::aged_container_iterator + erase(beast::detail::aged_container_iterator pos); // enable_if 
prevents erase (reverse_iterator first, reverse_iterator last) // from compiling template < bool is_const, class Iterator, - class Base, class = std::enable_if_t::value>> - beast::detail::aged_container_iterator + beast::detail::aged_container_iterator erase( - beast::detail::aged_container_iterator first, - beast::detail::aged_container_iterator last); + beast::detail::aged_container_iterator first, + beast::detail::aged_container_iterator last); template auto @@ -1019,10 +1017,9 @@ class aged_ordered_container template < bool is_const, class Iterator, - class Base, class = std::enable_if_t::value>> void - touch(beast::detail::aged_container_iterator pos) + touch(beast::detail::aged_container_iterator pos) { touch(pos, clock().now()); } @@ -1264,11 +1261,10 @@ class aged_ordered_container template < bool is_const, class Iterator, - class Base, class = std::enable_if_t::value>> void touch( - beast::detail::aged_container_iterator pos, + beast::detail::aged_container_iterator pos, typename clock_type::time_point const& now); template < @@ -2010,13 +2006,13 @@ template < class Clock, class Compare, class Allocator> -template -beast::detail::aged_container_iterator +template +beast::detail::aged_container_iterator aged_ordered_container:: - erase(beast::detail::aged_container_iterator pos) + erase(beast::detail::aged_container_iterator pos) { unlink_and_delete_element(&*((pos++).iterator())); - return beast::detail::aged_container_iterator( + return beast::detail::aged_container_iterator( pos.iterator()); } @@ -2028,17 +2024,17 @@ template < class Clock, class Compare, class Allocator> -template -beast::detail::aged_container_iterator +template +beast::detail::aged_container_iterator aged_ordered_container:: erase( - beast::detail::aged_container_iterator first, - beast::detail::aged_container_iterator last) + beast::detail::aged_container_iterator first, + beast::detail::aged_container_iterator last) { for (; first != last;) unlink_and_delete_element(&*((first++).iterator())); - return beast::detail::aged_container_iterator( + return beast::detail::aged_container_iterator( first.iterator()); } @@ -2173,11 +2169,11 @@ template < class Clock, class Compare, class Allocator> -template +template void aged_ordered_container:: touch( - beast::detail::aged_container_iterator pos, + beast::detail::aged_container_iterator pos, typename clock_type::time_point const& now) { auto& e(*pos.iterator()); diff --git a/src/ripple/beast/container/detail/aged_unordered_container.h b/src/ripple/beast/container/detail/aged_unordered_container.h index 8bc2330fa9f..920e6196bb9 100644 --- a/src/ripple/beast/container/detail/aged_unordered_container.h +++ b/src/ripple/beast/container/detail/aged_unordered_container.h @@ -1205,15 +1205,15 @@ class aged_unordered_container return emplace(std::forward(args)...); } - template - beast::detail::aged_container_iterator - erase(beast::detail::aged_container_iterator pos); + template + beast::detail::aged_container_iterator + erase(beast::detail::aged_container_iterator pos); - template - beast::detail::aged_container_iterator + template + beast::detail::aged_container_iterator erase( - beast::detail::aged_container_iterator first, - beast::detail::aged_container_iterator last); + beast::detail::aged_container_iterator first, + beast::detail::aged_container_iterator last); template auto @@ -1222,9 +1222,9 @@ class aged_unordered_container void swap(aged_unordered_container& other) noexcept; - template + template void - touch(beast::detail::aged_container_iterator pos) + 
touch(beast::detail::aged_container_iterator pos) { touch(pos, clock().now()); } @@ -1541,10 +1541,10 @@ class aged_unordered_container insert_unchecked(first, last); } - template + template void touch( - beast::detail::aged_container_iterator pos, + beast::detail::aged_container_iterator pos, typename clock_type::time_point const& now) { auto& e(*pos.iterator()); @@ -3044,8 +3044,8 @@ template < class Hash, class KeyEqual, class Allocator> -template -beast::detail::aged_container_iterator +template +beast::detail::aged_container_iterator aged_unordered_container< IsMulti, IsMap, @@ -3054,11 +3054,11 @@ aged_unordered_container< Clock, Hash, KeyEqual, - Allocator>:: - erase(beast::detail::aged_container_iterator pos) + Allocator>::erase(beast::detail::aged_container_iterator + pos) { unlink_and_delete_element(&*((pos++).iterator())); - return beast::detail::aged_container_iterator( + return beast::detail::aged_container_iterator( pos.iterator()); } @@ -3071,8 +3071,8 @@ template < class Hash, class KeyEqual, class Allocator> -template -beast::detail::aged_container_iterator +template +beast::detail::aged_container_iterator aged_unordered_container< IsMulti, IsMap, @@ -3083,13 +3083,13 @@ aged_unordered_container< KeyEqual, Allocator>:: erase( - beast::detail::aged_container_iterator first, - beast::detail::aged_container_iterator last) + beast::detail::aged_container_iterator first, + beast::detail::aged_container_iterator last) { for (; first != last;) unlink_and_delete_element(&*((first++).iterator())); - return beast::detail::aged_container_iterator( + return beast::detail::aged_container_iterator( first.iterator()); } diff --git a/src/ripple/beast/core/List.h b/src/ripple/beast/core/List.h index 1daf5cda76f..9b3c889d6ac 100644 --- a/src/ripple/beast/core/List.h +++ b/src/ripple/beast/core/List.h @@ -72,11 +72,12 @@ class ListNode template class ListIterator - : public std::iterator { public: + using iterator_category = std::bidirectional_iterator_tag; using value_type = typename beast::detail::CopyConst::type; + using difference_type = std::ptrdiff_t; using pointer = value_type*; using reference = value_type&; using size_type = std::size_t; diff --git a/src/ripple/beast/core/LockFreeStack.h b/src/ripple/beast/core/LockFreeStack.h index ff022b96a10..107564415cd 100644 --- a/src/ripple/beast/core/LockFreeStack.h +++ b/src/ripple/beast/core/LockFreeStack.h @@ -29,18 +29,7 @@ namespace beast { //------------------------------------------------------------------------------ template -class LockFreeStackIterator : public std::iterator< - std::forward_iterator_tag, - typename Container::value_type, - typename Container::difference_type, - typename std::conditional< - IsConst, - typename Container::const_pointer, - typename Container::pointer>::type, - typename std::conditional< - IsConst, - typename Container::const_reference, - typename Container::reference>::type> +class LockFreeStackIterator { protected: using Node = typename Container::Node; @@ -48,7 +37,9 @@ class LockFreeStackIterator : public std::iterator< typename std::conditional::type; public: + using iterator_category = std::forward_iterator_tag; using value_type = typename Container::value_type; + using difference_type = typename Container::difference_type; using pointer = typename std::conditional< IsConst, typename Container::const_pointer, From 5e1cb09b8892e650f6c34a66521b6b1673bd6b65 Mon Sep 17 00:00:00 2001 From: Chenna Keshava B S Date: Mon, 25 Jul 2022 13:25:01 -0700 Subject: [PATCH 035/508] Update broken link to hosted 
Doxygen content (fixes #4251) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f4cb4e2a100..20ce719d79a 100644 --- a/README.md +++ b/README.md @@ -55,5 +55,5 @@ git-subtree. See those directories' README files for more details. * [XRP Ledger Dev Portal](https://xrpl.org/) * [Setup and Installation](https://xrpl.org/install-rippled.html) -* [Source Documentation (Doxygen)](https://ripple.github.io/rippled) +* [Source Documentation (Doxygen)](https://xrplf.github.io/rippled/) * [Learn more about the XRP Ledger (YouTube)](https://www.youtube.com/playlist?list=PLJQ55Tj1hIVZtJ_JdTvSum2qMTsedWkNi) From ce64f7a90f99c6b5e68d3c3d913443023de061a6 Mon Sep 17 00:00:00 2001 From: Chenna Keshava B S Date: Wed, 20 Jul 2022 15:48:41 -0700 Subject: [PATCH 036/508] Remove deprecated AccountTxOld.cpp (fixes #2926) --- Builds/CMake/RippledCore.cmake | 1 - src/ripple/app/main/Main.cpp | 5 +- src/ripple/rpc/handlers/AccountTxOld.cpp | 255 ----------------------- 3 files changed, 3 insertions(+), 258 deletions(-) delete mode 100644 src/ripple/rpc/handlers/AccountTxOld.cpp diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 10af3b3109b..041044b86df 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -580,7 +580,6 @@ target_sources (rippled PRIVATE src/ripple/rpc/handlers/AccountObjects.cpp src/ripple/rpc/handlers/AccountOffers.cpp src/ripple/rpc/handlers/AccountTx.cpp - src/ripple/rpc/handlers/AccountTxOld.cpp src/ripple/rpc/handlers/BlackList.cpp src/ripple/rpc/handlers/BookOffers.cpp src/ripple/rpc/handlers/CanDelete.cpp diff --git a/src/ripple/app/main/Main.cpp b/src/ripple/app/main/Main.cpp index 64b6464a4c6..1d8169217af 100644 --- a/src/ripple/app/main/Main.cpp +++ b/src/ripple/app/main/Main.cpp @@ -133,8 +133,9 @@ printHelp(const po::options_description& desc) " account_objects [] [strict]\n" " account_offers | [] " "[strict]\n" - " account_tx accountID [ledger_min [ledger_max [limit " - "[offset]]]] [binary] [count] [descending]\n" + " account_tx accountID [ledger_index_min [ledger_index_max " + "[limit " + "]]] [binary]\n" " book_changes []\n" " book_offers [ " "[ [ []]]]]\n" diff --git a/src/ripple/rpc/handlers/AccountTxOld.cpp b/src/ripple/rpc/handlers/AccountTxOld.cpp deleted file mode 100644 index 1bc64247c36..00000000000 --- a/src/ripple/rpc/handlers/AccountTxOld.cpp +++ /dev/null @@ -1,255 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012-2014 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { - -// { -// account: account, -// ledger_index_min: ledger_index, -// ledger_index_max: ledger_index, -// binary: boolean, // optional, defaults to false -// count: boolean, // optional, defaults to false -// descending: boolean, // optional, defaults to false -// offset: integer, // optional, defaults to 0 -// limit: integer // optional -// } -Json::Value -doAccountTxOld(RPC::JsonContext& context) -{ - std::uint32_t offset = context.params.isMember(jss::offset) - ? context.params[jss::offset].asUInt() - : 0; - std::uint32_t limit = context.params.isMember(jss::limit) - ? context.params[jss::limit].asUInt() - : UINT32_MAX; - bool bBinary = context.params.isMember(jss::binary) && - context.params[jss::binary].asBool(); - bool bDescending = context.params.isMember(jss::descending) && - context.params[jss::descending].asBool(); - bool bCount = context.params.isMember(jss::count) && - context.params[jss::count].asBool(); - std::uint32_t uLedgerMin; - std::uint32_t uLedgerMax; - std::uint32_t uValidatedMin; - std::uint32_t uValidatedMax; - bool bValidated = - context.ledgerMaster.getValidatedRange(uValidatedMin, uValidatedMax); - - if (!context.params.isMember(jss::account)) - return rpcError(rpcINVALID_PARAMS); - - auto const raAccount = - parseBase58(context.params[jss::account].asString()); - if (!raAccount) - return rpcError(rpcACT_MALFORMED); - - if (offset > 3000) - return rpcError(rpcATX_DEPRECATED); - - context.loadType = Resource::feeHighBurdenRPC; - - // DEPRECATED - if (context.params.isMember(jss::ledger_min)) - { - context.params[jss::ledger_index_min] = context.params[jss::ledger_min]; - bDescending = true; - } - - // DEPRECATED - if (context.params.isMember(jss::ledger_max)) - { - context.params[jss::ledger_index_max] = context.params[jss::ledger_max]; - bDescending = true; - } - - if (context.params.isMember(jss::ledger_index_min) || - context.params.isMember(jss::ledger_index_max)) - { - std::int64_t iLedgerMin = context.params.isMember(jss::ledger_index_min) - ? context.params[jss::ledger_index_min].asInt() - : -1; - std::int64_t iLedgerMax = context.params.isMember(jss::ledger_index_max) - ? context.params[jss::ledger_index_max].asInt() - : -1; - - if (!bValidated && (iLedgerMin == -1 || iLedgerMax == -1)) - { - // Don't have a validated ledger range. - if (context.apiVersion == 1) - return rpcError(rpcLGR_IDXS_INVALID); - return rpcError(rpcNOT_SYNCED); - } - - uLedgerMin = iLedgerMin == -1 ? uValidatedMin : iLedgerMin; - uLedgerMax = iLedgerMax == -1 ? 
uValidatedMax : iLedgerMax; - - if (uLedgerMax < uLedgerMin) - { - if (context.apiVersion == 1) - return rpcError(rpcLGR_IDXS_INVALID); - return rpcError(rpcNOT_SYNCED); - } - } - else - { - std::shared_ptr ledger; - auto ret = RPC::lookupLedger(ledger, context); - - if (!ledger) - return ret; - - if (!ret[jss::validated].asBool() || - (ledger->info().seq > uValidatedMax) || - (ledger->info().seq < uValidatedMin)) - { - return rpcError(rpcLGR_NOT_VALIDATED); - } - - uLedgerMin = uLedgerMax = ledger->info().seq; - } - - int count = 0; - -#ifndef DEBUG - - try - { -#endif - - Json::Value ret(Json::objectValue); - - ret[jss::account] = context.app.accountIDCache().toBase58(*raAccount); - Json::Value& jvTxns = (ret[jss::transactions] = Json::arrayValue); - - RelationalDatabase::AccountTxOptions options = { - *raAccount, - uLedgerMin, - uLedgerMax, - offset, - limit, - isUnlimited(context.role)}; - - if (bBinary) - { - std::vector txns; - - if (bDescending) - txns = dynamic_cast( - &context.app.getRelationalDatabase()) - ->getNewestAccountTxsB(options); - else - txns = dynamic_cast( - &context.app.getRelationalDatabase()) - ->getOldestAccountTxsB(options); - - for (auto it = txns.begin(), end = txns.end(); it != end; ++it) - { - ++count; - Json::Value& jvObj = jvTxns.append(Json::objectValue); - - std::uint32_t uLedgerIndex = std::get<2>(*it); - jvObj[jss::tx_blob] = strHex(std::get<0>(*it)); - jvObj[jss::meta] = strHex(std::get<1>(*it)); - jvObj[jss::ledger_index] = uLedgerIndex; - jvObj[jss::validated] = bValidated && - uValidatedMin <= uLedgerIndex && - uValidatedMax >= uLedgerIndex; - } - } - else - { - RelationalDatabase::AccountTxs txns; - - if (bDescending) - txns = dynamic_cast( - &context.app.getRelationalDatabase()) - ->getNewestAccountTxs(options); - else - txns = dynamic_cast( - &context.app.getRelationalDatabase()) - ->getOldestAccountTxs(options); - - for (auto it = txns.begin(), end = txns.end(); it != end; ++it) - { - ++count; - Json::Value& jvObj = jvTxns.append(Json::objectValue); - - if (it->first) - jvObj[jss::tx] = - it->first->getJson(JsonOptions::include_date); - - if (it->second) - { - std::uint32_t uLedgerIndex = it->second->getLgrSeq(); - - auto meta = it->second->getJson(JsonOptions::none); - insertDeliveredAmount( - meta, context, it->first, *it->second); - jvObj[jss::meta] = std::move(meta); - - jvObj[jss::validated] = bValidated && - uValidatedMin <= uLedgerIndex && - uValidatedMax >= uLedgerIndex; - } - } - } - - // Add information about the original query - ret[jss::ledger_index_min] = uLedgerMin; - ret[jss::ledger_index_max] = uLedgerMax; - ret[jss::validated] = bValidated && uValidatedMin <= uLedgerMin && - uValidatedMax >= uLedgerMax; - ret[jss::offset] = offset; - - // We no longer return the full count but only the count of returned - // transactions. Computing this count was two expensive and this API is - // deprecated anyway. 
- if (bCount) - ret[jss::count] = count; - - if (context.params.isMember(jss::limit)) - ret[jss::limit] = limit; - - return ret; -#ifndef DEBUG - } - catch (std::exception const&) - { - return rpcError(rpcINTERNAL); - } - -#endif -} - -} // namespace ripple From b88ed5a8ec2a0735031ca23dc6569d54787dc2f2 Mon Sep 17 00:00:00 2001 From: Chenna Keshava B S Date: Fri, 15 Jul 2022 13:23:03 -0700 Subject: [PATCH 037/508] Update command-line usage help message (fixes #3318) --- src/ripple/app/main/Main.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/ripple/app/main/Main.cpp b/src/ripple/app/main/Main.cpp index 1d8169217af..70127afe026 100644 --- a/src/ripple/app/main/Main.cpp +++ b/src/ripple/app/main/Main.cpp @@ -161,6 +161,7 @@ printHelp(const po::options_description& desc) " ledger_request \n" " log_level [[] ]\n" " logrotate\n" + " manifest \n" " node_to_shard [status|start|stop]\n" " peers\n" " ping\n" @@ -180,6 +181,7 @@ printHelp(const po::options_description& desc) " submit_multisigned \n" " tx \n" " validation_create [||]\n" + " validator_info\n" " validators\n" " validator_list_sites\n" " version\n" From 9aaa0dff5fd422e5f6880df8e20a1fd5ad3b4424 Mon Sep 17 00:00:00 2001 From: Chenna Keshava B S Date: Fri, 15 Jul 2022 16:26:48 -0700 Subject: [PATCH 038/508] Build the command map at compile time (fixes #3298): We profiled different algorithms and data structures to understand which strategy is best from a performance standpoint: - Linear search on an array; - Binary search on a sorted array; - Using `std::map`; and - Using `std::unordered_map`. Both linear search and std::unordered_map outperformed the other alternatives so no change to the existing data structure is justified. If more handers are added, this should be revisited. For some additional details and timings, please see: https://github.com/XRPLF/rippled/issues/3298#issuecomment-1185946010 --- src/ripple/net/impl/RPCCall.cpp | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/ripple/net/impl/RPCCall.cpp b/src/ripple/net/impl/RPCCall.cpp index 334ca8693e3..eb4906f3af7 100644 --- a/src/ripple/net/impl/RPCCall.cpp +++ b/src/ripple/net/impl/RPCCall.cpp @@ -1227,10 +1227,7 @@ class RPCParser int maxParams; }; - // FIXME: replace this with a function-static std::map and the lookup - // code with std::map::find when the problem with magic statics on - // Visual Studio is fixed. - static Command const commands[] = { + static constexpr Command commands[] = { // Request-response methods // - Returns an error, or the request. // - To modify the method, provide a new method in the request. 
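The retained strategy is easy to picture in isolation: a `constexpr` table of command records that is scanned linearly at lookup time. The sketch below is a simplified, self-contained illustration under assumed types and entries; the `Command` record, the command names, and `find()` are placeholders rather than the actual rippled definitions (the real struct also carries a pointer to the parse handler, and the real table has dozens of entries).

```cpp
#include <array>
#include <iostream>
#include <string_view>

// Simplified stand-in for the RPC parser's command record; the real
// rippled struct also carries a pointer to the parse handler.
struct Command
{
    std::string_view name;
    int minParams;
    int maxParams;
};

// A constexpr table is laid out at compile time, so a lookup reduces to a
// cache-friendly linear scan over a small block of contiguous memory.
constexpr std::array<Command, 4> commands{{
    {"account_info", 1, 3},
    {"book_changes", 0, 1},
    {"ping", 0, 0},
    {"server_info", 0, 1},
}};

constexpr Command const*
find(std::string_view name)
{
    for (auto const& c : commands)
        if (c.name == name)
            return &c;
    return nullptr;
}

int
main()
{
    if (auto const* c = find("book_changes"))
        std::cout << c->name << " accepts between " << c->minParams
                  << " and " << c->maxParams << " parameters\n";
}
```

With a table this small the scan typically stays within a couple of cache lines, which is why it can keep pace with `std::unordered_map` in the profiling referenced above.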
From d318ab612adc86f1fd8527a50af232f377ca89ef Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Sun, 31 Jul 2022 23:10:03 -0700 Subject: [PATCH 039/508] Avoid unnecessary copying and dynamic memory allocations Co-authored-by: Chenna Keshava B S --- src/ripple/app/consensus/RCLConsensus.cpp | 11 ++----- src/ripple/app/consensus/RCLCxPeerPos.cpp | 38 ++++++----------------- src/ripple/app/consensus/RCLCxPeerPos.h | 32 ++++++------------- src/ripple/consensus/ConsensusProposal.h | 29 +++++++++++++++-- 4 files changed, 49 insertions(+), 61 deletions(-) diff --git a/src/ripple/app/consensus/RCLConsensus.cpp b/src/ripple/app/consensus/RCLConsensus.cpp index a4a4e0989a7..31c851eb816 100644 --- a/src/ripple/app/consensus/RCLConsensus.cpp +++ b/src/ripple/app/consensus/RCLConsensus.cpp @@ -211,15 +211,10 @@ RCLConsensus::Adaptor::propose(RCLCxPeerPos::Proposal const& proposal) prop.set_nodepubkey( validatorKeys_.publicKey.data(), validatorKeys_.publicKey.size()); - auto signingHash = sha512Half( - HashPrefix::proposal, - std::uint32_t(proposal.proposeSeq()), - proposal.closeTime().time_since_epoch().count(), - proposal.prevLedger(), - proposal.position()); - auto sig = signDigest( - validatorKeys_.publicKey, validatorKeys_.secretKey, signingHash); + validatorKeys_.publicKey, + validatorKeys_.secretKey, + proposal.signingHash()); prop.set_signature(sig.data(), sig.size()); diff --git a/src/ripple/app/consensus/RCLCxPeerPos.cpp b/src/ripple/app/consensus/RCLCxPeerPos.cpp index 709e7898808..ee5a45b943f 100644 --- a/src/ripple/app/consensus/RCLCxPeerPos.cpp +++ b/src/ripple/app/consensus/RCLCxPeerPos.cpp @@ -32,29 +32,23 @@ RCLCxPeerPos::RCLCxPeerPos( Slice const& signature, uint256 const& suppression, Proposal&& proposal) - : data_{std::make_shared( - publicKey, - signature, - suppression, - std::move(proposal))} + : publicKey_(publicKey) + , suppression_(suppression) + , proposal_(std::move(proposal)) { -} + // The maximum allowed size of a signature is 72 bytes; we verify + // this elsewhere, but we want to be extra careful here: + assert(signature.size() != 0 && signature.size() <= signature_.capacity()); -uint256 -RCLCxPeerPos::signingHash() const -{ - return sha512Half( - HashPrefix::proposal, - std::uint32_t(proposal().proposeSeq()), - proposal().closeTime().time_since_epoch().count(), - proposal().prevLedger(), - proposal().position()); + if (signature.size() != 0 && signature.size() <= signature_.capacity()) + signature_.assign(signature.begin(), signature.end()); } bool RCLCxPeerPos::checkSign() const { - return verifyDigest(publicKey(), signingHash(), signature(), false); + return verifyDigest( + publicKey(), proposal_.signingHash(), signature(), false); } Json::Value @@ -88,16 +82,4 @@ proposalUniqueId( return s.getSHA512Half(); } -RCLCxPeerPos::Data::Data( - PublicKey const& publicKey, - Slice const& signature, - uint256 const& suppress, - Proposal&& proposal) - : publicKey_{publicKey} - , signature_{signature} - , suppression_{suppress} - , proposal_{std::move(proposal)} -{ -} - } // namespace ripple diff --git a/src/ripple/app/consensus/RCLCxPeerPos.h b/src/ripple/app/consensus/RCLCxPeerPos.h index 9d448aac48c..e82a85d422b 100644 --- a/src/ripple/app/consensus/RCLCxPeerPos.h +++ b/src/ripple/app/consensus/RCLCxPeerPos.h @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -61,10 +62,6 @@ class RCLCxPeerPos uint256 const& suppress, Proposal&& proposal); - //! Create the signing hash for the proposal - uint256 - signingHash() const; - //! 
Verify the signing hash of the proposal bool checkSign() const; @@ -73,27 +70,27 @@ class RCLCxPeerPos Slice signature() const { - return data_->signature_; + return {signature_.data(), signature_.size()}; } //! Public key of peer that sent the proposal PublicKey const& publicKey() const { - return data_->publicKey_; + return publicKey_; } //! Unique id used by hash router to suppress duplicates uint256 const& suppressionID() const { - return data_->suppression_; + return suppression_; } Proposal const& proposal() const { - return data_->proposal_; + return proposal_; } //! JSON representation of proposal @@ -101,21 +98,10 @@ class RCLCxPeerPos getJson() const; private: - struct Data : public CountedObject - { - PublicKey publicKey_; - Buffer signature_; - uint256 suppression_; - Proposal proposal_; - - Data( - PublicKey const& publicKey, - Slice const& signature, - uint256 const& suppress, - Proposal&& proposal); - }; - - std::shared_ptr data_; + PublicKey publicKey_; + uint256 suppression_; + Proposal proposal_; + boost::container::static_vector signature_; template void diff --git a/src/ripple/consensus/ConsensusProposal.h b/src/ripple/consensus/ConsensusProposal.h index a3eccbb016c..c5103cfe0d5 100644 --- a/src/ripple/consensus/ConsensusProposal.h +++ b/src/ripple/consensus/ConsensusProposal.h @@ -16,13 +16,16 @@ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ //============================================================================== -#ifndef RIPPLE_CONSENSUS_ConsensusProposal_H_INCLUDED -#define RIPPLE_CONSENSUS_ConsensusProposal_H_INCLUDED +#ifndef RIPPLE_CONSENSUS_CONSENSUSPROPOSAL_H_INCLUDED +#define RIPPLE_CONSENSUS_CONSENSUSPROPOSAL_H_INCLUDED +#include #include #include +#include #include #include +#include namespace ripple { /** Represents a proposed position taken during a round of consensus. @@ -169,6 +172,7 @@ class ConsensusProposal NetClock::time_point newCloseTime, NetClock::time_point now) { + signingHash_.reset(); position_ = newPosition; closeTime_ = newCloseTime; time_ = now; @@ -185,6 +189,7 @@ class ConsensusProposal void bowOut(NetClock::time_point now) { + signingHash_.reset(); time_ = now; proposeSeq_ = seqLeave; } @@ -210,6 +215,23 @@ class ConsensusProposal return ret; } + //! The digest for this proposal, used for signing purposes. + uint256 const& + signingHash() const + { + if (!signingHash_) + { + signingHash_ = sha512Half( + HashPrefix::proposal, + std::uint32_t(proposeSeq()), + closeTime().time_since_epoch().count(), + prevLedger(), + position()); + } + + return signingHash_.value(); + } + private: //! Unique identifier of prior ledger this proposal is based on LedgerID_t previousLedger_; @@ -228,6 +250,9 @@ class ConsensusProposal //! The identifier of the node taking this position NodeID_t nodeID_; + + //! The signing hash for this proposal + mutable std::optional signingHash_; }; template From 5a15229eeb13b69c8adf1f653b88a8f8b9480546 Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Fri, 3 Jun 2022 18:09:37 -0700 Subject: [PATCH 040/508] Improve detection & handling of duplicate Node ID: Each node on the network is supposed to have a unique cryptographic identity. Typically, this identity is generated randomly at startup and stored for later reuse in the (poorly named) file `wallet.db`. If the file is copied, it is possible for two nodes to share the same node identity. This is generally not desirable and existing servers will detect and reject connections to other servers that have the same key. 
This commit achieves three things:

1. It improves the detection code to pinpoint instances where two distinct servers with the same key connect with each other. In that case, servers will log an appropriate error and shut down pending intervention by the server's operator.

2. It makes it possible for server administrators to securely and easily generate new cryptographic identities for servers using the new `--newnodeid` command line argument. When a server is started using this command, it will generate and save a random secure identity.

3. It makes it possible to configure the identity using a command line option, so that it can be derived from data or parameters associated with the container or hardware where the instance is running, by passing the `--nodeid` option followed by a single argument identifying the information from which the node's identity is derived. For example, the following command will result in nodes with different hostnames having different node identities: `rippled --nodeid $HOSTNAME`

The last option is particularly useful for automated cloud-based deployments that minimize the need for storing state and provide unique deployment identifiers.

**Important note for server operators:** Depending on variables outside the control of this code, such as operating system version or configuration, permissions, and more, it may be possible for other users or programs to access the command line arguments of other processes on the system. If you are operating in a shared environment, you should avoid using this option, preferring instead to use the `[node_seed]` option in the configuration file, and use permissions to limit exposure of the node seed. A user who gains access to the value used to derive the node's unique identity could impersonate that node.

The commit also updates the minimum supported server protocol version to `XRPL/2.1`, which has been supported since version 1.5.0, and eliminates support for `XRPL/2.0`.
--- src/ripple/app/consensus/RCLConsensus.cpp | 8 ++-- src/ripple/app/main/Application.cpp | 38 ++++++++++++++--- src/ripple/app/main/Application.h | 10 ++++- src/ripple/app/main/Main.cpp | 6 ++- src/ripple/app/main/NodeIdentity.cpp | 31 ++++++++++---- src/ripple/app/main/NodeIdentity.h | 11 ++++- src/ripple/app/rdb/Wallet.h | 17 ++++++-- src/ripple/app/rdb/impl/Wallet.cpp | 6 +++ src/ripple/overlay/README.md | 4 +- src/ripple/overlay/impl/Handshake.cpp | 39 +++++++++++++---- src/ripple/overlay/impl/ProtocolVersion.cpp | 1 - src/ripple/protocol/Seed.h | 8 +++- src/ripple/protocol/impl/Seed.cpp | 3 +- src/test/jtx/impl/Env.cpp | 2 +- src/test/overlay/ProtocolVersion_test.cpp | 47 +++++++++++---------- 15 files changed, 166 insertions(+), 65 deletions(-) diff --git a/src/ripple/app/consensus/RCLConsensus.cpp b/src/ripple/app/consensus/RCLConsensus.cpp index 31c851eb816..aec747e094c 100644 --- a/src/ripple/app/consensus/RCLConsensus.cpp +++ b/src/ripple/app/consensus/RCLConsensus.cpp @@ -86,9 +86,11 @@ RCLConsensus::Adaptor::Adaptor( , inboundTransactions_{inboundTransactions} , j_(journal) , validatorKeys_(validatorKeys) - , valCookie_{rand_int( - 1, - std::numeric_limits::max())} + , valCookie_( + 1 + + rand_int( + crypto_prng(), + std::numeric_limits::max() - 1)) , nUnlVote_(validatorKeys_.nodeID, j_) { assert(valCookie_ != 0); diff --git a/src/ripple/app/main/Application.cpp b/src/ripple/app/main/Application.cpp index b7f18ba34db..99f3b060b9d 100644 --- a/src/ripple/app/main/Application.cpp +++ b/src/ripple/app/main/Application.cpp @@ -52,10 +52,12 @@ #include #include #include +#include #include #include #include #include +#include #include #include #include @@ -165,6 +167,8 @@ class ApplicationImp : public Application, public BasicApp std::unique_ptr logs_; std::unique_ptr timeKeeper_; + std::uint64_t const instanceCookie_; + beast::Journal m_journal; std::unique_ptr perfLog_; Application::MutexType m_masterMutex; @@ -274,6 +278,11 @@ class ApplicationImp : public Application, public BasicApp , config_(std::move(config)) , logs_(std::move(logs)) , timeKeeper_(std::move(timeKeeper)) + , instanceCookie_( + 1 + + rand_int( + crypto_prng(), + std::numeric_limits::max() - 1)) , m_journal(logs_->journal("Application")) // PerfLog must be started before any other threads are launched. @@ -508,13 +517,13 @@ class ApplicationImp : public Application, public BasicApp //-------------------------------------------------------------------------- bool - setup() override; + setup(boost::program_options::variables_map const& cmdline) override; void start(bool withTimers) override; void run() override; void - signalStop() override; + signalStop(std::string msg = "") override; bool checkSigs() const override; void @@ -526,6 +535,12 @@ class ApplicationImp : public Application, public BasicApp //-------------------------------------------------------------------------- + std::uint64_t + instanceID() const override + { + return instanceCookie_; + } + Logs& logs() override { @@ -1108,7 +1123,7 @@ class ApplicationImp : public Application, public BasicApp // TODO Break this up into smaller, more digestible initialization segments. bool -ApplicationImp::setup() +ApplicationImp::setup(boost::program_options::variables_map const& cmdline) { // We want to intercept CTRL-C and the standard termination signal SIGTERM // and terminate the process. This handler will NEVER be invoked twice. 
@@ -1146,8 +1161,10 @@ ApplicationImp::setup() if (logs_->threshold() > kDebug) logs_->threshold(kDebug); } - JLOG(m_journal.info()) << "process starting: " - << BuildInfo::getFullVersionString(); + + JLOG(m_journal.info()) << "Process starting: " + << BuildInfo::getFullVersionString() + << ", Instance Cookie: " << instanceCookie_; if (numberOfThreads(*config_) < 2) { @@ -1265,7 +1282,7 @@ ApplicationImp::setup() if (!config().reporting()) m_orderBookDB.setup(getLedgerMaster().getCurrentLedger()); - nodeIdentity_ = getNodeIdentity(*this); + nodeIdentity_ = getNodeIdentity(*this, cmdline); if (!cluster_->load(config().section(SECTION_CLUSTER_NODES))) { @@ -1627,10 +1644,17 @@ ApplicationImp::run() } void -ApplicationImp::signalStop() +ApplicationImp::signalStop(std::string msg) { if (!isTimeToStop.exchange(true)) + { + if (msg.empty()) + JLOG(m_journal.warn()) << "Server stopping"; + else + JLOG(m_journal.warn()) << "Server stopping: " << msg; + stoppingCondition_.notify_all(); + } } bool diff --git a/src/ripple/app/main/Application.h b/src/ripple/app/main/Application.h index 53155ca4f8d..3b357deef3f 100644 --- a/src/ripple/app/main/Application.h +++ b/src/ripple/app/main/Application.h @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -136,13 +137,14 @@ class Application : public beast::PropertyStream::Source virtual ~Application() = default; virtual bool - setup() = 0; + setup(boost::program_options::variables_map const& options) = 0; + virtual void start(bool withTimers) = 0; virtual void run() = 0; virtual void - signalStop() = 0; + signalStop(std::string msg = "") = 0; virtual bool checkSigs() const = 0; virtual void @@ -154,6 +156,10 @@ class Application : public beast::PropertyStream::Source // --- // + /** Returns a 64-bit instance identifier, generated at startup */ + virtual std::uint64_t + instanceID() const = 0; + virtual Logs& logs() = 0; virtual Config& diff --git a/src/ripple/app/main/Main.cpp b/src/ripple/app/main/Main.cpp index 70127afe026..f25b83fd51f 100644 --- a/src/ripple/app/main/Main.cpp +++ b/src/ripple/app/main/Main.cpp @@ -372,6 +372,10 @@ run(int argc, char** argv) "conf", po::value(), "Specify the configuration file.")( "debug", "Enable normally suppressed debug logging")( "help,h", "Display this message.")( + "newnodeid", "Generate a new node identity for this server.")( + "nodeid", + po::value(), + "Specify the node identity for this server.")( "quorum", po::value(), "Override the minimum validation quorum.")( @@ -756,7 +760,7 @@ run(int argc, char** argv) auto app = make_Application( std::move(config), std::move(logs), std::move(timeKeeper)); - if (!app->setup()) + if (!app->setup(vm)) return -1; // With our configuration parsed, ensure we have diff --git a/src/ripple/app/main/NodeIdentity.cpp b/src/ripple/app/main/NodeIdentity.cpp index a2051bbb674..e66b9e8400f 100644 --- a/src/ripple/app/main/NodeIdentity.cpp +++ b/src/ripple/app/main/NodeIdentity.cpp @@ -20,27 +20,38 @@ #include #include #include -#include #include #include -#include #include namespace ripple { std::pair -getNodeIdentity(Application& app) +getNodeIdentity( + Application& app, + boost::program_options::variables_map const& cmdline) { - // If a seed is specified in the configuration file use that directly. 
- if (app.config().exists(SECTION_NODE_SEED)) + std::optional seed; + + if (cmdline.count("nodeid")) + { + seed = parseGenericSeed(cmdline["nodeid"].as(), false); + + if (!seed) + Throw("Invalid 'nodeid' in command line"); + } + else if (app.config().exists(SECTION_NODE_SEED)) { - auto const seed = parseBase58( + seed = parseBase58( app.config().section(SECTION_NODE_SEED).lines().front()); if (!seed) - Throw("NodeIdentity: Bad [" SECTION_NODE_SEED - "] specified"); + Throw("Invalid [" SECTION_NODE_SEED + "] in configuration file"); + } + if (seed) + { auto secretKey = generateSecretKey(KeyType::secp256k1, *seed); auto publicKey = derivePublicKey(KeyType::secp256k1, secretKey); @@ -48,6 +59,10 @@ getNodeIdentity(Application& app) } auto db = app.getWalletDB().checkoutDb(); + + if (cmdline.count("newnodeid") != 0) + clearNodeIdentity(*db); + return getNodeIdentity(*db); } diff --git a/src/ripple/app/main/NodeIdentity.h b/src/ripple/app/main/NodeIdentity.h index 60deeed856e..b82b3657aeb 100644 --- a/src/ripple/app/main/NodeIdentity.h +++ b/src/ripple/app/main/NodeIdentity.h @@ -23,13 +23,20 @@ #include #include #include +#include #include namespace ripple { -/** The cryptographic credentials identifying this server instance. */ +/** The cryptographic credentials identifying this server instance. + + @param app The application object + @param cmdline The command line parameters passed into the application. + */ std::pair -getNodeIdentity(Application& app); +getNodeIdentity( + Application& app, + boost::program_options::variables_map const& cmdline); } // namespace ripple diff --git a/src/ripple/app/rdb/Wallet.h b/src/ripple/app/rdb/Wallet.h index 6bf6ca9ea83..2769e459acc 100644 --- a/src/ripple/app/rdb/Wallet.h +++ b/src/ripple/app/rdb/Wallet.h @@ -87,10 +87,19 @@ saveManifests( void addValidatorManifest(soci::session& session, std::string const& serialized); -/** - * @brief getNodeIdentity Returns the public and private keys of this node. - * @param session Session with the database. - * @return Pair of public and private keys. +/** Delete any saved public/private key associated with this node. */ +void +clearNodeIdentity(soci::session& session); + +/** Returns a stable public and private key for this node. + + The node's public identity is defined by a secp256k1 keypair + that is (normally) randomly generated. This function will + return such a keypair, securely generating one if needed. + + @param session Session with the database. + + @return Pair of public and private secp256k1 keys. */ std::pair getNodeIdentity(soci::session& session); diff --git a/src/ripple/app/rdb/impl/Wallet.cpp b/src/ripple/app/rdb/impl/Wallet.cpp index 24715404ce5..25a06bbd97a 100644 --- a/src/ripple/app/rdb/impl/Wallet.cpp +++ b/src/ripple/app/rdb/impl/Wallet.cpp @@ -119,6 +119,12 @@ addValidatorManifest(soci::session& session, std::string const& serialized) tr.commit(); } +void +clearNodeIdentity(soci::session& session) +{ + session << "DELETE FROM NodeIdentity;"; +} + std::pair getNodeIdentity(soci::session& session) { diff --git a/src/ripple/overlay/README.md b/src/ripple/overlay/README.md index 289c5f707b0..8be890ef75f 100644 --- a/src/ripple/overlay/README.md +++ b/src/ripple/overlay/README.md @@ -296,8 +296,8 @@ For more on the Peer Crawler, please visit https://xrpl.org/peer-crawler.html. If present, identifies the hash of the last ledger that the sending server considers to be closed. 
-The value is presently encoded using **Base64** encoding, but implementations -should support both **Base64** and **HEX** encoding for this value. +The value is encoded as **HEX**, but implementations should support both +**Base64** and **HEX** encoding for this value for legacy purposes. | Field Name | Request | Response | |--------------------- |:-----------------: |:-----------------: | diff --git a/src/ripple/overlay/impl/Handshake.cpp b/src/ripple/overlay/impl/Handshake.cpp index 2ea208f5574..793dec19eef 100644 --- a/src/ripple/overlay/impl/Handshake.cpp +++ b/src/ripple/overlay/impl/Handshake.cpp @@ -204,6 +204,8 @@ buildHandshake( h.insert("Session-Signature", base64_encode(sig.data(), sig.size())); } + h.insert("Instance-Cookie", std::to_string(app.instanceID())); + if (!app.config().SERVER_DOMAIN.empty()) h.insert("Server-Domain", app.config().SERVER_DOMAIN); @@ -215,14 +217,8 @@ buildHandshake( if (auto const cl = app.getLedgerMaster().getClosedLedger()) { - // TODO: Use hex for these - h.insert( - "Closed-Ledger", - base64_encode(cl->info().hash.begin(), cl->info().hash.size())); - h.insert( - "Previous-Ledger", - base64_encode( - cl->info().parentHash.begin(), cl->info().parentHash.size())); + h.insert("Closed-Ledger", strHex(cl->info().hash)); + h.insert("Previous-Ledger", strHex(cl->info().parentHash)); } } @@ -306,7 +302,34 @@ verifyHandshake( }(); if (publicKey == app.nodeIdentity().first) + { + auto const peerInstanceID = [&headers]() { + std::uint64_t iid = 0; + + if (auto const iter = headers.find("Instance-Cookie"); + iter != headers.end()) + { + if (!beast::lexicalCastChecked(iid, iter->value().to_string())) + throw std::runtime_error("Invalid instance cookie"); + + if (iid == 0) + throw std::runtime_error("Invalid instance cookie"); + } + + return iid; + }(); + + // Attempt to differentiate self-connections as opposed to accidental + // node identity reuse caused by accidental misconfiguration. When we + // detect this, we stop the process and log an error message. + if (peerInstanceID != app.instanceID()) + { + app.signalStop("Remote server is using our node identity"); + throw std::runtime_error("Node identity reuse detected"); + } + throw std::runtime_error("Self connection"); + } // This check gets two birds with one stone: // diff --git a/src/ripple/overlay/impl/ProtocolVersion.cpp b/src/ripple/overlay/impl/ProtocolVersion.cpp index 4931dacb431..9a549b56309 100644 --- a/src/ripple/overlay/impl/ProtocolVersion.cpp +++ b/src/ripple/overlay/impl/ProtocolVersion.cpp @@ -36,7 +36,6 @@ namespace ripple { // clang-format off constexpr ProtocolVersion const supportedProtocolList[] { - {2, 0}, {2, 1}, {2, 2} }; diff --git a/src/ripple/protocol/Seed.h b/src/ripple/protocol/Seed.h index c1768d20553..2ebc64970f0 100644 --- a/src/ripple/protocol/Seed.h +++ b/src/ripple/protocol/Seed.h @@ -116,9 +116,13 @@ template <> std::optional parseBase58(std::string const& s); -/** Attempt to parse a string as a seed */ +/** Attempt to parse a string as a seed. 
+ + @param str the string to parse + @param rfc1751 true if we should attempt RFC1751 style parsing (deprecated) + * */ std::optional -parseGenericSeed(std::string const& str); +parseGenericSeed(std::string const& str, bool rfc1751 = true); /** Encode a Seed in RFC1751 format */ std::string diff --git a/src/ripple/protocol/impl/Seed.cpp b/src/ripple/protocol/impl/Seed.cpp index f4c6ee52b2e..49da20a429d 100644 --- a/src/ripple/protocol/impl/Seed.cpp +++ b/src/ripple/protocol/impl/Seed.cpp @@ -87,7 +87,7 @@ parseBase58(std::string const& s) } std::optional -parseGenericSeed(std::string const& str) +parseGenericSeed(std::string const& str, bool rfc1751) { if (str.empty()) return std::nullopt; @@ -111,6 +111,7 @@ parseGenericSeed(std::string const& str) if (auto seed = parseBase58(str)) return seed; + if (rfc1751) { std::string key; if (RFC1751::getKeyFromEnglish(key, str) == 1) diff --git a/src/test/jtx/impl/Env.cpp b/src/test/jtx/impl/Env.cpp index 3445fd1c9ae..900b9812dae 100644 --- a/src/test/jtx/impl/Env.cpp +++ b/src/test/jtx/impl/Env.cpp @@ -83,7 +83,7 @@ Env::AppBundle::AppBundle( std::move(config), std::move(logs), std::move(timeKeeper_)); app = owned.get(); app->logs().threshold(thresh); - if (!app->setup()) + if (!app->setup({})) Throw("Env::AppBundle: setup failed"); timeKeeper->set(app->getLedgerMaster().getClosedLedger()->info().closeTime); app->start(false /*don't start timers*/); diff --git a/src/test/overlay/ProtocolVersion_test.cpp b/src/test/overlay/ProtocolVersion_test.cpp index 81845345d8e..a5a26fe74ec 100644 --- a/src/test/overlay/ProtocolVersion_test.cpp +++ b/src/test/overlay/ProtocolVersion_test.cpp @@ -25,22 +25,21 @@ namespace ripple { class ProtocolVersion_test : public beast::unit_test::suite { private: - template - static std::string - join(FwdIt first, FwdIt last, char const* sep = ",") - { - std::string result; - if (first == last) - return result; - result = to_string(*first++); - while (first != last) - result += sep + to_string(*first++); - return result; - } - void check(std::string const& s, std::string const& answer) { + auto join = [](auto first, auto last) { + std::string result; + if (first != last) + { + result = to_string(*first++); + + while (first != last) + result += "," + to_string(*first++); + } + return result; + }; + auto const result = parseProtocolVersions(s); BEAST_EXPECT(join(result.begin(), result.end()) == answer); } @@ -60,20 +59,21 @@ class ProtocolVersion_test : public beast::unit_test::suite // Empty string check("", ""); + + // clang-format off check( - "RTXP/1.1,RTXP/1.2,RTXP/1.3,XRPL/2.1,XRPL/2.0", + "RTXP/1.1,RTXP/1.2,RTXP/1.3,XRPL/2.1,XRPL/2.0,/XRPL/3.0", "XRPL/2.0,XRPL/2.1"); check( - "RTXP/0.9,RTXP/1.01,XRPL/0.3,XRPL/2.01,XRPL/19.04,Oscar/" - "123,NIKB", + "RTXP/0.9,RTXP/1.01,XRPL/0.3,XRPL/2.01,websocket", ""); check( - "XRPL/2.0,RTXP/1.2,XRPL/2.0,XRPL/19.4,XRPL/7.89,XRPL/" - "A.1,XRPL/2.01", + "XRPL/2.0,XRPL/2.0,XRPL/19.4,XRPL/7.89,XRPL/XRPL/3.0,XRPL/2.01", "XRPL/2.0,XRPL/7.89,XRPL/19.4"); check( "XRPL/2.0,XRPL/3.0,XRPL/4,XRPL/,XRPL,OPT XRPL/2.2,XRPL/5.67", "XRPL/2.0,XRPL/3.0,XRPL/5.67"); + // clang-format on } { @@ -81,13 +81,14 @@ class ProtocolVersion_test : public beast::unit_test::suite BEAST_EXPECT(negotiateProtocolVersion("RTXP/1.2") == std::nullopt); BEAST_EXPECT( - negotiateProtocolVersion("RTXP/1.2, XRPL/2.0") == - make_protocol(2, 0)); + negotiateProtocolVersion("RTXP/1.2, XRPL/2.0, XRPL/2.1") == + make_protocol(2, 1)); BEAST_EXPECT( - negotiateProtocolVersion("XRPL/2.0") == make_protocol(2, 0)); + 
negotiateProtocolVersion("XRPL/2.2") == make_protocol(2, 2)); BEAST_EXPECT( - negotiateProtocolVersion("RTXP/1.2, XRPL/2.0, XRPL/999.999") == - make_protocol(2, 0)); + negotiateProtocolVersion( + "RTXP/1.2, XRPL/2.2, XRPL/2.3, XRPL/999.999") == + make_protocol(2, 2)); BEAST_EXPECT( negotiateProtocolVersion("XRPL/999.999, WebSocket/1.0") == std::nullopt); From e2eed966b0ecb6445027e6a023b48d702c5f4832 Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Wed, 25 May 2022 11:22:47 -0700 Subject: [PATCH 041/508] Improve AccountID string conversion caching: Caching the base58check encoded version of an `AccountID` has performance advantages, because because of the computationally heavy cost associated with the conversion, which requires the application of SHA-256 twice. This commit makes the cache significantly more efficient in terms of memory used: it eliminates the map, using a vector with a size that is determined by the configured size of the node, and a hash function to directly map any given `AccountID` to a specific slot in the cache; the eviction policy is simple: in case of collision the existing entry is removed and replaced with the new data. Previously, use of the cache was optional and required additional effort by the programmer. Now the cache is automatic and does not require any additional work or information. The new cache also utilizes a 64-way spinlock, to help reduce any contention that the pressure on the cache would impose. --- src/ripple/app/ledger/AcceptedLedger.cpp | 4 +- src/ripple/app/ledger/AcceptedLedgerTx.cpp | 5 +- src/ripple/app/ledger/AcceptedLedgerTx.h | 3 +- src/ripple/app/main/Application.cpp | 11 +- src/ripple/app/main/Application.h | 3 - src/ripple/app/paths/PathRequest.cpp | 19 ++- src/ripple/app/rdb/backend/detail/Node.h | 4 - .../app/rdb/backend/detail/impl/Node.cpp | 21 +-- .../app/rdb/backend/impl/SQLiteDatabase.cpp | 60 +++------ src/ripple/core/Config.h | 3 +- src/ripple/core/impl/Config.cpp | 29 ++-- src/ripple/protocol/AccountID.h | 43 ++---- src/ripple/protocol/impl/AccountID.cpp | 126 +++++++++++------- src/ripple/rpc/handlers/AccountChannels.cpp | 2 +- src/ripple/rpc/handlers/AccountInfo.cpp | 2 +- src/ripple/rpc/handlers/AccountLines.cpp | 2 +- src/ripple/rpc/handlers/AccountObjects.cpp | 4 +- src/ripple/rpc/handlers/AccountOffers.cpp | 2 +- src/ripple/rpc/handlers/GatewayBalances.cpp | 2 +- src/ripple/rpc/handlers/NFTOffers.cpp | 6 +- src/ripple/rpc/handlers/NoRippleCheck.cpp | 2 +- 21 files changed, 157 insertions(+), 196 deletions(-) diff --git a/src/ripple/app/ledger/AcceptedLedger.cpp b/src/ripple/app/ledger/AcceptedLedger.cpp index 526704d1889..4f308653dcf 100644 --- a/src/ripple/app/ledger/AcceptedLedger.cpp +++ b/src/ripple/app/ledger/AcceptedLedger.cpp @@ -31,11 +31,9 @@ AcceptedLedger::AcceptedLedger( transactions_.reserve(256); auto insertAll = [&](auto const& txns) { - auto const& idcache = app.accountIDCache(); - for (auto const& item : txns) transactions_.emplace_back(std::make_unique( - ledger, item.first, item.second, idcache)); + ledger, item.first, item.second)); }; if (app.config().reporting()) diff --git a/src/ripple/app/ledger/AcceptedLedgerTx.cpp b/src/ripple/app/ledger/AcceptedLedgerTx.cpp index f0408b0c049..613a91e437a 100644 --- a/src/ripple/app/ledger/AcceptedLedgerTx.cpp +++ b/src/ripple/app/ledger/AcceptedLedgerTx.cpp @@ -28,8 +28,7 @@ namespace ripple { AcceptedLedgerTx::AcceptedLedgerTx( std::shared_ptr const& ledger, std::shared_ptr const& txn, - std::shared_ptr const& met, - AccountIDCache const& accountCache) + 
std::shared_ptr const& met) : mTxn(txn) , mMeta(txn->getTransactionID(), ledger->seq(), *met) , mAffected(mMeta.getAffectedAccounts()) @@ -52,7 +51,7 @@ AcceptedLedgerTx::AcceptedLedgerTx( { Json::Value& affected = (mJson[jss::affected] = Json::arrayValue); for (auto const& account : mAffected) - affected.append(accountCache.toBase58(account)); + affected.append(toBase58(account)); } if (mTxn->getTxnType() == ttOFFER_CREATE) diff --git a/src/ripple/app/ledger/AcceptedLedgerTx.h b/src/ripple/app/ledger/AcceptedLedgerTx.h index 7d68978571b..2995d447bba 100644 --- a/src/ripple/app/ledger/AcceptedLedgerTx.h +++ b/src/ripple/app/ledger/AcceptedLedgerTx.h @@ -46,8 +46,7 @@ class AcceptedLedgerTx : public CountedObject AcceptedLedgerTx( std::shared_ptr const& ledger, std::shared_ptr const&, - std::shared_ptr const&, - AccountIDCache const&); + std::shared_ptr const&); std::shared_ptr const& getTxn() const diff --git a/src/ripple/app/main/Application.cpp b/src/ripple/app/main/Application.cpp index 99f3b060b9d..dce11bc38f0 100644 --- a/src/ripple/app/main/Application.cpp +++ b/src/ripple/app/main/Application.cpp @@ -181,7 +181,6 @@ class ApplicationImp : public Application, public BasicApp NodeStoreScheduler m_nodeStoreScheduler; std::unique_ptr m_shaMapStore; PendingSaves pendingSaves_; - AccountIDCache accountIDCache_; std::optional openLedger_; NodeCache m_tempNodeCache; @@ -336,8 +335,6 @@ class ApplicationImp : public Application, public BasicApp m_nodeStoreScheduler, logs_->journal("SHAMapStore"))) - , accountIDCache_(128000) - , m_tempNodeCache( "NodeCache", 16384, @@ -494,6 +491,8 @@ class ApplicationImp : public Application, public BasicApp config_->reporting() ? std::make_unique(*this) : nullptr) { + initAccountIdCache(config_->getValueFor(SizedItem::accountIdCacheSize)); + add(m_resourceManager.get()); // @@ -856,12 +855,6 @@ class ApplicationImp : public Application, public BasicApp return pendingSaves_; } - AccountIDCache const& - accountIDCache() const override - { - return accountIDCache_; - } - OpenLedger& openLedger() override { diff --git a/src/ripple/app/main/Application.h b/src/ripple/app/main/Application.h index 3b357deef3f..d8cb7d31815 100644 --- a/src/ripple/app/main/Application.h +++ b/src/ripple/app/main/Application.h @@ -90,7 +90,6 @@ class PathRequests; class PendingSaves; class PublicKey; class SecretKey; -class AccountIDCache; class STLedgerEntry; class TimeKeeper; class TransactionMaster; @@ -251,8 +250,6 @@ class Application : public beast::PropertyStream::Source getSHAMapStore() = 0; virtual PendingSaves& pendingSaves() = 0; - virtual AccountIDCache const& - accountIDCache() const = 0; virtual OpenLedger& openLedger() = 0; virtual OpenLedger const& diff --git a/src/ripple/app/paths/PathRequest.cpp b/src/ripple/app/paths/PathRequest.cpp index d1acb3ac1fd..02b46c81e91 100644 --- a/src/ripple/app/paths/PathRequest.cpp +++ b/src/ripple/app/paths/PathRequest.cpp @@ -552,9 +552,16 @@ PathRequest::findPaths( continueCallback); mContext[issue] = ps; - auto& sourceAccount = !isXRP(issue.account) - ? issue.account - : isXRP(issue.currency) ? 
xrpAccount() : *raSrcAccount; + auto const& sourceAccount = [&] { + if (!isXRP(issue.account)) + return issue.account; + + if (isXRP(issue.currency)) + return xrpAccount(); + + return *raSrcAccount; + }(); + STAmount saMaxAmount = saSendMax.value_or( STAmount({issue.currency, sourceAccount}, 1u, 0, true)); @@ -675,10 +682,8 @@ PathRequest::doUpdate( destCurrencies.append(to_string(c)); } - newStatus[jss::source_account] = - app_.accountIDCache().toBase58(*raSrcAccount); - newStatus[jss::destination_account] = - app_.accountIDCache().toBase58(*raDstAccount); + newStatus[jss::source_account] = toBase58(*raSrcAccount); + newStatus[jss::destination_account] = toBase58(*raDstAccount); newStatus[jss::destination_amount] = saDstAmount.getJson(JsonOptions::none); newStatus[jss::full_reply] = !fast; diff --git a/src/ripple/app/rdb/backend/detail/Node.h b/src/ripple/app/rdb/backend/detail/Node.h index fa7e39c8329..0ebae76dc28 100644 --- a/src/ripple/app/rdb/backend/detail/Node.h +++ b/src/ripple/app/rdb/backend/detail/Node.h @@ -389,7 +389,6 @@ getNewestAccountTxsB( * account which match given criteria starting from given marker * and calls callback for each found transaction. * @param session Session with database. - * @param idCache Account ID cache. * @param onUnsavedLedger Callback function to call on each found unsaved * ledger within given range. * @param onTransaction Callback function to call on each found transaction. @@ -408,7 +407,6 @@ getNewestAccountTxsB( std::pair, int> oldestAccountTxPage( soci::session& session, - AccountIDCache const& idCache, std::function const& onUnsavedLedger, std::function< void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& @@ -422,7 +420,6 @@ oldestAccountTxPage( * account which match given criteria starting from given marker * and calls callback for each found transaction. * @param session Session with database. - * @param idCache Account ID cache. * @param onUnsavedLedger Callback function to call on each found unsaved * ledger within given range. * @param onTransaction Callback function to call on each found transaction. @@ -441,7 +438,6 @@ oldestAccountTxPage( std::pair, int> newestAccountTxPage( soci::session& session, - AccountIDCache const& idCache, std::function const& onUnsavedLedger, std::function< void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& diff --git a/src/ripple/app/rdb/backend/detail/impl/Node.cpp b/src/ripple/app/rdb/backend/detail/impl/Node.cpp index 0c9f3b0171f..b3b354ebe72 100644 --- a/src/ripple/app/rdb/backend/detail/impl/Node.cpp +++ b/src/ripple/app/rdb/backend/detail/impl/Node.cpp @@ -307,7 +307,7 @@ saveValidatedLedger( sql += txnId; sql += "','"; - sql += app.accountIDCache().toBase58(account); + sql += toBase58(account); sql += "',"; sql += ledgerSeq; sql += ","; @@ -760,8 +760,7 @@ transactionsSQL( sql = boost::str( boost::format("SELECT %s FROM AccountTransactions " "WHERE Account = '%s' %s %s LIMIT %u, %u;") % - selection % app.accountIDCache().toBase58(options.account) % - maxClause % minClause % + selection % toBase58(options.account) % maxClause % minClause % beast::lexicalCastThrow(options.offset) % beast::lexicalCastThrow(numberOfResults)); else @@ -774,9 +773,9 @@ transactionsSQL( "ORDER BY AccountTransactions.LedgerSeq %s, " "AccountTransactions.TxnSeq %s, AccountTransactions.TransID %s " "LIMIT %u, %u;") % - selection % app.accountIDCache().toBase58(options.account) % - maxClause % minClause % (descending ? 
"DESC" : "ASC") % + selection % toBase58(options.account) % maxClause % minClause % (descending ? "DESC" : "ASC") % (descending ? "DESC" : "ASC") % + (descending ? "DESC" : "ASC") % beast::lexicalCastThrow(options.offset) % beast::lexicalCastThrow(numberOfResults)); JLOG(j.trace()) << "txSQL query: " << sql; @@ -1049,7 +1048,6 @@ getNewestAccountTxsB( * account that matches the given criteria starting from the provided * marker and invokes the callback parameter for each found transaction. * @param session Session with the database. - * @param idCache Account ID cache. * @param onUnsavedLedger Callback function to call on each found unsaved * ledger within the given range. * @param onTransaction Callback function to call on each found transaction. @@ -1069,7 +1067,6 @@ getNewestAccountTxsB( static std::pair, int> accountTxPage( soci::session& session, - AccountIDCache const& idCache, std::function const& onUnsavedLedger, std::function< void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& @@ -1135,8 +1132,8 @@ accountTxPage( ORDER BY AccountTransactions.LedgerSeq %s, AccountTransactions.TxnSeq %s LIMIT %u;)")) % - idCache.toBase58(options.account) % options.minLedger % - options.maxLedger % order % order % queryLimit); + toBase58(options.account) % options.minLedger % options.maxLedger % + order % order % queryLimit); } else { @@ -1146,7 +1143,7 @@ accountTxPage( const std::uint32_t maxLedger = forward ? options.maxLedger : findLedger - 1; - auto b58acct = idCache.toBase58(options.account); + auto b58acct = toBase58(options.account); sql = boost::str( boost::format(( R"(SELECT AccountTransactions.LedgerSeq,AccountTransactions.TxnSeq, @@ -1250,7 +1247,6 @@ accountTxPage( std::pair, int> oldestAccountTxPage( soci::session& session, - AccountIDCache const& idCache, std::function const& onUnsavedLedger, std::function< void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& @@ -1261,7 +1257,6 @@ oldestAccountTxPage( { return accountTxPage( session, - idCache, onUnsavedLedger, onTransaction, options, @@ -1273,7 +1268,6 @@ oldestAccountTxPage( std::pair, int> newestAccountTxPage( soci::session& session, - AccountIDCache const& idCache, std::function const& onUnsavedLedger, std::function< void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& @@ -1284,7 +1278,6 @@ newestAccountTxPage( { return accountTxPage( session, - idCache, onUnsavedLedger, onTransaction, options, diff --git a/src/ripple/app/rdb/backend/impl/SQLiteDatabase.cpp b/src/ripple/app/rdb/backend/impl/SQLiteDatabase.cpp index e6ec44399a0..547ab843b36 100644 --- a/src/ripple/app/rdb/backend/impl/SQLiteDatabase.cpp +++ b/src/ripple/app/rdb/backend/impl/SQLiteDatabase.cpp @@ -1322,7 +1322,6 @@ SQLiteDatabaseImp::oldestAccountTxPage(AccountTxPageOptions const& options) return {}; static std::uint32_t const page_length(200); - auto& idCache = app_.accountIDCache(); auto onUnsavedLedger = std::bind(saveLedgerAsync, std::ref(app_), std::placeholders::_1); AccountTxs ret; @@ -1338,15 +1337,10 @@ SQLiteDatabaseImp::oldestAccountTxPage(AccountTxPageOptions const& options) if (existsTransaction()) { auto db = checkoutTransaction(); - auto newmarker = detail::oldestAccountTxPage( - *db, - idCache, - onUnsavedLedger, - onTransaction, - options, - 0, - page_length) - .first; + auto newmarker = + detail::oldestAccountTxPage( + *db, onUnsavedLedger, onTransaction, options, 0, page_length) + .first; return {ret, newmarker}; } @@ -1363,7 +1357,6 @@ SQLiteDatabaseImp::oldestAccountTxPage(AccountTxPageOptions const& options) 
return false; auto [marker, total] = detail::oldestAccountTxPage( session, - idCache, onUnsavedLedger, onTransaction, opt, @@ -1391,7 +1384,6 @@ SQLiteDatabaseImp::newestAccountTxPage(AccountTxPageOptions const& options) return {}; static std::uint32_t const page_length(200); - auto& idCache = app_.accountIDCache(); auto onUnsavedLedger = std::bind(saveLedgerAsync, std::ref(app_), std::placeholders::_1); AccountTxs ret; @@ -1407,15 +1399,10 @@ SQLiteDatabaseImp::newestAccountTxPage(AccountTxPageOptions const& options) if (existsTransaction()) { auto db = checkoutTransaction(); - auto newmarker = detail::newestAccountTxPage( - *db, - idCache, - onUnsavedLedger, - onTransaction, - options, - 0, - page_length) - .first; + auto newmarker = + detail::newestAccountTxPage( + *db, onUnsavedLedger, onTransaction, options, 0, page_length) + .first; return {ret, newmarker}; } @@ -1432,7 +1419,6 @@ SQLiteDatabaseImp::newestAccountTxPage(AccountTxPageOptions const& options) return false; auto [marker, total] = detail::newestAccountTxPage( session, - idCache, onUnsavedLedger, onTransaction, opt, @@ -1460,7 +1446,6 @@ SQLiteDatabaseImp::oldestAccountTxPageB(AccountTxPageOptions const& options) return {}; static std::uint32_t const page_length(500); - auto& idCache = app_.accountIDCache(); auto onUnsavedLedger = std::bind(saveLedgerAsync, std::ref(app_), std::placeholders::_1); MetaTxsList ret; @@ -1475,15 +1460,10 @@ SQLiteDatabaseImp::oldestAccountTxPageB(AccountTxPageOptions const& options) if (existsTransaction()) { auto db = checkoutTransaction(); - auto newmarker = detail::oldestAccountTxPage( - *db, - idCache, - onUnsavedLedger, - onTransaction, - options, - 0, - page_length) - .first; + auto newmarker = + detail::oldestAccountTxPage( + *db, onUnsavedLedger, onTransaction, options, 0, page_length) + .first; return {ret, newmarker}; } @@ -1500,7 +1480,6 @@ SQLiteDatabaseImp::oldestAccountTxPageB(AccountTxPageOptions const& options) return false; auto [marker, total] = detail::oldestAccountTxPage( session, - idCache, onUnsavedLedger, onTransaction, opt, @@ -1528,7 +1507,6 @@ SQLiteDatabaseImp::newestAccountTxPageB(AccountTxPageOptions const& options) return {}; static std::uint32_t const page_length(500); - auto& idCache = app_.accountIDCache(); auto onUnsavedLedger = std::bind(saveLedgerAsync, std::ref(app_), std::placeholders::_1); MetaTxsList ret; @@ -1543,15 +1521,10 @@ SQLiteDatabaseImp::newestAccountTxPageB(AccountTxPageOptions const& options) if (existsTransaction()) { auto db = checkoutTransaction(); - auto newmarker = detail::newestAccountTxPage( - *db, - idCache, - onUnsavedLedger, - onTransaction, - options, - 0, - page_length) - .first; + auto newmarker = + detail::newestAccountTxPage( + *db, onUnsavedLedger, onTransaction, options, 0, page_length) + .first; return {ret, newmarker}; } @@ -1568,7 +1541,6 @@ SQLiteDatabaseImp::newestAccountTxPageB(AccountTxPageOptions const& options) return false; auto [marker, total] = detail::newestAccountTxPage( session, - idCache, onUnsavedLedger, onTransaction, opt, diff --git a/src/ripple/core/Config.h b/src/ripple/core/Config.h index c4a5076e76f..2d440a1afd9 100644 --- a/src/ripple/core/Config.h +++ b/src/ripple/core/Config.h @@ -57,7 +57,8 @@ enum class SizedItem : std::size_t { lgrDBCache, openFinalLimit, burstSize, - ramSizeGB + ramSizeGB, + accountIdCacheSize, }; // This entire derived class is deprecated. 
diff --git a/src/ripple/core/impl/Config.cpp b/src/ripple/core/impl/Config.cpp index e53d9688392..f8d8878a771 100644 --- a/src/ripple/core/impl/Config.cpp +++ b/src/ripple/core/impl/Config.cpp @@ -108,26 +108,27 @@ namespace ripple { // clang-format off // The configurable node sizes are "tiny", "small", "medium", "large", "huge" -inline constexpr std::array>, 12> +inline constexpr std::array>, 13> sizedItems {{ // FIXME: We should document each of these items, explaining exactly // what they control and whether there exists an explicit // config option that can be used to override the default. - // tiny small medium large huge - {SizedItem::sweepInterval, {{ 10, 30, 60, 90, 120 }}}, - {SizedItem::treeCacheSize, {{ 262144, 524288, 2097152, 4194304, 8388608 }}}, - {SizedItem::treeCacheAge, {{ 30, 60, 90, 120, 900 }}}, - {SizedItem::ledgerSize, {{ 32, 32, 64, 256, 384 }}}, - {SizedItem::ledgerAge, {{ 30, 60, 180, 300, 600 }}}, - {SizedItem::ledgerFetch, {{ 2, 3, 4, 5, 8 }}}, - {SizedItem::hashNodeDBCache, {{ 4, 12, 24, 64, 128 }}}, - {SizedItem::txnDBCache, {{ 4, 12, 24, 64, 128 }}}, - {SizedItem::lgrDBCache, {{ 4, 8, 16, 32, 128 }}}, - {SizedItem::openFinalLimit, {{ 8, 16, 32, 64, 128 }}}, - {SizedItem::burstSize, {{ 4, 8, 16, 32, 48 }}}, - {SizedItem::ramSizeGB, {{ 8, 12, 16, 24, 32 }}}, + // tiny small medium large huge + {SizedItem::sweepInterval, {{ 10, 30, 60, 90, 120 }}}, + {SizedItem::treeCacheSize, {{ 262144, 524288, 2097152, 4194304, 8388608 }}}, + {SizedItem::treeCacheAge, {{ 30, 60, 90, 120, 900 }}}, + {SizedItem::ledgerSize, {{ 32, 32, 64, 256, 384 }}}, + {SizedItem::ledgerAge, {{ 30, 60, 180, 300, 600 }}}, + {SizedItem::ledgerFetch, {{ 2, 3, 4, 5, 8 }}}, + {SizedItem::hashNodeDBCache, {{ 4, 12, 24, 64, 128 }}}, + {SizedItem::txnDBCache, {{ 4, 12, 24, 64, 128 }}}, + {SizedItem::lgrDBCache, {{ 4, 8, 16, 32, 128 }}}, + {SizedItem::openFinalLimit, {{ 8, 16, 32, 64, 128 }}}, + {SizedItem::burstSize, {{ 4, 8, 16, 32, 48 }}}, + {SizedItem::ramSizeGB, {{ 8, 12, 16, 24, 32 }}}, + {SizedItem::accountIdCacheSize, {{ 20047, 50053, 77081, 150061, 300007 }}} }}; // Ensure that the order of entries in the table corresponds to the diff --git a/src/ripple/protocol/AccountID.h b/src/ripple/protocol/AccountID.h index e514cfb741f..79768eefd7d 100644 --- a/src/ripple/protocol/AccountID.h +++ b/src/ripple/protocol/AccountID.h @@ -106,41 +106,20 @@ operator<<(std::ostream& os, AccountID const& x) return os; } -//------------------------------------------------------------------------------ +/** Initialize the global cache used to map AccountID to base58 conversions. -/** Caches the base58 representations of AccountIDs + The cache is optional and need not be initialized. But because conversion + is expensive (it requires a SHA-256 operation) in most cases the overhead + of the cache is worth the benefit. - This operation occurs with sufficient frequency to - justify having a cache. In the future, rippled should - require clients to receive "binary" results, where - AccountIDs are hex-encoded. -*/ -class AccountIDCache -{ -private: - std::mutex mutable mutex_; - std::size_t capacity_; - hash_map mutable m0_; - hash_map mutable m1_; - -public: - AccountIDCache(AccountIDCache const&) = delete; - AccountIDCache& - operator=(AccountIDCache const&) = delete; + @param count The number of entries the cache should accomodate. Zero will + disable the cache, releasing any memory associated with it. 
- explicit AccountIDCache(std::size_t capacity); - - /** Return ripple::toBase58 for the AccountID - - Thread Safety: - Safe to call from any thread concurrently - - @note This function intentionally returns a - copy for correctness. - */ - std::string - toBase58(AccountID const&) const; -}; + @note The function will only initialize the cache the first time it is + invoked. Subsequent invocations do nothing. + */ +void +initAccountIdCache(std::size_t count); } // namespace ripple diff --git a/src/ripple/protocol/impl/AccountID.cpp b/src/ripple/protocol/impl/AccountID.cpp index 8ca8d1d153c..c615807cf84 100644 --- a/src/ripple/protocol/impl/AccountID.cpp +++ b/src/ripple/protocol/impl/AccountID.cpp @@ -17,17 +17,95 @@ */ //============================================================================== +#include +#include #include #include #include #include +#include #include +#include namespace ripple { +namespace detail { + +/** Caches the base58 representations of AccountIDs */ +class AccountIdCache +{ +private: + struct CachedAccountID + { + AccountID id; + char encoding[40] = {0}; + }; + + // The actual cache + std::vector cache_; + + // We use a hash function designed to resist algorithmic complexity attacks + hardened_hash<> hasher_; + + // 64 spinlocks, packed into a single 64-bit value + std::atomic locks_ = 0; + +public: + AccountIdCache(std::size_t count) : cache_(count) + { + // This is non-binding, but we try to avoid wasting memory that + // is caused by overallocation. + cache_.shrink_to_fit(); + } + + std::string + toBase58(AccountID const& id) + { + auto const index = hasher_(id) % cache_.size(); + + packed_spinlock sl(locks_, index % 64); + + { + std::lock_guard lock(sl); + + // The check against the first character of the encoding ensures + // that we don't mishandle the case of the all-zero account: + if (cache_[index].encoding[0] != 0 && cache_[index].id == id) + return cache_[index].encoding; + } + + auto ret = + encodeBase58Token(TokenType::AccountID, id.data(), id.size()); + + assert(ret.size() <= 38); + + { + std::lock_guard lock(sl); + cache_[index].id = id; + std::strcpy(cache_[index].encoding, ret.c_str()); + } + + return ret; + } +}; + +} // namespace detail + +static std::unique_ptr accountIdCache; + +void +initAccountIdCache(std::size_t count) +{ + if (!accountIdCache && count != 0) + accountIdCache = std::make_unique(count); +} + std::string toBase58(AccountID const& v) { + if (accountIdCache) + return accountIdCache->toBase58(v); + return encodeBase58Token(TokenType::AccountID, v.data(), v.size()); } @@ -112,52 +190,4 @@ to_issuer(AccountID& issuer, std::string const& s) return true; } -//------------------------------------------------------------------------------ - -/* VFALCO NOTE - An alternate implementation could use a pair of insert-only - hash maps that each use a single large memory allocation - to store a fixed size hash table and all of the AccountID/string - pairs laid out in memory (wouldn't use std::string here just a - length prefixed or zero terminated array). Possibly using - boost::intrusive as the basis for the unordered container. - This would cut down to one allocate/free cycle per swap of - the map. 
-*/ - -AccountIDCache::AccountIDCache(std::size_t capacity) : capacity_(capacity) -{ - m1_.reserve(capacity_); -} - -std::string -AccountIDCache::toBase58(AccountID const& id) const -{ - std::lock_guard lock(mutex_); - auto iter = m1_.find(id); - if (iter != m1_.end()) - return iter->second; - iter = m0_.find(id); - std::string result; - if (iter != m0_.end()) - { - result = iter->second; - // Can use insert-only hash maps if - // we didn't erase from here. - m0_.erase(iter); - } - else - { - result = ripple::toBase58(id); - } - if (m1_.size() >= capacity_) - { - m0_ = std::move(m1_); - m1_.clear(); - m1_.reserve(capacity_); - } - m1_.emplace(id, result); - return result; -} - } // namespace ripple diff --git a/src/ripple/rpc/handlers/AccountChannels.cpp b/src/ripple/rpc/handlers/AccountChannels.cpp index cc79173e55f..e5059d3ffc5 100644 --- a/src/ripple/rpc/handlers/AccountChannels.cpp +++ b/src/ripple/rpc/handlers/AccountChannels.cpp @@ -202,7 +202,7 @@ doAccountChannels(RPC::JsonContext& context) to_string(*marker) + "," + std::to_string(nextHint); } - result[jss::account] = context.app.accountIDCache().toBase58(accountID); + result[jss::account] = toBase58(accountID); for (auto const& item : visitData.items) addChannel(jsonChannels, *item); diff --git a/src/ripple/rpc/handlers/AccountInfo.cpp b/src/ripple/rpc/handlers/AccountInfo.cpp index 417a3ffcd38..f5432dc65a1 100644 --- a/src/ripple/rpc/handlers/AccountInfo.cpp +++ b/src/ripple/rpc/handlers/AccountInfo.cpp @@ -215,7 +215,7 @@ doAccountInfo(RPC::JsonContext& context) } else { - result[jss::account] = context.app.accountIDCache().toBase58(accountID); + result[jss::account] = toBase58(accountID); RPC::inject_error(rpcACT_NOT_FOUND, result); } diff --git a/src/ripple/rpc/handlers/AccountLines.cpp b/src/ripple/rpc/handlers/AccountLines.cpp index 843b9ddea56..364d40673fa 100644 --- a/src/ripple/rpc/handlers/AccountLines.cpp +++ b/src/ripple/rpc/handlers/AccountLines.cpp @@ -252,7 +252,7 @@ doAccountLines(RPC::JsonContext& context) to_string(*marker) + "," + std::to_string(nextHint); } - result[jss::account] = context.app.accountIDCache().toBase58(accountID); + result[jss::account] = toBase58(accountID); for (auto const& item : visitData.items) addLine(jsonLines, item); diff --git a/src/ripple/rpc/handlers/AccountObjects.cpp b/src/ripple/rpc/handlers/AccountObjects.cpp index 4dcb3aba7de..6424c3afd3a 100644 --- a/src/ripple/rpc/handlers/AccountObjects.cpp +++ b/src/ripple/rpc/handlers/AccountObjects.cpp @@ -160,7 +160,7 @@ doAccountNFTs(RPC::JsonContext& context) cp = nullptr; } - result[jss::account] = context.app.accountIDCache().toBase58(accountID); + result[jss::account] = toBase58(accountID); context.loadType = Resource::feeMediumBurdenRPC; return result; } @@ -275,7 +275,7 @@ doAccountObjects(RPC::JsonContext& context) result[jss::account_objects] = Json::arrayValue; } - result[jss::account] = context.app.accountIDCache().toBase58(accountID); + result[jss::account] = toBase58(accountID); context.loadType = Resource::feeMediumBurdenRPC; return result; } diff --git a/src/ripple/rpc/handlers/AccountOffers.cpp b/src/ripple/rpc/handlers/AccountOffers.cpp index d3178756300..e957fe8a8e0 100644 --- a/src/ripple/rpc/handlers/AccountOffers.cpp +++ b/src/ripple/rpc/handlers/AccountOffers.cpp @@ -77,7 +77,7 @@ doAccountOffers(RPC::JsonContext& context) } // Get info on account. 
- result[jss::account] = context.app.accountIDCache().toBase58(accountID); + result[jss::account] = toBase58(accountID); if (!ledger->exists(keylet::account(accountID))) return rpcError(rpcACT_NOT_FOUND); diff --git a/src/ripple/rpc/handlers/GatewayBalances.cpp b/src/ripple/rpc/handlers/GatewayBalances.cpp index 825a74ab843..d0770f31edf 100644 --- a/src/ripple/rpc/handlers/GatewayBalances.cpp +++ b/src/ripple/rpc/handlers/GatewayBalances.cpp @@ -80,7 +80,7 @@ doGatewayBalances(RPC::JsonContext& context) context.loadType = Resource::feeHighBurdenRPC; - result[jss::account] = context.app.accountIDCache().toBase58(accountID); + result[jss::account] = toBase58(accountID); // Parse the specified hotwallet(s), if any std::set hotWallets; diff --git a/src/ripple/rpc/handlers/NFTOffers.cpp b/src/ripple/rpc/handlers/NFTOffers.cpp index 34bbc8446b9..69a090e27ec 100644 --- a/src/ripple/rpc/handlers/NFTOffers.cpp +++ b/src/ripple/rpc/handlers/NFTOffers.cpp @@ -41,12 +41,10 @@ appendNftOfferJson( obj[jss::nft_offer_index] = to_string(offer->key()); obj[jss::flags] = (*offer)[sfFlags]; - obj[jss::owner] = - app.accountIDCache().toBase58(offer->getAccountID(sfOwner)); + obj[jss::owner] = toBase58(offer->getAccountID(sfOwner)); if (offer->isFieldPresent(sfDestination)) - obj[jss::destination] = - app.accountIDCache().toBase58(offer->getAccountID(sfDestination)); + obj[jss::destination] = toBase58(offer->getAccountID(sfDestination)); if (offer->isFieldPresent(sfExpiration)) obj[jss::expiration] = offer->getFieldU32(sfExpiration); diff --git a/src/ripple/rpc/handlers/NoRippleCheck.cpp b/src/ripple/rpc/handlers/NoRippleCheck.cpp index 2a6ab7ca4ed..a2af9845fd7 100644 --- a/src/ripple/rpc/handlers/NoRippleCheck.cpp +++ b/src/ripple/rpc/handlers/NoRippleCheck.cpp @@ -40,7 +40,7 @@ fillTransaction( ReadView const& ledger) { txArray["Sequence"] = Json::UInt(sequence++); - txArray["Account"] = context.app.accountIDCache().toBase58(accountID); + txArray["Account"] = toBase58(accountID); auto& fees = ledger.fees(); // Convert the reference transaction cost in fee units to drops // scaled to represent the current fee load. From 0ecfc7cb1a958b731e5f184876ea89ae2d4214ee Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Mon, 30 May 2022 21:05:10 -0700 Subject: [PATCH 042/508] Improve self-signed certificate generation: When starting, the code generates a new ephemeral private key and a corresponding certificate to go along with it. This process can take time and, while this is unlikely to matter for normal server operations, it can have a significant impact for unit testing and development. Profiling data suggests that ~20% of the time needed for a unit test run can be attributed to this. This commit does several things: 1. It restructures the code so that a new self-signed certificate and its corresponding private key are only initialized once at startup; this has minimal impact on the operation of a regular server. 2. It provides new default DH parameters. This doesn't impact the security of the connection, but those who compile from scratch can generate new parameters if they so choose. 3. It properly sets the version number in the certificate, fixing issue #4007; thanks to @donovanhide for the report. 4. It uses SHA-256 instead of SHA-1 as the hash algorithm for the certificate and adds some X.509 extensions as well as a random 128-bit serial number. 5. 
It rounds the certificate's "start of validity" period so that the server's precise startup time cannot be easily deduced and limits the validity period to two years, down from ten years. 6. It removes some CBC-based ciphers from the default cipher list to avoid some potential security issues, such as CVE-2016-2107 and CVE-2013-0169. --- src/ripple/basics/impl/make_SSLContext.cpp | 371 +++++++++------------ 1 file changed, 166 insertions(+), 205 deletions(-) diff --git a/src/ripple/basics/impl/make_SSLContext.cpp b/src/ripple/basics/impl/make_SSLContext.cpp index 67beb79d3ed..79a0e9009be 100644 --- a/src/ripple/basics/impl/make_SSLContext.cpp +++ b/src/ripple/basics/impl/make_SSLContext.cpp @@ -17,18 +17,55 @@ */ //============================================================================== -#include #include #include -#include -#include -#include +#include #include namespace ripple { namespace openssl { namespace detail { +/** The default strength of self-signed RSA certifices. + + Per NIST Special Publication 800-57 Part 3, 2048-bit RSA is still + considered acceptably secure. Generally, we would want to go above + and beyond such recommendations (e.g. by using 3072 or 4096 bits) + but there is a computational cost associated with that may not + be worth paying, considering that: + + - We regenerate a new ephemeral certificate and a securely generated + random private key every time the server is started; and + - There should not be any truly secure information (e.g. seeds or private + keys) that gets relayed to the server anyways over these RPCs. + + @note If you increase the number of bits you need to generate new + default DH parameters and update defaultDH accordingly. + * */ +int defaultRSAKeyBits = 2048; + +/** The default DH parameters. + + These were generated using the OpenSSL command: `openssl dhparam 2048` + by Nik Bougalis on May, 29, 2022. + + It is safe to use this, but if you want you can generate different + parameters and put them here. There's no easy way to change this + via the config file at this time. + + @note If you increase the number of bits you need to update + defaultRSAKeyBits accordingly. + */ +static constexpr char const defaultDH[] = + "-----BEGIN DH PARAMETERS-----\n" + "MIIBCAKCAQEApKSWfR7LKy0VoZ/SDCObCvJ5HKX2J93RJ+QN8kJwHh+uuA8G+t8Q\n" + "MDRjL5HanlV/sKN9HXqBc7eqHmmbqYwIXKUt9MUZTLNheguddxVlc2IjdP5i9Ps8\n" + "l7su8tnP0l1JvC6Rfv3epRsEAw/ZW/lC2IwkQPpOmvnENQhQ6TgrUzcGkv4Bn0X6\n" + "pxrDSBpZ+45oehGCUAtcbY8b02vu8zPFoxqo6V/+MIszGzldlik5bVqrJpVF6E8C\n" + "tRqHjj6KuDbPbjc+pRGvwx/BSO3SULxmYu9J1NOk090MU1CMt6IJY7TpEc9Xrac9\n" + "9yqY3xXZID240RRcaJ25+U4lszFPqP+CEwIBAg==\n" + "-----END DH PARAMETERS-----"; + /** The default list of ciphers we accept over TLS. Generally we include cipher suites that are part of TLS v1.2, but @@ -43,190 +80,150 @@ namespace detail { global or per-port basis, using the `ssl_ciphers` directive in the config file. 
*/ -std::string const defaultCipherList = "TLSv1.2:!DSS:!PSK:!eNULL:!aNULL"; +std::string const defaultCipherList = "TLSv1.2:!CBC:!DSS:!PSK:!eNULL:!aNULL"; -template -struct custom_delete; - -template <> -struct custom_delete +static void +initAnonymous(boost::asio::ssl::context& context) { - explicit custom_delete() = default; + using namespace openssl; - void - operator()(RSA* rsa) const - { - RSA_free(rsa); - } -}; + static auto defaultRSA = []() { + BIGNUM* bn = BN_new(); + BN_set_word(bn, RSA_F4); -template <> -struct custom_delete -{ - explicit custom_delete() = default; + auto rsa = RSA_new(); - void - operator()(EVP_PKEY* evp_pkey) const - { - EVP_PKEY_free(evp_pkey); - } -}; + if (!rsa) + LogicError("RSA_new failed"); -template <> -struct custom_delete -{ - explicit custom_delete() = default; + if (RSA_generate_key_ex(rsa, defaultRSAKeyBits, bn, nullptr) != 1) + LogicError("RSA_generate_key_ex failure"); - void - operator()(X509* x509) const - { - X509_free(x509); - } -}; + BN_clear_free(bn); -template <> -struct custom_delete -{ - explicit custom_delete() = default; + return rsa; + }(); - void - operator()(DH* dh) const - { - DH_free(dh); - } -}; + static auto defaultEphemeralPrivateKey = []() { + auto pkey = EVP_PKEY_new(); -template -using custom_delete_unique_ptr = std::unique_ptr>; + if (!pkey) + LogicError("EVP_PKEY_new failed"); -// RSA + // We need to up the reference count of here, since we are retaining a + // copy of the key for (potential) reuse. + if (RSA_up_ref(defaultRSA) != 1) + LogicError( + "EVP_PKEY_assign_RSA: incrementing reference count failed"); -using rsa_ptr = custom_delete_unique_ptr; + if (!EVP_PKEY_assign_RSA(pkey, defaultRSA)) + LogicError("EVP_PKEY_assign_RSA failed"); -static rsa_ptr -rsa_generate_key(int n_bits) -{ -#if OPENSSL_VERSION_NUMBER >= 0x00908000L - BIGNUM* bn = BN_new(); - BN_set_word(bn, RSA_F4); + return pkey; + }(); - RSA* rsa = RSA_new(); - if (RSA_generate_key_ex(rsa, n_bits, bn, nullptr) != 1) - { - RSA_free(rsa); - rsa = nullptr; - } + static auto defaultCert = []() { + auto x509 = X509_new(); - BN_free(bn); -#else - RSA* rsa = RSA_generate_key(n_bits, RSA_F4, nullptr, nullptr); -#endif + if (x509 == nullptr) + LogicError("X509_new failed"); - if (rsa == nullptr) - LogicError("RSA_generate_key failed"); + // According to the standards (X.509 et al), the value should be one + // less than the actualy certificate version we want. Since we want + // version 3, we must use a 2. 
+ X509_set_version(x509, 2); - return rsa_ptr(rsa); -} + // To avoid leaking information about the precise time that the + // server started up, we adjust the validity period: + char buf[16] = {0}; -// EVP_PKEY + auto const ts = std::time(nullptr) - (25 * 60 * 60); -using evp_pkey_ptr = custom_delete_unique_ptr; + int ret = std::strftime( + buf, sizeof(buf) - 1, "%y%m%d000000Z", std::gmtime(&ts)); -static evp_pkey_ptr -evp_pkey_new() -{ - EVP_PKEY* evp_pkey = EVP_PKEY_new(); + buf[ret] = 0; - if (evp_pkey == nullptr) - LogicError("EVP_PKEY_new failed"); + if (ASN1_TIME_set_string_X509(X509_get_notBefore(x509), buf) != 1) + LogicError("Unable to set certificate validity date"); - return evp_pkey_ptr(evp_pkey); -} + // And make it valid for two years + X509_gmtime_adj(X509_get_notAfter(x509), 2 * 365 * 24 * 60 * 60); -static void -evp_pkey_assign_rsa(EVP_PKEY* evp_pkey, rsa_ptr rsa) -{ - if (!EVP_PKEY_assign_RSA(evp_pkey, rsa.get())) - LogicError("EVP_PKEY_assign_RSA failed"); + // Set a serial number + if (auto b = BN_new(); b != nullptr) + { + if (BN_rand(b, 128, BN_RAND_TOP_ANY, BN_RAND_BOTTOM_ANY)) + { + if (auto a = ASN1_INTEGER_new(); a != nullptr) + { + if (BN_to_ASN1_INTEGER(b, a)) + X509_set_serialNumber(x509, a); - rsa.release(); -} + ASN1_INTEGER_free(a); + } + } -// X509 + BN_clear_free(b); + } -using x509_ptr = custom_delete_unique_ptr; + // Some certificate details + { + X509V3_CTX ctx; -static x509_ptr -x509_new() -{ - X509* x509 = X509_new(); + X509V3_set_ctx_nodb(&ctx); + X509V3_set_ctx(&ctx, x509, x509, nullptr, nullptr, 0); - if (x509 == nullptr) - LogicError("X509_new failed"); + if (auto ext = X509V3_EXT_conf_nid( + nullptr, &ctx, NID_basic_constraints, "critical,CA:FALSE")) + { + X509_add_ext(x509, ext, -1); + X509_EXTENSION_free(ext); + } - X509_set_version(x509, NID_X509); + if (auto ext = X509V3_EXT_conf_nid( + nullptr, + &ctx, + NID_ext_key_usage, + "critical,serverAuth,clientAuth")) + { + X509_add_ext(x509, ext, -1); + X509_EXTENSION_free(ext); + } - int const margin = 60 * 60; // 3600, one hour - int const length = 10 * 365.25 * 24 * 60 * 60; // 315576000, ten years + if (auto ext = X509V3_EXT_conf_nid( + nullptr, &ctx, NID_key_usage, "critical,digitalSignature")) + { + X509_add_ext(x509, ext, -1); + X509_EXTENSION_free(ext); + } - X509_gmtime_adj(X509_get_notBefore(x509), -margin); - X509_gmtime_adj(X509_get_notAfter(x509), length); + if (auto ext = X509V3_EXT_conf_nid( + nullptr, &ctx, NID_subject_key_identifier, "hash")) + { + X509_add_ext(x509, ext, -1); + X509_EXTENSION_free(ext); + } + } - return x509_ptr(x509); -} + // And a private key + X509_set_pubkey(x509, defaultEphemeralPrivateKey); -static void -x509_set_pubkey(X509* x509, EVP_PKEY* evp_pkey) -{ - X509_set_pubkey(x509, evp_pkey); -} + if (!X509_sign(x509, defaultEphemeralPrivateKey, EVP_sha256())) + LogicError("X509_sign failed"); -static void -x509_sign(X509* x509, EVP_PKEY* evp_pkey) -{ - if (!X509_sign(x509, evp_pkey, EVP_sha1())) - LogicError("X509_sign failed"); -} + return x509; + }(); -static void -ssl_ctx_use_certificate(SSL_CTX* const ctx, x509_ptr cert) -{ - if (SSL_CTX_use_certificate(ctx, cert.get()) <= 0) + SSL_CTX* const ctx = context.native_handle(); + + if (SSL_CTX_use_certificate(ctx, defaultCert) <= 0) LogicError("SSL_CTX_use_certificate failed"); -} -static void -ssl_ctx_use_privatekey(SSL_CTX* const ctx, evp_pkey_ptr key) -{ - if (SSL_CTX_use_PrivateKey(ctx, key.get()) <= 0) + if (SSL_CTX_use_PrivateKey(ctx, defaultEphemeralPrivateKey) <= 0) LogicError("SSL_CTX_use_PrivateKey 
failed"); } -static std::string -error_message(std::string const& what, boost::system::error_code const& ec) -{ - std::stringstream ss; - ss << what << ": " << ec.message() << " (" << ec.value() << ")"; - return ss.str(); -} - -static void -initAnonymous(boost::asio::ssl::context& context) -{ - using namespace openssl; - - evp_pkey_ptr pkey = evp_pkey_new(); - evp_pkey_assign_rsa(pkey.get(), rsa_generate_key(2048)); - - x509_ptr cert = x509_new(); - x509_set_pubkey(cert.get(), pkey.get()); - x509_sign(cert.get(), pkey.get()); - - SSL_CTX* const ctx = context.native_handle(); - ssl_ctx_use_certificate(ctx, std::move(cert)); - ssl_ctx_use_privatekey(ctx, std::move(pkey)); -} - static void initAuthenticated( boost::asio::ssl::context& context, @@ -234,6 +231,10 @@ initAuthenticated( std::string const& cert_file, std::string const& chain_file) { + auto fmt_error = [](boost::system::error_code ec) -> std::string { + return " [" + std::to_string(ec.value()) + ": " + ec.message() + "]"; + }; + SSL_CTX* const ssl = context.native_handle(); bool cert_set = false; @@ -246,10 +247,7 @@ initAuthenticated( cert_file, boost::asio::ssl::context::pem, ec); if (ec) - { - LogicError(error_message("Problem with SSL certificate file.", ec) - .c_str()); - } + LogicError("Problem with SSL certificate file" + fmt_error(ec)); cert_set = true; } @@ -261,11 +259,10 @@ initAuthenticated( if (!f) { - LogicError(error_message( - "Problem opening SSL chain file.", - boost::system::error_code( - errno, boost::system::generic_category())) - .c_str()); + LogicError( + "Problem opening SSL chain file" + + fmt_error(boost::system::error_code( + errno, boost::system::generic_category()))); } try @@ -312,8 +309,7 @@ initAuthenticated( if (ec) { LogicError( - error_message("Problem using the SSL private key file.", ec) - .c_str()); + "Problem using the SSL private key file" + fmt_error(ec)); } } @@ -324,7 +320,7 @@ initAuthenticated( } std::shared_ptr -get_context(std::string const& cipherList) +get_context(std::string cipherList) { auto c = std::make_shared( boost::asio::ssl::context::sslv23); @@ -338,55 +334,20 @@ get_context(std::string const& cipherList) boost::asio::ssl::context::single_dh_use | boost::asio::ssl::context::no_compression); - { - auto const& l = !cipherList.empty() ? cipherList : defaultCipherList; - auto result = SSL_CTX_set_cipher_list(c->native_handle(), l.c_str()); - if (result != 1) - LogicError("SSL_CTX_set_cipher_list failed"); - } + if (cipherList.empty()) + cipherList = defaultCipherList; + + if (auto result = + SSL_CTX_set_cipher_list(c->native_handle(), cipherList.c_str()); + result != 1) + LogicError("SSL_CTX_set_cipher_list failed"); - // These are the raw DH parameters that Ripple Labs has - // chosen for Ripple, in the binary format needed by - // d2i_DHparams. 
- // - unsigned char const params[] = { - 0x30, 0x82, 0x01, 0x08, 0x02, 0x82, 0x01, 0x01, 0x00, 0x8f, 0xca, 0x66, - 0x85, 0x33, 0xcb, 0xcf, 0x36, 0x27, 0xb2, 0x4c, 0xb8, 0x50, 0xb8, 0xf9, - 0x53, 0xf8, 0xb9, 0x2d, 0x1c, 0xa2, 0xad, 0x86, 0x58, 0x29, 0x3b, 0x88, - 0x3e, 0xf5, 0x65, 0xb8, 0xda, 0x22, 0xf4, 0x8b, 0x21, 0x12, 0x18, 0xf7, - 0x16, 0xcd, 0x7c, 0xc7, 0x3a, 0x2d, 0x61, 0xb7, 0x11, 0xf6, 0xb0, 0x65, - 0xa0, 0x5b, 0xa4, 0x06, 0x95, 0x28, 0xa4, 0x4f, 0x76, 0xc0, 0xeb, 0xfa, - 0x95, 0xdf, 0xbf, 0x19, 0x90, 0x64, 0x8f, 0x60, 0xd5, 0x36, 0xba, 0xab, - 0x0d, 0x5a, 0x5c, 0x94, 0xd5, 0xf7, 0x32, 0xd6, 0x2a, 0x76, 0x77, 0x83, - 0x10, 0xc4, 0x2f, 0x10, 0x96, 0x3e, 0x37, 0x84, 0x45, 0x9c, 0xef, 0x33, - 0xf6, 0xd0, 0x2a, 0xa7, 0xce, 0x0a, 0xce, 0x0d, 0xa1, 0xa7, 0x44, 0x5d, - 0x18, 0x3f, 0x4f, 0xa4, 0x23, 0x9c, 0x5d, 0x74, 0x4f, 0xee, 0xdf, 0xaa, - 0x0d, 0x0a, 0x52, 0x57, 0x73, 0xb1, 0xe4, 0xc5, 0x72, 0x93, 0x9d, 0x03, - 0xe9, 0xf5, 0x48, 0x8c, 0xd1, 0xe6, 0x7c, 0x21, 0x65, 0x4e, 0x16, 0x51, - 0xa3, 0x16, 0x51, 0x10, 0x75, 0x60, 0x37, 0x93, 0xb8, 0x15, 0xd6, 0x14, - 0x41, 0x4a, 0x61, 0xc9, 0x1a, 0x4e, 0x9f, 0x38, 0xd8, 0x2c, 0xa5, 0x31, - 0xe1, 0x87, 0xda, 0x1f, 0xa4, 0x31, 0xa2, 0xa4, 0x42, 0x1e, 0xe0, 0x30, - 0xea, 0x2f, 0x9b, 0x77, 0x91, 0x59, 0x3e, 0xd5, 0xd0, 0xc5, 0x84, 0x45, - 0x17, 0x19, 0x74, 0x8b, 0x18, 0xb0, 0xc1, 0xe0, 0xfc, 0x1c, 0xaf, 0xe6, - 0x2a, 0xef, 0x4e, 0x0e, 0x8a, 0x5c, 0xc2, 0x91, 0xb9, 0x2b, 0xf8, 0x17, - 0x8d, 0xed, 0x44, 0xaa, 0x47, 0xaa, 0x52, 0xa2, 0xdb, 0xb6, 0xf5, 0xa1, - 0x88, 0x85, 0xa1, 0xd5, 0x87, 0xb8, 0x07, 0xd3, 0x97, 0xbe, 0x37, 0x74, - 0x72, 0xf1, 0xa8, 0x29, 0xf1, 0xa7, 0x7d, 0x19, 0xc3, 0x27, 0x09, 0xcf, - 0x23, 0x02, 0x01, 0x02}; - - unsigned char const* data = ¶ms[0]; - - custom_delete_unique_ptr const dh{ - d2i_DHparams(nullptr, &data, sizeof(params))}; - if (!dh) - LogicError("d2i_DHparams returned nullptr."); - - SSL_CTX_set_tmp_dh(c->native_handle(), dh.get()); + c->use_tmp_dh({std::addressof(detail::defaultDH), sizeof(defaultDH)}); // Disable all renegotiation support in TLS v1.2. This can help prevent // exploitation of the bug described in CVE-2021-3499 (for details see - // https://www.openssl.org/news/secadv/20210325.txt) when linking against - // OpenSSL versions prior to 1.1.1k. + // https://www.openssl.org/news/secadv/20210325.txt) when linking + // against OpenSSL versions prior to 1.1.1k. 
SSL_CTX_set_options(c->native_handle(), SSL_OP_NO_RENEGOTIATION); return c; From 7b3507bb873495a974db33c57a888221ddabcacc Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Sat, 21 May 2022 20:42:30 -0700 Subject: [PATCH 043/508] Improve wrapper around OpenSSL RAND --- src/ripple/crypto/csprng.h | 3 -- src/ripple/crypto/impl/csprng.cpp | 65 +++++++++++++------------------ 2 files changed, 28 insertions(+), 40 deletions(-) diff --git a/src/ripple/crypto/csprng.h b/src/ripple/crypto/csprng.h index 39506324963..3ad5d700046 100644 --- a/src/ripple/crypto/csprng.h +++ b/src/ripple/crypto/csprng.h @@ -39,9 +39,6 @@ class csprng_engine private: std::mutex mutex_; - void - mix(void* buffer, std::size_t count, double bitsPerByte); - public: using result_type = std::uint64_t; diff --git a/src/ripple/crypto/impl/csprng.cpp b/src/ripple/crypto/impl/csprng.cpp index a166fe28807..04b3b3fc385 100644 --- a/src/ripple/crypto/impl/csprng.cpp +++ b/src/ripple/crypto/impl/csprng.cpp @@ -17,7 +17,6 @@ */ //============================================================================== -#include #include #include #include @@ -28,25 +27,19 @@ namespace ripple { -void -csprng_engine::mix(void* data, std::size_t size, double bitsPerByte) -{ - assert(data != nullptr); - assert(size != 0); - assert(bitsPerByte != 0); - - std::lock_guard lock(mutex_); - RAND_add(data, size, (size * bitsPerByte) / 8.0); -} - csprng_engine::csprng_engine() { - mix_entropy(); + // This is not strictly necessary + if (RAND_poll() != 1) + Throw("CSPRNG: Initial polling failed"); } csprng_engine::~csprng_engine() { + // This cleanup function is not needed in newer versions of OpenSSL +#if (OPENSSL_VERSION_NUMBER < 0x10100000L) RAND_cleanup(); +#endif } void @@ -64,44 +57,42 @@ csprng_engine::mix_entropy(void* buffer, std::size_t count) e = rd(); } - // Assume 2 bits per byte for the system entropy: - mix(entropy.data(), - entropy.size() * sizeof(std::random_device::result_type), - 2.0); - - // We want to be extremely conservative about estimating - // how much entropy the buffer the user gives us contains - // and assume only 0.5 bits of entropy per byte: - if (buffer != nullptr && count != 0) - mix(buffer, count, 0.5); -} - -csprng_engine::result_type -csprng_engine::operator()() -{ - result_type ret; - std::lock_guard lock(mutex_); - auto const result = - RAND_bytes(reinterpret_cast(&ret), sizeof(ret)); - - if (result == 0) - Throw("Insufficient entropy"); + // We add data to the pool, but we conservatively assume that + // it contributes no actual entropy. + RAND_add( + entropy.data(), + entropy.size() * sizeof(std::random_device::result_type), + 0); - return ret; + if (buffer != nullptr && count != 0) + RAND_add(buffer, count, 0); } void csprng_engine::operator()(void* ptr, std::size_t count) { + // RAND_bytes is thread-safe on OpenSSL 1.1.0 and later when compiled + // with thread support, so we don't need to grab a mutex. 
+ // https://mta.openssl.org/pipermail/openssl-users/2020-November/013146.html +#if (OPENSSL_VERSION_NUMBER < 0x10100000L) || !defined(OPENSSL_THREADS) std::lock_guard lock(mutex_); +#endif auto const result = RAND_bytes(reinterpret_cast(ptr), count); if (result != 1) - Throw("Insufficient entropy"); + Throw("CSPRNG: Insufficient entropy"); +} + +csprng_engine::result_type +csprng_engine::operator()() +{ + result_type ret; + (*this)(&ret, sizeof(result_type)); + return ret; } csprng_engine& From fe05b8c4feb815e1aa6afd7ff8c85dbc92bcd651 Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Thu, 4 Aug 2022 11:16:10 -0700 Subject: [PATCH 044/508] Set version to 1.10.0-b1 --- src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index 734dc11cb38..43f0eedff23 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.9.3" +char const* const versionString = "1.10.0-b1" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From f5af42a64089ab0563c343a9eba234627d8afe5e Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Thu, 18 Aug 2022 20:29:36 -0400 Subject: [PATCH 045/508] Remove use of deprecated std::iterator --- .../detail/aged_container_iterator.h | 59 ++++++++----------- .../container/detail/aged_ordered_container.h | 40 ++++++------- .../detail/aged_unordered_container.h | 42 ++++++------- src/ripple/beast/core/List.h | 3 +- src/ripple/beast/core/LockFreeStack.h | 15 +---- 5 files changed, 69 insertions(+), 90 deletions(-) diff --git a/src/ripple/beast/container/detail/aged_container_iterator.h b/src/ripple/beast/container/detail/aged_container_iterator.h index cd8677ce17f..7467ad33c7d 100644 --- a/src/ripple/beast/container/detail/aged_container_iterator.h +++ b/src/ripple/beast/container/detail/aged_container_iterator.h @@ -30,23 +30,21 @@ class aged_ordered_container; namespace detail { -// Idea for Base template argument to prevent having to repeat -// the base class declaration comes from newbiz on ##c++/Freenode -// // If Iterator is SCARY then this iterator will be as well. 
-template < - bool is_const, - class Iterator, - class Base = std::iterator< - typename std::iterator_traits::iterator_category, - typename std::conditional< - is_const, - typename Iterator::value_type::stashed::value_type const, - typename Iterator::value_type::stashed::value_type>::type, - typename std::iterator_traits::difference_type>> -class aged_container_iterator : public Base +template +class aged_container_iterator { public: + using iterator_category = + typename std::iterator_traits::iterator_category; + using value_type = typename std::conditional< + is_const, + typename Iterator::value_type::stashed::value_type const, + typename Iterator::value_type::stashed::value_type>::type; + using difference_type = + typename std::iterator_traits::difference_type; + using pointer = value_type*; + using reference = value_type&; using time_point = typename Iterator::value_type::stashed::time_point; aged_container_iterator() = default; @@ -56,13 +54,11 @@ class aged_container_iterator : public Base template < bool other_is_const, class OtherIterator, - class OtherBase, class = typename std::enable_if< (other_is_const == false || is_const == true) && std::is_same::value == false>::type> explicit aged_container_iterator( - aged_container_iterator const& - other) + aged_container_iterator const& other) : m_iter(other.m_iter) { } @@ -70,22 +66,19 @@ class aged_container_iterator : public Base // Disable constructing a const_iterator from a non-const_iterator. template < bool other_is_const, - class OtherBase, class = typename std::enable_if< other_is_const == false || is_const == true>::type> aged_container_iterator( - aged_container_iterator const& - other) + aged_container_iterator const& other) : m_iter(other.m_iter) { } // Disable assigning a const_iterator to a non-const iterator - template + template auto operator=( - aged_container_iterator const& - other) -> + aged_container_iterator const& other) -> typename std::enable_if< other_is_const == false || is_const == true, aged_container_iterator&>::type @@ -94,20 +87,18 @@ class aged_container_iterator : public Base return *this; } - template + template bool - operator==( - aged_container_iterator const& - other) const + operator==(aged_container_iterator const& + other) const { return m_iter == other.m_iter; } - template + template bool - operator!=( - aged_container_iterator const& - other) const + operator!=(aged_container_iterator const& + other) const { return m_iter != other.m_iter; } @@ -142,13 +133,13 @@ class aged_container_iterator : public Base return prev; } - typename Base::reference + reference operator*() const { return m_iter->value; } - typename Base::pointer + pointer operator->() const { return &m_iter->value; @@ -167,7 +158,7 @@ class aged_container_iterator : public Base template friend class aged_unordered_container; - template + template friend class aged_container_iterator; template diff --git a/src/ripple/beast/container/detail/aged_ordered_container.h b/src/ripple/beast/container/detail/aged_ordered_container.h index ed6585dd501..9da5f20a0a4 100644 --- a/src/ripple/beast/container/detail/aged_ordered_container.h +++ b/src/ripple/beast/container/detail/aged_ordered_container.h @@ -989,22 +989,20 @@ class aged_ordered_container template < bool is_const, class Iterator, - class Base, class = std::enable_if_t::value>> - beast::detail::aged_container_iterator - erase(beast::detail::aged_container_iterator pos); + beast::detail::aged_container_iterator + erase(beast::detail::aged_container_iterator pos); // enable_if 
prevents erase (reverse_iterator first, reverse_iterator last) // from compiling template < bool is_const, class Iterator, - class Base, class = std::enable_if_t::value>> - beast::detail::aged_container_iterator + beast::detail::aged_container_iterator erase( - beast::detail::aged_container_iterator first, - beast::detail::aged_container_iterator last); + beast::detail::aged_container_iterator first, + beast::detail::aged_container_iterator last); template auto @@ -1019,10 +1017,9 @@ class aged_ordered_container template < bool is_const, class Iterator, - class Base, class = std::enable_if_t::value>> void - touch(beast::detail::aged_container_iterator pos) + touch(beast::detail::aged_container_iterator pos) { touch(pos, clock().now()); } @@ -1264,11 +1261,10 @@ class aged_ordered_container template < bool is_const, class Iterator, - class Base, class = std::enable_if_t::value>> void touch( - beast::detail::aged_container_iterator pos, + beast::detail::aged_container_iterator pos, typename clock_type::time_point const& now); template < @@ -2010,13 +2006,13 @@ template < class Clock, class Compare, class Allocator> -template -beast::detail::aged_container_iterator +template +beast::detail::aged_container_iterator aged_ordered_container:: - erase(beast::detail::aged_container_iterator pos) + erase(beast::detail::aged_container_iterator pos) { unlink_and_delete_element(&*((pos++).iterator())); - return beast::detail::aged_container_iterator( + return beast::detail::aged_container_iterator( pos.iterator()); } @@ -2028,17 +2024,17 @@ template < class Clock, class Compare, class Allocator> -template -beast::detail::aged_container_iterator +template +beast::detail::aged_container_iterator aged_ordered_container:: erase( - beast::detail::aged_container_iterator first, - beast::detail::aged_container_iterator last) + beast::detail::aged_container_iterator first, + beast::detail::aged_container_iterator last) { for (; first != last;) unlink_and_delete_element(&*((first++).iterator())); - return beast::detail::aged_container_iterator( + return beast::detail::aged_container_iterator( first.iterator()); } @@ -2173,11 +2169,11 @@ template < class Clock, class Compare, class Allocator> -template +template void aged_ordered_container:: touch( - beast::detail::aged_container_iterator pos, + beast::detail::aged_container_iterator pos, typename clock_type::time_point const& now) { auto& e(*pos.iterator()); diff --git a/src/ripple/beast/container/detail/aged_unordered_container.h b/src/ripple/beast/container/detail/aged_unordered_container.h index 8bc2330fa9f..920e6196bb9 100644 --- a/src/ripple/beast/container/detail/aged_unordered_container.h +++ b/src/ripple/beast/container/detail/aged_unordered_container.h @@ -1205,15 +1205,15 @@ class aged_unordered_container return emplace(std::forward(args)...); } - template - beast::detail::aged_container_iterator - erase(beast::detail::aged_container_iterator pos); + template + beast::detail::aged_container_iterator + erase(beast::detail::aged_container_iterator pos); - template - beast::detail::aged_container_iterator + template + beast::detail::aged_container_iterator erase( - beast::detail::aged_container_iterator first, - beast::detail::aged_container_iterator last); + beast::detail::aged_container_iterator first, + beast::detail::aged_container_iterator last); template auto @@ -1222,9 +1222,9 @@ class aged_unordered_container void swap(aged_unordered_container& other) noexcept; - template + template void - touch(beast::detail::aged_container_iterator pos) + 
touch(beast::detail::aged_container_iterator pos) { touch(pos, clock().now()); } @@ -1541,10 +1541,10 @@ class aged_unordered_container insert_unchecked(first, last); } - template + template void touch( - beast::detail::aged_container_iterator pos, + beast::detail::aged_container_iterator pos, typename clock_type::time_point const& now) { auto& e(*pos.iterator()); @@ -3044,8 +3044,8 @@ template < class Hash, class KeyEqual, class Allocator> -template -beast::detail::aged_container_iterator +template +beast::detail::aged_container_iterator aged_unordered_container< IsMulti, IsMap, @@ -3054,11 +3054,11 @@ aged_unordered_container< Clock, Hash, KeyEqual, - Allocator>:: - erase(beast::detail::aged_container_iterator pos) + Allocator>::erase(beast::detail::aged_container_iterator + pos) { unlink_and_delete_element(&*((pos++).iterator())); - return beast::detail::aged_container_iterator( + return beast::detail::aged_container_iterator( pos.iterator()); } @@ -3071,8 +3071,8 @@ template < class Hash, class KeyEqual, class Allocator> -template -beast::detail::aged_container_iterator +template +beast::detail::aged_container_iterator aged_unordered_container< IsMulti, IsMap, @@ -3083,13 +3083,13 @@ aged_unordered_container< KeyEqual, Allocator>:: erase( - beast::detail::aged_container_iterator first, - beast::detail::aged_container_iterator last) + beast::detail::aged_container_iterator first, + beast::detail::aged_container_iterator last) { for (; first != last;) unlink_and_delete_element(&*((first++).iterator())); - return beast::detail::aged_container_iterator( + return beast::detail::aged_container_iterator( first.iterator()); } diff --git a/src/ripple/beast/core/List.h b/src/ripple/beast/core/List.h index 1daf5cda76f..9b3c889d6ac 100644 --- a/src/ripple/beast/core/List.h +++ b/src/ripple/beast/core/List.h @@ -72,11 +72,12 @@ class ListNode template class ListIterator - : public std::iterator { public: + using iterator_category = std::bidirectional_iterator_tag; using value_type = typename beast::detail::CopyConst::type; + using difference_type = std::ptrdiff_t; using pointer = value_type*; using reference = value_type&; using size_type = std::size_t; diff --git a/src/ripple/beast/core/LockFreeStack.h b/src/ripple/beast/core/LockFreeStack.h index ff022b96a10..107564415cd 100644 --- a/src/ripple/beast/core/LockFreeStack.h +++ b/src/ripple/beast/core/LockFreeStack.h @@ -29,18 +29,7 @@ namespace beast { //------------------------------------------------------------------------------ template -class LockFreeStackIterator : public std::iterator< - std::forward_iterator_tag, - typename Container::value_type, - typename Container::difference_type, - typename std::conditional< - IsConst, - typename Container::const_pointer, - typename Container::pointer>::type, - typename std::conditional< - IsConst, - typename Container::const_reference, - typename Container::reference>::type> +class LockFreeStackIterator { protected: using Node = typename Container::Node; @@ -48,7 +37,9 @@ class LockFreeStackIterator : public std::iterator< typename std::conditional::type; public: + using iterator_category = std::forward_iterator_tag; using value_type = typename Container::value_type; + using difference_type = typename Container::difference_type; using pointer = typename std::conditional< IsConst, typename Container::const_pointer, From e40e38e8d3ce447668215fd8dfb37a1a2b5504e9 Mon Sep 17 00:00:00 2001 From: Scott Schurr Date: Tue, 13 Sep 2022 10:16:31 -0700 Subject: [PATCH 046/508] Introduce 
fixRemoveNFTokenAutoTrustLine amendment: It turns out that the feature enabled by the tfTrustLine flag on an NFTokenMint transaction could be used as a means to attack the NFToken issuer. Details are in https://github.com/XRPLF/rippled/issues/4300 The fixRemoveNFTokenAutoTrustLine amendment removes the ability to set the tfTrustLine flag on an NFTokenMint transaction. Closes 4300. --- src/ripple/app/tx/impl/NFTokenMint.cpp | 18 +- src/ripple/protocol/Feature.h | 3 +- src/ripple/protocol/TxFlags.h | 18 +- src/ripple/protocol/impl/Feature.cpp | 1 + src/test/app/NFToken_test.cpp | 287 +++++++++++++------------ 5 files changed, 192 insertions(+), 135 deletions(-) diff --git a/src/ripple/app/tx/impl/NFTokenMint.cpp b/src/ripple/app/tx/impl/NFTokenMint.cpp index b4e391c3ee8..f4d3eb85676 100644 --- a/src/ripple/app/tx/impl/NFTokenMint.cpp +++ b/src/ripple/app/tx/impl/NFTokenMint.cpp @@ -40,7 +40,23 @@ NFTokenMint::preflight(PreflightContext const& ctx) if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) return ret; - if (ctx.tx.getFlags() & tfNFTokenMintMask) + // Prior to fixRemoveNFTokenAutoTrustLine, transfer of an NFToken between + // accounts allowed a TrustLine to be added to the issuer of that token + // without explicit permission from that issuer. This was enabled by + // minting the NFToken with the tfTrustLine flag set. + // + // That capability could be used to attack the NFToken issuer. It + // would be possible for two accounts to trade the NFToken back and forth + // building up any number of TrustLines on the issuer, increasing the + // issuer's reserve without bound. + // + // The fixRemoveNFTokenAutoTrustLine amendment disables minting with the + // tfTrustLine flag as a way to prevent the attack. But until the + // amendment passes we still need to keep the old behavior available. + std::uint32_t const NFTokenMintMask = + ctx.rules.enabled(fixRemoveNFTokenAutoTrustLine) ? tfNFTokenMintMask + : tfNFTokenMintOldMask; + if (ctx.tx.getFlags() & NFTokenMintMask) return temINVALID_FLAG; if (auto const f = ctx.tx[~sfTransferFee]) diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index f0d0c8efbb5..ffee01e057e 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -74,7 +74,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. A LogicError on startup will verify this. -static constexpr std::size_t numFeatures = 50; +static constexpr std::size_t numFeatures = 51; /** Amendments that this server supports and the default voting behavior. 
Whether they are enabled depends on the Rules defined in the validated @@ -338,6 +338,7 @@ extern uint256 const featureExpandedSignerList; extern uint256 const fixNFTokenDirV1; extern uint256 const fixNFTokenNegOffer; extern uint256 const featureNonFungibleTokensV1_1; +extern uint256 const fixRemoveNFTokenAutoTrustLine; } // namespace ripple diff --git a/src/ripple/protocol/TxFlags.h b/src/ripple/protocol/TxFlags.h index 0b907c72235..0ad088c41d6 100644 --- a/src/ripple/protocol/TxFlags.h +++ b/src/ripple/protocol/TxFlags.h @@ -120,9 +120,25 @@ constexpr std::uint32_t const tfOnlyXRP = 0x00000002; constexpr std::uint32_t const tfTrustLine = 0x00000004; constexpr std::uint32_t const tfTransferable = 0x00000008; -constexpr std::uint32_t const tfNFTokenMintMask = +// Prior to fixRemoveNFTokenAutoTrustLine, transfer of an NFToken between +// accounts allowed a TrustLine to be added to the issuer of that token +// without explicit permission from that issuer. This was enabled by +// minting the NFToken with the tfTrustLine flag set. +// +// That capability could be used to attack the NFToken issuer. It +// would be possible for two accounts to trade the NFToken back and forth +// building up any number of TrustLines on the issuer, increasing the +// issuer's reserve without bound. +// +// The fixRemoveNFTokenAutoTrustLine amendment disables minting with the +// tfTrustLine flag as a way to prevent the attack. But until the +// amendment passes we still need to keep the old behavior available. +constexpr std::uint32_t const tfNFTokenMintOldMask = ~(tfUniversal | tfBurnable | tfOnlyXRP | tfTrustLine | tfTransferable); +constexpr std::uint32_t const tfNFTokenMintMask = + ~(tfUniversal | tfBurnable | tfOnlyXRP | tfTransferable); + // NFTokenCreateOffer flags: constexpr std::uint32_t const tfSellNFToken = 0x00000001; constexpr std::uint32_t const tfNFTokenCreateOfferMask = diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index fcd774ce9fd..f8f2fa5c716 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -447,6 +447,7 @@ REGISTER_FEATURE(ExpandedSignerList, Supported::yes, DefaultVote::no) REGISTER_FIX (fixNFTokenDirV1, Supported::yes, DefaultVote::no); REGISTER_FIX (fixNFTokenNegOffer, Supported::yes, DefaultVote::no); REGISTER_FEATURE(NonFungibleTokensV1_1, Supported::yes, DefaultVote::no); +REGISTER_FIX (fixRemoveNFTokenAutoTrustLine, Supported::yes, DefaultVote::yes); // The following amendments have been active for at least two years. Their // pre-amendment code has been removed and the identifiers are deprecated. diff --git a/src/test/app/NFToken_test.cpp b/src/test/app/NFToken_test.cpp index 2db31afe0db..ac86bf74988 100644 --- a/src/test/app/NFToken_test.cpp +++ b/src/test/app/NFToken_test.cpp @@ -1574,7 +1574,6 @@ class NFToken_test : public beast::unit_test::suite using namespace test::jtx; - Env env{*this, features}; Account const alice{"alice"}; Account const becky{"becky"}; Account const cheri{"cheri"}; @@ -1583,155 +1582,179 @@ class NFToken_test : public beast::unit_test::suite IOU const gwCAD(gw["CAD"]); IOU const gwEUR(gw["EUR"]); - env.fund(XRP(1000), alice, becky, cheri, gw); - env.close(); - - // Set trust lines so becky and cheri can use gw's currency. 
- env(trust(becky, gwAUD(1000))); - env(trust(cheri, gwAUD(1000))); - env(trust(becky, gwCAD(1000))); - env(trust(cheri, gwCAD(1000))); - env(trust(becky, gwEUR(1000))); - env(trust(cheri, gwEUR(1000))); - env.close(); - env(pay(gw, becky, gwAUD(500))); - env(pay(gw, becky, gwCAD(500))); - env(pay(gw, becky, gwEUR(500))); - env(pay(gw, cheri, gwAUD(500))); - env(pay(gw, cheri, gwCAD(500))); - env.close(); - - // An nft without flagCreateTrustLines but with a non-zero transfer - // fee will not allow creating offers that use IOUs for payment. - for (std::uint32_t xferFee : {0, 1}) + // The behavior of this test changes dramatically based on the + // presence (or absence) of the fixRemoveNFTokenAutoTrustLine + // amendment. So we test both cases here. + for (auto const& tweakedFeatures : + {features - fixRemoveNFTokenAutoTrustLine, + features | fixRemoveNFTokenAutoTrustLine}) { - uint256 const nftNoAutoTrustID{ - token::getNextID(env, alice, 0u, tfTransferable, xferFee)}; - env(token::mint(alice, 0u), - token::xferFee(xferFee), - txflags(tfTransferable)); + Env env{*this, tweakedFeatures}; + env.fund(XRP(1000), alice, becky, cheri, gw); env.close(); - // becky buys the nft for 1 drop. - uint256 const beckyBuyOfferIndex = - keylet::nftoffer(becky, env.seq(becky)).key; - env(token::createOffer(becky, nftNoAutoTrustID, drops(1)), - token::owner(alice)); + // Set trust lines so becky and cheri can use gw's currency. + env(trust(becky, gwAUD(1000))); + env(trust(cheri, gwAUD(1000))); + env(trust(becky, gwCAD(1000))); + env(trust(cheri, gwCAD(1000))); + env(trust(becky, gwEUR(1000))); + env(trust(cheri, gwEUR(1000))); env.close(); - env(token::acceptBuyOffer(alice, beckyBuyOfferIndex)); + env(pay(gw, becky, gwAUD(500))); + env(pay(gw, becky, gwCAD(500))); + env(pay(gw, becky, gwEUR(500))); + env(pay(gw, cheri, gwAUD(500))); + env(pay(gw, cheri, gwCAD(500))); env.close(); - // becky attempts to sell the nft for AUD. - TER const createOfferTER = - xferFee ? TER(tecNO_LINE) : TER(tesSUCCESS); - uint256 const beckyOfferIndex = - keylet::nftoffer(becky, env.seq(becky)).key; - env(token::createOffer(becky, nftNoAutoTrustID, gwAUD(100)), - txflags(tfSellNFToken), - ter(createOfferTER)); - env.close(); + // An nft without flagCreateTrustLines but with a non-zero transfer + // fee will not allow creating offers that use IOUs for payment. + for (std::uint32_t xferFee : {0, 1}) + { + uint256 const nftNoAutoTrustID{ + token::getNextID(env, alice, 0u, tfTransferable, xferFee)}; + env(token::mint(alice, 0u), + token::xferFee(xferFee), + txflags(tfTransferable)); + env.close(); - // cheri offers to buy the nft for CAD. - uint256 const cheriOfferIndex = - keylet::nftoffer(cheri, env.seq(cheri)).key; - env(token::createOffer(cheri, nftNoAutoTrustID, gwCAD(100)), - token::owner(becky), - ter(createOfferTER)); - env.close(); + // becky buys the nft for 1 drop. + uint256 const beckyBuyOfferIndex = + keylet::nftoffer(becky, env.seq(becky)).key; + env(token::createOffer(becky, nftNoAutoTrustID, drops(1)), + token::owner(alice)); + env.close(); + env(token::acceptBuyOffer(alice, beckyBuyOfferIndex)); + env.close(); - // To keep things tidy, cancel the offers. - env(token::cancelOffer(becky, {beckyOfferIndex})); - env(token::cancelOffer(cheri, {cheriOfferIndex})); - env.close(); - } - // An nft with flagCreateTrustLines but with a non-zero transfer - // fee allows transfers using IOUs for payment. - { - std::uint16_t transferFee = 10000; // 10% + // becky attempts to sell the nft for AUD. 
+ TER const createOfferTER = + xferFee ? TER(tecNO_LINE) : TER(tesSUCCESS); + uint256 const beckyOfferIndex = + keylet::nftoffer(becky, env.seq(becky)).key; + env(token::createOffer(becky, nftNoAutoTrustID, gwAUD(100)), + txflags(tfSellNFToken), + ter(createOfferTER)); + env.close(); - uint256 const nftAutoTrustID{token::getNextID( - env, alice, 0u, tfTransferable | tfTrustLine, transferFee)}; - env(token::mint(alice, 0u), - token::xferFee(transferFee), - txflags(tfTransferable | tfTrustLine)); - env.close(); + // cheri offers to buy the nft for CAD. + uint256 const cheriOfferIndex = + keylet::nftoffer(cheri, env.seq(cheri)).key; + env(token::createOffer(cheri, nftNoAutoTrustID, gwCAD(100)), + token::owner(becky), + ter(createOfferTER)); + env.close(); - // becky buys the nft for 1 drop. - uint256 const beckyBuyOfferIndex = - keylet::nftoffer(becky, env.seq(becky)).key; - env(token::createOffer(becky, nftAutoTrustID, drops(1)), - token::owner(alice)); - env.close(); - env(token::acceptBuyOffer(alice, beckyBuyOfferIndex)); - env.close(); + // To keep things tidy, cancel the offers. + env(token::cancelOffer(becky, {beckyOfferIndex})); + env(token::cancelOffer(cheri, {cheriOfferIndex})); + env.close(); + } + // An nft with flagCreateTrustLines but with a non-zero transfer + // fee allows transfers using IOUs for payment. + { + std::uint16_t transferFee = 10000; // 10% - // becky sells the nft for AUD. - uint256 const beckySellOfferIndex = - keylet::nftoffer(becky, env.seq(becky)).key; - env(token::createOffer(becky, nftAutoTrustID, gwAUD(100)), - txflags(tfSellNFToken)); - env.close(); - env(token::acceptSellOffer(cheri, beckySellOfferIndex)); - env.close(); + uint256 const nftAutoTrustID{token::getNextID( + env, alice, 0u, tfTransferable | tfTrustLine, transferFee)}; - // alice should now have a trust line for gwAUD. - BEAST_EXPECT(env.balance(alice, gwAUD) == gwAUD(10)); + // If the fixRemoveNFTokenAutoTrustLine amendment is active + // then this transaction fails. + { + TER const mintTER = + tweakedFeatures[fixRemoveNFTokenAutoTrustLine] + ? static_cast(temINVALID_FLAG) + : static_cast(tesSUCCESS); + + env(token::mint(alice, 0u), + token::xferFee(transferFee), + txflags(tfTransferable | tfTrustLine), + ter(mintTER)); + env.close(); + + // If fixRemoveNFTokenAutoTrustLine is active the rest + // of this test falls on its face. + if (tweakedFeatures[fixRemoveNFTokenAutoTrustLine]) + break; + } + // becky buys the nft for 1 drop. + uint256 const beckyBuyOfferIndex = + keylet::nftoffer(becky, env.seq(becky)).key; + env(token::createOffer(becky, nftAutoTrustID, drops(1)), + token::owner(alice)); + env.close(); + env(token::acceptBuyOffer(alice, beckyBuyOfferIndex)); + env.close(); - // becky buys the nft back for CAD. - uint256 const beckyBuyBackOfferIndex = - keylet::nftoffer(becky, env.seq(becky)).key; - env(token::createOffer(becky, nftAutoTrustID, gwCAD(50)), - token::owner(cheri)); - env.close(); - env(token::acceptBuyOffer(cheri, beckyBuyBackOfferIndex)); - env.close(); + // becky sells the nft for AUD. + uint256 const beckySellOfferIndex = + keylet::nftoffer(becky, env.seq(becky)).key; + env(token::createOffer(becky, nftAutoTrustID, gwAUD(100)), + txflags(tfSellNFToken)); + env.close(); + env(token::acceptSellOffer(cheri, beckySellOfferIndex)); + env.close(); - // alice should now have a trust line for gwAUD and gwCAD. 
- BEAST_EXPECT(env.balance(alice, gwAUD) == gwAUD(10)); - BEAST_EXPECT(env.balance(alice, gwCAD) == gwCAD(5)); - } - // Now that alice has trust lines already established, an nft without - // flagCreateTrustLines will work for preestablished trust lines. - { - std::uint16_t transferFee = 5000; // 5% - uint256 const nftNoAutoTrustID{ - token::getNextID(env, alice, 0u, tfTransferable, transferFee)}; - env(token::mint(alice, 0u), - token::xferFee(transferFee), - txflags(tfTransferable)); - env.close(); + // alice should now have a trust line for gwAUD. + BEAST_EXPECT(env.balance(alice, gwAUD) == gwAUD(10)); - // alice sells the nft using AUD. - uint256 const aliceSellOfferIndex = - keylet::nftoffer(alice, env.seq(alice)).key; - env(token::createOffer(alice, nftNoAutoTrustID, gwAUD(200)), - txflags(tfSellNFToken)); - env.close(); - env(token::acceptSellOffer(cheri, aliceSellOfferIndex)); - env.close(); + // becky buys the nft back for CAD. + uint256 const beckyBuyBackOfferIndex = + keylet::nftoffer(becky, env.seq(becky)).key; + env(token::createOffer(becky, nftAutoTrustID, gwCAD(50)), + token::owner(cheri)); + env.close(); + env(token::acceptBuyOffer(cheri, beckyBuyBackOfferIndex)); + env.close(); - // alice should now have AUD(210): - // o 200 for this sale and - // o 10 for the previous sale's fee. - BEAST_EXPECT(env.balance(alice, gwAUD) == gwAUD(210)); + // alice should now have a trust line for gwAUD and gwCAD. + BEAST_EXPECT(env.balance(alice, gwAUD) == gwAUD(10)); + BEAST_EXPECT(env.balance(alice, gwCAD) == gwCAD(5)); + } + // Now that alice has trust lines preestablished, an nft without + // flagCreateTrustLines will work for preestablished trust lines. + { + std::uint16_t transferFee = 5000; // 5% + uint256 const nftNoAutoTrustID{token::getNextID( + env, alice, 0u, tfTransferable, transferFee)}; + env(token::mint(alice, 0u), + token::xferFee(transferFee), + txflags(tfTransferable)); + env.close(); - // cheri can't sell the NFT for EUR, but can for CAD. - env(token::createOffer(cheri, nftNoAutoTrustID, gwEUR(50)), - txflags(tfSellNFToken), - ter(tecNO_LINE)); - env.close(); - uint256 const cheriSellOfferIndex = - keylet::nftoffer(cheri, env.seq(cheri)).key; - env(token::createOffer(cheri, nftNoAutoTrustID, gwCAD(100)), - txflags(tfSellNFToken)); - env.close(); - env(token::acceptSellOffer(becky, cheriSellOfferIndex)); - env.close(); + // alice sells the nft using AUD. + uint256 const aliceSellOfferIndex = + keylet::nftoffer(alice, env.seq(alice)).key; + env(token::createOffer(alice, nftNoAutoTrustID, gwAUD(200)), + txflags(tfSellNFToken)); + env.close(); + env(token::acceptSellOffer(cheri, aliceSellOfferIndex)); + env.close(); + + // alice should now have AUD(210): + // o 200 for this sale and + // o 10 for the previous sale's fee. + BEAST_EXPECT(env.balance(alice, gwAUD) == gwAUD(210)); + + // cheri can't sell the NFT for EUR, but can for CAD. + env(token::createOffer(cheri, nftNoAutoTrustID, gwEUR(50)), + txflags(tfSellNFToken), + ter(tecNO_LINE)); + env.close(); + uint256 const cheriSellOfferIndex = + keylet::nftoffer(cheri, env.seq(cheri)).key; + env(token::createOffer(cheri, nftNoAutoTrustID, gwCAD(100)), + txflags(tfSellNFToken)); + env.close(); + env(token::acceptSellOffer(becky, cheriSellOfferIndex)); + env.close(); - // alice should now have CAD(10): - // o 5 from this sale's fee and - // o 5 for the previous sale's fee. 
- BEAST_EXPECT(env.balance(alice, gwCAD) == gwCAD(10)); + // alice should now have CAD(10): + // o 5 from this sale's fee and + // o 5 for the previous sale's fee. + BEAST_EXPECT(env.balance(alice, gwCAD) == gwCAD(10)); + } } } From 9a31f321cd5be0f1c6ebcb83c8f681be96672a2e Mon Sep 17 00:00:00 2001 From: seelabs Date: Fri, 9 Sep 2022 14:34:48 -0400 Subject: [PATCH 047/508] Allow gcc 12 compilation: Compiling with gcc 12 on manjaro (arch variant) had compilation errors without adding an additional include file. --- src/ripple/basics/impl/Archive.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/ripple/basics/impl/Archive.cpp b/src/ripple/basics/impl/Archive.cpp index 47e3d1ab3e7..73e14a93606 100644 --- a/src/ripple/basics/impl/Archive.cpp +++ b/src/ripple/basics/impl/Archive.cpp @@ -23,6 +23,8 @@ #include #include +#include + namespace ripple { void From df66e4151e2a8402eeb3c4d2c035e3f7a608f045 Mon Sep 17 00:00:00 2001 From: Ikko Ashimine Date: Fri, 26 Aug 2022 02:11:39 +0900 Subject: [PATCH 048/508] Fix typo in detail/Node.h minumum -> minimum --- src/ripple/app/rdb/backend/detail/Node.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/app/rdb/backend/detail/Node.h b/src/ripple/app/rdb/backend/detail/Node.h index fa7e39c8329..23f5bce206c 100644 --- a/src/ripple/app/rdb/backend/detail/Node.h +++ b/src/ripple/app/rdb/backend/detail/Node.h @@ -110,7 +110,7 @@ std::size_t getRows(soci::session& session, TableType type); /** - * @brief getRowsMinMax Returns minumum ledger sequence, + * @brief getRowsMinMax Returns minimum ledger sequence, * maximum ledger sequence and total number of rows in given table. * @param session Session with database. * @param type Table ID for which the result is returned. From be1ce5eca970cd2d8d076deef8fe8a85ab378043 Mon Sep 17 00:00:00 2001 From: Michael Legleux Date: Fri, 19 Aug 2022 15:26:46 -0700 Subject: [PATCH 049/508] Pin postgres, zlib, krb5, libuv and cassandra to stable versions --- Builds/CMake/deps/Postgres.cmake | 2 +- Builds/CMake/deps/cassandra.cmake | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Builds/CMake/deps/Postgres.cmake b/Builds/CMake/deps/Postgres.cmake index 7ad7f169a4f..bb94832a48b 100644 --- a/Builds/CMake/deps/Postgres.cmake +++ b/Builds/CMake/deps/Postgres.cmake @@ -13,7 +13,7 @@ if(reporting) ExternalProject_Add(postgres_src PREFIX ${nih_cache_path} GIT_REPOSITORY https://github.com/postgres/postgres.git - GIT_TAG master + GIT_TAG REL_14_5 CONFIGURE_COMMAND ./configure --without-readline > /dev/null BUILD_COMMAND ${CMAKE_COMMAND} -E env --unset=MAKELEVEL make UPDATE_COMMAND "" diff --git a/Builds/CMake/deps/cassandra.cmake b/Builds/CMake/deps/cassandra.cmake index 8f1e799dc18..4563a34137e 100644 --- a/Builds/CMake/deps/cassandra.cmake +++ b/Builds/CMake/deps/cassandra.cmake @@ -11,7 +11,7 @@ if(reporting) ExternalProject_Add(zlib_src PREFIX ${nih_cache_path} GIT_REPOSITORY https://github.com/madler/zlib.git - GIT_TAG master + GIT_TAG v1.2.12 INSTALL_COMMAND "" BUILD_BYPRODUCTS /${ep_lib_prefix}z.a LOG_BUILD TRUE @@ -45,7 +45,7 @@ if(reporting) ExternalProject_Add(krb5_src PREFIX ${nih_cache_path} GIT_REPOSITORY https://github.com/krb5/krb5.git - GIT_TAG master + GIT_TAG krb5-1.20-final UPDATE_COMMAND "" CONFIGURE_COMMAND autoreconf src && CFLAGS=-fcommon ./src/configure --enable-static --disable-shared > /dev/null BUILD_IN_SOURCE 1 @@ -80,7 +80,7 @@ if(reporting) ExternalProject_Add(libuv_src PREFIX ${nih_cache_path} GIT_REPOSITORY https://github.com/libuv/libuv.git - 
GIT_TAG v1.x + GIT_TAG v1.44.2 INSTALL_COMMAND "" BUILD_BYPRODUCTS /${ep_lib_prefix}uv_a.a LOG_BUILD TRUE @@ -106,7 +106,7 @@ if(reporting) ExternalProject_Add(cassandra_src PREFIX ${nih_cache_path} GIT_REPOSITORY https://github.com/datastax/cpp-driver.git - GIT_TAG master + GIT_TAG 2.16.2 CMAKE_ARGS -DLIBUV_ROOT_DIR=${BINARY_DIR} -DLIBUV_LIBARY=${BINARY_DIR}/libuv_a.a From ba3c0e51455a88d76d90b996f20c0f102ac3f5a0 Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Wed, 14 Sep 2022 15:52:05 -0700 Subject: [PATCH 050/508] Set version to 1.9.4 --- RELEASENOTES.md | 46 +++++++++++++++++++++++++- src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 2 files changed, 46 insertions(+), 2 deletions(-) diff --git a/RELEASENOTES.md b/RELEASENOTES.md index 5f5810fc615..4403110e052 100644 --- a/RELEASENOTES.md +++ b/RELEASENOTES.md @@ -7,6 +7,50 @@ This document contains the release notes for `rippled`, the reference server imp Have new ideas? Need help with setting up your node? Come visit us [here](https://github.com/xrplf/rippled/issues/new/choose) +# Introducing XRP Ledger version 1.9.4 + +Version 1.9.4 of `rippled`, the reference implementation of the XRP Ledger protocol is now available. This release introduces an amendment that removes the ability for an NFT issuer to indicate that trust lines should be automatically created for royalty payments from secondary sales of NFTs, in response to a bug report that indicated how this functionality could be abused to mount a denial of service attack against the issuer. + +## Action Required + +This release introduces a new amendment to the XRP Ledger protocol, **`fixRemoveNFTokenAutoTrustLine`** to mitigate a potential denial-of-service attack against NFT issuers that minted NFTs and allowed secondary trading of those NFTs to create trust lines for any asset. + +This amendment is open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. + +If you operate an XRP Ledger server, then you should upgrade to version 1.9.4 within two weeks, to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. + +For more information about NFTs on the XRP Ledger, see [NFT Conceptual Overview](https://xrpl.org/nft-conceptual-overview.html). + + +## Install / Upgrade + +On supported platforms, see the [instructions on installing or updating `rippled`](https://xrpl.org/install-rippled.html). + +## Changelog + +## Contributions + +The primary change in this release is the following bug fix: + +- **Introduce fixRemoveNFTokenAutoTrustLine amendment**: Introduces the `fixRemoveNFTokenAutoTrustLine` amendment, which disables the `tfTrustLine` flag, which a malicious attacker could exploit to mount denial-of-service attacks against NFT issuers that specified the flag on their NFTs. ([#4301](https://github.com/XRPLF/rippled/4301)) + + +### GitHub + +The public source code repository for `rippled` is hosted on GitHub at . + +We welcome all contributions and invite everyone to join the community of XRP Ledger developers and help us build the Internet of Value. + +### Credits + +The following people contributed directly to this release: + +- Scott Schurr +- Howard Hinnant +- Scott Determan +- Ikko Ashimine + + # Introducing XRP Ledger version 1.9.3 Version 1.9.3 of `rippled`, the reference server implementation of the XRP Ledger protocol is now available. 
This release corrects minor technical flaws with the code that loads configured amendment votes after a startup and the copy constructor of `PublicKey`. @@ -19,7 +63,7 @@ On supported platforms, see the [instructions on installing or updating `rippled ## Contributions -This releases contains the following bug fixes: +This release contains the following bug fixes: - **Change by-value to by-reference to persist vote**: A minor technical flaw, caused by use of a copy instead of a reference, resulted in operator-configured "yes" votes to not be properly loaded after a restart. ([#4256](https://github.com/XRPLF/rippled/pull/4256)) - **Properly handle self-assignment of PublicKey**: The `PublicKey` copy assignment operator mishandled the case where a `PublicKey` would be assigned to itself, and could result in undefined behavior. diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index 734dc11cb38..9a28a55cb21 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.9.3" +char const* const versionString = "1.9.4" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 04ef8851081f6ee9176783ad3725960b8a931ebb Mon Sep 17 00:00:00 2001 From: seelabs Date: Sat, 17 Sep 2022 22:25:05 -0400 Subject: [PATCH 051/508] Support for boost 1.80.0: Boost intrusive renamed the `comp()` function -> `get_comp()` --- .../CMakeSettings-example.json | 4 +- .../README.md | 46 +++++++++---------- .../container/detail/aged_ordered_container.h | 19 +++++++- 3 files changed, 42 insertions(+), 27 deletions(-) rename Builds/{VisualStudio2017 => VisualStudio2019}/CMakeSettings-example.json (91%) rename Builds/{VisualStudio2017 => VisualStudio2019}/README.md (86%) diff --git a/Builds/VisualStudio2017/CMakeSettings-example.json b/Builds/VisualStudio2019/CMakeSettings-example.json similarity index 91% rename from Builds/VisualStudio2017/CMakeSettings-example.json rename to Builds/VisualStudio2019/CMakeSettings-example.json index b2889ddf504..b90bfce6bb1 100644 --- a/Builds/VisualStudio2017/CMakeSettings-example.json +++ b/Builds/VisualStudio2019/CMakeSettings-example.json @@ -3,7 +3,7 @@ "configurations": [ { "name": "x64-Debug", - "generator": "Visual Studio 15 2017 Win64", + "generator": "Visual Studio 16 2019", "configurationType": "Debug", "inheritEnvironments": [ "msvc_x64_x64" ], "buildRoot": "${thisFileDir}\\build\\${name}", @@ -23,7 +23,7 @@ }, { "name": "x64-Release", - "generator": "Visual Studio 15 2017 Win64", + "generator": "Visual Studio 16 2019", "configurationType": "Release", "inheritEnvironments": [ "msvc_x64_x64" ], "buildRoot": "${thisFileDir}\\build\\${name}", diff --git a/Builds/VisualStudio2017/README.md b/Builds/VisualStudio2019/README.md similarity index 86% rename from Builds/VisualStudio2017/README.md rename to Builds/VisualStudio2019/README.md index ec8fb1c0881..e369eac6775 100644 --- a/Builds/VisualStudio2017/README.md +++ b/Builds/VisualStudio2019/README.md @@ -1,4 +1,4 @@ -# Visual Studio 2017 Build Instructions +# Visual Studio 2019 Build Instructions ## Important @@ -14,26 +14,26 @@ need these software components | Component | Minimum Recommended Version | |-----------|-----------------------| -| [Visual Studio 2017](README.md#install-visual-studio-2017)| 15.5.4 | +| 
[Visual Studio 2019](README.md#install-visual-studio-2019)| 15.5.4 | | [Git for Windows](README.md#install-git-for-windows)| 2.16.1 | | [OpenSSL Library](README.md#install-openssl) | 1.1.1L | | [Boost library](README.md#build-boost) | 1.70.0 | | [CMake for Windows](README.md#optional-install-cmake-for-windows)* | 3.12 | -\* Only needed if not using the integrated CMake in VS 2017 and prefer generating dedicated project/solution files. +\* Only needed if not using the integrated CMake in VS 2019 and prefer generating dedicated project/solution files. ## Install Software -### Install Visual Studio 2017 +### Install Visual Studio 2019 If not already installed on your system, download your choice of installer from -the [Visual Studio 2017 +the [Visual Studio 2019 Download](https://www.visualstudio.com/downloads/download-visual-studio-vs) page, run the installer, and follow the directions. **You may need to choose the `Desktop development with C++` workload to install all necessary C++ features.** -Any version of Visual Studio 2017 may be used to build rippled. The **Visual -Studio 2017 Community** edition is available free of charge (see [the product +Any version of Visual Studio 2019 may be used to build rippled. The **Visual +Studio 2019 Community** edition is available free of charge (see [the product page](https://www.visualstudio.com/products/visual-studio-community-vs) for licensing details), while paid editions may be used for an initial free-trial period. @@ -55,7 +55,7 @@ OpenSSL.](http://slproweb.com/products/Win32OpenSSL.html) There will several `Win64` bit variants available, you want the non-light `v1.1` line. As of this writing, you **should** select -* Win64 OpenSSL v1.1.1L +* Win64 OpenSSL v1.1.1q and should **not** select @@ -82,11 +82,11 @@ to get the correct 32-/64-bit variant. Boost 1.70 or later is required. -After [downloading boost](http://www.boost.org/users/download/) and unpacking it -to `c:\lib`. As of this writing, the most recent version of boost is 1.70.0, -which will unpack into a directory named `boost_1_70_0`. We recommended either +[Download boost](http://www.boost.org/users/download/) and unpack it +to `c:\lib`. As of this writing, the most recent version of boost is 1.80.0, +which will unpack into a directory named `boost_1_80_0`. We recommended either renaming this directory to `boost`, or creating a junction link `mklink /J boost -boost_1_70_0`, so that you can more easily switch between versions. +boost_1_80_0`, so that you can more easily switch between versions. Next, open **Developer Command Prompt** and type the following commands @@ -101,11 +101,11 @@ is not affected by changes in outside files. Therefore, it is necessary to build the required boost static libraries using this command: ```powershell -bjam -j --toolset=msvc-14.1 address-model=64 architecture=x86 link=static threading=multi runtime-link=shared,static stage +b2 -j --toolset=msvc-14.2 address-model=64 architecture=x86 link=static threading=multi runtime-link=shared,static stage ``` where you should replace `` with the number of parallel -invocations to use build, e.g. `bjam -j4 ...` would use up to 4 concurrent build +invocations to use build, e.g. `bjam -j8 ...` would use up to 8 concurrent build shell commands for the build. Building the boost libraries may take considerable time. When the build process @@ -115,7 +115,7 @@ library paths as they will be required later. ### (Optional) Install CMake for Windows [CMake](http://cmake.org) is a cross platform build system generator. 
Visual -Studio 2017 includes an integrated version of CMake that avoids having to +Studio 2019 includes an integrated version of CMake that avoids having to manually run CMake, but it is undergoing continuous improvement. Users that prefer to use standard Visual Studio project and solution files need to install a dedicated version of CMake to generate them. The latest version can be found @@ -141,7 +141,7 @@ repository and optionally switch to the *master* branch. Type the following at the bash prompt: ```powershell -git clone git@github.com:ripple/rippled.git +git clone git@github.com:XRPLF/rippled.git cd rippled ``` If you receive an error about not having the "correct access rights" make sure @@ -160,7 +160,7 @@ To test the latest release candidate, choose the `release` branch. git checkout release ``` -If you are doing development work and want the latest set of untested features, +If you are doing development work and want the latest set of beta features, you can consider using the `develop` branch instead. ``` @@ -177,14 +177,14 @@ To begin, simply: cloned rippled folder. 2. Right-click on `CMakeLists.txt` in the **Solution Explorer - Folder View** to generate a `CMakeSettings.json` file. A sample settings file is provided - [here](/Builds/VisualStudio2017/CMakeSettings-example.json). Customize the + [here](/Builds/VisualStudio2019/CMakeSettings-example.json). Customize the settings for `BOOST_ROOT`, `OPENSSL_ROOT` to match the install paths if they differ from those in the file. 4. Select either the `x64-Release` or `x64-Debug` configuration from the - **Project Setings** drop-down. This should invoke the built-in CMake project + **Project Settings** drop-down. This should invoke the built-in CMake project generator. If not, you can right-click on the `CMakeLists.txt` file and - choose **Cache | Generate Cache**. -5. Select either the `rippled.exe` (unity) or `rippled_classic.exe` (non-unity) + choose **Configure rippled**. +5. Select the `rippled.exe` option in the **Select Startup Item** drop-down. This will be the target built when you press F7. Alternatively, you can choose a target to build from the top-level **CMake | Build** menu. Note that at this time, there are other @@ -216,9 +216,9 @@ execute the following commands within your `rippled` cloned repository: ``` mkdir build\cmake cd build\cmake -cmake ..\.. -G"Visual Studio 15 2017 Win64" -DBOOST_ROOT="C:\lib\boost_1_70_0" -DOPENSSL_ROOT="C:\lib\OpenSSL-Win64" -DCMAKE_GENERATOR_TOOLSET=host=x64 +cmake ..\.. -G"Visual Studio 16 2019" -Ax64 -DBOOST_ROOT="C:\lib\boost" -DOPENSSL_ROOT="C:\lib\OpenSSL-Win64" -DCMAKE_GENERATOR_TOOLSET=host=x64 ``` -Now launch Visual Studio 2017 and select **File | Open | Project/Solution**. +Now launch Visual Studio 2019 and select **File | Open | Project/Solution**. Navigate to the `build\cmake` folder created above and select the `rippled.sln` file. You can then choose whether to build the `Debug` or `Release` solution configuration. 
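The next hunk in this patch applies the actual Boost 1.80 accommodation: Boost.Intrusive renamed `comp()` to `get_comp()`, so each affected constructor call site in `aged_ordered_container.h` is wrapped in a `BOOST_VERSION` guard. As a side note (not part of the patch), the same guard can be centralized in a small helper so call sites stay identical across Boost releases. This is only a sketch; it assumes, as the hunk below indicates for the intrusive containers used here, that the container exposes `comp()` before Boost 1.80 and `get_comp()` afterwards.

```cpp
// Minimal sketch, not from the patch: fold the Boost 1.80 comp() -> get_comp()
// rename into a single helper. The member names come from the hunk below; any
// other container type is assumed to follow the same rename.
#include <boost/version.hpp>

template <class IntrusiveContainer>
auto
extractComparator(IntrusiveContainer const& cont)
{
#if BOOST_VERSION >= 108000
    return cont.get_comp();  // Boost >= 1.80 spells the accessor get_comp()
#else
    return cont.comp();      // earlier Boost releases spell it comp()
#endif
}

// A guarded constructor initializer could then read, for example:
//   : m_config(other.m_config), m_cont(extractComparator(other.m_cont))
```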
diff --git a/src/ripple/beast/container/detail/aged_ordered_container.h b/src/ripple/beast/container/detail/aged_ordered_container.h index 9da5f20a0a4..23534a26bb3 100644 --- a/src/ripple/beast/container/detail/aged_ordered_container.h +++ b/src/ripple/beast/container/detail/aged_ordered_container.h @@ -1432,7 +1432,12 @@ template < class Allocator> aged_ordered_container:: aged_ordered_container(aged_ordered_container const& other) - : m_config(other.m_config), m_cont(other.m_cont.comp()) + : m_config(other.m_config) +#if BOOST_VERSION >= 108000 + , m_cont(other.m_cont.get_comp()) +#else + , m_cont(other.m_cont.comp()) +#endif { insert(other.cbegin(), other.cend()); } @@ -1449,7 +1454,12 @@ aged_ordered_container:: aged_ordered_container( aged_ordered_container const& other, Allocator const& alloc) - : m_config(other.m_config, alloc), m_cont(other.m_cont.comp()) + : m_config(other.m_config, alloc) +#if BOOST_VERSION >= 108000 + , m_cont(other.m_cont.get_comp()) +#else + , m_cont(other.m_cont.comp()) +#endif { insert(other.cbegin(), other.cend()); } @@ -1482,7 +1492,12 @@ aged_ordered_container:: aged_ordered_container&& other, Allocator const& alloc) : m_config(std::move(other.m_config), alloc) +#if BOOST_VERSION >= 108000 + , m_cont(std::move(other.m_cont.get_comp())) +#else , m_cont(std::move(other.m_cont.comp())) +#endif + { insert(other.cbegin(), other.cend()); other.clear(); From 23ce4318768b718c82e01004d23f1abc9a9549ff Mon Sep 17 00:00:00 2001 From: Alexander Kremer Date: Fri, 9 Sep 2022 20:49:42 +0100 Subject: [PATCH 052/508] Remove const_cast usage --- src/ripple/rpc/BookChanges.h | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/ripple/rpc/BookChanges.h b/src/ripple/rpc/BookChanges.h index 5aef1c693dd..7d7978d3fe2 100644 --- a/src/ripple/rpc/BookChanges.h +++ b/src/ripple/rpc/BookChanges.h @@ -88,13 +88,10 @@ computeBookChanges(std::shared_ptr const& lpAccepted) !node.isFieldPresent(sfPreviousFields)) continue; - STObject& finalFields = (const_cast(node)) - .getField(sfFinalFields) - .downcast(); - - STObject& previousFields = (const_cast(node)) - .getField(sfPreviousFields) - .downcast(); + auto const& ffBase = node.peekAtField(sfFinalFields); + auto const& finalFields = ffBase.template downcast(); + auto const& pfBase = node.peekAtField(sfPreviousFields); + auto const& previousFields = pfBase.template downcast(); // defensive case that should never be hit if (!finalFields.isFieldPresent(sfTakerGets) || From 95fabd5762a4917753c06268192e4d4e4baef8e4 Mon Sep 17 00:00:00 2001 From: Chenna Keshava B S Date: Wed, 17 Aug 2022 15:45:11 -0700 Subject: [PATCH 053/508] Remove inacessible code paths and outdated data format wchar_t --- src/ripple/overlay/impl/PeerImp.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/ripple/overlay/impl/PeerImp.cpp b/src/ripple/overlay/impl/PeerImp.cpp index 5962ab0dfcd..8e014a10f73 100644 --- a/src/ripple/overlay/impl/PeerImp.cpp +++ b/src/ripple/overlay/impl/PeerImp.cpp @@ -642,11 +642,6 @@ PeerImp::gracefulClose() assert(socket_.is_open()); assert(!gracefulClose_); gracefulClose_ = true; -#if 0 - // Flush messages - while(send_queue_.size() > 1) - send_queue_.pop_back(); -#endif if (send_queue_.size() > 0) return; setTimer(); From 28f4cc7817c2e477f0d7e9ade8f07a45ff2b81f1 Mon Sep 17 00:00:00 2001 From: CJ Cobb Date: Wed, 10 Aug 2022 15:44:59 -0400 Subject: [PATCH 054/508] Remove gRPC code previously used for the xpring SDK --- Builds/CMake/RippledCore.cmake | 5 - Builds/levelization/results/ordering.txt | 1 - 
cfg/rippled-example.cfg | 8 +- src/ripple/app/main/GRPCServer.cpp | 88 - src/ripple/app/reporting/P2pProxy.h | 2 - .../proto/org/xrpl/rpc/v1/account.proto | 14 - src/ripple/proto/org/xrpl/rpc/v1/amount.proto | 48 - src/ripple/proto/org/xrpl/rpc/v1/common.proto | 606 ----- .../org/xrpl/rpc/v1/get_account_info.proto | 93 - .../v1/get_account_transaction_history.proto | 75 - .../proto/org/xrpl/rpc/v1/get_fee.proto | 58 - .../org/xrpl/rpc/v1/get_transaction.proto | 62 - src/ripple/proto/org/xrpl/rpc/v1/ledger.proto | 10 - .../org/xrpl/rpc/v1/ledger_objects.proto | 418 ---- src/ripple/proto/org/xrpl/rpc/v1/meta.proto | 116 - src/ripple/proto/org/xrpl/rpc/v1/submit.proto | 37 - .../proto/org/xrpl/rpc/v1/transaction.proto | 390 --- .../proto/org/xrpl/rpc/v1/xrp_ledger.proto | 29 +- src/ripple/rpc/DeliveredAmount.h | 1 - src/ripple/rpc/GRPCHandlers.h | 22 - src/ripple/rpc/handlers/AccountInfo.cpp | 92 - src/ripple/rpc/handlers/AccountTx.cpp | 244 -- src/ripple/rpc/handlers/Fee1.cpp | 45 - src/ripple/rpc/handlers/LedgerData.cpp | 1 - src/ripple/rpc/handlers/LedgerEntry.cpp | 1 - src/ripple/rpc/handlers/LedgerHandler.cpp | 1 - src/ripple/rpc/handlers/Submit.cpp | 99 - src/ripple/rpc/handlers/Tx.cpp | 140 -- src/ripple/rpc/impl/GRPCHelpers.cpp | 2192 ----------------- src/ripple/rpc/impl/GRPCHelpers.h | 90 - src/ripple/rpc/impl/RPCHelpers.cpp | 7 - src/test/app/AccountTxPaging_test.cpp | 1895 -------------- src/test/consensus/NegativeUNL_test.cpp | 110 - src/test/protocol/KnownFormatToGRPC_test.cpp | 975 -------- src/test/rpc/AccountInfo_test.cpp | 224 -- src/test/rpc/Fee_test.cpp | 138 -- src/test/rpc/ReportingETL_test.cpp | 31 - src/test/rpc/Submit_test.cpp | 276 --- src/test/rpc/Tx_test.cpp | 829 ------- 39 files changed, 8 insertions(+), 9465 deletions(-) delete mode 100644 src/ripple/proto/org/xrpl/rpc/v1/account.proto delete mode 100644 src/ripple/proto/org/xrpl/rpc/v1/amount.proto delete mode 100644 src/ripple/proto/org/xrpl/rpc/v1/common.proto delete mode 100644 src/ripple/proto/org/xrpl/rpc/v1/get_account_info.proto delete mode 100644 src/ripple/proto/org/xrpl/rpc/v1/get_account_transaction_history.proto delete mode 100644 src/ripple/proto/org/xrpl/rpc/v1/get_fee.proto delete mode 100644 src/ripple/proto/org/xrpl/rpc/v1/get_transaction.proto delete mode 100644 src/ripple/proto/org/xrpl/rpc/v1/ledger_objects.proto delete mode 100644 src/ripple/proto/org/xrpl/rpc/v1/meta.proto delete mode 100644 src/ripple/proto/org/xrpl/rpc/v1/submit.proto delete mode 100644 src/ripple/proto/org/xrpl/rpc/v1/transaction.proto delete mode 100644 src/ripple/rpc/impl/GRPCHelpers.cpp delete mode 100644 src/ripple/rpc/impl/GRPCHelpers.h delete mode 100644 src/test/protocol/KnownFormatToGRPC_test.cpp delete mode 100644 src/test/rpc/Fee_test.cpp delete mode 100644 src/test/rpc/Submit_test.cpp delete mode 100644 src/test/rpc/Tx_test.cpp diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 041044b86df..82a57995a4c 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -639,7 +639,6 @@ target_sources (rippled PRIVATE src/ripple/rpc/handlers/WalletPropose.cpp src/ripple/rpc/impl/DeliveredAmount.cpp src/ripple/rpc/impl/Handler.cpp - src/ripple/rpc/impl/GRPCHelpers.cpp src/ripple/rpc/impl/LegacyPathFind.cpp src/ripple/rpc/impl/RPCHandler.cpp src/ripple/rpc/impl/RPCHelpers.cpp @@ -909,7 +908,6 @@ if (tests) src/test/protocol/BuildInfo_test.cpp src/test/protocol/InnerObjectFormats_test.cpp src/test/protocol/Issue_test.cpp - 
src/test/protocol/KnownFormatToGRPC_test.cpp src/test/protocol/Hooks_test.cpp src/test/protocol/PublicKey_test.cpp src/test/protocol/Quality_test.cpp @@ -944,7 +942,6 @@ if (tests) src/test/rpc/DepositAuthorized_test.cpp src/test/rpc/DeliveredAmount_test.cpp src/test/rpc/Feature_test.cpp - src/test/rpc/Fee_test.cpp src/test/rpc/GatewayBalances_test.cpp src/test/rpc/GetCounts_test.cpp src/test/rpc/JSONRPC_test.cpp @@ -967,12 +964,10 @@ if (tests) src/test/rpc/ServerInfo_test.cpp src/test/rpc/ShardArchiveHandler_test.cpp src/test/rpc/Status_test.cpp - src/test/rpc/Submit_test.cpp src/test/rpc/Subscribe_test.cpp src/test/rpc/Transaction_test.cpp src/test/rpc/TransactionEntry_test.cpp src/test/rpc/TransactionHistory_test.cpp - src/test/rpc/Tx_test.cpp src/test/rpc/ValidatorInfo_test.cpp src/test/rpc/ValidatorRPC_test.cpp src/test/rpc/Version_test.cpp diff --git a/Builds/levelization/results/ordering.txt b/Builds/levelization/results/ordering.txt index ed6b4e57c3e..401040fc2d7 100644 --- a/Builds/levelization/results/ordering.txt +++ b/Builds/levelization/results/ordering.txt @@ -113,7 +113,6 @@ test.consensus > ripple.basics test.consensus > ripple.beast test.consensus > ripple.consensus test.consensus > ripple.ledger -test.consensus > ripple.rpc test.consensus > test.csf test.consensus > test.toplevel test.consensus > test.unit_test diff --git a/cfg/rippled-example.cfg b/cfg/rippled-example.cfg index 83a9353aa4d..fda1671e416 100644 --- a/cfg/rippled-example.cfg +++ b/cfg/rippled-example.cfg @@ -1629,10 +1629,10 @@ ip = 127.0.0.1 admin = 127.0.0.1 protocol = ws -#[port_grpc] -#port = 50051 -#ip = 0.0.0.0 -#secure_gateway = 127.0.0.1 +[port_grpc] +port = 50051 +ip = 127.0.0.1 +secure_gateway = 127.0.0.1 #[port_ws_public] #port = 6005 diff --git a/src/ripple/app/main/GRPCServer.cpp b/src/ripple/app/main/GRPCServer.cpp index aef2612c0ef..fdef8c1cec8 100644 --- a/src/ripple/app/main/GRPCServer.cpp +++ b/src/ripple/app/main/GRPCServer.cpp @@ -590,94 +590,6 @@ GRPCServerImpl::setupListeners() requests.push_back(std::move(callData)); }; - { - using cd = CallData< - org::xrpl::rpc::v1::GetFeeRequest, - org::xrpl::rpc::v1::GetFeeResponse>; - - addToRequests(std::make_shared( - service_, - *cq_, - app_, - &org::xrpl::rpc::v1::XRPLedgerAPIService::AsyncService:: - RequestGetFee, - doFeeGrpc, - &org::xrpl::rpc::v1::XRPLedgerAPIService::Stub::GetFee, - RPC::NEEDS_CURRENT_LEDGER, - Resource::feeReferenceRPC, - secureGatewayIPs_)); - } - { - using cd = CallData< - org::xrpl::rpc::v1::GetAccountInfoRequest, - org::xrpl::rpc::v1::GetAccountInfoResponse>; - - addToRequests(std::make_shared( - service_, - *cq_, - app_, - &org::xrpl::rpc::v1::XRPLedgerAPIService::AsyncService:: - RequestGetAccountInfo, - doAccountInfoGrpc, - &org::xrpl::rpc::v1::XRPLedgerAPIService::Stub::GetAccountInfo, - RPC::NO_CONDITION, - Resource::feeReferenceRPC, - secureGatewayIPs_)); - } - { - using cd = CallData< - org::xrpl::rpc::v1::GetTransactionRequest, - org::xrpl::rpc::v1::GetTransactionResponse>; - - addToRequests(std::make_shared( - service_, - *cq_, - app_, - &org::xrpl::rpc::v1::XRPLedgerAPIService::AsyncService:: - RequestGetTransaction, - doTxGrpc, - &org::xrpl::rpc::v1::XRPLedgerAPIService::Stub::GetTransaction, - RPC::NEEDS_NETWORK_CONNECTION, - Resource::feeReferenceRPC, - secureGatewayIPs_)); - } - { - using cd = CallData< - org::xrpl::rpc::v1::SubmitTransactionRequest, - org::xrpl::rpc::v1::SubmitTransactionResponse>; - - addToRequests(std::make_shared( - service_, - *cq_, - app_, - 
&org::xrpl::rpc::v1::XRPLedgerAPIService::AsyncService:: - RequestSubmitTransaction, - doSubmitGrpc, - &org::xrpl::rpc::v1::XRPLedgerAPIService::Stub::SubmitTransaction, - RPC::NEEDS_CURRENT_LEDGER, - Resource::feeMediumBurdenRPC, - secureGatewayIPs_)); - } - - { - using cd = CallData< - org::xrpl::rpc::v1::GetAccountTransactionHistoryRequest, - org::xrpl::rpc::v1::GetAccountTransactionHistoryResponse>; - - addToRequests(std::make_shared( - service_, - *cq_, - app_, - &org::xrpl::rpc::v1::XRPLedgerAPIService::AsyncService:: - RequestGetAccountTransactionHistory, - doAccountTxGrpc, - &org::xrpl::rpc::v1::XRPLedgerAPIService::Stub:: - GetAccountTransactionHistory, - RPC::NO_CONDITION, - Resource::feeMediumBurdenRPC, - secureGatewayIPs_)); - } - { using cd = CallData< org::xrpl::rpc::v1::GetLedgerRequest, diff --git a/src/ripple/app/reporting/P2pProxy.h b/src/ripple/app/reporting/P2pProxy.h index a3984018ee2..92cc508a1a0 100644 --- a/src/ripple/app/reporting/P2pProxy.h +++ b/src/ripple/app/reporting/P2pProxy.h @@ -48,8 +48,6 @@ needCurrentOrClosed(Request& request) { // These are the only gRPC requests that specify a ledger if constexpr ( - std::is_same:: - value || std::is_same::value || std::is_same:: value || diff --git a/src/ripple/proto/org/xrpl/rpc/v1/account.proto b/src/ripple/proto/org/xrpl/rpc/v1/account.proto deleted file mode 100644 index 40e9a7a4f43..00000000000 --- a/src/ripple/proto/org/xrpl/rpc/v1/account.proto +++ /dev/null @@ -1,14 +0,0 @@ -syntax = "proto3"; - -package org.xrpl.rpc.v1; -option java_package = "org.xrpl.rpc.v1"; -option java_multiple_files = true; - -// A representation of an account address -// Next field: 2 -message AccountAddress -{ - // base58 encoding of an account - string address = 1; -} - diff --git a/src/ripple/proto/org/xrpl/rpc/v1/amount.proto b/src/ripple/proto/org/xrpl/rpc/v1/amount.proto deleted file mode 100644 index 64ea2f6416f..00000000000 --- a/src/ripple/proto/org/xrpl/rpc/v1/amount.proto +++ /dev/null @@ -1,48 +0,0 @@ -syntax = "proto3"; - -package org.xrpl.rpc.v1; -option java_package = "org.xrpl.rpc.v1"; -option java_multiple_files = true; - -import "org/xrpl/rpc/v1/account.proto"; - -// Next field: 3 -message CurrencyAmount -{ - oneof amount - { - XRPDropsAmount xrp_amount = 1; - IssuedCurrencyAmount issued_currency_amount = 2; - } -} - -// A representation of an amount of XRP. -// Next field: 2 -message XRPDropsAmount -{ - uint64 drops = 1 [jstype=JS_STRING]; -} - -// A representation of an amount of issued currency. -// Next field: 4 -message IssuedCurrencyAmount -{ - // The currency used to value the amount. - Currency currency = 1; - - // The value of the amount. 8 bytes - string value = 2; - - // Unique account address of the entity issuing the currency. - AccountAddress issuer = 3; -} - -// Next field: 3 -message Currency -{ - // 3 character ASCII code - string name = 1; - - // 160 bit currency code. 20 bytes - bytes code = 2; -} diff --git a/src/ripple/proto/org/xrpl/rpc/v1/common.proto b/src/ripple/proto/org/xrpl/rpc/v1/common.proto deleted file mode 100644 index fd514cbacee..00000000000 --- a/src/ripple/proto/org/xrpl/rpc/v1/common.proto +++ /dev/null @@ -1,606 +0,0 @@ -syntax = "proto3"; - -package org.xrpl.rpc.v1; -option java_package = "org.xrpl.rpc.v1"; -option java_multiple_files = true; - -import "org/xrpl/rpc/v1/amount.proto"; -import "org/xrpl/rpc/v1/account.proto"; - -// These fields are used in many different message types. 
They can be present -// in one or more transactions, as well as metadata of one or more transactions. -// Each is defined as its own message type with a single field "value", to -// ensure the field is the correct type everywhere it's used - - -// *** Messages wrapping uint32 *** - -message BurnedNFTokens -{ - uint32 value = 1; -} - -message CancelAfter -{ - // time in seconds since Ripple epoch - uint32 value = 1; -} - -message ClearFlag -{ - uint32 value = 1; -} - -message CloseTime -{ - // time in seconds since Ripple epoch - uint32 value = 1; -} - -message Date -{ - // time in seconds since Ripple epoch - uint32 value = 1; -} - -message DestinationTag -{ - uint32 value = 1; -} - -message Expiration -{ - // time in seconds since Ripple epoch - uint32 value = 1; -} - -message FinishAfter -{ - // time in seconds since Ripple epoch - uint32 value = 1; -} - -message Flags -{ - uint32 value = 1; -} - -message HighQualityIn -{ - uint32 value = 1; -} - -message HighQualityOut -{ - uint32 value = 1; -} - -message FirstLedgerSequence -{ - uint32 value = 1; -} - -message LastLedgerSequence -{ - uint32 value = 1; -} - -message LowQualityIn -{ - uint32 value = 1; -} - -message LowQualityOut -{ - uint32 value = 1; -} - -message MintedNFTokens -{ - uint32 value = 1; -} - -message OfferSequence -{ - uint32 value = 1; -} - -message OwnerCount -{ - uint32 value = 1; -} - -message PreviousTransactionLedgerSequence -{ - uint32 value = 1; -} - -message QualityIn -{ - uint32 value = 1; -} - -message QualityOut -{ - uint32 value = 1; -} - -message ReferenceFeeUnits -{ - uint32 value = 1; -} - -message ReserveBase -{ - // in drops - uint32 value = 1; -} - -message ReserveIncrement -{ - // in drops - uint32 value = 1; -} - -message Sequence -{ - uint32 value = 1; -} - -message SetFlag -{ - uint32 value = 1; -} - -message SettleDelay -{ - uint32 value = 1; -} - -message SignerListID -{ - uint32 value = 1; -} - -message SignerQuorum -{ - uint32 value = 1; -} - -message SignerWeight -{ - // is actually uint16 - uint32 value = 1; -} - -message SourceTag -{ - uint32 value = 1; -} - -message TickSize -{ - // is actually uint8 - uint32 value = 1; -} - -message Ticket -{ - uint32 value = 1; -} - -message TicketCount -{ - uint32 value = 1; -} - -message TicketSequence -{ - uint32 value = 1; -} - -message NFTokenTaxon -{ - uint32 value = 1; -} - -message TransferFee -{ - // is actually uint16 - uint32 value = 1; -} - -message TransferRate -{ - uint32 value = 1; -} - - -// *** Messages wrapping uint64 *** - -message BaseFee -{ - // in drops - uint64 value = 1 [jstype=JS_STRING]; -} - -message BookNode -{ - uint64 value = 1 [jstype=JS_STRING]; -} - -message DestinationNode -{ - uint64 value = 1 [jstype=JS_STRING]; -} - -message HighNode -{ - uint64 value = 1 [jstype=JS_STRING]; -} - -message IndexNext -{ - uint64 value = 1 [jstype=JS_STRING]; -} - -message IndexPrevious -{ - uint64 value = 1 [jstype=JS_STRING]; -} - -message LowNode -{ - uint64 value = 1 [jstype=JS_STRING]; -} - -message NFTokenOfferNode -{ - uint64 value = 1 [jstype=JS_STRING]; -} - -message OwnerNode -{ - uint64 value = 1 [jstype=JS_STRING]; -} - - -// *** Messages wrapping 16 bytes *** - -message EmailHash -{ - bytes value = 1; -} - -message NFTokenID -{ - bytes value = 1; -} - - -// *** Messages wrapping 20 bytes *** - -message TakerGetsIssuer -{ - // 20 bytes - bytes value = 1; -} - -message TakerPaysIssuer -{ - // 20 bytes - bytes value = 1; -} - - -// *** Messages wrapping 32 bytes *** - -message AccountTransactionID -{ - // 32 bytes - bytes value 
= 1; -} - -message BookDirectory -{ - // 32 btes - bytes value = 1; -} - -message Channel -{ - // 32 bytes - bytes value = 1; -} - -message CheckID -{ - // 32 bytes - bytes value = 1; -} - -message Hash -{ - // 32 bytes - bytes value = 1; -} - -message Index -{ - // 32 bytes - bytes value = 1; -} - -message InvoiceID -{ - // 32 bytes - bytes value = 1; -} - -message NextPageMin -{ - // 32 bytes - bytes value = 1; -} - -message NFTokenBuyOffer -{ - // 32 bytes - bytes value = 1; -} - -message NFTokenSellOffer -{ - // 32 bytes - bytes value = 1; -} - -message PreviousPageMin -{ - // 32 bytes - bytes value = 1; -} - -message PreviousTransactionID -{ - // 32 bytes - bytes value = 1; -} - -message RootIndex -{ - // 32 bytes - bytes value = 1; -} - -message WalletLocator -{ - // 32 bytes - bytes value = 1; -} - - -// *** Messages wrapping variable length byte arrays *** - -message Condition -{ - bytes value = 1; -} - -message Fulfillment -{ - bytes value = 1; -} - -message MemoData -{ - bytes value = 1; -} - -message MemoFormat -{ - bytes value = 1; -} - -message MemoType -{ - bytes value = 1; -} - -message MessageKey -{ - bytes value = 1; -} - -message PublicKey -{ - bytes value = 1; -} - -message PaymentChannelSignature -{ - bytes value = 1; -} - -message SigningPublicKey -{ - bytes value = 1; -} - -message TransactionSignature -{ - bytes value = 1; -} - -message ValidatorToDisable -{ - bytes value = 1; -} - -message ValidatorToReEnable -{ - bytes value = 1; -} - -// *** Messages wrapping a Currency value *** -// -// TODO: if there's a V2 of the API, fix this misspelling. -message TakerGetsCurreny -{ - Currency value = 1; -} - -message TakerPaysCurrency -{ - Currency value = 1; -} - - -// *** Messages wrapping a CurrencyAmount *** - -message Amount -{ - // Note, CurrencyAmount is a oneof, that can represent an XRP drops amount - // or an Issued Currency amount. However, in some transaction types/ledger - // objects, this value can only be in drops. For instance, the Amount field - // of a Payment transaction can be specified in XRP drops or an Issued - // Currency amount, but the Amount field of a PaymentChannelClaim - // transaction can only be an XRP drops amount. 
- CurrencyAmount value = 1; -} - -message Balance -{ - CurrencyAmount value = 1; -} - -message NFTokenBrokerFee -{ - CurrencyAmount value = 1; -} - -message DeliverMin -{ - CurrencyAmount value = 1; -} - -message DeliveredAmount -{ - CurrencyAmount value = 1; -} - -message HighLimit -{ - CurrencyAmount value = 1; -} - -message LimitAmount -{ - CurrencyAmount value = 1; -} - -message LowLimit -{ - CurrencyAmount value = 1; -} - -message SendMax -{ - CurrencyAmount value = 1; -} - -message TakerGets -{ - CurrencyAmount value = 1; -} - -message TakerPays -{ - CurrencyAmount value = 1; -} - - -// *** Messages wrapping an AccountAddress *** - -message Account -{ - AccountAddress value = 1; -} - -message Authorize -{ - AccountAddress value = 1; -} - -message Destination -{ - AccountAddress value = 1; -} - -message Issuer -{ - AccountAddress value = 1; -} - -message NFTokenMinter -{ - AccountAddress value = 1; -} - -message Owner -{ - AccountAddress value = 1; -} - -message RegularKey -{ - AccountAddress value = 1; -} - -message Unauthorize -{ - AccountAddress value = 1; -} - - -// *** Messages wrapping a string *** - -message Domain -{ - string value = 1; -} - -message URI -{ - string value = 1; -} - - -// *** Aggregate type messages - -// Next field: 3 -message NFToken -{ - NFTokenID nftoken_id = 1; - - URI uri = 2; -} - -// Next field: 3 -message SignerEntry -{ - Account account = 1; - - SignerWeight signer_weight = 2; - - WalletLocator wallet_locator = 3; -} - -// Next field: 3 -message DisabledValidator -{ - PublicKey public_key = 1; - - FirstLedgerSequence ledger_sequence = 2; -} - diff --git a/src/ripple/proto/org/xrpl/rpc/v1/get_account_info.proto b/src/ripple/proto/org/xrpl/rpc/v1/get_account_info.proto deleted file mode 100644 index bc23a8c66d4..00000000000 --- a/src/ripple/proto/org/xrpl/rpc/v1/get_account_info.proto +++ /dev/null @@ -1,93 +0,0 @@ -syntax = "proto3"; - -package org.xrpl.rpc.v1; -option java_package = "org.xrpl.rpc.v1"; -option java_multiple_files = true; - -import "org/xrpl/rpc/v1/ledger_objects.proto"; -import "org/xrpl/rpc/v1/amount.proto"; -import "org/xrpl/rpc/v1/account.proto"; -import "org/xrpl/rpc/v1/ledger.proto"; -import "org/xrpl/rpc/v1/common.proto"; - -// A request to get info about an account. -// Next field: 6 -message GetAccountInfoRequest -{ - // The address to get info about. - AccountAddress account = 1; - - bool strict = 2; - - // Which ledger to use to retrieve data. - // If this field is not set, the server will use the open ledger. - // The open ledger includes data that is not validated or final. 
- // To retrieve the most up to date and validated data, use - // SHORTCUT_VALIDATED - LedgerSpecifier ledger = 3; - - bool queue = 4; - - bool signer_lists = 5; - - string client_ip = 6; -} - -// Response to GetAccountInfo RPC -// Next field: 6 -message GetAccountInfoResponse -{ - AccountRoot account_data = 1; - - SignerList signer_list = 2; - - uint32 ledger_index = 3; - - QueueData queue_data = 4; - - bool validated = 5; -} - -// Aggregate data about queued transactions -// Next field: 11 -message QueueData -{ - uint32 txn_count = 1; - - bool auth_change_queued = 2; - - uint32 lowest_sequence = 3; - - uint32 highest_sequence = 4; - - XRPDropsAmount max_spend_drops_total = 5; - - repeated QueuedTransaction transactions = 6; - - uint32 lowest_ticket = 7; - - uint32 highest_ticket = 8; - - uint32 sequence_count = 9; - - uint32 ticket_count = 10; -} - -// Data about a single queued transaction -// Next field: 8 -message QueuedTransaction -{ - bool auth_change = 1; - - XRPDropsAmount fee = 2; - - uint64 fee_level = 3 [jstype=JS_STRING]; - - XRPDropsAmount max_spend_drops = 4; - - Sequence sequence = 5; - - LastLedgerSequence last_ledger_sequence = 6; - - Ticket ticket = 7; -} diff --git a/src/ripple/proto/org/xrpl/rpc/v1/get_account_transaction_history.proto b/src/ripple/proto/org/xrpl/rpc/v1/get_account_transaction_history.proto deleted file mode 100644 index c4889a6bdbe..00000000000 --- a/src/ripple/proto/org/xrpl/rpc/v1/get_account_transaction_history.proto +++ /dev/null @@ -1,75 +0,0 @@ -syntax = "proto3"; - -import "org/xrpl/rpc/v1/get_transaction.proto"; -import "org/xrpl/rpc/v1/account.proto"; -import "org/xrpl/rpc/v1/ledger.proto"; - -package org.xrpl.rpc.v1; -option java_package = "org.xrpl.rpc.v1"; -option java_multiple_files = true; - -// Next field: 8 -message GetAccountTransactionHistoryRequest -{ - AccountAddress account = 1; - - // What ledger to include results from. Specifying a not yet validated - // ledger results in an error. Not specifying a ledger uses the entire - // range of validated ledgers available to the server. - // Note, this parameter acts as a filter, and can only reduce the number of - // results. Specifying a single ledger will return only transactions from - // that ledger. This includes specifying a ledger with a Shortcut. For - // example, specifying SHORTCUT_VALIDATED will result in only transactions - // that were part of the most recently validated ledger being returned. - // Specifying a range of ledgers results in only transactions that were - // included in a ledger within the specified range being returned. - oneof ledger - { - LedgerSpecifier ledger_specifier = 2; - LedgerRange ledger_range = 3; - }; - - // Return results as binary blobs. Defaults to false. - bool binary = 4; - - // If set to true, returns values indexed by older ledger first. - // Default to false. - bool forward = 5; - - // Limit the number of results. Server may choose a lower limit. 
- // If this value is 0, the limit is ignored and the number of results - // returned is determined by the server - uint32 limit = 6; - - // Marker to resume where previous request left off - // Used for pagination - Marker marker = 7; -} - - -// Next field: 8 -message GetAccountTransactionHistoryResponse -{ - AccountAddress account = 1; - - uint32 ledger_index_min = 2; - - uint32 ledger_index_max = 3; - - uint32 limit = 4; - - Marker marker = 5; - - repeated GetTransactionResponse transactions = 6; - - bool validated = 7; -} - -// Next field: 3 -message Marker -{ - uint32 ledger_index = 1; - - uint32 account_sequence = 2; -} - diff --git a/src/ripple/proto/org/xrpl/rpc/v1/get_fee.proto b/src/ripple/proto/org/xrpl/rpc/v1/get_fee.proto deleted file mode 100644 index aec3f24133c..00000000000 --- a/src/ripple/proto/org/xrpl/rpc/v1/get_fee.proto +++ /dev/null @@ -1,58 +0,0 @@ -syntax = "proto3"; - -package org.xrpl.rpc.v1; -option java_package = "org.xrpl.rpc.v1"; -option java_multiple_files = true; - -import "org/xrpl/rpc/v1/amount.proto"; - -// A request for the current transaction fee on the ledger. -// Next field: 1 -message GetFeeRequest -{ - string client_ip = 1; -} - -// Response to a GetFee RPC -// Next field: 8 -message GetFeeResponse -{ - uint64 current_ledger_size = 1 [jstype=JS_STRING]; - - uint64 current_queue_size = 2 [jstype=JS_STRING]; - - Fee fee = 3; - - uint64 expected_ledger_size = 4 [jstype=JS_STRING]; - - uint32 ledger_current_index = 5; - - FeeLevels levels = 6; - - uint64 max_queue_size = 7 [jstype=JS_STRING]; - -} - -// Next field: 5 -message Fee -{ - XRPDropsAmount base_fee = 1; - - XRPDropsAmount median_fee = 2; - - XRPDropsAmount minimum_fee = 3; - - XRPDropsAmount open_ledger_fee = 4; -} - -// Next field: 5 -message FeeLevels -{ - uint64 median_level = 1 [jstype=JS_STRING]; - - uint64 minimum_level = 2 [jstype=JS_STRING]; - - uint64 open_ledger_level = 3 [jstype=JS_STRING]; - - uint64 reference_level = 4 [jstype=JS_STRING]; -} diff --git a/src/ripple/proto/org/xrpl/rpc/v1/get_transaction.proto b/src/ripple/proto/org/xrpl/rpc/v1/get_transaction.proto deleted file mode 100644 index 93ba48a11ec..00000000000 --- a/src/ripple/proto/org/xrpl/rpc/v1/get_transaction.proto +++ /dev/null @@ -1,62 +0,0 @@ -syntax = "proto3"; - -package org.xrpl.rpc.v1; -option java_package = "org.xrpl.rpc.v1"; -option java_multiple_files = true; - -import "org/xrpl/rpc/v1/meta.proto"; -import "org/xrpl/rpc/v1/ledger.proto"; -import "org/xrpl/rpc/v1/transaction.proto"; -import "org/xrpl/rpc/v1/common.proto"; - -// Next field: 4 -message GetTransactionRequest { - - // hash of the transaction. 32 bytes - // ATTN: this is in binary, not hex. The JSON API accepts a hex string for - // a transaction hash, but here we need that hex string converted into its - // binary form. Each pair of hex characters should be converted into its - // corresponding byte. For example, the 4 character hex string "00FF" - // should be converted to a 2 byte array: [0, 255] - bytes hash = 1; - - // if true, return data in binary format. defaults to false - bool binary = 2; - - // If the transaction was not found, server will report whether the entire - // specified range was searched. The value is contained in the error message. - // The error message is of the form: - // "txn not found. searched_all = [true,false]" - // If the transaction was found, this parameter is ignored. 
- LedgerRange ledger_range = 3; - - string client_ip = 4; -} - -// Next field: 9 -message GetTransactionResponse { - - oneof serialized_transaction { - - Transaction transaction = 1; - // Variable length - bytes transaction_binary = 2; - }; - // Sequence number of ledger that contains this transaction - uint32 ledger_index = 3; - - // 32 bytes - bytes hash = 4; - - // whether the ledger has been validated - bool validated = 5; - - // metadata about the transaction - oneof serialized_meta { - Meta meta = 6; - // Variable length - bytes meta_binary = 7; - } - - Date date = 8; -} diff --git a/src/ripple/proto/org/xrpl/rpc/v1/ledger.proto b/src/ripple/proto/org/xrpl/rpc/v1/ledger.proto index 78e4211f5f0..3bb199de22f 100644 --- a/src/ripple/proto/org/xrpl/rpc/v1/ledger.proto +++ b/src/ripple/proto/org/xrpl/rpc/v1/ledger.proto @@ -25,16 +25,6 @@ message LedgerSpecifier } } -// Next field: 3 -message LedgerRange -{ - uint32 ledger_index_min = 1; - - // Note, if ledger_index_min is non-zero and ledger_index_max is 0, the - // software will use the max validated ledger in place of ledger_index_max - uint32 ledger_index_max = 2; -}; - // Next field: 3 message RawLedgerObject diff --git a/src/ripple/proto/org/xrpl/rpc/v1/ledger_objects.proto b/src/ripple/proto/org/xrpl/rpc/v1/ledger_objects.proto deleted file mode 100644 index d6db469a213..00000000000 --- a/src/ripple/proto/org/xrpl/rpc/v1/ledger_objects.proto +++ /dev/null @@ -1,418 +0,0 @@ -syntax = "proto3"; - -package org.xrpl.rpc.v1; -option java_package = "org.xrpl.rpc.v1"; -option java_multiple_files = true; - -import "org/xrpl/rpc/v1/common.proto"; - -// Next field: 17 -message LedgerObject -{ - oneof object - { - AccountRoot account_root = 1; - Amendments amendments = 2; - Check check = 3; - DepositPreauthObject deposit_preauth = 4; - DirectoryNode directory_node = 5; - Escrow escrow = 6; - FeeSettings fee_settings = 7; - LedgerHashes ledger_hashes = 8; - NFTokenOffer nftoken_offer = 15; - NFTokenPage nftoken_page = 16; - Offer offer = 9; - PayChannel pay_channel = 10; - RippleState ripple_state = 11; - SignerList signer_list = 12; - NegativeUNL negative_unl = 13; - TicketObject ticket = 14; - } -} - -// Next field: 15 -enum LedgerEntryType -{ - LEDGER_ENTRY_TYPE_UNSPECIFIED = 0; - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT = 1; - LEDGER_ENTRY_TYPE_AMENDMENTS = 2; - LEDGER_ENTRY_TYPE_CHECK = 3; - LEDGER_ENTRY_TYPE_DEPOSIT_PREAUTH = 4; - LEDGER_ENTRY_TYPE_DIRECTORY_NODE = 5; - LEDGER_ENTRY_TYPE_ESCROW = 6; - LEDGER_ENTRY_TYPE_FEE_SETTINGS = 7; - LEDGER_ENTRY_TYPE_LEDGER_HASHES = 8; - LEDGER_ENTRY_TYPE_OFFER = 9; - LEDGER_ENTRY_TYPE_PAY_CHANNEL = 10; - LEDGER_ENTRY_TYPE_RIPPLE_STATE = 11; - LEDGER_ENTRY_TYPE_SIGNER_LIST = 12; - LEDGER_ENTRY_TYPE_NEGATIVE_UNL = 13; - LEDGER_ENTRY_TYPE_TICKET = 14; - LEDGER_ENTRY_TYPE_NFTOKEN_OFFER = 15; - LEDGER_ENTRY_TYPE_NFTOKEN_PAGE = 16; -} - -// Next field: 19 -message AccountRoot -{ - Account account = 1; - - Balance balance = 2; - - BurnedNFTokens burned_nftokens = 16; - - Sequence sequence = 3; - - Flags flags = 4; - - OwnerCount owner_count = 5; - - PreviousTransactionID previous_transaction_id = 6; - - PreviousTransactionLedgerSequence previous_transaction_ledger_sequence = 7; - - AccountTransactionID account_transaction_id = 8; - - Domain domain = 9; - - EmailHash email_hash = 10; - - MessageKey message_key = 11; - - MintedNFTokens minted_nftokens = 17; - - NFTokenMinter nftoken_minter = 18; - - RegularKey regular_key = 12; - - TickSize tick_size = 13; - - TicketCount ticket_count = 15; - - TransferRate 
transfer_rate = 14; -} - -// Next field: 4 -message Amendments -{ - // Next field: 2 - message Amendment - { - // 32 bytes - bytes value = 1; - } - - // Next field: 3 - message Majority - { - Amendment amendment = 1; - - CloseTime close_time = 2; - } - - repeated Amendment amendments = 1; - - repeated Majority majorities = 2; - - Flags flags = 3; -} - -// Next field: 14 -message Check -{ - Account account = 1; - - Destination destination = 2; - - Flags flags = 3; - - OwnerNode owner_node = 4; - - PreviousTransactionID previous_transaction_id = 5; - - PreviousTransactionLedgerSequence previous_transaction_ledger_sequence = 6; - - SendMax send_max = 7; - - Sequence sequence = 8; - - DestinationNode destination_node = 9; - - DestinationTag destination_tag = 10; - - Expiration expiration = 11; - - InvoiceID invoice_id = 12; - - SourceTag source_tag = 13; -} - -// Next field: 7 -message DepositPreauthObject -{ - Account account = 1; - - Authorize authorize = 2; - - Flags flags = 3; - - OwnerNode owner_node = 4; - - PreviousTransactionID previous_transaction_id = 5; - - PreviousTransactionLedgerSequence previous_transaction_ledger_sequence = 6; -} - -// Next field: 12 -message DirectoryNode -{ - Flags flags = 1; - - RootIndex root_index = 2; - - repeated Index indexes = 3; - - IndexNext index_next = 4; - - IndexPrevious index_previous = 5; - - Owner owner = 6; - - TakerPaysCurrency taker_pays_currency = 7; - - TakerPaysIssuer taker_pays_issuer = 8; - - TakerGetsCurreny taker_gets_currency = 9; - - TakerGetsIssuer taker_gets_issuer = 10; - - NFTokenID nftoken_id = 11; -} - -// Next field: 14 -message Escrow -{ - Account account = 1; - - Destination destination = 2; - - Amount amount = 3; - - Condition condition = 4; - - CancelAfter cancel_after = 5; - - FinishAfter finish_after = 6; - - Flags flags = 7; - - SourceTag source_tag = 8; - - DestinationTag destination_tag = 9; - - OwnerNode owner_node = 10; - - DestinationNode destination_node = 11; - - PreviousTransactionID previous_transaction_id = 12; - - PreviousTransactionLedgerSequence previous_transaction_ledger_sequence = 13; -} - -// Next field: 6 -message FeeSettings -{ - BaseFee base_fee = 1; - - ReferenceFeeUnits reference_fee_units = 2; - - ReserveBase reserve_base = 3; - - ReserveIncrement reserve_increment = 4; - - Flags flags = 5; -} - -// Next field: 4 -message LedgerHashes -{ - LastLedgerSequence last_ledger_sequence = 1; - - repeated Hash hashes = 2; - - Flags flags = 3; -} - -// Next field: 12 -message Offer -{ - Account account = 1; - - Sequence sequence = 2; - - Flags flags = 3; - - TakerPays taker_pays = 4; - - TakerGets taker_gets = 5; - - BookDirectory book_directory = 6; - - BookNode book_node = 7; - - OwnerNode owner_node = 8; - - Expiration expiration = 9; - - PreviousTransactionID previous_transaction_id = 10; - - PreviousTransactionLedgerSequence previous_transaction_ledger_sequence = 11; -} - -// Next field: 11 -message NFTokenOffer -{ - Flags flags = 1; - - Owner owner = 2; - - NFTokenID nftoken_id = 3; - - Amount amount = 4; - - OwnerNode owner_node = 5; - - NFTokenOfferNode nftoken_offer_node = 6; - - Destination destination = 7; - - Expiration expiration = 8; - - PreviousTransactionID previous_transaction_id = 9; - - PreviousTransactionLedgerSequence previous_transaction_ledger_sequence = 10; -} - -// Next field: 7 -message NFTokenPage -{ - Flags flags = 1; - - PreviousPageMin previous_page_min = 2; - - NextPageMin next_page_min = 3; - - repeated NFToken nftokens = 4; - - PreviousTransactionID previous_transaction_id 
= 5; - - PreviousTransactionLedgerSequence previous_transaction_ledger_sequence = 6; -} - -// Next field: 13 -message PayChannel -{ - Account account = 1; - - Destination destination = 2; - - Amount amount = 3; - - Balance balance = 4; - - PublicKey public_key = 5; - - SettleDelay settle_delay = 6; - - OwnerNode owner_node = 7; - - PreviousTransactionID previous_transaction_id = 8; - - PreviousTransactionLedgerSequence previous_transaction_ledger_sequence = 9; - - Flags flags = 10; - - Expiration expiration = 11; - - CancelAfter cancel_after = 12; - - SourceTag source_tag = 13; - - DestinationTag destination_tag = 14; - - DestinationNode destination_node = 15; -} - -// Next field: 13 -message RippleState -{ - Balance balance = 1; - - Flags flags = 2; - - LowLimit low_limit = 3; - - HighLimit high_limit = 4; - - LowNode low_node = 5; - - HighNode high_node = 6; - - LowQualityIn low_quality_in = 7; - - LowQualityOut low_quality_out = 8; - - HighQualityIn high_quality_in = 9; - - HighQualityOut high_quality_out = 10; - - PreviousTransactionID previous_transaction_id = 11; - - PreviousTransactionLedgerSequence previous_transaction_ledger_sequence = 12; -} - -// Next field: 8 -message SignerList -{ - Flags flags = 1; - - PreviousTransactionID previous_transaction_id = 2; - - PreviousTransactionLedgerSequence previous_transaction_ledger_sequence = 3; - - OwnerNode owner_node = 4; - - repeated SignerEntry signer_entries = 5; - - SignerListID signer_list_id = 6; - - SignerQuorum signer_quorum = 7; -} - -// Next field: 7 -message TicketObject -{ - Flags flags = 1; - - Account account = 2; - - OwnerNode owner_node = 3; - - PreviousTransactionID previous_transaction_id = 4; - - PreviousTransactionLedgerSequence previous_transaction_ledger_sequence = 5; - - TicketSequence ticket_sequence = 6; -} - -// Next field: 5 -message NegativeUNL -{ - repeated DisabledValidator disabled_validators = 1; - - ValidatorToDisable validator_to_disable = 2; - - ValidatorToReEnable validator_to_re_enable = 3; - - Flags flags = 4; -} diff --git a/src/ripple/proto/org/xrpl/rpc/v1/meta.proto b/src/ripple/proto/org/xrpl/rpc/v1/meta.proto deleted file mode 100644 index 35660b3ddc0..00000000000 --- a/src/ripple/proto/org/xrpl/rpc/v1/meta.proto +++ /dev/null @@ -1,116 +0,0 @@ -syntax = "proto3"; - -package org.xrpl.rpc.v1; -option java_package = "org.xrpl.rpc.v1"; -option java_multiple_files = true; - -import "org/xrpl/rpc/v1/ledger_objects.proto"; -import "org/xrpl/rpc/v1/common.proto"; - -message SubmitMetadataRequest -{ - - repeated AffectedNode affected_nodes = 1; - uint32 ledger_sequence = 2; -} - -message SubmitMetadataResponse -{ - bool success = 1; - - string msg = 2; -} - -message PrepareLedgerRequest -{ - uint32 ledger_index = 1; -} - -message PrepareLedgerResponse -{ - bool success = 1; - - string msg = 2; -} - -// Next field: 5 -message Meta -{ - // index in ledger - uint64 transaction_index = 1 [jstype=JS_STRING]; - - // result code indicating whether the transaction succeeded or failed - TransactionResult transaction_result = 2; - - repeated AffectedNode affected_nodes = 3; - - DeliveredAmount delivered_amount = 4; -} - -// Next field: 3 -message TransactionResult -{ - // Next field: 7 - enum ResultType - { - RESULT_TYPE_UNSPECIFIED = 0; - // Claimed cost only - RESULT_TYPE_TEC = 1; - // Failure - RESULT_TYPE_TEF = 2; - // Local error - RESULT_TYPE_TEL = 3; - // Malformed transaction - RESULT_TYPE_TEM = 4; - // Retry - RESULT_TYPE_TER = 5; - // Success - RESULT_TYPE_TES = 6; - } - - // category of the transaction 
result - ResultType result_type = 1; - - // full result string, i.e. tesSUCCESS - string result = 2; -} - -// Next field: 6 -message AffectedNode -{ - LedgerEntryType ledger_entry_type = 1; - - // 32 bytes - bytes ledger_index = 2; - - oneof node - { - CreatedNode created_node = 3; - DeletedNode deleted_node = 4; - ModifiedNode modified_node = 5; - } -} - -// Next field: 2 -message CreatedNode -{ - LedgerObject new_fields = 1; -} - -// Next field: 2 -message DeletedNode -{ - LedgerObject final_fields = 1; -} - -// Next field: 5 -message ModifiedNode { - - LedgerObject final_fields = 1; - - LedgerObject previous_fields = 2; - - PreviousTransactionID previous_transaction_id = 3; - - PreviousTransactionLedgerSequence previous_transaction_ledger_sequence = 4; -} diff --git a/src/ripple/proto/org/xrpl/rpc/v1/submit.proto b/src/ripple/proto/org/xrpl/rpc/v1/submit.proto deleted file mode 100644 index f8b0a82708f..00000000000 --- a/src/ripple/proto/org/xrpl/rpc/v1/submit.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package org.xrpl.rpc.v1; -option java_package = "org.xrpl.rpc.v1"; -option java_multiple_files = true; - -import "org/xrpl/rpc/v1/meta.proto"; - -// A request to submit the signed transaction to the ledger. -// Next field: 3 -message SubmitTransactionRequest -{ - // The signed transaction to submit. - bytes signed_transaction = 1; - - bool fail_hard = 2; - - string client_ip = 3; -} - -// A response when a signed transaction is submitted to the ledger. -// Next field: 5 -message SubmitTransactionResponse -{ - // Code indicating the preliminary result of the transaction. - TransactionResult engine_result = 1; - - // Numeric code indicating the preliminary result of the transaction, - // directly correlated to engine_result. - int64 engine_result_code = 2; - - // Human-readable explanation of the transaction's preliminary result. 
- string engine_result_message = 3; - - // 32 bytes - bytes hash = 4; -} diff --git a/src/ripple/proto/org/xrpl/rpc/v1/transaction.proto b/src/ripple/proto/org/xrpl/rpc/v1/transaction.proto deleted file mode 100644 index 05300422b1a..00000000000 --- a/src/ripple/proto/org/xrpl/rpc/v1/transaction.proto +++ /dev/null @@ -1,390 +0,0 @@ -syntax = "proto3"; - -package org.xrpl.rpc.v1; -option java_package = "org.xrpl.rpc.v1"; -option java_multiple_files = true; - -import "org/xrpl/rpc/v1/common.proto"; -import "org/xrpl/rpc/v1/amount.proto"; -import "org/xrpl/rpc/v1/account.proto"; - -// A message encompassing all transaction types -// Next field: 37 -message Transaction -{ - Account account = 1; - - XRPDropsAmount fee = 2; - - Sequence sequence = 3; - - // Data specific to the type of transaction - oneof transaction_data - { - Payment payment = 4; - - AccountSet account_set = 13; - - AccountDelete account_delete = 14; - - CheckCancel check_cancel = 15; - - CheckCash check_cash = 16; - - CheckCreate check_create = 17; - - DepositPreauth deposit_preauth = 18; - - EscrowCancel escrow_cancel = 19; - - EscrowCreate escrow_create = 20; - - EscrowFinish escrow_finish = 21; - - NFTokenAcceptOffer nftoken_accept_offer = 32; - - NFTokenBurn nftoken_burn = 33; - - NFTokenCancelOffer nftoken_cancel_offer = 34; - - NFTokenCreateOffer nftoken_create_offer = 35; - - NFTokenMint nftoken_mint = 36; - - OfferCancel offer_cancel = 22; - - OfferCreate offer_create = 23; - - PaymentChannelClaim payment_channel_claim = 24; - - PaymentChannelCreate payment_channel_create= 25; - - PaymentChannelFund payment_channel_fund = 26; - - SetRegularKey set_regular_key = 27; - - SignerListSet signer_list_set = 28; - - TicketCreate ticket_create = 30; - - TrustSet trust_set = 29; - } - - SigningPublicKey signing_public_key = 5; - - TransactionSignature transaction_signature = 6; - - Flags flags = 7; - - LastLedgerSequence last_ledger_sequence = 8; - - SourceTag source_tag = 9; - - repeated Memo memos = 10; - - repeated Signer signers = 11; - - AccountTransactionID account_transaction_id = 12; - - TicketSequence ticket_sequence = 31; -} - -// Next field: 4 -message Memo -{ - MemoData memo_data = 1; - - MemoFormat memo_format = 2; - - MemoType memo_type = 3; -} - -// Next field: 4 -message Signer -{ - Account account = 1; - - TransactionSignature transaction_signature = 2; - - SigningPublicKey signing_public_key = 3; -} - -// Next field: 9 -message AccountSet -{ - ClearFlag clear_flag = 1; - - Domain domain = 2; - - EmailHash email_hash = 3; - - MessageKey message_key = 4; - - SetFlag set_flag = 5; - - TransferRate transfer_rate = 6; - - TickSize tick_size = 7; - - NFTokenMinter nftoken_minter = 8; -} - -// Next field: 3 -message AccountDelete -{ - Destination destination = 1; - - DestinationTag destination_tag = 2; -} - -// Next field: 2 -message CheckCancel -{ - CheckID check_id = 1; -} - -// Next field: 4 -message CheckCash -{ - CheckID check_id = 1; - - oneof amount_oneof - { - Amount amount = 2; - - DeliverMin deliver_min = 3; - } -} - -// Next field: 6 -message CheckCreate -{ - Destination destination = 1; - - SendMax send_max = 2; - - DestinationTag destination_tag = 3; - - Expiration expiration = 4; - - InvoiceID invoice_id = 5; -} - -// Next field: 3 -message DepositPreauth -{ - oneof authorization_oneof - { - Authorize authorize = 1; - - Unauthorize unauthorize = 2; - } -} - -// Next field: 3 -message EscrowCancel -{ - Owner owner = 1; - - OfferSequence offer_sequence = 2; -} - -// Next field: 7 -message EscrowCreate -{ 
- Amount amount = 1; - - Destination destination = 2; - - CancelAfter cancel_after = 3; - - FinishAfter finish_after = 4; - - Condition condition = 5; - - DestinationTag destination_tag = 6; -} - -// Next field: 5 -message EscrowFinish -{ - Owner owner = 1; - - OfferSequence offer_sequence = 2; - - Condition condition = 3; - - Fulfillment fulfillment = 4; -} - -// Next field: 4 -message NFTokenAcceptOffer -{ - NFTokenBrokerFee nftoken_broker_fee = 1; - - NFTokenBuyOffer nftoken_buy_offer = 2; - - NFTokenSellOffer nftoken_sell_offer = 3; -} - -// Next field: 3 -message NFTokenBurn -{ - Owner owner = 1; - - NFTokenID nftoken_id = 2; -} - -// Next field: 2 -message NFTokenCancelOffer -{ - repeated Index nftoken_offers = 1; -} - -// Next field: 6 -message NFTokenCreateOffer -{ - Amount amount = 1; - - Destination destination = 2; - - Expiration expiration = 3; - - Owner owner = 4; - - NFTokenID nftoken_id = 5; -} - -// Next field: 5 -message NFTokenMint -{ - Issuer issuer = 1; - - NFTokenTaxon nftoken_taxon = 2; - - TransferFee transfer_fee = 3; - - URI uri = 4; -} - -// Next field: 2 -message OfferCancel -{ - OfferSequence offer_sequence = 1; -} - -// Next field: 5 -message OfferCreate -{ - Expiration expiration = 1; - - OfferSequence offer_sequence = 2; - - TakerGets taker_gets = 3; - - TakerPays taker_pays = 4; -} - -// Next field: 8 -message Payment -{ - // Next field: 4 - message PathElement - { - AccountAddress account = 1; - - Currency currency = 2; - - AccountAddress issuer = 3; - } - - // Next field: 2 - message Path - { - repeated PathElement elements = 1; - } - - Amount amount = 1; - - Destination destination = 2; - - DestinationTag destination_tag = 3; - - InvoiceID invoice_id = 4; - - repeated Path paths = 5; - - SendMax send_max = 6; - - DeliverMin deliver_min = 7; -} - -// Next field: 6 -message PaymentChannelClaim -{ - Channel channel = 1; - - Balance balance = 2; - - Amount amount = 3; - - PaymentChannelSignature payment_channel_signature = 4; - - PublicKey public_key = 5; -} - -// Next field: 7 -message PaymentChannelCreate -{ - Amount amount = 1; - - Destination destination = 2; - - SettleDelay settle_delay = 3; - - PublicKey public_key = 4; - - CancelAfter cancel_after = 5; - - DestinationTag destination_tag = 6; -} - -// Next field: 4 -message PaymentChannelFund -{ - Channel channel = 1; - - Amount amount = 2; - - Expiration expiration = 3; -} - -// Next field: 2 -message SetRegularKey -{ - RegularKey regular_key = 1; -} - -// Next field: 3 -message SignerListSet -{ - SignerQuorum signer_quorum = 1; - - repeated SignerEntry signer_entries = 2; -} - -// Next field: 2 -message TicketCreate -{ - TicketCount count = 1; -} - -// Next field: 4 -message TrustSet -{ - LimitAmount limit_amount = 1; - - QualityIn quality_in = 2; - - QualityOut quality_out = 3; -} diff --git a/src/ripple/proto/org/xrpl/rpc/v1/xrp_ledger.proto b/src/ripple/proto/org/xrpl/rpc/v1/xrp_ledger.proto index 6f48d486087..995edba48a1 100644 --- a/src/ripple/proto/org/xrpl/rpc/v1/xrp_ledger.proto +++ b/src/ripple/proto/org/xrpl/rpc/v1/xrp_ledger.proto @@ -4,39 +4,18 @@ package org.xrpl.rpc.v1; option java_package = "org.xrpl.rpc.v1"; option java_multiple_files = true; -import "org/xrpl/rpc/v1/get_account_info.proto"; -import "org/xrpl/rpc/v1/get_fee.proto"; -import "org/xrpl/rpc/v1/submit.proto"; -import "org/xrpl/rpc/v1/get_transaction.proto"; -import "org/xrpl/rpc/v1/get_account_transaction_history.proto"; import "org/xrpl/rpc/v1/get_ledger.proto"; import "org/xrpl/rpc/v1/get_ledger_entry.proto"; import 
"org/xrpl/rpc/v1/get_ledger_data.proto"; import "org/xrpl/rpc/v1/get_ledger_diff.proto"; -// RPCs available to interact with the XRP Ledger. -// The gRPC API mimics the JSON API. Refer to xrpl.org for documentation +// These methods are binary only methods for retrieiving arbitrary ledger state +// via gRPC. These methods are used by clio and reporting mode, but can also be +// used by any client that wants to extract ledger state in an efficient manner. +// They do not directly mimic the JSON equivalent methods. service XRPLedgerAPIService { - // Get account info for an account on the XRP Ledger. - rpc GetAccountInfo (GetAccountInfoRequest) returns (GetAccountInfoResponse); - - // Get the fee for a transaction on the XRP Ledger. - rpc GetFee (GetFeeRequest) returns (GetFeeResponse); - - // Submit a signed transaction to the XRP Ledger. - rpc SubmitTransaction (SubmitTransactionRequest) returns (SubmitTransactionResponse); - - // Get the status of a transaction - rpc GetTransaction(GetTransactionRequest) returns (GetTransactionResponse); - - // Get all validated transactions associated with a given account - rpc GetAccountTransactionHistory(GetAccountTransactionHistoryRequest) returns (GetAccountTransactionHistoryResponse); - - ///////////////////////////////////////////////////////////////////////////// - // The below methods do not mimic the JSON API exactly, and are mostly binary - // Get a specific ledger, optionally including transactions and any modified, // added or deleted ledger objects rpc GetLedger(GetLedgerRequest) returns (GetLedgerResponse); diff --git a/src/ripple/rpc/DeliveredAmount.h b/src/ripple/rpc/DeliveredAmount.h index 8b417b95446..94fff68f7bd 100644 --- a/src/ripple/rpc/DeliveredAmount.h +++ b/src/ripple/rpc/DeliveredAmount.h @@ -22,7 +22,6 @@ #include #include -#include #include #include diff --git a/src/ripple/rpc/GRPCHandlers.h b/src/ripple/rpc/GRPCHandlers.h index d7b1cc3a927..493de7a5c47 100644 --- a/src/ripple/rpc/GRPCHandlers.h +++ b/src/ripple/rpc/GRPCHandlers.h @@ -34,28 +34,6 @@ namespace ripple { * the status will be sent to the client, and the response will be ommitted */ -std::pair -doAccountInfoGrpc( - RPC::GRPCContext& context); - -std::pair -doFeeGrpc(RPC::GRPCContext& context); - -std::pair -doSubmitGrpc( - RPC::GRPCContext& context); - -// NOTE, this only supports Payment transactions at this time -std::pair -doTxGrpc(RPC::GRPCContext& context); - -std::pair< - org::xrpl::rpc::v1::GetAccountTransactionHistoryResponse, - grpc::Status> -doAccountTxGrpc( - RPC::GRPCContext& - context); - std::pair doLedgerGrpc(RPC::GRPCContext& context); diff --git a/src/ripple/rpc/handlers/AccountInfo.cpp b/src/ripple/rpc/handlers/AccountInfo.cpp index f5432dc65a1..f08805761ee 100644 --- a/src/ripple/rpc/handlers/AccountInfo.cpp +++ b/src/ripple/rpc/handlers/AccountInfo.cpp @@ -27,7 +27,6 @@ #include #include #include -#include #include #include @@ -222,95 +221,4 @@ doAccountInfo(RPC::JsonContext& context) return result; } -std::pair -doAccountInfoGrpc( - RPC::GRPCContext& context) -{ - // Return values - org::xrpl::rpc::v1::GetAccountInfoResponse result; - grpc::Status status = grpc::Status::OK; - - // input - org::xrpl::rpc::v1::GetAccountInfoRequest& params = context.params; - - // get ledger - std::shared_ptr ledger; - auto lgrStatus = RPC::ledgerFromRequest(ledger, context); - if (lgrStatus || !ledger) - { - grpc::Status errorStatus; - if (lgrStatus.toErrorCode() == rpcINVALID_PARAMS) - { - errorStatus = grpc::Status( - grpc::StatusCode::INVALID_ARGUMENT, 
lgrStatus.message()); - } - else - { - errorStatus = - grpc::Status(grpc::StatusCode::NOT_FOUND, lgrStatus.message()); - } - return {result, errorStatus}; - } - - result.set_ledger_index(ledger->info().seq); - result.set_validated( - RPC::isValidated(context.ledgerMaster, *ledger, context.app)); - - // decode account - AccountID accountID; - std::string strIdent = params.account().address(); - error_code_i code = - RPC::accountFromStringWithCode(accountID, strIdent, params.strict()); - if (code != rpcSUCCESS) - { - grpc::Status errorStatus{ - grpc::StatusCode::INVALID_ARGUMENT, "invalid account"}; - return {result, errorStatus}; - } - - // get account data - auto const sleAccepted = ledger->read(keylet::account(accountID)); - if (sleAccepted) - { - RPC::convert(*result.mutable_account_data(), *sleAccepted); - - // signer lists - if (params.signer_lists()) - { - auto const sleSigners = ledger->read(keylet::signers(accountID)); - if (sleSigners) - { - org::xrpl::rpc::v1::SignerList& signerListProto = - *result.mutable_signer_list(); - RPC::convert(signerListProto, *sleSigners); - } - } - - // queued transactions - if (params.queue()) - { - if (!ledger->open()) - { - grpc::Status errorStatus{ - grpc::StatusCode::INVALID_ARGUMENT, - "requested queue but ledger is not open"}; - return {result, errorStatus}; - } - std::vector const txs = - context.app.getTxQ().getAccountTxs(accountID); - org::xrpl::rpc::v1::QueueData& queueData = - *result.mutable_queue_data(); - RPC::convert(queueData, txs); - } - } - else - { - grpc::Status errorStatus{ - grpc::StatusCode::NOT_FOUND, "account not found"}; - return {result, errorStatus}; - } - - return {result, status}; -} - } // namespace ripple diff --git a/src/ripple/rpc/handlers/AccountTx.cpp b/src/ripple/rpc/handlers/AccountTx.cpp index e5e7d54382a..67c80ad9bdc 100644 --- a/src/ripple/rpc/handlers/AccountTx.cpp +++ b/src/ripple/rpc/handlers/AccountTx.cpp @@ -35,7 +35,6 @@ #include #include #include -#include #include #include @@ -51,69 +50,6 @@ using AccountTxResult = RelationalDatabase::AccountTxResult; using LedgerShortcut = RelationalDatabase::LedgerShortcut; using LedgerSpecifier = RelationalDatabase::LedgerSpecifier; -// parses args into a ledger specifier, or returns a grpc status object on error -std::variant, grpc::Status> -parseLedgerArgs( - org::xrpl::rpc::v1::GetAccountTransactionHistoryRequest const& params) -{ - grpc::Status status; - if (params.has_ledger_range()) - { - uint32_t min = params.ledger_range().ledger_index_min(); - uint32_t max = params.ledger_range().ledger_index_max(); - - // if min is set but not max, need to set max - if (min != 0 && max == 0) - { - max = UINT32_MAX; - } - - return LedgerRange{min, max}; - } - else if (params.has_ledger_specifier()) - { - LedgerSpecifier ledger; - - auto& specifier = params.ledger_specifier(); - using LedgerCase = org::xrpl::rpc::v1::LedgerSpecifier::LedgerCase; - LedgerCase ledgerCase = specifier.ledger_case(); - - if (ledgerCase == LedgerCase::kShortcut) - { - using LedgerSpecifier = org::xrpl::rpc::v1::LedgerSpecifier; - - if (specifier.shortcut() == LedgerSpecifier::SHORTCUT_VALIDATED) - ledger = LedgerShortcut::VALIDATED; - else if (specifier.shortcut() == LedgerSpecifier::SHORTCUT_CLOSED) - ledger = LedgerShortcut::CLOSED; - else if (specifier.shortcut() == LedgerSpecifier::SHORTCUT_CURRENT) - ledger = LedgerShortcut::CURRENT; - else - return {}; - } - else if (ledgerCase == LedgerCase::kSequence) - { - ledger = specifier.sequence(); - } - else if (ledgerCase == LedgerCase::kHash) - { - 
if (auto hash = uint256::fromVoidChecked(specifier.hash())) - { - ledger = *hash; - } - else - { - grpc::Status errorStatus{ - grpc::StatusCode::INVALID_ARGUMENT, - "ledger hash malformed"}; - return errorStatus; - } - } - return ledger; - } - return std::optional{}; -} - // parses args into a ledger specifier, or returns a Json object on error std::variant, Json::Value> parseLedgerArgs(Json::Value const& params) @@ -331,131 +267,6 @@ doAccountTxHelp(RPC::Context& context, AccountTxArgs const& args) return {result, rpcSUCCESS}; } -std::pair< - org::xrpl::rpc::v1::GetAccountTransactionHistoryResponse, - grpc::Status> -populateProtoResponse( - std::pair const& res, - AccountTxArgs const& args, - RPC::GRPCContext< - org::xrpl::rpc::v1::GetAccountTransactionHistoryRequest> const& context) -{ - org::xrpl::rpc::v1::GetAccountTransactionHistoryResponse response; - grpc::Status status = grpc::Status::OK; - - RPC::Status const& error = res.second; - if (error.toErrorCode() != rpcSUCCESS) - { - if (error.toErrorCode() == rpcLGR_NOT_FOUND) - { - status = {grpc::StatusCode::NOT_FOUND, error.message()}; - } - else if (error.toErrorCode() == rpcNOT_SYNCED) - { - status = {grpc::StatusCode::FAILED_PRECONDITION, error.message()}; - } - else - { - status = {grpc::StatusCode::INVALID_ARGUMENT, error.message()}; - } - } - else - { - AccountTxResult const& result = res.first; - - // account_tx always returns validated data - response.set_validated(true); - response.set_limit(result.limit); - response.mutable_account()->set_address( - context.params.account().address()); - response.set_ledger_index_min(result.ledgerRange.min); - response.set_ledger_index_max(result.ledgerRange.max); - - if (auto txnsData = std::get_if(&result.transactions)) - { - assert(!args.binary); - for (auto const& [txn, txnMeta] : *txnsData) - { - if (txn) - { - auto txnProto = response.add_transactions(); - - RPC::convert( - *txnProto->mutable_transaction(), - txn->getSTransaction()); - - // account_tx always returns validated data - txnProto->set_validated(true); - txnProto->set_ledger_index(txn->getLedger()); - auto& hash = txn->getID(); - txnProto->set_hash(hash.data(), hash.size()); - auto closeTime = - context.app.getLedgerMaster().getCloseTimeBySeq( - txn->getLedger()); - if (closeTime) - txnProto->mutable_date()->set_value( - closeTime->time_since_epoch().count()); - if (txnMeta) - { - RPC::convert(*txnProto->mutable_meta(), txnMeta); - if (!txnProto->meta().has_delivered_amount()) - { - if (auto amt = getDeliveredAmount( - context, - txn->getSTransaction(), - *txnMeta, - txn->getLedger())) - { - RPC::convert( - *txnProto->mutable_meta() - ->mutable_delivered_amount(), - *amt); - } - } - } - } - } - } - else - { - assert(args.binary); - - for (auto const& binaryData : - std::get(result.transactions)) - { - auto txnProto = response.add_transactions(); - Blob const& txnBlob = std::get<0>(binaryData); - txnProto->set_transaction_binary( - txnBlob.data(), txnBlob.size()); - - Blob const& metaBlob = std::get<1>(binaryData); - txnProto->set_meta_binary(metaBlob.data(), metaBlob.size()); - - txnProto->set_ledger_index(std::get<2>(binaryData)); - - // account_tx always returns validated data - txnProto->set_validated(true); - - auto closeTime = - context.app.getLedgerMaster().getCloseTimeBySeq( - std::get<2>(binaryData)); - if (closeTime) - txnProto->mutable_date()->set_value( - closeTime->time_since_epoch().count()); - } - } - - if (result.marker) - { - response.mutable_marker()->set_ledger_index( - result.marker->ledgerSeq); - 
response.mutable_marker()->set_account_sequence( - result.marker->txnSeq); - } - } - return {response, status}; -} - Json::Value populateJsonResponse( std::pair const& res, @@ -597,59 +408,4 @@ doAccountTxJson(RPC::JsonContext& context) return populateJsonResponse(res, args, context); } -std::pair< - org::xrpl::rpc::v1::GetAccountTransactionHistoryResponse, - grpc::Status> -doAccountTxGrpc( - RPC::GRPCContext& - context) -{ - if (!context.app.config().useTxTables()) - { - return { - {}, - {grpc::StatusCode::UNIMPLEMENTED, "Not enabled in configuration."}}; - } - - // return values - org::xrpl::rpc::v1::GetAccountTransactionHistoryResponse response; - grpc::Status status = grpc::Status::OK; - AccountTxArgs args; - - auto& request = context.params; - - auto const account = parseBase58(request.account().address()); - if (!account) - { - return { - {}, - {grpc::StatusCode::INVALID_ARGUMENT, "Could not decode account"}}; - } - - args.account = *account; - args.limit = request.limit(); - args.binary = request.binary(); - args.forward = request.forward(); - - if (request.has_marker()) - { - args.marker = { - request.marker().ledger_index(), - request.marker().account_sequence()}; - } - - auto parseRes = parseLedgerArgs(request); - if (auto stat = std::get_if(&parseRes)) - { - return {response, *stat}; - } - else - { - args.ledger = std::get>(parseRes); - } - - auto res = doAccountTxHelp(context, args); - return populateProtoResponse(res, args, context); -} - } // namespace ripple diff --git a/src/ripple/rpc/handlers/Fee1.cpp b/src/ripple/rpc/handlers/Fee1.cpp index f28f9b4475a..554480f10af 100644 --- a/src/ripple/rpc/handlers/Fee1.cpp +++ b/src/ripple/rpc/handlers/Fee1.cpp @@ -38,49 +38,4 @@ doFee(RPC::JsonContext& context) return context.params; } -std::pair -doFeeGrpc(RPC::GRPCContext& context) -{ - org::xrpl::rpc::v1::GetFeeResponse reply; - grpc::Status status = grpc::Status::OK; - - Application& app = context.app; - auto const view = app.openLedger().current(); - if (!view) - { - BOOST_ASSERT(false); - return {reply, status}; - } - - auto const metrics = app.getTxQ().getMetrics(*view); - - // current ledger data - reply.set_current_ledger_size(metrics.txInLedger); - reply.set_current_queue_size(metrics.txCount); - reply.set_expected_ledger_size(metrics.txPerLedger); - reply.set_ledger_current_index(view->info().seq); - reply.set_max_queue_size(*metrics.txQMaxSize); - - // fee levels data - org::xrpl::rpc::v1::FeeLevels& levels = *reply.mutable_levels(); - levels.set_median_level(metrics.medFeeLevel.fee()); - levels.set_minimum_level(metrics.minProcessingFeeLevel.fee()); - levels.set_open_ledger_level(metrics.openLedgerFeeLevel.fee()); - levels.set_reference_level(metrics.referenceFeeLevel.fee()); - - // fee data - org::xrpl::rpc::v1::Fee& fee = *reply.mutable_fee(); - auto const baseFee = view->fees().base; - fee.mutable_base_fee()->set_drops( - toDrops(metrics.referenceFeeLevel, baseFee).drops()); - fee.mutable_minimum_fee()->set_drops( - toDrops(metrics.minProcessingFeeLevel, baseFee).drops()); - fee.mutable_median_fee()->set_drops( - toDrops(metrics.medFeeLevel, baseFee).drops()); - - fee.mutable_open_ledger_fee()->set_drops( - (toDrops(metrics.openLedgerFeeLevel - FeeLevel64{1}, baseFee) + 1) - .drops()); - return {reply, status}; -} } // namespace ripple diff --git a/src/ripple/rpc/handlers/LedgerData.cpp b/src/ripple/rpc/handlers/LedgerData.cpp index fbc17b75b7b..7392b505126 100644 --- a/src/ripple/rpc/handlers/LedgerData.cpp +++ b/src/ripple/rpc/handlers/LedgerData.cpp @@ -25,7 +25,6 
@@ #include #include #include -#include #include #include diff --git a/src/ripple/rpc/handlers/LedgerEntry.cpp b/src/ripple/rpc/handlers/LedgerEntry.cpp index 4b2526698b4..fff2ceac01c 100644 --- a/src/ripple/rpc/handlers/LedgerEntry.cpp +++ b/src/ripple/rpc/handlers/LedgerEntry.cpp @@ -27,7 +27,6 @@ #include #include #include -#include #include namespace ripple { diff --git a/src/ripple/rpc/handlers/LedgerHandler.cpp b/src/ripple/rpc/handlers/LedgerHandler.cpp index e28b181fcba..b3a441f48a0 100644 --- a/src/ripple/rpc/handlers/LedgerHandler.cpp +++ b/src/ripple/rpc/handlers/LedgerHandler.cpp @@ -27,7 +27,6 @@ #include #include #include -#include #include namespace ripple { diff --git a/src/ripple/rpc/handlers/Submit.cpp b/src/ripple/rpc/handlers/Submit.cpp index 220760172cc..2b5c8bba925 100644 --- a/src/ripple/rpc/handlers/Submit.cpp +++ b/src/ripple/rpc/handlers/Submit.cpp @@ -26,7 +26,6 @@ #include #include #include -#include #include #include @@ -194,102 +193,4 @@ doSubmit(RPC::JsonContext& context) } } -std::pair -doSubmitGrpc( - RPC::GRPCContext& context) -{ - // return values - org::xrpl::rpc::v1::SubmitTransactionResponse result; - grpc::Status status = grpc::Status::OK; - - // input - auto request = context.params; - - std::string const& tx = request.signed_transaction(); - - // convert to blob - Blob blob{tx.begin(), tx.end()}; - - // serialize - SerialIter sitTrans(makeSlice(blob)); - std::shared_ptr stpTrans; - try - { - stpTrans = std::make_shared(std::ref(sitTrans)); - } - catch (std::exception& e) - { - grpc::Status errorStatus{ - grpc::StatusCode::INVALID_ARGUMENT, - "invalid transaction: " + std::string(e.what())}; - return {result, errorStatus}; - } - - // check validity - { - if (!context.app.checkSigs()) - forceValidity( - context.app.getHashRouter(), - stpTrans->getTransactionID(), - Validity::SigGoodOnly); - auto [validity, reason] = checkValidity( - context.app.getHashRouter(), - *stpTrans, - context.ledgerMaster.getCurrentLedger()->rules(), - context.app.config()); - if (validity != Validity::Valid) - { - grpc::Status errorStatus{ - grpc::StatusCode::INVALID_ARGUMENT, - "invalid transaction: " + reason}; - return {result, errorStatus}; - } - } - - std::string reason; - auto tpTrans = std::make_shared(stpTrans, reason, context.app); - if (tpTrans->getStatus() != NEW) - { - grpc::Status errorStatus{ - grpc::StatusCode::INVALID_ARGUMENT, - "invalid transaction: " + reason}; - return {result, errorStatus}; - } - - try - { - auto const failType = NetworkOPs::doFailHard(request.fail_hard()); - - // submit to network - context.netOps.processTransaction( - tpTrans, isUnlimited(context.role), true, failType); - } - catch (std::exception& e) - { - grpc::Status errorStatus{ - grpc::StatusCode::INVALID_ARGUMENT, - "invalid transaction : " + std::string(e.what())}; - return {result, errorStatus}; - } - - // return preliminary result - if (temUNCERTAIN != tpTrans->getResult()) - { - RPC::convert(*result.mutable_engine_result(), tpTrans->getResult()); - - std::string sToken; - std::string sHuman; - - transResultInfo(tpTrans->getResult(), sToken, sHuman); - - result.mutable_engine_result()->set_result(sToken); - result.set_engine_result_code(TERtoInt(tpTrans->getResult())); - result.set_engine_result_message(sHuman); - - uint256 hash = tpTrans->getID(); - result.set_hash(hash.data(), hash.size()); - } - return {result, status}; -} - } // namespace ripple diff --git a/src/ripple/rpc/handlers/Tx.cpp b/src/ripple/rpc/handlers/Tx.cpp index aa941d795c3..4a70f1fe061 100644 --- 
a/src/ripple/rpc/handlers/Tx.cpp +++ b/src/ripple/rpc/handlers/Tx.cpp @@ -28,7 +28,6 @@ #include #include #include -#include #include namespace ripple { @@ -251,101 +250,6 @@ doTxHelp(RPC::Context& context, TxArgs const& args) return {result, rpcSUCCESS}; } -std::pair -populateProtoResponse( - std::pair const& res, - TxArgs const& args, - RPC::GRPCContext const& context) -{ - org::xrpl::rpc::v1::GetTransactionResponse response; - grpc::Status status = grpc::Status::OK; - RPC::Status const& error = res.second; - TxResult const& result = res.first; - // handle errors - if (error.toErrorCode() != rpcSUCCESS) - { - if (error.toErrorCode() == rpcTXN_NOT_FOUND && - result.searchedAll != TxSearched::unknown) - { - status = { - grpc::StatusCode::NOT_FOUND, - "txn not found. searched_all = " + - to_string( - (result.searchedAll == TxSearched::all ? "true" - : "false"))}; - } - else - { - if (error.toErrorCode() == rpcTXN_NOT_FOUND) - status = {grpc::StatusCode::NOT_FOUND, "txn not found"}; - else - status = {grpc::StatusCode::INTERNAL, error.message()}; - } - } - // no errors - else if (result.txn) - { - auto& txn = result.txn; - - std::shared_ptr stTxn = txn->getSTransaction(); - if (args.binary) - { - Serializer s = stTxn->getSerializer(); - response.set_transaction_binary(s.data(), s.size()); - } - else - { - RPC::convert(*response.mutable_transaction(), stTxn); - } - - response.set_hash(context.params.hash()); - - auto ledgerIndex = txn->getLedger(); - response.set_ledger_index(ledgerIndex); - if (ledgerIndex) - { - auto ct = - context.app.getLedgerMaster().getCloseTimeBySeq(ledgerIndex); - if (ct) - response.mutable_date()->set_value( - ct->time_since_epoch().count()); - } - - RPC::convert( - *response.mutable_meta()->mutable_transaction_result(), - txn->getResult()); - response.mutable_meta()->mutable_transaction_result()->set_result( - transToken(txn->getResult())); - - // populate binary metadata - if (auto blob = std::get_if(&result.meta)) - { - assert(args.binary); - Slice slice = makeSlice(*blob); - response.set_meta_binary(slice.data(), slice.size()); - } - // populate meta data - else if (auto m = std::get_if>(&result.meta)) - { - auto& meta = *m; - if (meta) - { - RPC::convert(*response.mutable_meta(), meta); - auto amt = - getDeliveredAmount(context, stTxn, *meta, txn->getLedger()); - if (amt) - { - RPC::convert( - *response.mutable_meta()->mutable_delivered_amount(), - *amt); - } - } - } - response.set_validated(result.validated); - } - return {response, status}; -} - Json::Value populateJsonResponse( std::pair const& res, @@ -437,48 +341,4 @@ doTxJson(RPC::JsonContext& context) return populateJsonResponse(res, args, context); } -std::pair -doTxGrpc(RPC::GRPCContext& context) -{ - if (!context.app.config().useTxTables()) - { - return { - {}, - {grpc::StatusCode::UNIMPLEMENTED, "Not enabled in configuration."}}; - } - - // return values - org::xrpl::rpc::v1::GetTransactionResponse response; - grpc::Status status = grpc::Status::OK; - - // input - org::xrpl::rpc::v1::GetTransactionRequest& request = context.params; - - TxArgs args; - - if (auto hash = uint256::fromVoidChecked(request.hash())) - { - args.hash = *hash; - } - else - { - grpc::Status errorStatus{ - grpc::StatusCode::INVALID_ARGUMENT, "tx hash malformed"}; - return {response, errorStatus}; - } - - args.binary = request.binary(); - - if (request.ledger_range().ledger_index_min() != 0 && - request.ledger_range().ledger_index_max() != 0) - { - args.ledgerRange = std::make_pair( - request.ledger_range().ledger_index_min(), - 
request.ledger_range().ledger_index_max()); - } - - std::pair res = doTxHelp(context, args); - return populateProtoResponse(res, args, context); -} - } // namespace ripple diff --git a/src/ripple/rpc/impl/GRPCHelpers.cpp b/src/ripple/rpc/impl/GRPCHelpers.cpp deleted file mode 100644 index e06512ce2c8..00000000000 --- a/src/ripple/rpc/impl/GRPCHelpers.cpp +++ /dev/null @@ -1,2192 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include - -namespace ripple { -namespace RPC { - -// In the below populateProto* functions, getProto is a function that returns -// a reference to the mutable protobuf message to be populated. The reason this -// is a function, as opposed to just a pointer or reference to the object, -// is that there is no way to get a non-const reference, and getting a pointer -// to the proto object causes default initialization of the object. However, -// if the corresponding field is not present in the STObject, we don't want to -// initialize the proto object. 
To get around this, getProto is a function that -// is called only if the field is present in the STObject -template -void -populateProtoPrimitive( - L const& getProto, - STObject const& from, - TypedField const& field) -{ - if (!from.isFieldPresent(field)) - return; - - if constexpr (std::is_integral_v) - { - getProto()->set_value(from[field]); - } - else - { - auto v = from[field]; - getProto()->set_value(v.data(), v.size()); - } -} - -template -void -populateProtoVLasString( - T const& getProto, - STObject const& from, - SF_VL const& field) -{ - if (from.isFieldPresent(field)) - { - auto data = from.getFieldVL(field); - getProto()->set_value( - reinterpret_cast(data.data()), data.size()); - } -} - -template -void -populateProtoVec256( - T const& getProto, - STObject const& from, - SF_VECTOR256 const& field) -{ - if (from.isFieldPresent(field)) - { - const STVector256& vec = from.getFieldV256(field); - for (size_t i = 0; i < vec.size(); ++i) - { - uint256 const& elt = vec[i]; - getProto()->set_value(elt.data(), elt.size()); - } - } -} - -template -void -populateProtoAccount( - T const& getProto, - STObject const& from, - SF_ACCOUNT const& field) -{ - if (from.isFieldPresent(field)) - { - getProto()->mutable_value()->set_address( - toBase58(from.getAccountID(field))); - } -} - -template -void -populateProtoAmount( - T const& getProto, - STObject const& from, - SF_AMOUNT const& field) -{ - if (from.isFieldPresent(field)) - { - auto amount = from.getFieldAmount(field); - convert(*getProto(), amount); - } -} - -template -void -populateProtoCurrency( - T const& getProto, - STObject const& from, - SF_UINT160 const& field) -{ - if (from.isFieldPresent(field)) - { - auto cur = from.getFieldH160(field); - auto proto = getProto()->mutable_value(); - proto->set_code(cur.data(), cur.size()); - proto->set_name(to_string(cur)); - } -} - -template -void -populateProtoArray( - T const& getProto, - R const& populateProto, - STObject const& from, - SField const& outerField, - SField const& innerField) -{ - if (from.isFieldPresent(outerField) && - from.peekAtField(outerField).getSType() == SerializedTypeID::STI_ARRAY) - { - auto arr = from.getFieldArray(outerField); - for (auto it = arr.begin(); it != arr.end(); ++it) - { - populateProto(*it, *getProto()); - } - } -} - -template -void -populateClearFlag(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_clear_flag(); }, from, sfClearFlag); -} - -template -void -populateDomain(T& to, STObject const& from) -{ - populateProtoVLasString( - [&to]() { return to.mutable_domain(); }, from, sfDomain); -} - -template -void -populateEmailHash(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_email_hash(); }, from, sfEmailHash); -} - -template -void -populateMessageKey(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_message_key(); }, from, sfMessageKey); -} - -template -void -populateSetFlag(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_set_flag(); }, from, sfSetFlag); -} - -template -void -populateTransferRate(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_transfer_rate(); }, from, sfTransferRate); -} - -template -void -populateTickSize(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_tick_size(); }, from, sfTickSize); -} - -template -void -populateExpiration(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return 
to.mutable_expiration(); }, from, sfExpiration); -} - -template -void -populateOfferSequence(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_offer_sequence(); }, from, sfOfferSequence); -} - -template -void -populateTakerGets(T& to, STObject const& from) -{ - populateProtoAmount( - [&to]() { return to.mutable_taker_gets(); }, from, sfTakerGets); -} - -template -void -populateTakerPays(T& to, STObject const& from) -{ - populateProtoAmount( - [&to]() { return to.mutable_taker_pays(); }, from, sfTakerPays); -} - -template -void -populateDestination(T& to, STObject const& from) -{ - populateProtoAccount( - [&to]() { return to.mutable_destination(); }, from, sfDestination); -} - -template -void -populateCheckID(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_check_id(); }, from, sfCheckID); -} - -template -void -populateAmount(T& to, STObject const& from) -{ - populateProtoAmount( - [&to]() { return to.mutable_amount(); }, from, sfAmount); -} - -template -void -populateDeliverMin(T& to, STObject const& from) -{ - populateProtoAmount( - [&to]() { return to.mutable_deliver_min(); }, from, sfDeliverMin); -} - -template -void -populateSendMax(T& to, STObject const& from) -{ - populateProtoAmount( - [&to]() { return to.mutable_send_max(); }, from, sfSendMax); -} - -template -void -populateDeliveredAmount(T& to, STObject const& from) -{ - populateProtoAmount( - [&to]() { return to.mutable_delivered_amount(); }, - from, - sfDeliveredAmount); -} - -template -void -populateDestinationTag(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_destination_tag(); }, - from, - sfDestinationTag); -} - -template -void -populateInvoiceID(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_invoice_id(); }, from, sfInvoiceID); -} - -template -void -populateAuthorize(T& to, STObject const& from) -{ - populateProtoAccount( - [&to]() { return to.mutable_authorize(); }, from, sfAuthorize); -} - -template -void -populateUnauthorize(T& to, STObject const& from) -{ - populateProtoAccount( - [&to]() { return to.mutable_unauthorize(); }, from, sfUnauthorize); -} - -template -void -populateOwner(T& to, STObject const& from) -{ - populateProtoAccount([&to]() { return to.mutable_owner(); }, from, sfOwner); -} - -template -void -populateCancelAfter(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_cancel_after(); }, from, sfCancelAfter); -} - -template -void -populateFinishAfter(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_finish_after(); }, from, sfFinishAfter); -} - -template -void -populateCondition(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_condition(); }, from, sfCondition); -} - -template -void -populateFulfillment(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_fulfillment(); }, from, sfFulfillment); -} - -template -void -populateChannel(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_channel(); }, from, sfChannel); -} - -template -void -populateBalance(T& to, STObject const& from) -{ - populateProtoAmount( - [&to]() { return to.mutable_balance(); }, from, sfBalance); -} - -template -void -populatePaymentChannelSignature(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_payment_channel_signature(); }, - from, - sfSignature); -} - 
-template -void -populatePublicKey(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_public_key(); }, from, sfPublicKey); -} - -template -void -populateSettleDelay(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_settle_delay(); }, from, sfSettleDelay); -} - -template -void -populateRegularKey(T& to, STObject const& from) -{ - populateProtoAccount( - [&to]() { return to.mutable_regular_key(); }, from, sfRegularKey); -} - -template -void -populateSignerQuorum(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_signer_quorum(); }, from, sfSignerQuorum); -} - -template -void -populateTicketCount(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_count(); }, from, sfTicketCount); -} - -template -void -populateLimitAmount(T& to, STObject const& from) -{ - populateProtoAmount( - [&to]() { return to.mutable_limit_amount(); }, from, sfLimitAmount); -} -template -void -populateQualityIn(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_quality_in(); }, from, sfQualityIn); -} - -template -void -populateQualityOut(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_quality_out(); }, from, sfQualityOut); -} - -template -void -populateAccount(T& to, STObject const& from) -{ - populateProtoAccount( - [&to]() { return to.mutable_account(); }, from, sfAccount); -} - -template -void -populateFee(T& to, STObject const& from) -{ - if (from.isFieldPresent(sfFee)) - { - to.mutable_fee()->set_drops(from.getFieldAmount(sfFee).xrp().drops()); - } -} - -template -void -populateSigningPublicKey(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_signing_public_key(); }, - from, - sfSigningPubKey); -} - -template -void -populateTransactionSignature(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_transaction_signature(); }, - from, - sfTxnSignature); -} - -template -void -populateFlags(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_flags(); }, from, sfFlags); -} - -template -void -populateFirstLedgerSequence(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_ledger_sequence(); }, - from, - sfFirstLedgerSequence); -} - -template -void -populateValidatorToDisable(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_validator_to_disable(); }, - from, - sfValidatorToDisable); -} - -template -void -populateValidatorToReEnable(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_validator_to_re_enable(); }, - from, - sfValidatorToReEnable); -} - -template -void -populateLastLedgerSequence(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_last_ledger_sequence(); }, - from, - sfLastLedgerSequence); -} - -template -void -populateSourceTag(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_source_tag(); }, from, sfSourceTag); -} - -template -void -populateAccountTransactionID(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_account_transaction_id(); }, - from, - sfAccountTxnID); -} - -template -void -populateMemoData(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_memo_data(); }, from, sfMemoData); -} - -template -void 
-populateMemoFormat(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_memo_format(); }, from, sfMemoFormat); -} - -template -void -populateMemoType(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_memo_type(); }, from, sfMemoType); -} - -template -void -populateSequence(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_sequence(); }, from, sfSequence); -} - -template -void -populateAmendment(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_amendment(); }, from, sfAmendment); -} - -template -void -populateCloseTime(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_close_time(); }, from, sfCloseTime); -} - -template -void -populateSignerWeight(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_signer_weight(); }, from, sfSignerWeight); -} - -template -void -populateAmendments(T& to, STObject const& from) -{ - populateProtoVec256( - [&to]() { return to.add_amendments(); }, from, sfAmendments); -} - -template -void -populateOwnerCount(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_owner_count(); }, from, sfOwnerCount); -} - -template -void -populatePreviousTransactionID(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_previous_transaction_id(); }, - from, - sfPreviousTxnID); -} - -template -void -populatePreviousTransactionLedgerSequence(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_previous_transaction_ledger_sequence(); }, - from, - sfPreviousTxnLgrSeq); -} - -template -void -populateLowLimit(T& to, STObject const& from) -{ - populateProtoAmount( - [&to]() { return to.mutable_low_limit(); }, from, sfLowLimit); -} - -template -void -populateHighLimit(T& to, STObject const& from) -{ - populateProtoAmount( - [&to]() { return to.mutable_high_limit(); }, from, sfHighLimit); -} - -template -void -populateLowNode(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_low_node(); }, from, sfLowNode); -} - -template -void -populateHighNode(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_high_node(); }, from, sfHighNode); -} - -template -void -populateLowQualityIn(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_low_quality_in(); }, from, sfLowQualityIn); -} - -template -void -populateLowQualityOut(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_low_quality_out(); }, - from, - sfLowQualityOut); -} - -template -void -populateHighQualityIn(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_high_quality_in(); }, - from, - sfHighQualityIn); -} - -template -void -populateHighQualityOut(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_high_quality_out(); }, - from, - sfHighQualityOut); -} - -template -void -populateBookDirectory(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_book_directory(); }, from, sfBookDirectory); -} - -template -void -populateBookNode(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_book_node(); }, from, sfBookNode); -} - -template -void -populateOwnerNode(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return 
to.mutable_owner_node(); }, from, sfOwnerNode); -} - -template -void -populateSignerListID(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_signer_list_id(); }, from, sfSignerListID); -} - -template -void -populateWalletLocator(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_wallet_locator(); }, from, sfWalletLocator); -} - -template -void -populateTicketSequence(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_ticket_sequence(); }, - from, - sfTicketSequence); -} - -template -void -populateHashes(T& to, STObject const& from) -{ - populateProtoVec256([&to]() { return to.add_hashes(); }, from, sfHashes); -} - -template -void -populateIndexes(T& to, STObject const& from) -{ - populateProtoVec256([&to]() { return to.add_indexes(); }, from, sfIndexes); -} - -template -void -populateNFTokenOffers(T& to, STObject const& from) -{ - populateProtoVec256( - [&to]() { return to.add_nftoken_offers(); }, from, sfNFTokenOffers); -} - -template -void -populateRootIndex(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_root_index(); }, from, sfRootIndex); -} - -template -void -populateIndexNext(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_index_next(); }, from, sfIndexNext); -} - -template -void -populateIndexPrevious(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_index_previous(); }, from, sfIndexPrevious); -} - -template -void -populateTakerPaysCurrency(T& to, STObject const& from) -{ - populateProtoCurrency( - [&to]() { return to.mutable_taker_pays_currency(); }, - from, - sfTakerPaysCurrency); -} - -template -void -populateTakerPaysIssuer(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_taker_pays_issuer(); }, - from, - sfTakerPaysIssuer); -} - -template -void -populateTakerGetsCurrency(T& to, STObject const& from) -{ - populateProtoCurrency( - [&to]() { return to.mutable_taker_gets_currency(); }, - from, - sfTakerGetsCurrency); -} - -template -void -populateTakerGetsIssuer(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_taker_gets_issuer(); }, - from, - sfTakerGetsIssuer); -} - -template -void -populateDestinationNode(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_destination_node(); }, - from, - sfDestinationNode); -} - -template -void -populateBaseFee(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_base_fee(); }, from, sfBaseFee); -} - -template -void -populateReferenceFeeUnits(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_reference_fee_units(); }, - from, - sfReferenceFeeUnits); -} - -template -void -populatePreviousPageMin(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_previous_page_min(); }, - from, - sfPreviousPageMin); -} - -template -void -populateNextPageMin(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_next_page_min(); }, from, sfNextPageMin); -} - -template -void -populateNFTokenID(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_nftoken_id(); }, from, sfNFTokenID); -} - -template -void -populateURI(T& to, STObject const& from) -{ - populateProtoVLasString([&to]() { return to.mutable_uri(); }, from, sfURI); -} - -template -void 
-populateBurnedNFTokens(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_burned_nftokens(); }, - from, - sfBurnedNFTokens); -} - -template -void -populateMintedNFTokens(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_minted_nftokens(); }, - from, - sfMintedNFTokens); -} - -template -void -populateNFTokenMinter(T& to, STObject const& from) -{ - populateProtoAccount( - [&to]() { return to.mutable_nftoken_minter(); }, from, sfNFTokenMinter); -} - -template -void -populateNFTokenBrokerFee(T& to, STObject const& from) -{ - populateProtoAmount( - [&to]() { return to.mutable_nftoken_broker_fee(); }, - from, - sfNFTokenBrokerFee); -} - -template -void -populateNFTokenBuyOffer(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_nftoken_buy_offer(); }, - from, - sfNFTokenBuyOffer); -} - -template -void -populateNFTokenSellOffer(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_nftoken_sell_offer(); }, - from, - sfNFTokenSellOffer); -} - -template -void -populateIssuer(T& to, STObject const& from) -{ - populateProtoAccount( - [&to]() { return to.mutable_issuer(); }, from, sfIssuer); -} - -template -void -populateNFTokenTaxon(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_nftoken_taxon(); }, from, sfNFTokenTaxon); -} - -template -void -populateTransferFee(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_transfer_fee(); }, from, sfTransferFee); -} - -template -void -populateReserveBase(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_reserve_base(); }, from, sfReserveBase); -} - -template -void -populateReserveIncrement(T& to, STObject const& from) -{ - populateProtoPrimitive( - [&to]() { return to.mutable_reserve_increment(); }, - from, - sfReserveIncrement); -} - -template -void -populateSignerEntries(T& to, STObject const& from) -{ - populateProtoArray( - [&to]() { return to.add_signer_entries(); }, - [](auto& innerObj, auto& innerProto) { - populateAccount(innerProto, innerObj); - populateSignerWeight(innerProto, innerObj); - populateWalletLocator(innerProto, innerObj); - }, - from, - sfSignerEntries, - sfSignerEntry); -} - -template -void -populateDisabledValidators(T& to, STObject const& from) -{ - populateProtoArray( - [&to]() { return to.add_disabled_validators(); }, - [](auto& innerObj, auto& innerProto) { - populatePublicKey(innerProto, innerObj); - populateFirstLedgerSequence(innerProto, innerObj); - }, - from, - sfDisabledValidators, - sfDisabledValidator); -} - -template -void -populateMemos(T& to, STObject const& from) -{ - populateProtoArray( - [&to]() { return to.add_memos(); }, - [](auto& innerObj, auto& innerProto) { - populateMemoData(innerProto, innerObj); - populateMemoType(innerProto, innerObj); - populateMemoFormat(innerProto, innerObj); - }, - from, - sfMemos, - sfMemo); -} - -template -void -populateSigners(T& to, STObject const& from) -{ - populateProtoArray( - [&to]() { return to.add_signers(); }, - [](auto& innerObj, auto& innerProto) { - populateAccount(innerProto, innerObj); - populateTransactionSignature(innerProto, innerObj); - populateSigningPublicKey(innerProto, innerObj); - }, - from, - sfSigners, - sfSigner); -} - -template -void -populateMajorities(T& to, STObject const& from) -{ - populateProtoArray( - [&to]() { return to.add_majorities(); }, - [](auto innerObj, auto innerProto) { - 
populateAmendment(innerProto, innerObj); - populateCloseTime(innerProto, innerObj); - }, - from, - sfMajorities, - sfMajority); -} - -template -void -populateNFTokens(T& to, STObject const& from) -{ - populateProtoArray( - [&to]() { return to.add_nftokens(); }, - [](auto innerObj, auto innerProto) { - populateNFTokenID(innerProto, innerObj); - populateURI(innerProto, innerObj); - }, - from, - sfNFTokens, - sfNFToken); -} - -void -convert(org::xrpl::rpc::v1::TransactionResult& to, TER from) -{ - if (isTecClaim(from)) - { - to.set_result_type( - org::xrpl::rpc::v1::TransactionResult::RESULT_TYPE_TEC); - } - if (isTefFailure(from)) - { - to.set_result_type( - org::xrpl::rpc::v1::TransactionResult::RESULT_TYPE_TEF); - } - if (isTelLocal(from)) - { - to.set_result_type( - org::xrpl::rpc::v1::TransactionResult::RESULT_TYPE_TEL); - } - if (isTemMalformed(from)) - { - to.set_result_type( - org::xrpl::rpc::v1::TransactionResult::RESULT_TYPE_TEM); - } - if (isTerRetry(from)) - { - to.set_result_type( - org::xrpl::rpc::v1::TransactionResult::RESULT_TYPE_TER); - } - if (isTesSuccess(from)) - { - to.set_result_type( - org::xrpl::rpc::v1::TransactionResult::RESULT_TYPE_TES); - } -} - -void -convert(org::xrpl::rpc::v1::AccountSet& to, STObject const& from) -{ - populateClearFlag(to, from); - - populateDomain(to, from); - - populateEmailHash(to, from); - - populateMessageKey(to, from); - - populateNFTokenMinter(to, from); - - populateSetFlag(to, from); - - populateTransferRate(to, from); - - populateTickSize(to, from); -} - -void -convert(org::xrpl::rpc::v1::OfferCreate& to, STObject const& from) -{ - populateExpiration(to, from); - - populateOfferSequence(to, from); - - populateTakerGets(to, from); - - populateTakerPays(to, from); -} - -void -convert(org::xrpl::rpc::v1::OfferCancel& to, STObject const& from) -{ - populateOfferSequence(to, from); -} - -void -convert(org::xrpl::rpc::v1::AccountDelete& to, STObject const& from) -{ - populateDestination(to, from); -} - -void -convert(org::xrpl::rpc::v1::CheckCancel& to, STObject const& from) -{ - populateCheckID(to, from); -} - -void -convert(org::xrpl::rpc::v1::CheckCash& to, STObject const& from) -{ - populateCheckID(to, from); - - populateAmount(to, from); - - populateDeliverMin(to, from); -} - -void -convert(org::xrpl::rpc::v1::CheckCreate& to, STObject const& from) -{ - populateDestination(to, from); - - populateSendMax(to, from); - - populateDestinationTag(to, from); - - populateExpiration(to, from); - - populateInvoiceID(to, from); -} - -void -convert(org::xrpl::rpc::v1::DepositPreauth& to, STObject const& from) -{ - populateAuthorize(to, from); - - populateUnauthorize(to, from); -} - -void -convert(org::xrpl::rpc::v1::EscrowCancel& to, STObject const& from) -{ - populateOwner(to, from); - - populateOfferSequence(to, from); -} - -void -convert(org::xrpl::rpc::v1::EscrowCreate& to, STObject const& from) -{ - populateAmount(to, from); - - populateDestination(to, from); - - populateCancelAfter(to, from); - - populateFinishAfter(to, from); - - populateCondition(to, from); - - populateDestinationTag(to, from); -} - -void -convert(org::xrpl::rpc::v1::EscrowFinish& to, STObject const& from) -{ - populateOwner(to, from); - - populateOfferSequence(to, from); - - populateCondition(to, from); - - populateFulfillment(to, from); -} - -void -convert(org::xrpl::rpc::v1::NFTokenAcceptOffer& to, STObject const& from) -{ - populateNFTokenBrokerFee(to, from); - - populateNFTokenBuyOffer(to, from); - - populateNFTokenSellOffer(to, from); -} - -void 
-convert(org::xrpl::rpc::v1::NFTokenBurn& to, STObject const& from) -{ - populateOwner(to, from); - - populateNFTokenID(to, from); -} - -void -convert(org::xrpl::rpc::v1::NFTokenCancelOffer& to, STObject const& from) -{ - populateNFTokenOffers(to, from); -} - -void -convert(org::xrpl::rpc::v1::NFTokenCreateOffer& to, STObject const& from) -{ - populateAmount(to, from); - - populateDestination(to, from); - - populateExpiration(to, from); - - populateOwner(to, from); - - populateNFTokenID(to, from); -} - -void -convert(org::xrpl::rpc::v1::NFTokenMint& to, STObject const& from) -{ - populateIssuer(to, from); - - populateNFTokenTaxon(to, from); - - populateTransferFee(to, from); - - populateURI(to, from); -} - -void -convert(org::xrpl::rpc::v1::PaymentChannelClaim& to, STObject const& from) -{ - populateChannel(to, from); - - populateBalance(to, from); - - populateAmount(to, from); - - populatePaymentChannelSignature(to, from); - - populatePublicKey(to, from); -} - -void -convert(org::xrpl::rpc::v1::PaymentChannelCreate& to, STObject const& from) -{ - populateAmount(to, from); - - populateDestination(to, from); - - populateSettleDelay(to, from); - - populatePublicKey(to, from); - - populateCancelAfter(to, from); - - populateDestinationTag(to, from); -} - -void -convert(org::xrpl::rpc::v1::PaymentChannelFund& to, STObject const& from) -{ - populateChannel(to, from); - - populateAmount(to, from); - - populateExpiration(to, from); -} - -void -convert(org::xrpl::rpc::v1::SetRegularKey& to, STObject const& from) -{ - populateRegularKey(to, from); -} - -void -convert(org::xrpl::rpc::v1::SignerListSet& to, STObject const& from) -{ - populateSignerQuorum(to, from); - - populateSignerEntries(to, from); -} - -void -convert(org::xrpl::rpc::v1::TicketCreate& to, STObject const& from) -{ - populateTicketCount(to, from); -} - -void -convert(org::xrpl::rpc::v1::TrustSet& to, STObject const& from) -{ - populateLimitAmount(to, from); - - populateQualityIn(to, from); - - populateQualityOut(to, from); -} - -void -convert(org::xrpl::rpc::v1::Payment& to, STObject const& from) -{ - populateAmount(to, from); - - populateDestination(to, from); - - populateDestinationTag(to, from); - - populateInvoiceID(to, from); - - populateSendMax(to, from); - - populateDeliverMin(to, from); - - if (from.isFieldPresent(sfPaths)) - { - // populate path data - STPathSet const& pathset = from.getFieldPathSet(sfPaths); - for (auto it = pathset.begin(); it < pathset.end(); ++it) - { - STPath const& path = *it; - - org::xrpl::rpc::v1::Payment_Path* protoPath = to.add_paths(); - - for (auto it2 = path.begin(); it2 != path.end(); ++it2) - { - org::xrpl::rpc::v1::Payment_PathElement* protoElement = - protoPath->add_elements(); - STPathElement const& elt = *it2; - - if (elt.isOffer()) - { - if (elt.hasCurrency()) - { - Currency const& currency = elt.getCurrency(); - protoElement->mutable_currency()->set_name( - to_string(currency)); - } - if (elt.hasIssuer()) - { - AccountID const& issuer = elt.getIssuerID(); - protoElement->mutable_issuer()->set_address( - toBase58(issuer)); - } - } - else if (elt.isAccount()) - { - AccountID const& pathAccount = elt.getAccountID(); - protoElement->mutable_account()->set_address( - toBase58(pathAccount)); - } - } - } - } -} - -void -convert(org::xrpl::rpc::v1::AccountRoot& to, STObject const& from) -{ - populateAccount(to, from); - - populateBalance(to, from); - - populateSequence(to, from); - - populateFlags(to, from); - - populateOwnerCount(to, from); - - populatePreviousTransactionID(to, from); - - 
populatePreviousTransactionLedgerSequence(to, from); - - populateAccountTransactionID(to, from); - - populateDomain(to, from); - - populateEmailHash(to, from); - - populateMessageKey(to, from); - - populateRegularKey(to, from); - - populateTickSize(to, from); - - populateTransferRate(to, from); - - populateBurnedNFTokens(to, from); - - populateMintedNFTokens(to, from); - - populateNFTokenMinter(to, from); -} - -void -convert(org::xrpl::rpc::v1::Amendments& to, STObject const& from) -{ - populateAmendments(to, from); - - populateMajorities(to, from); -} - -void -convert(org::xrpl::rpc::v1::Check& to, STObject const& from) -{ - populateAccount(to, from); - - populateDestination(to, from); - - populateFlags(to, from); - - populateOwnerNode(to, from); - - populatePreviousTransactionID(to, from); - - populatePreviousTransactionLedgerSequence(to, from); - - populateSendMax(to, from); - - populateSequence(to, from); - - populateDestinationNode(to, from); - - populateDestinationTag(to, from); - - populateExpiration(to, from); - - populateInvoiceID(to, from); - - populateSourceTag(to, from); -} - -void -convert(org::xrpl::rpc::v1::DepositPreauthObject& to, STObject const& from) -{ - populateAccount(to, from); - - populateAuthorize(to, from); - - populateFlags(to, from); - - populateOwnerNode(to, from); - - populatePreviousTransactionID(to, from); - - populatePreviousTransactionLedgerSequence(to, from); -} - -void -convert(org::xrpl::rpc::v1::FeeSettings& to, STObject const& from) -{ - populateBaseFee(to, from); - - populateReferenceFeeUnits(to, from); - - populateReserveBase(to, from); - - populateReserveIncrement(to, from); - - populateFlags(to, from); -} - -void -convert(org::xrpl::rpc::v1::Escrow& to, STObject const& from) -{ - populateAccount(to, from); - - populateDestination(to, from); - - populateAmount(to, from); - - populateCondition(to, from); - - populateCancelAfter(to, from); - - populateFinishAfter(to, from); - - populateFlags(to, from); - - populateSourceTag(to, from); - - populateDestinationTag(to, from); - - populateOwnerNode(to, from); - - populateDestinationNode(to, from); - - populatePreviousTransactionID(to, from); - - populatePreviousTransactionLedgerSequence(to, from); -} - -void -convert(org::xrpl::rpc::v1::LedgerHashes& to, STObject const& from) -{ - populateLastLedgerSequence(to, from); - - populateHashes(to, from); - - populateFlags(to, from); -} - -void -convert(org::xrpl::rpc::v1::PayChannel& to, STObject const& from) -{ - populateAccount(to, from); - - populateAmount(to, from); - - populateBalance(to, from); - - populatePublicKey(to, from); - - populateSettleDelay(to, from); - - populateOwnerNode(to, from); - - populatePreviousTransactionID(to, from); - - populatePreviousTransactionLedgerSequence(to, from); - - populateFlags(to, from); - - populateExpiration(to, from); - - populateCancelAfter(to, from); - - populateSourceTag(to, from); - - populateDestinationTag(to, from); - - populateDestinationNode(to, from); -} - -void -convert(org::xrpl::rpc::v1::DirectoryNode& to, STObject const& from) -{ - populateFlags(to, from); - - populateRootIndex(to, from); - - populateIndexes(to, from); - - populateIndexNext(to, from); - - populateIndexPrevious(to, from); - - populateTakerPaysIssuer(to, from); - - populateTakerPaysCurrency(to, from); - - populateTakerGetsCurrency(to, from); - - populateTakerGetsIssuer(to, from); - - populateNFTokenID(to, from); -} - -void -convert(org::xrpl::rpc::v1::Offer& to, STObject const& from) -{ - populateAccount(to, from); - - populateSequence(to, 
from); - - populateFlags(to, from); - - populateTakerPays(to, from); - - populateTakerGets(to, from); - - populateBookDirectory(to, from); - - populateBookNode(to, from); -} - -void -convert(org::xrpl::rpc::v1::RippleState& to, STObject const& from) -{ - populateBalance(to, from); - - populateFlags(to, from); - - populateLowNode(to, from); - - populateHighNode(to, from); - - populateLowQualityIn(to, from); - - populateLowQualityOut(to, from); - - populateHighQualityIn(to, from); - - populateHighQualityOut(to, from); - - populatePreviousTransactionID(to, from); - - populatePreviousTransactionLedgerSequence(to, from); -} - -void -convert(org::xrpl::rpc::v1::SignerList& to, STObject const& from) -{ - populateFlags(to, from); - - populatePreviousTransactionID(to, from); - - populatePreviousTransactionLedgerSequence(to, from); - - populateOwnerNode(to, from); - - populateSignerEntries(to, from); - - populateSignerQuorum(to, from); - - populateSignerListID(to, from); -} - -void -convert(org::xrpl::rpc::v1::NegativeUNL& to, STObject const& from) -{ - populateDisabledValidators(to, from); - - populateValidatorToDisable(to, from); - - populateValidatorToReEnable(to, from); - - populateFlags(to, from); -} - -void -convert(org::xrpl::rpc::v1::TicketObject& to, STObject const& from) -{ - populateAccount(to, from); - - populateFlags(to, from); - - populateOwnerNode(to, from); - - populatePreviousTransactionID(to, from); - - populatePreviousTransactionLedgerSequence(to, from); - - populateTicketSequence(to, from); -} - -void -convert(org::xrpl::rpc::v1::NFTokenOffer& to, STObject const& from) -{ - populateFlags(to, from); - - populateOwner(to, from); - - populateNFTokenID(to, from); - - populateAmount(to, from); - - populateOwnerNode(to, from); - - populateDestination(to, from); - - populateExpiration(to, from); - - populatePreviousTransactionID(to, from); - - populatePreviousTransactionLedgerSequence(to, from); -} - -void -convert(org::xrpl::rpc::v1::NFTokenPage& to, STObject const& from) -{ - populateFlags(to, from); - - populatePreviousPageMin(to, from); - - populateNextPageMin(to, from); - - populateNFTokens(to, from); - - populatePreviousTransactionID(to, from); - - populatePreviousTransactionLedgerSequence(to, from); -} - -void -setLedgerEntryType( - org::xrpl::rpc::v1::AffectedNode& proto, - std::uint16_t lgrType) -{ - switch (lgrType) - { - case ltACCOUNT_ROOT: - proto.set_ledger_entry_type( - org::xrpl::rpc::v1::LEDGER_ENTRY_TYPE_ACCOUNT_ROOT); - break; - case ltDIR_NODE: - proto.set_ledger_entry_type( - org::xrpl::rpc::v1::LEDGER_ENTRY_TYPE_DIRECTORY_NODE); - break; - case ltRIPPLE_STATE: - proto.set_ledger_entry_type( - org::xrpl::rpc::v1::LEDGER_ENTRY_TYPE_RIPPLE_STATE); - break; - case ltSIGNER_LIST: - proto.set_ledger_entry_type( - org::xrpl::rpc::v1::LEDGER_ENTRY_TYPE_SIGNER_LIST); - break; - case ltOFFER: - proto.set_ledger_entry_type( - org::xrpl::rpc::v1::LEDGER_ENTRY_TYPE_OFFER); - break; - case ltLEDGER_HASHES: - proto.set_ledger_entry_type( - org::xrpl::rpc::v1::LEDGER_ENTRY_TYPE_LEDGER_HASHES); - break; - case ltAMENDMENTS: - proto.set_ledger_entry_type( - org::xrpl::rpc::v1::LEDGER_ENTRY_TYPE_AMENDMENTS); - break; - case ltFEE_SETTINGS: - proto.set_ledger_entry_type( - org::xrpl::rpc::v1::LEDGER_ENTRY_TYPE_FEE_SETTINGS); - break; - case ltESCROW: - proto.set_ledger_entry_type( - org::xrpl::rpc::v1::LEDGER_ENTRY_TYPE_ESCROW); - break; - case ltPAYCHAN: - proto.set_ledger_entry_type( - org::xrpl::rpc::v1::LEDGER_ENTRY_TYPE_PAY_CHANNEL); - break; - case ltCHECK: - 
proto.set_ledger_entry_type( - org::xrpl::rpc::v1::LEDGER_ENTRY_TYPE_CHECK); - break; - case ltDEPOSIT_PREAUTH: - proto.set_ledger_entry_type( - org::xrpl::rpc::v1::LEDGER_ENTRY_TYPE_DEPOSIT_PREAUTH); - break; - case ltNEGATIVE_UNL: - proto.set_ledger_entry_type( - org::xrpl::rpc::v1::LEDGER_ENTRY_TYPE_NEGATIVE_UNL); - break; - case ltTICKET: - proto.set_ledger_entry_type( - org::xrpl::rpc::v1::LEDGER_ENTRY_TYPE_TICKET); - break; - case ltNFTOKEN_OFFER: - proto.set_ledger_entry_type( - org::xrpl::rpc::v1::LEDGER_ENTRY_TYPE_NFTOKEN_OFFER); - break; - case ltNFTOKEN_PAGE: - proto.set_ledger_entry_type( - org::xrpl::rpc::v1::LEDGER_ENTRY_TYPE_NFTOKEN_PAGE); - break; - } -} - -template -void -convert(T& to, STObject& from, std::uint16_t type) -{ - switch (type) - { - case ltACCOUNT_ROOT: - RPC::convert(*to.mutable_account_root(), from); - break; - case ltAMENDMENTS: - RPC::convert(*to.mutable_amendments(), from); - break; - case ltDIR_NODE: - RPC::convert(*to.mutable_directory_node(), from); - break; - case ltRIPPLE_STATE: - RPC::convert(*to.mutable_ripple_state(), from); - break; - case ltSIGNER_LIST: - RPC::convert(*to.mutable_signer_list(), from); - break; - case ltOFFER: - RPC::convert(*to.mutable_offer(), from); - break; - case ltLEDGER_HASHES: - RPC::convert(*to.mutable_ledger_hashes(), from); - break; - case ltFEE_SETTINGS: - RPC::convert(*to.mutable_fee_settings(), from); - break; - case ltESCROW: - RPC::convert(*to.mutable_escrow(), from); - break; - case ltPAYCHAN: - RPC::convert(*to.mutable_pay_channel(), from); - break; - case ltCHECK: - RPC::convert(*to.mutable_check(), from); - break; - case ltDEPOSIT_PREAUTH: - RPC::convert(*to.mutable_deposit_preauth(), from); - break; - case ltNEGATIVE_UNL: - RPC::convert(*to.mutable_negative_unl(), from); - break; - case ltTICKET: - RPC::convert(*to.mutable_ticket(), from); - break; - case ltNFTOKEN_OFFER: - RPC::convert(*to.mutable_nftoken_offer(), from); - break; - case ltNFTOKEN_PAGE: - RPC::convert(*to.mutable_nftoken_page(), from); - break; - } -} - -template -void -populateFields( - T const& getProto, - STObject& obj, - SField const& field, - uint16_t lgrType) -{ - // final fields - if (obj.isFieldPresent(field)) - { - STObject& data = obj.getField(field).downcast(); - - convert(*getProto(), data, lgrType); - } -} - -template -void -populateFinalFields(T const& getProto, STObject& obj, uint16_t lgrType) -{ - populateFields(getProto, obj, sfFinalFields, lgrType); -} - -template -void -populatePreviousFields(T const& getProto, STObject& obj, uint16_t lgrType) -{ - populateFields(getProto, obj, sfPreviousFields, lgrType); -} - -template -void -populateNewFields(T const& getProto, STObject& obj, uint16_t lgrType) -{ - populateFields(getProto, obj, sfNewFields, lgrType); -} - -void -convert(org::xrpl::rpc::v1::Meta& to, std::shared_ptr const& from) -{ - to.set_transaction_index(from->getIndex()); - - convert(*to.mutable_transaction_result(), from->getResultTER()); - to.mutable_transaction_result()->set_result( - transToken(from->getResultTER())); - - if (from->hasDeliveredAmount()) - convert(*to.mutable_delivered_amount(), from->getDeliveredAmount()); - - STArray& nodes = from->getNodes(); - for (auto it = nodes.begin(); it != nodes.end(); ++it) - { - STObject& obj = *it; - org::xrpl::rpc::v1::AffectedNode* node = to.add_affected_nodes(); - - // ledger index - uint256 ledgerIndex = obj.getFieldH256(sfLedgerIndex); - node->set_ledger_index(ledgerIndex.data(), ledgerIndex.size()); - - // ledger entry type - std::uint16_t lgrType = 
obj.getFieldU16(sfLedgerEntryType); - setLedgerEntryType(*node, lgrType); - - // modified node - if (obj.getFName() == sfModifiedNode) - { - populateFinalFields( - [&node]() { - return node->mutable_modified_node() - ->mutable_final_fields(); - }, - obj, - lgrType); - - populatePreviousFields( - [&node]() { - return node->mutable_modified_node() - ->mutable_previous_fields(); - }, - obj, - lgrType); - - populatePreviousTransactionID(*node->mutable_modified_node(), obj); - - populatePreviousTransactionLedgerSequence( - *node->mutable_modified_node(), obj); - } - // created node - else if (obj.getFName() == sfCreatedNode) - { - populateNewFields( - [&node]() { - return node->mutable_created_node()->mutable_new_fields(); - }, - obj, - lgrType); - } - // deleted node - else if (obj.getFName() == sfDeletedNode) - { - populateFinalFields( - [&node]() { - return node->mutable_deleted_node()->mutable_final_fields(); - }, - obj, - lgrType); - } - } -} - -void -convert( - org::xrpl::rpc::v1::QueueData& to, - std::vector const& from) -{ - if (!from.empty()) - { - to.set_txn_count(from.size()); - - std::uint32_t seqCount = 0; - std::uint32_t ticketCount = 0; - std::optional lowestSeq; - std::optional highestSeq; - std::optional lowestTicket; - std::optional highestTicket; - bool anyAuthChanged = false; - XRPAmount totalSpend(0); - - for (auto const& tx : from) - { - org::xrpl::rpc::v1::QueuedTransaction& qt = *to.add_transactions(); - - if (tx.seqProxy.isSeq()) - { - qt.mutable_sequence()->set_value(tx.seqProxy.value()); - ++seqCount; - if (!lowestSeq) - lowestSeq = tx.seqProxy.value(); - highestSeq = tx.seqProxy.value(); - } - else - { - qt.mutable_ticket()->set_value(tx.seqProxy.value()); - ++ticketCount; - if (!lowestTicket) - lowestTicket = tx.seqProxy.value(); - highestTicket = tx.seqProxy.value(); - } - - qt.set_fee_level(tx.feeLevel.fee()); - if (tx.lastValid) - qt.mutable_last_ledger_sequence()->set_value(*tx.lastValid); - - qt.mutable_fee()->set_drops(tx.consequences.fee().drops()); - auto const spend = - tx.consequences.potentialSpend() + tx.consequences.fee(); - qt.mutable_max_spend_drops()->set_drops(spend.drops()); - totalSpend += spend; - bool const authChanged = tx.consequences.isBlocker(); - if (authChanged) - anyAuthChanged = true; - qt.set_auth_change(authChanged); - } - - if (seqCount) - to.set_sequence_count(seqCount); - if (ticketCount) - to.set_ticket_count(ticketCount); - if (lowestSeq) - to.set_lowest_sequence(*lowestSeq); - if (highestSeq) - to.set_highest_sequence(*highestSeq); - if (lowestTicket) - to.set_lowest_ticket(*lowestTicket); - if (highestTicket) - to.set_highest_ticket(*highestTicket); - - to.set_auth_change_queued(anyAuthChanged); - to.mutable_max_spend_drops_total()->set_drops(totalSpend.drops()); - } -} - -void -convert( - org::xrpl::rpc::v1::Transaction& to, - std::shared_ptr const& from) -{ - STObject const& fromObj = *from; - - populateAccount(to, fromObj); - - populateFee(to, fromObj); - - populateSequence(to, fromObj); - - populateSigningPublicKey(to, fromObj); - - populateTransactionSignature(to, fromObj); - - populateFlags(to, fromObj); - - populateLastLedgerSequence(to, fromObj); - - populateSourceTag(to, fromObj); - - populateAccountTransactionID(to, fromObj); - - populateMemos(to, fromObj); - - populateSigners(to, fromObj); - - populateTicketSequence(to, fromObj); - - auto type = safe_cast(fromObj.getFieldU16(sfTransactionType)); - - switch (type) - { - case TxType::ttPAYMENT: - convert(*to.mutable_payment(), fromObj); - break; - case 
TxType::ttESCROW_CREATE: - convert(*to.mutable_escrow_create(), fromObj); - break; - case TxType::ttESCROW_FINISH: - convert(*to.mutable_escrow_finish(), fromObj); - break; - case TxType::ttACCOUNT_SET: - convert(*to.mutable_account_set(), fromObj); - break; - case TxType::ttESCROW_CANCEL: - convert(*to.mutable_escrow_cancel(), fromObj); - break; - case TxType::ttREGULAR_KEY_SET: - convert(*to.mutable_set_regular_key(), fromObj); - break; - case TxType::ttOFFER_CREATE: - convert(*to.mutable_offer_create(), fromObj); - break; - case TxType::ttOFFER_CANCEL: - convert(*to.mutable_offer_cancel(), fromObj); - break; - case TxType::ttSIGNER_LIST_SET: - convert(*to.mutable_signer_list_set(), fromObj); - break; - case TxType::ttPAYCHAN_CREATE: - convert(*to.mutable_payment_channel_create(), fromObj); - break; - case TxType::ttPAYCHAN_FUND: - convert(*to.mutable_payment_channel_fund(), fromObj); - break; - case TxType::ttPAYCHAN_CLAIM: - convert(*to.mutable_payment_channel_claim(), fromObj); - break; - case TxType::ttCHECK_CREATE: - convert(*to.mutable_check_create(), fromObj); - break; - case TxType::ttCHECK_CASH: - convert(*to.mutable_check_cash(), fromObj); - break; - case TxType::ttCHECK_CANCEL: - convert(*to.mutable_check_cancel(), fromObj); - break; - case TxType::ttDEPOSIT_PREAUTH: - convert(*to.mutable_deposit_preauth(), fromObj); - break; - case TxType::ttTRUST_SET: - convert(*to.mutable_trust_set(), fromObj); - break; - case TxType::ttACCOUNT_DELETE: - convert(*to.mutable_account_delete(), fromObj); - break; - case TxType::ttTICKET_CREATE: - convert(*to.mutable_ticket_create(), fromObj); - break; - case TxType::ttNFTOKEN_MINT: - convert(*to.mutable_nftoken_mint(), fromObj); - break; - case TxType::ttNFTOKEN_BURN: - convert(*to.mutable_nftoken_burn(), fromObj); - break; - case TxType::ttNFTOKEN_CREATE_OFFER: - convert(*to.mutable_nftoken_create_offer(), fromObj); - break; - case TxType::ttNFTOKEN_CANCEL_OFFER: - convert(*to.mutable_nftoken_cancel_offer(), fromObj); - break; - case TxType::ttNFTOKEN_ACCEPT_OFFER: - convert(*to.mutable_nftoken_accept_offer(), fromObj); - break; - default: - break; - } -} - -} // namespace RPC -} // namespace ripple diff --git a/src/ripple/rpc/impl/GRPCHelpers.h b/src/ripple/rpc/impl/GRPCHelpers.h deleted file mode 100644 index 80c34f96c29..00000000000 --- a/src/ripple/rpc/impl/GRPCHelpers.h +++ /dev/null @@ -1,90 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#ifndef RIPPLE_RPC_GRPCHELPERS_H_INCLUDED -#define RIPPLE_RPC_GRPCHELPERS_H_INCLUDED - -#include "org/xrpl/rpc/v1/get_account_info.pb.h" -#include "org/xrpl/rpc/v1/ledger_objects.pb.h" -#include "org/xrpl/rpc/v1/meta.pb.h" -#include "org/xrpl/rpc/v1/transaction.pb.h" - -#include -#include -#include -#include -#include - -#include - -namespace ripple { -namespace RPC { - -void -convert(org::xrpl::rpc::v1::Meta& to, std::shared_ptr const& from); - -void -convert( - org::xrpl::rpc::v1::QueueData& to, - std::vector const& from); - -void -convert( - org::xrpl::rpc::v1::Transaction& to, - std::shared_ptr const& from); - -void -convert(org::xrpl::rpc::v1::TransactionResult& to, TER from); - -void -convert(org::xrpl::rpc::v1::AccountRoot& to, STObject const& from); - -void -convert(org::xrpl::rpc::v1::SignerList& to, STObject const& from); - -void -convert(org::xrpl::rpc::v1::NegativeUNL& to, STObject const& from); - -template -void -convert(T& to, STAmount const& from) -{ - if (from.native()) - { - to.mutable_value()->mutable_xrp_amount()->set_drops(from.xrp().drops()); - } - else - { - Issue const& issue = from.issue(); - - org::xrpl::rpc::v1::IssuedCurrencyAmount* issued = - to.mutable_value()->mutable_issued_currency_amount(); - - issued->mutable_currency()->set_name(to_string(issue.currency)); - issued->mutable_currency()->set_code( - issue.currency.data(), Currency::size()); - issued->mutable_issuer()->set_address(toBase58(issue.account)); - issued->set_value(to_string(from.iou())); - } -} - -} // namespace RPC -} // namespace ripple - -#endif diff --git a/src/ripple/rpc/impl/RPCHelpers.cpp b/src/ripple/rpc/impl/RPCHelpers.cpp index 77255a5e6f6..3d1bfe6375b 100644 --- a/src/ripple/rpc/impl/RPCHelpers.cpp +++ b/src/ripple/rpc/impl/RPCHelpers.cpp @@ -33,7 +33,6 @@ #include #include -#include namespace ripple { namespace RPC { @@ -300,12 +299,6 @@ ledgerFromRequest(T& ledger, GRPCContext& context) return ledgerFromSpecifier(ledger, request.ledger(), context); } -// explicit instantiation of above function -template Status -ledgerFromRequest<>( - std::shared_ptr&, - GRPCContext&); - // explicit instantiation of above function template Status ledgerFromRequest<>( diff --git a/src/test/app/AccountTxPaging_test.cpp b/src/test/app/AccountTxPaging_test.cpp index 332ef213186..d3969e279b7 100644 --- a/src/test/app/AccountTxPaging_test.cpp +++ b/src/test/app/AccountTxPaging_test.cpp @@ -263,1906 +263,11 @@ class AccountTxPaging_test : public beast::unit_test::suite } } - class GrpcAccountTxClient : public test::GRPCTestClientBase - { - public: - org::xrpl::rpc::v1::GetAccountTransactionHistoryRequest request; - org::xrpl::rpc::v1::GetAccountTransactionHistoryResponse reply; - - explicit GrpcAccountTxClient(std::string const& port) - : GRPCTestClientBase(port) - { - } - - void - AccountTx() - { - status = - stub_->GetAccountTransactionHistory(&context, request, &reply); - } - }; - - bool - checkTransaction( - org::xrpl::rpc::v1::GetTransactionResponse const& tx, - int sequence, - int ledger) - { - return ( - tx.transaction().sequence().value() == sequence && - tx.ledger_index() == ledger); - } - - std::pair< - org::xrpl::rpc::v1::GetAccountTransactionHistoryResponse, - grpc::Status> - nextBinary( - std::string grpcPort, - test::jtx::Env& env, - std::string const& account = "", - int ledger_min = -1, - int ledger_max = -1, - int limit = -1, - bool forward = false, - org::xrpl::rpc::v1::Marker* marker = nullptr) - { - 
GrpcAccountTxClient client{grpcPort}; - auto& request = client.request; - if (account != "") - request.mutable_account()->set_address(account); - if (ledger_min != -1) - request.mutable_ledger_range()->set_ledger_index_min(ledger_min); - if (ledger_max != -1) - request.mutable_ledger_range()->set_ledger_index_max(ledger_max); - request.set_forward(forward); - request.set_binary(true); - if (limit != -1) - request.set_limit(limit); - if (marker) - { - *request.mutable_marker() = *marker; - } - - client.AccountTx(); - return {client.reply, client.status}; - } - - std::pair< - org::xrpl::rpc::v1::GetAccountTransactionHistoryResponse, - grpc::Status> - next( - std::string grpcPort, - test::jtx::Env& env, - std::string const& account = "", - int ledger_min = -1, - int ledger_max = -1, - int limit = -1, - bool forward = false, - org::xrpl::rpc::v1::Marker* marker = nullptr) - { - GrpcAccountTxClient client{grpcPort}; - auto& request = client.request; - if (account != "") - request.mutable_account()->set_address(account); - if (ledger_min != -1) - request.mutable_ledger_range()->set_ledger_index_min(ledger_min); - if (ledger_max != -1) - request.mutable_ledger_range()->set_ledger_index_max(ledger_max); - request.set_forward(forward); - if (limit != -1) - request.set_limit(limit); - if (marker) - { - *request.mutable_marker() = *marker; - } - - client.AccountTx(); - return {client.reply, client.status}; - } - - std::pair< - org::xrpl::rpc::v1::GetAccountTransactionHistoryResponse, - grpc::Status> - nextWithSeq( - std::string grpcPort, - test::jtx::Env& env, - std::string const& account = "", - int ledger_seq = -1, - int limit = -1, - bool forward = false, - org::xrpl::rpc::v1::Marker* marker = nullptr) - { - GrpcAccountTxClient client{grpcPort}; - auto& request = client.request; - if (account != "") - request.mutable_account()->set_address(account); - if (ledger_seq != -1) - request.mutable_ledger_specifier()->set_sequence(ledger_seq); - request.set_forward(forward); - if (limit != -1) - request.set_limit(limit); - if (marker) - { - *request.mutable_marker() = *marker; - } - - client.AccountTx(); - return {client.reply, client.status}; - } - - std::pair< - org::xrpl::rpc::v1::GetAccountTransactionHistoryResponse, - grpc::Status> - nextWithHash( - std::string grpcPort, - test::jtx::Env& env, - std::string const& account = "", - uint256 const& hash = beast::zero, - int limit = -1, - bool forward = false, - org::xrpl::rpc::v1::Marker* marker = nullptr) - { - GrpcAccountTxClient client{grpcPort}; - auto& request = client.request; - if (account != "") - request.mutable_account()->set_address(account); - if (hash != beast::zero) - request.mutable_ledger_specifier()->set_hash( - hash.data(), hash.size()); - request.set_forward(forward); - if (limit != -1) - request.set_limit(limit); - if (marker) - { - *request.mutable_marker() = *marker; - } - - client.AccountTx(); - return {client.reply, client.status}; - } - - void - testAccountTxParametersGrpc() - { - testcase("Test Account_tx Grpc"); - - using namespace test::jtx; - std::unique_ptr config = envconfig(addGrpcConfig); - std::string grpcPort = *(*config)["port_grpc"].get("port"); - Env env(*this, std::move(config)); - - Account A1{"A1"}; - env.fund(XRP(10000), A1); - env.close(); - - // Ledger 3 has the two txs associated with funding the account - // All other ledgers have no txs - - auto hasTxs = [](auto res) { - return res.second.error_code() == 0 && - (res.first.transactions().size() == 2) && - 
//(res.transactions()[0u].transaction().has_account_set()) && - (res.first.transactions()[1u].transaction().has_payment()); - }; - auto noTxs = [](auto res) { - return res.second.error_code() == 0 && - (res.first.transactions().size() == 0); - }; - - auto isErr = [](auto res, auto expect) { - return res.second.error_code() == expect; - }; - - BEAST_EXPECT( - isErr(next(grpcPort, env, ""), grpc::StatusCode::INVALID_ARGUMENT)); - - BEAST_EXPECT(isErr( - next(grpcPort, env, "0xDEADBEEF"), - grpc::StatusCode::INVALID_ARGUMENT)); - - BEAST_EXPECT(hasTxs(next(grpcPort, env, A1.human()))); - - // Ledger min/max index - { - BEAST_EXPECT(hasTxs(next(grpcPort, env, A1.human()))); - - BEAST_EXPECT(hasTxs(next(grpcPort, env, A1.human(), 0, 100))); - - BEAST_EXPECT(noTxs(next(grpcPort, env, A1.human(), 1, 2))); - - BEAST_EXPECT(isErr( - next(grpcPort, env, A1.human(), 2, 1), - grpc::StatusCode::INVALID_ARGUMENT)); - } - - // Ledger index min only - { - BEAST_EXPECT(hasTxs(next(grpcPort, env, A1.human(), -1))); - - BEAST_EXPECT(hasTxs(next(grpcPort, env, A1.human(), 1))); - - BEAST_EXPECT(isErr( - next(grpcPort, env, A1.human(), env.current()->info().seq), - grpc::StatusCode::INVALID_ARGUMENT)); - } - - // Ledger index max only - { - BEAST_EXPECT(hasTxs(next(grpcPort, env, A1.human(), -1, -1))); - - BEAST_EXPECT(hasTxs(next( - grpcPort, env, A1.human(), -1, env.current()->info().seq))); - - BEAST_EXPECT(hasTxs( - next(grpcPort, env, A1.human(), -1, env.closed()->info().seq))); - - BEAST_EXPECT(noTxs(next( - grpcPort, env, A1.human(), -1, env.closed()->info().seq - 1))); - } - // Ledger Sequence - { - BEAST_EXPECT(hasTxs(nextWithSeq( - grpcPort, env, A1.human(), env.closed()->info().seq))); - - BEAST_EXPECT(noTxs(nextWithSeq( - grpcPort, env, A1.human(), env.closed()->info().seq - 1))); - - BEAST_EXPECT(isErr( - nextWithSeq( - grpcPort, env, A1.human(), env.current()->info().seq), - grpc::StatusCode::INVALID_ARGUMENT)); - - BEAST_EXPECT(isErr( - nextWithSeq( - grpcPort, env, A1.human(), env.current()->info().seq + 1), - grpc::StatusCode::NOT_FOUND)); - } - - // Ledger Hash - { - BEAST_EXPECT(hasTxs(nextWithHash( - grpcPort, env, A1.human(), env.closed()->info().hash))); - - BEAST_EXPECT(noTxs(nextWithHash( - grpcPort, env, A1.human(), env.closed()->info().parentHash))); - } - } - - struct TxCheck - { - uint32_t sequence; - uint32_t ledgerIndex; - std::string hash; - std::function - checkTxn; - }; - - void - testAccountTxContentsGrpc() - { - testcase("Test AccountTx context grpc"); - // Get results for all transaction types that can be associated - // with an account. Start by generating all transaction types. 
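// [Editorial aside - not part of the removed file or of this patch.]
// For orientation, the (now deleted) helpers above all follow the same
// round trip: build a GetAccountTransactionHistoryRequest, call the stub,
// then inspect the reply. A minimal sketch of that pattern, kept inside a
// comment and assuming the GrpcAccountTxClient helper, the jtx Env, its
// grpcPort string and a funded Account alice that this test file defines:
//
//     GrpcAccountTxClient client{grpcPort};
//     client.request.mutable_account()->set_address(alice.human());
//     client.request.set_limit(20);           // cap the page size
//     client.AccountTx();                     // issue the gRPC call
//     if (client.status.error_code() == 0)
//         for (auto const& tx : client.reply.transactions())
//             BEAST_EXPECT(tx.validated());   // results are validated txs
//
// The rest of this function exercised exactly this flow after generating
// one transaction of every supported type for the account under test.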
- using namespace test::jtx; - using namespace std::chrono_literals; - - std::unique_ptr config = envconfig(addGrpcConfig); - std::string grpcPort = *(*config)["port_grpc"].get("port"); - Env env(*this, std::move(config)); - // Set time to this value (or greater) to get delivered_amount in meta - env.timeKeeper().set(NetClock::time_point{446000001s}); - Account const alice{"alice"}; - Account const alie{"alie"}; - Account const gw{"gw"}; - auto const USD{gw["USD"]}; - - std::vector> txns; - - env.fund(XRP(1000000), alice, gw); - env.close(); - - // AccountSet - env(noop(alice)); - - txns.emplace_back(env.tx()); - // Payment - env(pay(alice, gw, XRP(100)), stag(42), dtag(24), last_ledger_seq(20)); - - txns.emplace_back(env.tx()); - // Regular key set - env(regkey(alice, alie)); - env.close(); - - txns.emplace_back(env.tx()); - // Trust and Offers - env(trust(alice, USD(200)), sig(alie)); - - txns.emplace_back(env.tx()); - std::uint32_t const offerSeq{env.seq(alice)}; - env(offer(alice, USD(50), XRP(150)), sig(alie)); - - txns.emplace_back(env.tx()); - env.close(); - - env(offer_cancel(alice, offerSeq), sig(alie)); - env.close(); - - txns.emplace_back(env.tx()); - - // SignerListSet - env(signers(alice, 1, {{"bogie", 1}, {"demon", 1}, {gw, 1}}), - sig(alie)); - - txns.emplace_back(env.tx()); - // Escrow - { - // Create an escrow. Requires either a CancelAfter or FinishAfter. - auto escrow = [](Account const& account, - Account const& to, - STAmount const& amount) { - Json::Value escro; - escro[jss::TransactionType] = jss::EscrowCreate; - escro[jss::Flags] = tfUniversal; - escro[jss::Account] = account.human(); - escro[jss::Destination] = to.human(); - escro[jss::Amount] = amount.getJson(JsonOptions::none); - return escro; - }; - - NetClock::time_point const nextTime{env.now() + 2s}; - - Json::Value escrowWithFinish{escrow(alice, alice, XRP(500))}; - escrowWithFinish[sfFinishAfter.jsonName] = - nextTime.time_since_epoch().count(); - - std::uint32_t const escrowFinishSeq{env.seq(alice)}; - env(escrowWithFinish, sig(alie)); - - txns.emplace_back(env.tx()); - Json::Value escrowWithCancel{escrow(alice, alice, XRP(500))}; - escrowWithCancel[sfFinishAfter.jsonName] = - nextTime.time_since_epoch().count(); - escrowWithCancel[sfCancelAfter.jsonName] = - nextTime.time_since_epoch().count() + 1; - - std::uint32_t const escrowCancelSeq{env.seq(alice)}; - env(escrowWithCancel, sig(alie)); - env.close(); - - txns.emplace_back(env.tx()); - { - Json::Value escrowFinish; - escrowFinish[jss::TransactionType] = jss::EscrowFinish; - escrowFinish[jss::Flags] = tfUniversal; - escrowFinish[jss::Account] = alice.human(); - escrowFinish[sfOwner.jsonName] = alice.human(); - escrowFinish[sfOfferSequence.jsonName] = escrowFinishSeq; - env(escrowFinish, sig(alie)); - - txns.emplace_back(env.tx()); - } - { - Json::Value escrowCancel; - escrowCancel[jss::TransactionType] = jss::EscrowCancel; - escrowCancel[jss::Flags] = tfUniversal; - escrowCancel[jss::Account] = alice.human(); - escrowCancel[sfOwner.jsonName] = alice.human(); - escrowCancel[sfOfferSequence.jsonName] = escrowCancelSeq; - env(escrowCancel, sig(alie)); - - txns.emplace_back(env.tx()); - } - env.close(); - } - - // PayChan - { - std::uint32_t payChanSeq{env.seq(alice)}; - Json::Value payChanCreate; - payChanCreate[jss::TransactionType] = jss::PaymentChannelCreate; - payChanCreate[jss::Flags] = tfUniversal; - payChanCreate[jss::Account] = alice.human(); - payChanCreate[jss::Destination] = gw.human(); - payChanCreate[jss::Amount] = - 
XRP(500).value().getJson(JsonOptions::none); - payChanCreate[sfSettleDelay.jsonName] = - NetClock::duration{100s}.count(); - payChanCreate[sfPublicKey.jsonName] = strHex(alice.pk().slice()); - env(payChanCreate, sig(alie)); - env.close(); - - txns.emplace_back(env.tx()); - std::string const payChanIndex{ - strHex(keylet::payChan(alice, gw, payChanSeq).key)}; - - { - Json::Value payChanFund; - payChanFund[jss::TransactionType] = jss::PaymentChannelFund; - payChanFund[jss::Flags] = tfUniversal; - payChanFund[jss::Account] = alice.human(); - payChanFund[sfChannel.jsonName] = payChanIndex; - payChanFund[jss::Amount] = - XRP(200).value().getJson(JsonOptions::none); - env(payChanFund, sig(alie)); - env.close(); - - txns.emplace_back(env.tx()); - } - { - Json::Value payChanClaim; - payChanClaim[jss::TransactionType] = jss::PaymentChannelClaim; - payChanClaim[jss::Flags] = tfClose; - payChanClaim[jss::Account] = gw.human(); - payChanClaim[sfChannel.jsonName] = payChanIndex; - payChanClaim[sfPublicKey.jsonName] = strHex(alice.pk().slice()); - env(payChanClaim); - env.close(); - - txns.emplace_back(env.tx()); - } - } - - // Check - { - auto const aliceCheckId = keylet::check(alice, env.seq(alice)).key; - env(check::create(alice, gw, XRP(300)), sig(alie)); - - auto txn = env.tx(); - auto const gwCheckId = keylet::check(gw, env.seq(gw)).key; - env(check::create(gw, alice, XRP(200))); - env.close(); - - // need to switch the order of the previous 2 txns, since they are - // in the same ledger and account_tx returns them in a different - // order - txns.emplace_back(env.tx()); - txns.emplace_back(txn); - env(check::cash(alice, gwCheckId, XRP(200)), sig(alie)); - - txns.emplace_back(env.tx()); - env(check::cancel(alice, aliceCheckId), sig(alie)); - - txns.emplace_back(env.tx()); - env.close(); - } - - // Deposit preauthorization. - env(deposit::auth(alice, gw), sig(alie)); - env.close(); - - txns.emplace_back(env.tx()); - // Multi Sig with memo - auto const baseFee = env.current()->fees().base; - env(noop(alice), - msig(gw), - fee(2 * baseFee), - memo("data", "format", "type")); - env.close(); - - txns.emplace_back(env.tx()); - if (!BEAST_EXPECT(txns.size() == 20)) - return; - // Setup is done. Look at the transactions returned by account_tx. 
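// [Editorial aside - not part of the removed file or of this patch.]
// The txCheck table that follows pairs, for each generated transaction,
// its expected account sequence, ledger index and hash with a predicate
// over the decoded protobuf; the entries appear newest-first, matching the
// descending order the test requests. A simplified, hedged example of one
// entry, assuming the TxCheck struct declared earlier in this file:
//
//     TxCheck const sample{
//         21,                                        // account sequence
//         15,                                        // ledger index
//         strHex(txns.back()->getTransactionID()),   // expected hash
//         [this](auto res) {                         // per-tx predicate
//             return BEAST_EXPECT(res.has_account_set());
//         }};
//
// doCheck, defined near the end of this function, verifies the hash,
// ledger index and sequence before delegating to the predicate.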
- - static const TxCheck txCheck[]{ - {21, - 15, - strHex(txns[txns.size() - 1]->getTransactionID()), - [this, &txns](auto res) { - auto txnJson = - txns[txns.size() - 1]->getJson(JsonOptions::none); - return BEAST_EXPECT(res.has_account_set()) && - BEAST_EXPECT(res.has_fee()) && - BEAST_EXPECT(res.fee().drops() == 20) && - BEAST_EXPECT(res.memos_size() == 1) && - BEAST_EXPECT(res.memos(0).has_memo_data()) && - BEAST_EXPECT(res.memos(0).memo_data().value() == "data") && - BEAST_EXPECT(res.memos(0).has_memo_format()) && - BEAST_EXPECT( - res.memos(0).memo_format().value() == "format") && - BEAST_EXPECT(res.memos(0).has_memo_type()) && - BEAST_EXPECT(res.memos(0).memo_type().value() == "type") && - BEAST_EXPECT(res.has_signing_public_key()) && - BEAST_EXPECT(res.signing_public_key().value() == "") && - BEAST_EXPECT(res.signers_size() == 1) && - BEAST_EXPECT(res.signers(0).has_account()) && - BEAST_EXPECT( - res.signers(0).account().value().address() == - txnJson["Signers"][0u]["Signer"]["Account"]) && - BEAST_EXPECT(res.signers(0).has_transaction_signature()) && - BEAST_EXPECT( - strHex(res.signers(0) - .transaction_signature() - .value()) == - txnJson["Signers"][0u]["Signer"]["TxnSignature"]) && - BEAST_EXPECT(res.signers(0).has_signing_public_key()) && - BEAST_EXPECT( - strHex( - res.signers(0).signing_public_key().value()) == - txnJson["Signers"][0u]["Signer"]["SigningPubKey"]); - }}, - {20, - 14, - strHex(txns[txns.size() - 2]->getTransactionID()), - [&txns, this](auto res) { - return BEAST_EXPECT(res.has_deposit_preauth()) && - BEAST_EXPECT( - res.deposit_preauth() - .authorize() - .value() - .address() == - // TODO do them all like this - txns[txns.size() - 2]->getJson( - JsonOptions::none)["Authorize"]); - }}, - {19, - 13, - strHex(txns[txns.size() - 3]->getTransactionID()), - [&txns, this](auto res) { - return BEAST_EXPECT(res.has_check_cancel()) && - BEAST_EXPECT( - strHex(res.check_cancel().check_id().value()) == - - txns[txns.size() - 3]->getJson( - JsonOptions::none)["CheckID"]); - }}, - {18, - 13, - strHex(txns[txns.size() - 4]->getTransactionID()), - [&txns, this](auto res) { - auto txnJson = - txns[txns.size() - 4]->getJson(JsonOptions::none); - return BEAST_EXPECT(res.has_check_cash()) && - BEAST_EXPECT( - strHex(res.check_cash().check_id().value()) == - txnJson["CheckID"]) && - BEAST_EXPECT(res.check_cash() - .amount() - .value() - .has_xrp_amount()) && - BEAST_EXPECT( - res.check_cash() - .amount() - .value() - .xrp_amount() - .drops() == txnJson["Amount"].asUInt()); - }}, - {17, - 12, - strHex(txns[txns.size() - 5]->getTransactionID()), - [&txns, this](auto res) { - auto txnJson = - txns[txns.size() - 5]->getJson(JsonOptions::none); - return BEAST_EXPECT(res.has_check_create()) && - BEAST_EXPECT( - res.check_create() - .destination() - .value() - .address() == txnJson["Destination"]) && - BEAST_EXPECT(res.check_create() - .send_max() - .value() - .has_xrp_amount()) && - BEAST_EXPECT( - res.check_create() - .send_max() - .value() - .xrp_amount() - .drops() == txnJson["SendMax"].asUInt()); - }}, - {5, - 12, - strHex(txns[txns.size() - 6]->getTransactionID()), - [&txns, this](auto res) { - auto txnJson = - txns[txns.size() - 6]->getJson(JsonOptions::none); - return BEAST_EXPECT(res.has_check_create()) && - BEAST_EXPECT( - res.check_create() - .destination() - .value() - .address() == txnJson["Destination"]) && - BEAST_EXPECT(res.check_create() - .send_max() - .value() - .has_xrp_amount()) && - BEAST_EXPECT( - res.check_create() - .send_max() - .value() - .xrp_amount() - 
.drops() == - - txnJson["SendMax"].asUInt()); - }}, - {4, - 11, - strHex(txns[txns.size() - 7]->getTransactionID()), - [&txns, this](auto res) { - auto txnJson = - txns[txns.size() - 7]->getJson(JsonOptions::none); - return BEAST_EXPECT(res.has_payment_channel_claim()) && - BEAST_EXPECT( - strHex(res.payment_channel_claim() - .channel() - .value()) == txnJson["Channel"]) && - BEAST_EXPECT( - strHex(res.payment_channel_claim() - .public_key() - .value()) == txnJson["PublicKey"]); - }}, - {16, - 10, - strHex(txns[txns.size() - 8]->getTransactionID()), - [&txns, this](auto res) { - auto txnJson = - txns[txns.size() - 8]->getJson(JsonOptions::none); - return BEAST_EXPECT(res.has_payment_channel_fund()) && - BEAST_EXPECT( - strHex( - res.payment_channel_fund().channel().value()) == - txnJson["Channel"]) && - BEAST_EXPECT(res.payment_channel_fund() - .amount() - .value() - .has_xrp_amount()) && - BEAST_EXPECT( - res.payment_channel_fund() - .amount() - .value() - .xrp_amount() - .drops() == txnJson["Amount"].asUInt()); - }}, - {15, - 9, - strHex(txns[txns.size() - 9]->getTransactionID()), - [&txns, this](auto res) { - auto txnJson = - txns[txns.size() - 9]->getJson(JsonOptions::none); - return BEAST_EXPECT(res.has_payment_channel_create()) && - BEAST_EXPECT(res.payment_channel_create() - .amount() - .value() - .has_xrp_amount()) && - BEAST_EXPECT( - res.payment_channel_create() - .amount() - .value() - .xrp_amount() - .drops() == txnJson["Amount"].asUInt()) && - BEAST_EXPECT( - res.payment_channel_create() - .destination() - .value() - .address() == txnJson["Destination"]) && - BEAST_EXPECT( - res.payment_channel_create() - .settle_delay() - .value() == txnJson["SettleDelay"].asUInt()) && - BEAST_EXPECT( - strHex(res.payment_channel_create() - .public_key() - .value()) == txnJson["PublicKey"]); - }}, - {14, - 8, - strHex(txns[txns.size() - 10]->getTransactionID()), - [&txns, this](auto res) { - auto txnJson = - txns[txns.size() - 10]->getJson(JsonOptions::none); - return BEAST_EXPECT(res.has_escrow_cancel()) && - BEAST_EXPECT( - res.escrow_cancel().owner().value().address() == - txnJson["Owner"]) && - BEAST_EXPECT( - res.escrow_cancel().offer_sequence().value() == - txnJson["OfferSequence"].asUInt() - - ); - }}, - {13, - 8, - strHex(txns[txns.size() - 11]->getTransactionID()), - [&txns, this](auto res) { - auto txnJson = - txns[txns.size() - 11]->getJson(JsonOptions::none); - return BEAST_EXPECT(res.has_escrow_finish()) && - BEAST_EXPECT( - res.escrow_finish().owner().value().address() == - txnJson["Owner"]) && - BEAST_EXPECT( - res.escrow_finish().offer_sequence().value() == - txnJson["OfferSequence"].asUInt() - - ); - }}, - {12, - 7, - strHex(txns[txns.size() - 12]->getTransactionID()), - [&txns, this](auto res) { - auto txnJson = - txns[txns.size() - 12]->getJson(JsonOptions::none); - return BEAST_EXPECT(res.has_escrow_create()) && - BEAST_EXPECT(res.escrow_create() - .amount() - .value() - .has_xrp_amount()) && - BEAST_EXPECT( - res.escrow_create() - .amount() - .value() - .xrp_amount() - .drops() == txnJson["Amount"].asUInt()) && - BEAST_EXPECT( - res.escrow_create() - .destination() - .value() - .address() == txnJson["Destination"]) && - BEAST_EXPECT( - res.escrow_create().cancel_after().value() == - txnJson["CancelAfter"].asUInt()) && - BEAST_EXPECT( - res.escrow_create().finish_after().value() == - txnJson["FinishAfter"].asUInt()); - }}, - {11, - 7, - strHex(txns[txns.size() - 13]->getTransactionID()), - [&txns, this](auto res) { - auto txnJson = - txns[txns.size() - 
13]->getJson(JsonOptions::none); - return BEAST_EXPECT(res.has_escrow_create()) && - BEAST_EXPECT(res.escrow_create() - .amount() - .value() - .has_xrp_amount()) && - BEAST_EXPECT( - res.escrow_create() - .amount() - .value() - .xrp_amount() - .drops() == txnJson["Amount"].asUInt()) && - BEAST_EXPECT( - res.escrow_create() - .destination() - .value() - .address() == txnJson["Destination"]) && - BEAST_EXPECT( - res.escrow_create().finish_after().value() == - txnJson["FinishAfter"].asUInt()); - }}, - {10, - 7, - strHex(txns[txns.size() - 14]->getTransactionID()), - [&txns, this](auto res) { - auto txnJson = - txns[txns.size() - 14]->getJson(JsonOptions::none); - return BEAST_EXPECT(res.has_signer_list_set()) && - BEAST_EXPECT( - res.signer_list_set().signer_quorum().value() == - txnJson["SignerQuorum"].asUInt()) && - BEAST_EXPECT( - res.signer_list_set().signer_entries().size() == - 3) && - BEAST_EXPECT( - res.signer_list_set() - .signer_entries()[0] - .account() - .value() - .address() == - txnJson["SignerEntries"][0u]["SignerEntry"] - ["Account"]) && - BEAST_EXPECT( - res.signer_list_set() - .signer_entries()[0] - .signer_weight() - .value() == - txnJson["SignerEntries"][0u]["SignerEntry"] - ["SignerWeight"] - .asUInt()) && - BEAST_EXPECT( - res.signer_list_set() - .signer_entries()[1] - .account() - .value() - .address() == - txnJson["SignerEntries"][1u]["SignerEntry"] - ["Account"]) && - BEAST_EXPECT( - res.signer_list_set() - .signer_entries()[1] - .signer_weight() - .value() == - txnJson["SignerEntries"][1u]["SignerEntry"] - ["SignerWeight"] - .asUInt()) && - BEAST_EXPECT( - res.signer_list_set() - .signer_entries()[2] - .account() - .value() - .address() == - txnJson["SignerEntries"][2u]["SignerEntry"] - ["Account"]) && - BEAST_EXPECT( - res.signer_list_set() - .signer_entries()[2] - .signer_weight() - .value() == - txnJson["SignerEntries"][2u]["SignerEntry"] - ["SignerWeight"] - .asUInt()); - }}, - {9, - 6, - strHex(txns[txns.size() - 15]->getTransactionID()), - [&txns, this](auto res) { - auto txnJson = - txns[txns.size() - 15]->getJson(JsonOptions::none); - return BEAST_EXPECT(res.has_offer_cancel()) && - BEAST_EXPECT( - res.offer_cancel().offer_sequence().value() == - txnJson["OfferSequence"].asUInt()); - }}, - {8, - 5, - strHex(txns[txns.size() - 16]->getTransactionID()), - [&txns, this](auto res) { - auto txnJson = - txns[txns.size() - 16]->getJson(JsonOptions::none); - return BEAST_EXPECT(res.has_offer_create()) && - BEAST_EXPECT(res.offer_create() - .taker_gets() - .value() - .has_xrp_amount()) && - BEAST_EXPECT( - res.offer_create() - .taker_gets() - .value() - .xrp_amount() - .drops() == txnJson["TakerGets"].asUInt()) && - BEAST_EXPECT(res.offer_create() - .taker_pays() - .value() - .has_issued_currency_amount()) && - BEAST_EXPECT( - res.offer_create() - .taker_pays() - .value() - .issued_currency_amount() - .currency() - .name() == txnJson["TakerPays"]["currency"]) && - BEAST_EXPECT( - res.offer_create() - .taker_pays() - .value() - .issued_currency_amount() - .value() == txnJson["TakerPays"]["value"]) && - BEAST_EXPECT( - res.offer_create() - .taker_pays() - .value() - .issued_currency_amount() - .issuer() - .address() == txnJson["TakerPays"]["issuer"]); - }}, - {7, - 5, - strHex(txns[txns.size() - 17]->getTransactionID()), - [&txns, this](auto res) { - auto txnJson = - txns[txns.size() - 17]->getJson(JsonOptions::none); - return BEAST_EXPECT(res.has_trust_set()) && - BEAST_EXPECT(res.trust_set() - .limit_amount() - .value() - .has_issued_currency_amount()) && - 
BEAST_EXPECT( - res.trust_set() - .limit_amount() - .value() - .issued_currency_amount() - .currency() - .name() == - txnJson["LimitAmount"]["currency"]) && - BEAST_EXPECT( - res.trust_set() - .limit_amount() - .value() - .issued_currency_amount() - .value() == txnJson["LimitAmount"]["value"]) && - BEAST_EXPECT( - res.trust_set() - .limit_amount() - .value() - .issued_currency_amount() - .issuer() - .address() == txnJson["LimitAmount"]["issuer"]); - }}, - {6, - 4, - strHex(txns[txns.size() - 18]->getTransactionID()), - [&txns, this](auto res) { - auto txnJson = - txns[txns.size() - 18]->getJson(JsonOptions::none); - return BEAST_EXPECT(res.has_set_regular_key()) && - BEAST_EXPECT( - res.set_regular_key() - .regular_key() - .value() - .address() == txnJson["RegularKey"]); - }}, - {5, - 4, - strHex(txns[txns.size() - 19]->getTransactionID()), - [&txns, this](auto res) { - auto txnJson = - txns[txns.size() - 19]->getJson(JsonOptions::none); - return BEAST_EXPECT(res.has_payment()) && - BEAST_EXPECT( - res.payment().amount().value().has_xrp_amount()) && - BEAST_EXPECT( - res.payment() - .amount() - .value() - .xrp_amount() - .drops() == txnJson["Amount"].asUInt()) && - BEAST_EXPECT( - res.payment().destination().value().address() == - txnJson["Destination"]) && - BEAST_EXPECT(res.has_source_tag()) && - BEAST_EXPECT( - res.source_tag().value() == - txnJson["SourceTag"].asUInt()) && - BEAST_EXPECT(res.payment().has_destination_tag()) && - BEAST_EXPECT( - res.payment().destination_tag().value() == - txnJson["DestinationTag"].asUInt()) && - BEAST_EXPECT(res.has_last_ledger_sequence()) && - BEAST_EXPECT( - res.last_ledger_sequence().value() == - txnJson["LastLedgerSequence"].asUInt()) && - BEAST_EXPECT(res.has_transaction_signature()) && - BEAST_EXPECT(res.has_account()) && - BEAST_EXPECT( - res.account().value().address() == - txnJson["Account"]) && - BEAST_EXPECT(res.has_flags()) && - BEAST_EXPECT( - res.flags().value() == txnJson["Flags"].asUInt()); - }}, - {4, - 4, - strHex(txns[txns.size() - 20]->getTransactionID()), - [this](auto res) { return BEAST_EXPECT(res.has_account_set()); }}, - {3, - 3, - "9CE54C3B934E473A995B477E92EC229F99CED5B62BF4D2ACE4DC42719103AE2F", - [this](auto res) { - return BEAST_EXPECT(res.has_account_set()) && - BEAST_EXPECT(res.account_set().set_flag().value() == 8); - }}, - {1, - 3, - "2B5054734FA43C6C7B54F61944FAD6178ACD5D0272B39BA7FCD32A5D3932FBFF", - [&alice, this](auto res) { - return BEAST_EXPECT(res.has_payment()) && - BEAST_EXPECT( - res.payment().amount().value().has_xrp_amount()) && - BEAST_EXPECT( - res.payment() - .amount() - .value() - .xrp_amount() - .drops() == 1000000000010) && - BEAST_EXPECT( - res.payment().destination().value().address() == - alice.human()); - }}}; - - using MetaCheck = - std::function; - static const MetaCheck txMetaCheck[]{ - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 0) && - BEAST_EXPECT(meta.affected_nodes_size() == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](org::xrpl::rpc::v1::AffectedNode const& - entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 1); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 0) && - BEAST_EXPECT(meta.affected_nodes_size() == 3) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - 
org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_DEPOSIT_PREAUTH; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_DIRECTORY_NODE; - }) == 1); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 1) && - BEAST_EXPECT(meta.affected_nodes_size() == 5) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_CHECK; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_DIRECTORY_NODE; - }) == 2) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 2); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 0) && - BEAST_EXPECT(meta.affected_nodes_size() == 5) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_CHECK; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_DIRECTORY_NODE; - }) == 2) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 2); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 1) && - BEAST_EXPECT(meta.affected_nodes_size() == 5) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_CHECK; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_DIRECTORY_NODE; - }) == 2) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 2); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 0) && - BEAST_EXPECT(meta.affected_nodes_size() == 5) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_CHECK; - }) == 1) && - BEAST_EXPECT( - std::count_if( - 
meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_DIRECTORY_NODE; - }) == 2) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 2); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 0) && - BEAST_EXPECT(meta.affected_nodes_size() == 5) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_PAY_CHANNEL; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_DIRECTORY_NODE; - }) == 2) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 2); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 0) && - BEAST_EXPECT(meta.affected_nodes_size() == 2) && - - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_PAY_CHANNEL; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 1); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 0) && - BEAST_EXPECT(meta.affected_nodes_size() == 5) && - - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_PAY_CHANNEL; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_DIRECTORY_NODE; - }) == 2) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 2); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 1) && - BEAST_EXPECT(meta.affected_nodes_size() == 3) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ESCROW; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return 
entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_DIRECTORY_NODE; - }) == 1); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 0) && - BEAST_EXPECT(meta.affected_nodes_size() == 3) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ESCROW; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_DIRECTORY_NODE; - }) == 1); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 2) && - BEAST_EXPECT(meta.affected_nodes_size() == 3) && - - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ESCROW; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_DIRECTORY_NODE; - }) == 1); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 1) && - BEAST_EXPECT(meta.affected_nodes_size() == 3) && - - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ESCROW; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_DIRECTORY_NODE; - }) == 1); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 0) && - BEAST_EXPECT(meta.affected_nodes_size() == 3) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_SIGNER_LIST; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_DIRECTORY_NODE; - }) == 1); - 
}}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 0) && - BEAST_EXPECT(meta.affected_nodes_size() == 4) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_DIRECTORY_NODE; - }) == 2) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_OFFER; - }) == 1); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 1) && - BEAST_EXPECT(meta.affected_nodes_size() == 4) && - - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_DIRECTORY_NODE; - }) == 2) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_OFFER; - }) == 1); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 0) && - BEAST_EXPECT(meta.affected_nodes_size() == 5) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_DIRECTORY_NODE; - }) == 2) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 2) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_RIPPLE_STATE; - }) == 1); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 2) && - BEAST_EXPECT(meta.affected_nodes_size() == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 1); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 1) && - BEAST_EXPECT(meta.affected_nodes_size() == 2) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 2); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 0) && - BEAST_EXPECT(meta.affected_nodes_size() == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - 
meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 1); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 2) && - BEAST_EXPECT(meta.affected_nodes_size() == 1) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 1); - }}, - {[this](auto meta) { - return BEAST_EXPECT(meta.transaction_index() == 0) && - BEAST_EXPECT(meta.affected_nodes_size() == 2) && - BEAST_EXPECT( - std::count_if( - meta.affected_nodes().begin(), - meta.affected_nodes().end(), - [](auto entry) { - return entry.ledger_entry_type() == - org::xrpl::rpc::v1::LedgerEntryType:: - LEDGER_ENTRY_TYPE_ACCOUNT_ROOT; - }) == 2); - }}}; - - auto doCheck = [this](auto txn, auto txCheck) { - return BEAST_EXPECT(txn.has_transaction()) && - BEAST_EXPECT(txn.validated()) && - BEAST_EXPECT(strHex(txn.hash()) == txCheck.hash) && - BEAST_EXPECT(txn.ledger_index() == txCheck.ledgerIndex) && - BEAST_EXPECT( - txn.transaction().sequence().value() == - txCheck.sequence) && - txCheck.checkTxn(txn.transaction()); - }; - - auto doMetaCheck = [this](auto txn, auto txMetaCheck) { - return BEAST_EXPECT(txn.has_meta()) && - BEAST_EXPECT(txn.meta().has_transaction_result()) && - BEAST_EXPECT( - txn.meta().transaction_result().result_type() == - org::xrpl::rpc::v1::TransactionResult:: - RESULT_TYPE_TES) && - BEAST_EXPECT( - txn.meta().transaction_result().result() == - "tesSUCCESS") && - txMetaCheck(txn.meta()); - }; - - auto [res, status] = next(grpcPort, env, alice.human()); - - if (!BEAST_EXPECT(status.error_code() == 0)) - return; - - if (!BEAST_EXPECT(res.transactions().size() == std::size(txCheck))) - return; - for (int i = 0; i < res.transactions().size(); ++i) - { - BEAST_EXPECT(doCheck(res.transactions()[i], txCheck[i])); - BEAST_EXPECT(doMetaCheck(res.transactions()[i], txMetaCheck[i])); - } - - // test binary representation - std::tie(res, status) = nextBinary(grpcPort, env, alice.human()); - - // txns vector does not contain the first two transactions returned by - // account_tx - if (!BEAST_EXPECT(res.transactions().size() == txns.size() + 2)) - return; - - std::reverse(txns.begin(), txns.end()); - for (int i = 0; i < txns.size(); ++i) - { - auto toByteString = [](auto data) { - const char* bytes = reinterpret_cast(data.data()); - return std::string(bytes, data.size()); - }; - - auto tx = txns[i]; - Serializer s = tx->getSerializer(); - std::string bin = toByteString(s); - - BEAST_EXPECT(res.transactions(i).transaction_binary() == bin); - } - } - - void - testAccountTxPagingGrpc() - { - testcase("Test Account_tx Grpc"); - - using namespace test::jtx; - std::unique_ptr config = envconfig(addGrpcConfig); - std::string grpcPort = *(*config)["port_grpc"].get("port"); - Env env(*this, std::move(config)); - - Account A1{"A1"}; - Account A2{"A2"}; - Account A3{"A3"}; - - env.fund(XRP(10000), A1, A2, A3); - env.close(); - - env.trust(A3["USD"](1000), A1); - env.trust(A2["USD"](1000), A1); - env.trust(A3["USD"](1000), A2); - env.close(); - - for (auto i = 0; i < 5; ++i) - { - env(pay(A2, A1, A2["USD"](2))); - env(pay(A3, A1, A3["USD"](2))); - env(offer(A1, XRP(11), A1["USD"](1))); - env(offer(A2, XRP(10), A2["USD"](1))); - env(offer(A3, XRP(9), A3["USD"](1))); - env.close(); - } - - /* The sequence/ledger for A3 are as 
follows: - * seq ledger_index - * 3 ----> 3 - * 1 ----> 3 - * 2 ----> 4 - * 2 ----> 4 - * 2 ----> 5 - * 3 ----> 5 - * 4 ----> 6 - * 5 ----> 6 - * 6 ----> 7 - * 7 ----> 7 - * 8 ----> 8 - * 9 ----> 8 - * 10 ----> 9 - * 11 ----> 9 - */ - - // page through the results in several ways. - { - // limit = 2, 3 batches giving the first 6 txs - auto [res, status] = next(grpcPort, env, A3.human(), 2, 5, 2, true); - - auto txs = res.transactions(); - if (!BEAST_EXPECT(txs.size() == 2)) - return; - - BEAST_EXPECT(checkTransaction(txs[0u], 3, 3)); - BEAST_EXPECT(checkTransaction(txs[1u], 3, 3)); - if (!BEAST_EXPECT(res.has_marker())) - return; - - std::tie(res, status) = next( - grpcPort, env, A3.human(), 2, 5, 2, true, res.mutable_marker()); - txs = res.transactions(); - if (!BEAST_EXPECT(txs.size() == 2)) - return; - BEAST_EXPECT(checkTransaction(txs[0u], 4, 4)); - BEAST_EXPECT(checkTransaction(txs[1u], 4, 4)); - if (!BEAST_EXPECT(res.has_marker())) - return; - - std::tie(res, status) = next( - grpcPort, env, A3.human(), 2, 5, 2, true, res.mutable_marker()); - txs = res.transactions(); - if (!BEAST_EXPECT(txs.size() == 2)) - return; - BEAST_EXPECT(checkTransaction(txs[0u], 4, 5)); - BEAST_EXPECT(checkTransaction(txs[1u], 5, 5)); - BEAST_EXPECT(!res.has_marker()); - return; - } - - { - // limit 1, 3 requests giving the first 3 txs - auto [res, status] = next(grpcPort, env, A3.human(), 3, 9, 1, true); - auto txs = res.transactions(); - if (!BEAST_EXPECT(txs.size() == 1)) - return; - BEAST_EXPECT(checkTransaction(txs[0u], 3, 3)); - if (!BEAST_EXPECT(res.has_marker())) - return; - - std::tie(res, status) = next( - grpcPort, env, A3.human(), 3, 9, 1, true, res.mutable_marker()); - txs = res.transactions(); - if (!BEAST_EXPECT(txs.size() == 1)) - return; - BEAST_EXPECT(checkTransaction(txs[0u], 3, 3)); - if (!BEAST_EXPECT(res.has_marker())) - return; - - std::tie(res, status) = next( - grpcPort, env, A3.human(), 3, 9, 1, true, res.mutable_marker()); - txs = res.transactions(); - if (!BEAST_EXPECT(txs.size() == 1)) - return; - BEAST_EXPECT(checkTransaction(txs[0u], 4, 4)); - if (!BEAST_EXPECT(res.has_marker())) - return; - - // continue with limit 3, to end of all txs - std::tie(res, status) = next( - grpcPort, env, A3.human(), 3, 9, 3, true, res.mutable_marker()); - txs = res.transactions(); - if (!BEAST_EXPECT(txs.size() == 3)) - return; - BEAST_EXPECT(checkTransaction(txs[0u], 4, 4)); - BEAST_EXPECT(checkTransaction(txs[1u], 4, 5)); - BEAST_EXPECT(checkTransaction(txs[2u], 5, 5)); - if (!BEAST_EXPECT(res.has_marker())) - return; - - std::tie(res, status) = next( - grpcPort, env, A3.human(), 3, 9, 3, true, res.mutable_marker()); - txs = res.transactions(); - if (!BEAST_EXPECT(txs.size() == 3)) - return; - BEAST_EXPECT(checkTransaction(txs[0u], 6, 6)); - BEAST_EXPECT(checkTransaction(txs[1u], 7, 6)); - BEAST_EXPECT(checkTransaction(txs[2u], 8, 7)); - if (!BEAST_EXPECT(res.has_marker())) - return; - - std::tie(res, status) = next( - grpcPort, env, A3.human(), 3, 9, 3, true, res.mutable_marker()); - txs = res.transactions(); - if (!BEAST_EXPECT(txs.size() == 3)) - return; - BEAST_EXPECT(checkTransaction(txs[0u], 9, 7)); - BEAST_EXPECT(checkTransaction(txs[1u], 10, 8)); - BEAST_EXPECT(checkTransaction(txs[2u], 11, 8)); - if (!BEAST_EXPECT(res.has_marker())) - return; - - std::tie(res, status) = next( - grpcPort, env, A3.human(), 3, 9, 3, true, res.mutable_marker()); - txs = res.transactions(); - if (!BEAST_EXPECT(txs.size() == 2)) - return; - BEAST_EXPECT(checkTransaction(txs[0u], 12, 9)); - 
BEAST_EXPECT(checkTransaction(txs[1u], 13, 9)); - BEAST_EXPECT(!res.has_marker()); - } - - { - // limit 2, descending, 2 batches giving last 4 txs - auto [res, status] = - next(grpcPort, env, A3.human(), 3, 9, 2, false); - auto txs = res.transactions(); - if (!BEAST_EXPECT(txs.size() == 2)) - return; - BEAST_EXPECT(checkTransaction(txs[0u], 13, 9)); - BEAST_EXPECT(checkTransaction(txs[1u], 12, 9)); - if (!BEAST_EXPECT(res.has_marker())) - return; - - std::tie(res, status) = next( - grpcPort, - env, - A3.human(), - 3, - 9, - 2, - false, - res.mutable_marker()); - txs = res.transactions(); - if (!BEAST_EXPECT(txs.size() == 2)) - return; - BEAST_EXPECT(checkTransaction(txs[0u], 11, 8)); - BEAST_EXPECT(checkTransaction(txs[1u], 10, 8)); - if (!BEAST_EXPECT(res.has_marker())) - return; - - // continue with limit 3 until all txs have been seen - std::tie(res, status) = next( - grpcPort, - env, - A3.human(), - 3, - 9, - 3, - false, - res.mutable_marker()); - txs = res.transactions(); - if (!BEAST_EXPECT(txs.size() == 3)) - return; - BEAST_EXPECT(checkTransaction(txs[0u], 9, 7)); - BEAST_EXPECT(checkTransaction(txs[1u], 8, 7)); - BEAST_EXPECT(checkTransaction(txs[2u], 7, 6)); - if (!BEAST_EXPECT(res.has_marker())) - return; - - std::tie(res, status) = next( - grpcPort, - env, - A3.human(), - 3, - 9, - 3, - false, - res.mutable_marker()); - txs = res.transactions(); - if (!BEAST_EXPECT(txs.size() == 3)) - return; - BEAST_EXPECT(checkTransaction(txs[0u], 6, 6)); - BEAST_EXPECT(checkTransaction(txs[1u], 5, 5)); - BEAST_EXPECT(checkTransaction(txs[2u], 4, 5)); - if (!BEAST_EXPECT(res.has_marker())) - return; - - std::tie(res, status) = next( - grpcPort, - env, - A3.human(), - 3, - 9, - 3, - false, - res.mutable_marker()); - txs = res.transactions(); - if (!BEAST_EXPECT(txs.size() == 3)) - return; - BEAST_EXPECT(checkTransaction(txs[0u], 4, 4)); - BEAST_EXPECT(checkTransaction(txs[1u], 4, 4)); - BEAST_EXPECT(checkTransaction(txs[2u], 3, 3)); - if (!BEAST_EXPECT(res.has_marker())) - return; - - std::tie(res, status) = next( - grpcPort, - env, - A3.human(), - 3, - 9, - 3, - false, - res.mutable_marker()); - txs = res.transactions(); - if (!BEAST_EXPECT(txs.size() == 1)) - return; - BEAST_EXPECT(checkTransaction(txs[0u], 3, 3)); - BEAST_EXPECT(!res.has_marker()); - } - } - public: void run() override { testAccountTxPaging(); - testAccountTxPagingGrpc(); - testAccountTxParametersGrpc(); - testAccountTxContentsGrpc(); } }; diff --git a/src/test/consensus/NegativeUNL_test.cpp b/src/test/consensus/NegativeUNL_test.cpp index 39028afff81..fee790281a0 100644 --- a/src/test/consensus/NegativeUNL_test.cpp +++ b/src/test/consensus/NegativeUNL_test.cpp @@ -25,7 +25,6 @@ #include #include #include -#include #include namespace ripple { @@ -1883,114 +1882,6 @@ class NegativeUNLVoteFilterValidations_test : public beast::unit_test::suite } }; -class NegativeUNLgRPC_test : public beast::unit_test::suite -{ - template - std::string - toByteString(T const& data) - { - const char* bytes = reinterpret_cast(data.data()); - return {bytes, data.size()}; - } - - void - testGRPC() - { - testcase("gRPC test"); - - auto gRpcTest = [this]( - std::uint32_t negUnlSize, - bool hasToDisable, - bool hasToReEnable) -> bool { - NetworkHistory history = { - *this, {20, negUnlSize, hasToDisable, hasToReEnable, {}}}; - if (!history.goodHistory) - return false; - - auto const& negUnlObject = - history.lastLedger()->read(keylet::negativeUNL()); - if (!negUnlSize && !hasToDisable && !hasToReEnable && !negUnlObject) - return true; - if 
(!negUnlObject) - return false; - - org::xrpl::rpc::v1::NegativeUNL to; - ripple::RPC::convert(to, *negUnlObject); - if (!to.has_flags() || - to.flags().value() != negUnlObject->getFlags()) - return false; - - bool goodSize = to.disabled_validators_size() == negUnlSize && - to.has_validator_to_disable() == hasToDisable && - to.has_validator_to_re_enable() == hasToReEnable; - if (!goodSize) - return false; - - if (negUnlSize) - { - if (!negUnlObject->isFieldPresent(sfDisabledValidators)) - return false; - auto const& nUnlData = - negUnlObject->getFieldArray(sfDisabledValidators); - if (nUnlData.size() != negUnlSize) - return false; - int idx = 0; - for (auto const& n : nUnlData) - { - if (!n.isFieldPresent(sfPublicKey) || - !n.isFieldPresent(sfFirstLedgerSequence)) - return false; - - if (!to.disabled_validators(idx).has_ledger_sequence() || - !to.disabled_validators(idx).has_public_key()) - return false; - - if (to.disabled_validators(idx).public_key().value() != - toByteString(n.getFieldVL(sfPublicKey))) - return false; - - if (to.disabled_validators(idx).ledger_sequence().value() != - n.getFieldU32(sfFirstLedgerSequence)) - return false; - - ++idx; - } - } - - if (hasToDisable) - { - if (!negUnlObject->isFieldPresent(sfValidatorToDisable)) - return false; - if (to.validator_to_disable().value() != - toByteString( - negUnlObject->getFieldVL(sfValidatorToDisable))) - return false; - } - - if (hasToReEnable) - { - if (!negUnlObject->isFieldPresent(sfValidatorToReEnable)) - return false; - if (to.validator_to_re_enable().value() != - toByteString( - negUnlObject->getFieldVL(sfValidatorToReEnable))) - return false; - } - - return true; - }; - - BEAST_EXPECT(gRpcTest(0, false, false)); - BEAST_EXPECT(gRpcTest(2, true, true)); - } - - void - run() override - { - testGRPC(); - } -}; - BEAST_DEFINE_TESTSUITE(NegativeUNL, ledger, ripple); BEAST_DEFINE_TESTSUITE(NegativeUNLNoAmendment, ledger, ripple); @@ -2006,7 +1897,6 @@ BEAST_DEFINE_TESTSUITE_PRIO( 1); BEAST_DEFINE_TESTSUITE(NegativeUNLVoteNewValidator, consensus, ripple); BEAST_DEFINE_TESTSUITE(NegativeUNLVoteFilterValidations, consensus, ripple); -BEAST_DEFINE_TESTSUITE(NegativeUNLgRPC, ledger, ripple); /////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////// diff --git a/src/test/protocol/KnownFormatToGRPC_test.cpp b/src/test/protocol/KnownFormatToGRPC_test.cpp deleted file mode 100644 index bf49f2e3134..00000000000 --- a/src/test/protocol/KnownFormatToGRPC_test.cpp +++ /dev/null @@ -1,975 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include -#include -#include -#include -#include - -#include "org/xrpl/rpc/v1/ledger_objects.pb.h" -#include "org/xrpl/rpc/v1/transaction.pb.h" - -#include -#include -#include -#include - -namespace ripple { - -// This test suite uses the google::protobuf::Descriptor class to do runtime -// reflection on our gRPC stuff. At the time of this writing documentation -// for Descriptor could be found here: -// -// https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.descriptor#Descriptor - -class KnownFormatToGRPC_test : public beast::unit_test::suite -{ -private: - static constexpr auto fieldTYPE_UINT32 = - google::protobuf::FieldDescriptor::Type::TYPE_UINT32; - - static constexpr auto fieldTYPE_UINT64 = - google::protobuf::FieldDescriptor::Type::TYPE_UINT64; - - static constexpr auto fieldTYPE_BYTES = - google::protobuf::FieldDescriptor::Type::TYPE_BYTES; - - static constexpr auto fieldTYPE_STRING = - google::protobuf::FieldDescriptor::Type::TYPE_STRING; - - static constexpr auto fieldTYPE_MESSAGE = - google::protobuf::FieldDescriptor::Type::TYPE_MESSAGE; - - // Format names are CamelCase and FieldDescriptor names are snake_case. - // Convert from CamelCase to snake_case. Do not be fooled by consecutive - // capital letters like in NegativeUNL. - static std::string - formatNameToEntryTypeName(std::string const& fmtName) - { - std::string entryName; - entryName.reserve(fmtName.size()); - bool prevUpper = false; - for (std::size_t i = 0; i < fmtName.size(); i++) - { - char const ch = fmtName[i]; - bool const upper = std::isupper(ch); - if (i > 0 && !prevUpper && upper) - entryName.push_back('_'); - - prevUpper = upper; - entryName.push_back(std::tolower(ch)); - } - return entryName; - }; - - // Create a map of (most) all the SFields in an SOTemplate. This map - // can be used to correlate a gRPC Descriptor to its corresponding SField. - template - static std::map - soTemplateToSFields( - SOTemplate const& soTemplate, - [[maybe_unused]] KeyType fmtId) - { - std::map sFields; - for (SOElement const& element : soTemplate) - { - SField const& sField = element.sField(); - - // Fields that gRPC never includes. - // - // o sfLedgerIndex and - // o sfLedgerEntryType are common to all ledger objects, so - // gRPC includes them at a higher level than the ledger - // object itself. - // - // o sfOperationLimit is an optional field in all transactions, - // but no one knows what it was intended for. - using FieldCode_t = - std::remove_const::type; - static const std::set excludedSFields{ - sfLedgerIndex.fieldCode, - sfLedgerEntryType.fieldCode, - sfOperationLimit.fieldCode}; - - if (excludedSFields.count(sField.fieldCode)) - continue; - - // There are certain fields that gRPC never represents in - // transactions. Exclude those. - // - // o sfPreviousTxnID is obsolete and was replaced by - // sfAccountTxnID some time before November of 2014. - // - // o sfWalletLocator and - // o sfWalletSize have been deprecated for six years or more. - // - // o sfTransactionType is not needed by gRPC, since the typing - // is handled using protobuf message types. - if constexpr (std::is_same_v) - { - static const std::set excludedTxFields{ - sfPreviousTxnID.fieldCode, - sfTransactionType.fieldCode, - sfWalletLocator.fieldCode, - sfWalletSize.fieldCode}; - - if (excludedTxFields.count(sField.fieldCode)) - continue; - } - - // If fmtId is a LedgerEntryType, exclude certain fields. 
- if constexpr (std::is_same_v) - { - // Fields that gRPC does not include in certain LedgerFormats. - // - // o sfWalletLocator, - // o sfWalletSize, - // o sfExchangeRate, and - // o sfFirstLedgerSequence are all deprecated fields in - // their respective ledger objects. - static const std:: - map> - gRPCOmitFields{ - {ltACCOUNT_ROOT, {&sfWalletLocator, &sfWalletSize}}, - {ltDIR_NODE, {&sfExchangeRate}}, - {ltLEDGER_HASHES, {&sfFirstLedgerSequence}}, - }; - - if (auto const iter = gRPCOmitFields.find(fmtId); - iter != gRPCOmitFields.end()) - { - std::vector const& omits = iter->second; - - // Check for fields that gRPC omits from this type. - if (std::find_if( - omits.begin(), - omits.end(), - [&sField](SField const* const omit) { - return *omit == sField; - }) != omits.end()) - { - // This is one of the fields that gRPC omits. - continue; - } - } - } - - // The SFields and gRPC disagree on the names of some fields. - // Provide a mapping from SField names to gRPC names for the - // known exceptions. - // - // clang-format off - // - // The implementers of the gRPC interface made the decision not - // to abbreviate anything. This accounts for the following - // field name differences: - // - // "AccountTxnID", "AccountTransactionID" - // "PreviousTxnID", "PreviousTransactionID" - // "PreviousTxnLgrSeq", "PreviousTransactionLedgerSequence" - // "SigningPubKey", "SigningPublicKey" - // "TxnSignature", "TransactionSignature" - // - // gRPC adds typing information for Fee, which accounts for - // "Fee", "XRPDropsAmount" - // - // There's one misspelling which accounts for - // "TakerGetsCurrency", "TakerGetsCurreny" - // - // The implementers of the gRPC interface observed that a - // PaymentChannelClaim transaction has a TxnSignature field at the - // upper level and a Signature field at the lever level. They - // felt that was confusing, which is the reason for - // "Signature", "PaymentChannelSignature" - // - static const std::map sFieldToGRPC{ - {"AccountTxnID", "AccountTransactionID"}, - {"Fee", "XRPDropsAmount"}, - {"PreviousTxnID", "PreviousTransactionID"}, - {"PreviousTxnLgrSeq", "PreviousTransactionLedgerSequence"}, - {"Signature", "PaymentChannelSignature"}, - {"SigningPubKey", "SigningPublicKey"}, - {"TakerGetsCurrency", "TakerGetsCurreny"}, - {"TxnSignature", "TransactionSignature"}, - }; - // clang-format on - - auto const iter = sFieldToGRPC.find(sField.getName()); - std::string gRPCName = - iter != sFieldToGRPC.end() ? iter->second : sField.getName(); - - sFields.insert({std::move(gRPCName), &sField}); - } - return sFields; - } - - // Given a Descriptor for a KnownFormat and a map of the SFields of that - // KnownFormat, make sure the fields are aligned. - void - validateDescriptorAgainstSFields( - google::protobuf::Descriptor const* const pbufDescriptor, - google::protobuf::Descriptor const* const commonFields, - std::string const& knownFormatName, - std::map&& sFields) - { - // Create namespace aliases for shorter names. - namespace pbuf = google::protobuf; - - // We'll be running through two sets of pbuf::Descriptors: the ones in - // the OneOf and the common fields. Here is a lambda that factors out - // the common checking code for these two cases. - auto checkFieldDesc = [this, &sFields, &knownFormatName]( - pbuf::FieldDescriptor const* const - fieldDesc) { - // gRPC has different handling for repeated vs non-repeated - // types. So we need to do that too. - std::string name; - if (fieldDesc->is_repeated()) - { - // Repeated-type handling. 
- - // Munge the fieldDescriptor name so it looks like the - // name in sFields. - name = fieldDesc->camelcase_name(); - name[0] = toupper(name[0]); - - // The ledger gives UNL all caps. Adapt to that. - if (size_t const i = name.find("Unl"); i != std::string::npos) - { - name[i + 1] = 'N'; - name[i + 2] = 'L'; - } - - // The ledger gives the NFT part of NFToken all caps. - // Adapt to that. - if (size_t const i = name.find("Nft"); i != std::string::npos) - { - name[i + 1] = 'F'; - name[i + 2] = 'T'; - } - - if (!sFields.count(name)) - { - fail( - std::string("Repeated Protobuf Descriptor '") + name + - "' expected in KnownFormat '" + knownFormatName + - "' and not found", - __FILE__, - __LINE__); - return; - } - pass(); - - validateRepeatedField(fieldDesc, sFields.at(name)); - } - else - { - // Non-repeated handling. - pbuf::Descriptor const* const entryDesc = - fieldDesc->message_type(); - if (entryDesc == nullptr) - return; - - name = entryDesc->name(); - if (!sFields.count(name)) - { - fail( - std::string("Protobuf Descriptor '") + - entryDesc->name() + "' expected in KnownFormat '" + - knownFormatName + "' and not found", - __FILE__, - __LINE__); - return; - } - pass(); - - validateDescriptor(entryDesc, sFields.at(entryDesc->name())); - } - // Remove the validated field from the map so we can tell if - // there are left over fields at the end of all comparisons. - sFields.erase(name); - }; - - // Compare the SFields to the FieldDescriptor->Descriptors. - for (int i = 0; i < pbufDescriptor->field_count(); ++i) - { - pbuf::FieldDescriptor const* const fieldDesc = - pbufDescriptor->field(i); - if (fieldDesc == nullptr || fieldDesc->type() != fieldTYPE_MESSAGE) - continue; - - checkFieldDesc(fieldDesc); - } - - // Now all of the OneOf-specific fields have been removed from - // sFields. But there may be common fields left in there. Process - // the commonFields next. - if (commonFields) - { - for (int i = 0; i < commonFields->field_count(); ++i) - { - // If the field we picked up is a OneOf, skip it. Common - // fields are never OneOfs. - pbuf::FieldDescriptor const* const fieldDesc = - commonFields->field(i); - - if (fieldDesc == nullptr || - fieldDesc->containing_oneof() != nullptr || - fieldDesc->type() != fieldTYPE_MESSAGE) - continue; - - checkFieldDesc(fieldDesc); - } - } - - // All SFields in the KnownFormat have corresponding gRPC fields - // if the sFields map is now empty. - if (!sFields.empty()) - { - fail( - std::string("Protobuf Descriptor '") + pbufDescriptor->name() + - "' did not account for all fields in KnownFormat '" + - knownFormatName + "'. Left over field: `" + - sFields.begin()->first + "'", - __FILE__, - __LINE__); - return; - } - pass(); - } - - // Compare a protobuf descriptor with multiple oneOfFields to choose from - // to an SField. - void - validateOneOfDescriptor( - google::protobuf::Descriptor const* const entryDesc, - SField const* const sField) - { - // Create namespace aliases for shorter names. - namespace pbuf = google::protobuf; - - // Note that it's not okay to compare names because SFields and - // gRPC do not always agree on the names. - if (entryDesc->field_count() == 0 || entryDesc->oneof_decl_count() != 1) - { - fail( - std::string("Protobuf Descriptor '") + entryDesc->name() + - "' expected to have multiple OneOf fields and nothing else", - __FILE__, - __LINE__); - return; - } - - pbuf::FieldDescriptor const* const fieldDesc = entryDesc->field(0); - if (fieldDesc == nullptr) - { - fail( - std::string("Internal test failure. 
Unhandled nullptr " - "in FieldDescriptor for '") + - entryDesc->name() + "'", - __FILE__, - __LINE__); - return; - } - - // Special handling for CurrencyAmount - if (sField->fieldType == STI_AMOUNT && - entryDesc->name() == "CurrencyAmount") - { - // SFields of type STI_AMOUNT are represented in gRPC by a - // multi-field CurrencyAmount. We don't really learn anything - // by diving into the interior of CurrencyAmount, so we stop here - // and call it good. - pass(); - return; - } - - fail( - std::string("Unhandled OneOf Protobuf Descriptor '") + - entryDesc->name() + "'", - __FILE__, - __LINE__); - } - - void - validateMultiFieldDescriptor( - google::protobuf::Descriptor const* const entryDesc, - SField const* const sField) - { - // Create namespace aliases for shorter names. - namespace pbuf = google::protobuf; - - if (entryDesc->field_count() <= 1 || entryDesc->oneof_decl_count() != 0) - { - fail( - std::string("Protobuf Descriptor '") + entryDesc->name() + - "' expected to have multiple fields and nothing else", - __FILE__, - __LINE__); - return; - } - - // There are composite fields that the SFields handle differently - // from gRPC. Handle those here. - { - struct FieldContents - { - std::string_view fieldName; - google::protobuf::FieldDescriptor::Type fieldType; - - bool - operator<(FieldContents const& other) const - { - return this->fieldName < other.fieldName; - } - - bool - operator==(FieldContents const& other) const - { - return this->fieldName == other.fieldName && - this->fieldType == other.fieldType; - } - }; - - struct SpecialEntry - { - std::string_view const descriptorName; - SerializedTypeID const sFieldType; - std::set const fields; - }; - - // clang-format off - static const std::array specialEntries{ - SpecialEntry{ - "Currency", STI_UINT160, - { - {"name", fieldTYPE_STRING}, - {"code", fieldTYPE_BYTES} - } - }, - SpecialEntry{ - "Memo", STI_OBJECT, - { - {"memo_data", fieldTYPE_BYTES}, - {"memo_format", fieldTYPE_BYTES}, - {"memo_type", fieldTYPE_BYTES} - } - } - }; - // clang-format on - - // If we're handling a SpecialEntry... - if (auto const iter = std::find_if( - specialEntries.begin(), - specialEntries.end(), - [entryDesc, sField](SpecialEntry const& entry) { - return entryDesc->name() == entry.descriptorName && - sField->fieldType == entry.sFieldType; - }); - iter != specialEntries.end()) - { - // Verify the SField. - if (!BEAST_EXPECT(sField->fieldType == iter->sFieldType)) - return; - - // Verify all of the fields in the entryDesc. - if (!BEAST_EXPECT( - entryDesc->field_count() == iter->fields.size())) - return; - - for (int i = 0; i < entryDesc->field_count(); ++i) - { - pbuf::FieldDescriptor const* const fieldDesc = - entryDesc->field(i); - - FieldContents const contents{ - fieldDesc->name(), fieldDesc->type()}; - - if (!BEAST_EXPECT( - iter->fields.find(contents) != iter->fields.end())) - return; - } - - // This field is good. - pass(); - return; - } - } - - // If the field was not one of the SpecialEntries, we expect it to be - // an InnerObjectFormat. - SOTemplate const* const innerFormat = - InnerObjectFormats::getInstance().findSOTemplateBySField(*sField); - if (innerFormat == nullptr) - { - fail( - "SOTemplate for field '" + sField->getName() + "' not found", - __FILE__, - __LINE__); - return; - } - - // Create a map we can use use to correlate each field in the - // gRPC Descriptor to its corresponding SField. - std::map sFields = - soTemplateToSFields(*innerFormat, 0); - - // Compare the SFields to the FieldDescriptor->Descriptors. 
- validateDescriptorAgainstSFields( - entryDesc, nullptr, sField->getName(), std::move(sFields)); - } - - // Compare a protobuf descriptor with only one field to an SField. - void - validateOneDescriptor( - google::protobuf::Descriptor const* const entryDesc, - SField const* const sField) - { - // Create namespace aliases for shorter names. - namespace pbuf = google::protobuf; - - // Note that it's not okay to compare names because SFields and - // gRPC do not always agree on the names. - if (entryDesc->field_count() != 1 || entryDesc->oneof_decl_count() != 0) - { - fail( - std::string("Protobuf Descriptor '") + entryDesc->name() + - "' expected to be one field and nothing else", - __FILE__, - __LINE__); - return; - } - - pbuf::FieldDescriptor const* const fieldDesc = entryDesc->field(0); - if (fieldDesc == nullptr) - { - fail( - std::string("Internal test failure. Unhandled nullptr " - "in FieldDescriptor for '") + - entryDesc->name() + "'", - __FILE__, - __LINE__); - return; - } - - // Create a map from SerializedTypeID to pbuf::FieldDescriptor::Type. - // - // This works for most, but not all, types because of divergence - // between the gRPC and LedgerFormat implementations. We deal - // with the special cases later. - // clang-format off - static const std::map - sTypeToFieldDescType{ - {STI_UINT8, fieldTYPE_UINT32}, - {STI_UINT16, fieldTYPE_UINT32}, - {STI_UINT32, fieldTYPE_UINT32}, - - {STI_UINT64, fieldTYPE_UINT64}, - - {STI_ACCOUNT, fieldTYPE_STRING}, - - {STI_AMOUNT, fieldTYPE_BYTES}, - {STI_UINT128, fieldTYPE_BYTES}, - {STI_UINT160, fieldTYPE_BYTES}, - {STI_UINT256, fieldTYPE_BYTES}, - {STI_VL, fieldTYPE_BYTES}, - }; - //clang-format on - - // If the SField and FieldDescriptor::Type correlate we're good. - if (auto const iter = sTypeToFieldDescType.find(sField->fieldType); - iter != sTypeToFieldDescType.end() && - iter->second == fieldDesc->type()) - { - pass(); - return; - } - - // Handle special cases for specific SFields. - static const std::map - sFieldCodeToFieldDescType{ - {sfDomain.fieldCode, fieldTYPE_STRING}, - {sfFee.fieldCode, fieldTYPE_UINT64}, - {sfURI.fieldCode, fieldTYPE_STRING}}; - - if (auto const iter = sFieldCodeToFieldDescType.find(sField->fieldCode); - iter != sFieldCodeToFieldDescType.end() && - iter->second == fieldDesc->type()) - { - pass(); - return; - } - - // Special handling for all Message types. - if (fieldDesc->type() == fieldTYPE_MESSAGE) - { - // We need to recurse to get to the bottom of the field(s) - // in question. - - // Start by identifying which fields we need to be handling. - // clang-format off - static const std::map messageMap{ - {sfAccount.fieldCode, "AccountAddress"}, - {sfAmount.fieldCode, "CurrencyAmount"}, - {sfAuthorize.fieldCode, "AccountAddress"}, - {sfBalance.fieldCode, "CurrencyAmount"}, - {sfDestination.fieldCode, "AccountAddress"}, - {sfFee.fieldCode, "XRPDropsAmount"}, - {sfHighLimit.fieldCode, "CurrencyAmount"}, - {sfLowLimit.fieldCode, "CurrencyAmount"}, - {sfOwner.fieldCode, "AccountAddress"}, - {sfRegularKey.fieldCode, "AccountAddress"}, - {sfSendMax.fieldCode, "CurrencyAmount"}, - {sfTakerGets.fieldCode, "CurrencyAmount"}, - {sfTakerGetsCurrency.fieldCode, "Currency"}, - {sfTakerPays.fieldCode, "CurrencyAmount"}, - {sfTakerPaysCurrency.fieldCode, "Currency"}, - }; - // clang-format on - if (messageMap.count(sField->fieldCode)) - { - pbuf::Descriptor const* const entry2Desc = - fieldDesc->message_type(); - - if (entry2Desc == nullptr) - { - fail( - std::string("Unexpected gRPC. 
") + fieldDesc->name() + - " MESSAGE with null Descriptor", - __FILE__, - __LINE__); - return; - } - - // The Descriptor name should match the messageMap name. - if (messageMap.at(sField->fieldCode) != entry2Desc->name()) - { - fail( - std::string( - "Internal test error. Mismatch between SField '") + - sField->getName() + "' and gRPC Descriptor name '" + - entry2Desc->name() + "'", - __FILE__, - __LINE__); - return; - } - pass(); - - // Recurse to the next lower Descriptor. - validateDescriptor(entry2Desc, sField); - } - return; - } - - fail( - std::string("Internal test error. Unhandled FieldDescriptor '") + - entryDesc->name() + "' has type `" + fieldDesc->type_name() + - "` and label " + std::to_string(fieldDesc->label()), - __FILE__, - __LINE__); - } - - // Compare a repeated protobuf FieldDescriptor to an SField. - void - validateRepeatedField( - google::protobuf::FieldDescriptor const* const fieldDesc, - SField const* const sField) - { - // Create namespace aliases for shorter names. - namespace pbuf = google::protobuf; - - pbuf::Descriptor const* const entryDesc = fieldDesc->message_type(); - if (entryDesc == nullptr) - { - fail( - std::string("Expected Descriptor for repeated type ") + - sField->getName(), - __FILE__, - __LINE__); - return; - } - - // The following repeated types provide no further structure for their - // in-ledger representation. We just have to trust that the gRPC - // representation is reasonable for what the ledger implements. - static const std::set noFurtherDetail{ - {sfPaths.getName()}, - }; - - if (noFurtherDetail.count(sField->getName())) - { - // There is no Format representation for further details of this - // repeated type. We've done the best we can. - pass(); - return; - } - - // All of the repeated types that the test currently supports. - static const std::map repeatsWhat{ - {sfAmendments.getName(), &sfAmendment}, - {sfDisabledValidators.getName(), &sfDisabledValidator}, - {sfHashes.getName(), &sfLedgerHash}, - {sfIndexes.getName(), &sfLedgerIndex}, - {sfMajorities.getName(), &sfMajority}, - {sfMemos.getName(), &sfMemo}, - {sfNFTokens.getName(), &sfNFToken}, - {sfSignerEntries.getName(), &sfSignerEntry}, - {sfSigners.getName(), &sfSigner}, - {sfNFTokenOffers.getName(), &sfLedgerIndex}}; - - if (!repeatsWhat.count(sField->getName())) - { - fail( - std::string("Unexpected repeated type ") + fieldDesc->name(), - __FILE__, - __LINE__); - return; - } - pass(); - - // Process the type contained by the repeated type. - validateDescriptor(entryDesc, repeatsWhat.at(sField->getName())); - } - - // Determine which of the Descriptor validators to dispatch to. 
- void - validateDescriptor( - google::protobuf::Descriptor const* const entryDesc, - SField const* const sField) - { - if (entryDesc->nested_type_count() != 0 || - entryDesc->enum_type_count() != 0 || - entryDesc->extension_range_count() != 0 || - entryDesc->reserved_range_count() != 0) - { - fail( - std::string("Protobuf Descriptor '") + entryDesc->name() + - "' uses unsupported protobuf features", - __FILE__, - __LINE__); - return; - } - - // Dispatch to the correct validator - if (entryDesc->oneof_decl_count() > 0) - return validateOneOfDescriptor(entryDesc, sField); - - if (entryDesc->field_count() > 1) - return validateMultiFieldDescriptor(entryDesc, sField); - - return validateOneDescriptor(entryDesc, sField); - } - - // Compare a protobuf descriptor to a KnownFormat::Item - template - void - validateFields( - google::protobuf::Descriptor const* const pbufDescriptor, - google::protobuf::Descriptor const* const commonFields, - typename KnownFormats::Item const* const - knownFormatItem) - { - // Create namespace aliases for shorter names. - namespace pbuf = google::protobuf; - - // The names should usually be the same, but the bpufDescriptor - // name might have "Object" appended. - if (knownFormatItem->getName() != pbufDescriptor->name() && - knownFormatItem->getName() + "Object" != pbufDescriptor->name()) - { - fail( - std::string("Protobuf Descriptor '") + pbufDescriptor->name() + - "' and KnownFormat::Item '" + knownFormatItem->getName() + - "' don't have the same name", - __FILE__, - __LINE__); - return; - } - pass(); - - // Create a map we can use use to correlate each field in the - // gRPC Descriptor to its corresponding SField. - std::map sFields = soTemplateToSFields( - knownFormatItem->getSOTemplate(), knownFormatItem->getType()); - - // Compare the SFields to the FieldDescriptor->Descriptors. - validateDescriptorAgainstSFields( - pbufDescriptor, - commonFields, - knownFormatItem->getName(), - std::move(sFields)); - } - - template - void - testKnownFormats( - KnownFormats const& knownFormat, - std::string const& knownFormatName, - google::protobuf::Descriptor const* const commonFields, - google::protobuf::OneofDescriptor const* const oneOfDesc) - { - // Create namespace aliases for shorter names. - namespace grpc = org::xrpl::rpc::v1; - namespace pbuf = google::protobuf; - - if (!BEAST_EXPECT(oneOfDesc != nullptr)) - return; - - // Get corresponding names for all KnownFormat Items. - std::map< - std::string, - typename KnownFormats::Item const*> - formatTypes; - - for (auto const& item : knownFormat) - { - if constexpr (std::is_same_v) - { - // Skip LedgerEntryTypes that gRPC does not currently support. - static constexpr std::array notSupported{}; - - if (std::find( - notSupported.begin(), - notSupported.end(), - item.getType()) != notSupported.end()) - continue; - } - - if constexpr (std::is_same_v) - { - // Skip TxTypes that gRPC does not currently support. - static constexpr std::array notSupported{ - ttAMENDMENT, ttFEE, ttUNL_MODIFY}; - - if (std::find( - notSupported.begin(), - notSupported.end(), - item.getType()) != notSupported.end()) - continue; - } - - BEAST_EXPECT( - formatTypes - .insert({formatNameToEntryTypeName(item.getName()), &item}) - .second == true); - } - - // Verify that the OneOf objects match. Start by comparing - // KnownFormat vs gRPC OneOf counts. - { - BEAST_EXPECT(formatTypes.size() == oneOfDesc->field_count()); - } - - // This loop - // 1. Iterates through the gRPC OneOfs, - // 2. Finds each gRPC OneOf's matching KnownFormat::Item, - // 3. 
Sanity checks that the fields of the objects align well. - for (auto i = 0; i < oneOfDesc->field_count(); ++i) - { - pbuf::FieldDescriptor const* const fieldDesc = oneOfDesc->field(i); - - // The Field should be a TYPE_MESSAGE, which means we can get its - // descriptor. - if (fieldDesc->type() != fieldTYPE_MESSAGE) - { - fail( - std::string("gRPC OneOf '") + fieldDesc->name() + - "' is not TYPE_MESSAGE", - __FILE__, - __LINE__); - continue; - } - - auto const fmtIter = formatTypes.find(fieldDesc->name()); - - if (fmtIter == formatTypes.cend()) - { - fail( - std::string("gRPC OneOf '") + fieldDesc->name() + - "' not found in " + knownFormatName, - __FILE__, - __LINE__); - continue; - } - - // Validate that the gRPC and KnownFormat fields align. - validateFields( - fieldDesc->message_type(), commonFields, fmtIter->second); - - // Remove the checked KnownFormat from the map. This way we - // can check for leftovers when we're done processing. - formatTypes.erase(fieldDesc->name()); - } - - // Report any KnownFormats that don't have gRPC OneOfs. - for (auto const& spare : formatTypes) - { - fail( - knownFormatName + " '" + spare.second->getName() + - "' does not have a corresponding gRPC OneOf", - __FILE__, - __LINE__); - } - } - -public: - void - testLedgerObjectGRPCOneOfs() - { - testcase("Ledger object validation"); - - org::xrpl::rpc::v1::LedgerObject const ledgerObject; - - testKnownFormats( - LedgerFormats::getInstance(), - "LedgerFormats", - ledgerObject.GetDescriptor(), - ledgerObject.GetDescriptor()->FindOneofByName("object")); - - return; - } - - void - testTransactionGRPCOneOfs() - { - testcase("Transaction validation"); - - org::xrpl::rpc::v1::Transaction const txData; - - testKnownFormats( - TxFormats::getInstance(), - "TxFormats", - txData.GetDescriptor(), - txData.GetDescriptor()->FindOneofByName("transaction_data")); - - return; - } - - void - run() override - { - testLedgerObjectGRPCOneOfs(); - testTransactionGRPCOneOfs(); - } -}; - -BEAST_DEFINE_TESTSUITE(KnownFormatToGRPC, protocol, ripple); - -} // namespace ripple diff --git a/src/test/rpc/AccountInfo_test.cpp b/src/test/rpc/AccountInfo_test.cpp index 8ce8270ad20..9772a0ffb70 100644 --- a/src/test/rpc/AccountInfo_test.cpp +++ b/src/test/rpc/AccountInfo_test.cpp @@ -491,227 +491,6 @@ class AccountInfo_test : public beast::unit_test::suite } } - // gRPC stuff - class GetAccountInfoClient : public GRPCTestClientBase - { - public: - org::xrpl::rpc::v1::GetAccountInfoRequest request; - org::xrpl::rpc::v1::GetAccountInfoResponse reply; - - explicit GetAccountInfoClient(std::string const& port) - : GRPCTestClientBase(port) - { - } - - void - GetAccountInfo() - { - status = stub_->GetAccountInfo(&context, request, &reply); - } - }; - - void - testSimpleGrpc() - { - testcase("gRPC simple"); - - using namespace jtx; - std::unique_ptr config = envconfig(addGrpcConfig); - std::string grpcPort = *(*config)["port_grpc"].get("port"); - Env env(*this, std::move(config)); - Account const alice{"alice"}; - env.fund(drops(1000 * 1000 * 1000), alice); - - { - // most simple case - GetAccountInfoClient client(grpcPort); - client.request.mutable_account()->set_address(alice.human()); - client.GetAccountInfo(); - if (!BEAST_EXPECT(client.status.ok())) - { - return; - } - BEAST_EXPECT( - client.reply.account_data().account().value().address() == - alice.human()); - } - { - GetAccountInfoClient client(grpcPort); - client.request.mutable_account()->set_address(alice.human()); - client.request.set_queue(true); - 
client.request.mutable_ledger()->set_sequence(3); - client.GetAccountInfo(); - if (!BEAST_EXPECT(client.status.ok())) - return; - BEAST_EXPECT( - client.reply.account_data() - .balance() - .value() - .xrp_amount() - .drops() == 1000 * 1000 * 1000); - BEAST_EXPECT( - client.reply.account_data().account().value().address() == - alice.human()); - BEAST_EXPECT( - client.reply.account_data().sequence().value() == - env.seq(alice)); - BEAST_EXPECT(client.reply.queue_data().txn_count() == 0); - } - } - - void - testErrorsGrpc() - { - testcase("gRPC errors"); - - using namespace jtx; - std::unique_ptr config = envconfig(addGrpcConfig); - std::string grpcPort = *(*config)["port_grpc"].get("port"); - Env env(*this, std::move(config)); - auto getClient = [&grpcPort]() { - return GetAccountInfoClient(grpcPort); - }; - Account const alice{"alice"}; - env.fund(drops(1000 * 1000 * 1000), alice); - - { - // bad address - auto client = getClient(); - client.request.mutable_account()->set_address("deadbeef"); - client.GetAccountInfo(); - BEAST_EXPECT(!client.status.ok()); - } - { - // no account - Account const bogie{"bogie"}; - auto client = getClient(); - client.request.mutable_account()->set_address(bogie.human()); - client.GetAccountInfo(); - BEAST_EXPECT(!client.status.ok()); - } - { - // bad ledger_index - auto client = getClient(); - client.request.mutable_account()->set_address(alice.human()); - client.request.mutable_ledger()->set_sequence(0); - client.GetAccountInfo(); - BEAST_EXPECT(!client.status.ok()); - } - } - - void - testSignerListsGrpc() - { - testcase("gRPC singer lists"); - - using namespace jtx; - std::unique_ptr config = envconfig(addGrpcConfig); - std::string grpcPort = *(*config)["port_grpc"].get("port"); - Env env(*this, std::move(config)); - auto getClient = [&grpcPort]() { - return GetAccountInfoClient(grpcPort); - }; - - Account const alice{"alice"}; - env.fund(drops(1000 * 1000 * 1000), alice); - - { - auto client = getClient(); - client.request.mutable_account()->set_address(alice.human()); - client.request.set_signer_lists(true); - client.GetAccountInfo(); - if (!BEAST_EXPECT(client.status.ok())) - return; - BEAST_EXPECT(client.reply.signer_list().signer_entries_size() == 0); - } - - // Give alice a SignerList. 
- Account const bogie{"bogie"}; - Json::Value const smallSigners = signers(alice, 2, {{bogie, 3}}); - env(smallSigners); - { - auto client = getClient(); - client.request.mutable_account()->set_address(alice.human()); - client.request.set_signer_lists(false); - client.GetAccountInfo(); - if (!BEAST_EXPECT(client.status.ok())) - return; - BEAST_EXPECT(client.reply.signer_list().signer_entries_size() == 0); - } - { - auto client = getClient(); - client.request.mutable_account()->set_address(alice.human()); - client.request.set_signer_lists(true); - client.GetAccountInfo(); - if (!BEAST_EXPECT(client.status.ok())) - { - return; - } - BEAST_EXPECT( - client.reply.account_data().owner_count().value() == 1); - BEAST_EXPECT(client.reply.signer_list().signer_entries_size() == 1); - } - - // Give alice a big signer list - Account const demon{"demon"}; - Account const ghost{"ghost"}; - Account const haunt{"haunt"}; - Account const jinni{"jinni"}; - Account const phase{"phase"}; - Account const shade{"shade"}; - Account const spook{"spook"}; - Json::Value const bigSigners = signers( - alice, - 4, - { - {bogie, 1}, - {demon, 1}, - {ghost, 1}, - {haunt, 1}, - {jinni, 1}, - {phase, 1}, - {shade, 1}, - {spook, 1}, - }); - env(bigSigners); - - std::set accounts; - accounts.insert(bogie.human()); - accounts.insert(demon.human()); - accounts.insert(ghost.human()); - accounts.insert(haunt.human()); - accounts.insert(jinni.human()); - accounts.insert(phase.human()); - accounts.insert(shade.human()); - accounts.insert(spook.human()); - { - auto client = getClient(); - client.request.mutable_account()->set_address(alice.human()); - client.request.set_signer_lists(true); - client.GetAccountInfo(); - if (!BEAST_EXPECT(client.status.ok())) - { - return; - } - BEAST_EXPECT( - client.reply.account_data().owner_count().value() == 1); - auto& signerList = client.reply.signer_list(); - BEAST_EXPECT(signerList.signer_quorum().value() == 4); - BEAST_EXPECT(signerList.signer_entries_size() == 8); - for (int i = 0; i < 8; ++i) - { - BEAST_EXPECT( - signerList.signer_entries(i).signer_weight().value() == 1); - BEAST_EXPECT( - accounts.erase(signerList.signer_entries(i) - .account() - .value() - .address()) == 1); - } - BEAST_EXPECT(accounts.size() == 0); - } - } - void run() override { @@ -719,9 +498,6 @@ class AccountInfo_test : public beast::unit_test::suite testSignerLists(); testSignerListsApiVersion2(); testSignerListsV2(); - testSimpleGrpc(); - testErrorsGrpc(); - testSignerListsGrpc(); } }; diff --git a/src/test/rpc/Fee_test.cpp b/src/test/rpc/Fee_test.cpp deleted file mode 100644 index 17ada929e2e..00000000000 --- a/src/test/rpc/Fee_test.cpp +++ /dev/null @@ -1,138 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { -namespace test { - -class Fee_test : public beast::unit_test::suite -{ - class GrpcFeeClient : public GRPCTestClientBase - { - public: - org::xrpl::rpc::v1::GetFeeRequest request; - org::xrpl::rpc::v1::GetFeeResponse reply; - - explicit GrpcFeeClient(std::string const& grpcPort) - : GRPCTestClientBase(grpcPort) - { - } - - void - GetFee() - { - status = stub_->GetFee(&context, request, &reply); - } - }; - - std::pair - grpcGetFee(std::string const& grpcPort) - { - GrpcFeeClient client(grpcPort); - client.GetFee(); - return std::pair( - client.status.ok(), client.reply); - } - - void - testFeeGrpc() - { - testcase("Test Fee Grpc"); - - using namespace test::jtx; - std::unique_ptr config = envconfig(addGrpcConfig); - std::string grpcPort = *(*config)["port_grpc"].get("port"); - Env env(*this, std::move(config)); - Account A1{"A1"}; - Account A2{"A2"}; - env.fund(XRP(10000), A1); - env.fund(XRP(10000), A2); - env.close(); - env.trust(A2["USD"](1000), A1); - env.close(); - for (int i = 0; i < 7; ++i) - { - env(pay(A2, A1, A2["USD"](100))); - if (i == 4) - env.close(); - } - - auto view = env.current(); - - auto const metrics = env.app().getTxQ().getMetrics(*env.current()); - - auto const result = grpcGetFee(grpcPort); - - BEAST_EXPECT(result.first == true); - - auto reply = result.second; - - // current ledger data - BEAST_EXPECT(reply.current_ledger_size() == metrics.txInLedger); - BEAST_EXPECT(reply.current_queue_size() == metrics.txCount); - BEAST_EXPECT(reply.expected_ledger_size() == metrics.txPerLedger); - BEAST_EXPECT(reply.ledger_current_index() == view->info().seq); - BEAST_EXPECT(reply.max_queue_size() == *metrics.txQMaxSize); - - // fee levels data - org::xrpl::rpc::v1::FeeLevels& levels = *reply.mutable_levels(); - BEAST_EXPECT(levels.median_level() == metrics.medFeeLevel); - BEAST_EXPECT(levels.minimum_level() == metrics.minProcessingFeeLevel); - BEAST_EXPECT(levels.open_ledger_level() == metrics.openLedgerFeeLevel); - BEAST_EXPECT(levels.reference_level() == metrics.referenceFeeLevel); - - // fee data - org::xrpl::rpc::v1::Fee& fee = *reply.mutable_fee(); - auto const baseFee = view->fees().base; - BEAST_EXPECT( - fee.base_fee().drops() == - toDrops(metrics.referenceFeeLevel, baseFee)); - BEAST_EXPECT( - fee.minimum_fee().drops() == - toDrops(metrics.minProcessingFeeLevel, baseFee)); - BEAST_EXPECT( - fee.median_fee().drops() == toDrops(metrics.medFeeLevel, baseFee)); - auto openLedgerFee = - toDrops(metrics.openLedgerFeeLevel - FeeLevel64{1}, baseFee) + 1; - BEAST_EXPECT(fee.open_ledger_fee().drops() == openLedgerFee.drops()); - } - -public: - void - run() override - { - testFeeGrpc(); - } -}; - -BEAST_DEFINE_TESTSUITE(Fee, app, ripple); - -} // namespace test -} // namespace ripple diff --git a/src/test/rpc/ReportingETL_test.cpp b/src/test/rpc/ReportingETL_test.cpp index 36b2f9b0b69..d8e6fc684fd 100644 --- a/src/test/rpc/ReportingETL_test.cpp +++ b/src/test/rpc/ReportingETL_test.cpp @@ -773,25 +773,6 @@ class ReportingETL_test : public 
beast::unit_test::suite testNeedCurrentOrClosed() { testcase("NeedCurrentOrClosed"); - { - org::xrpl::rpc::v1::GetAccountInfoRequest request; - request.mutable_ledger()->set_sequence(1); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_hash(""); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_VALIDATED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_UNSPECIFIED); - BEAST_EXPECT(!needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CURRENT); - BEAST_EXPECT(needCurrentOrClosed(request)); - request.mutable_ledger()->set_shortcut( - org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CLOSED); - BEAST_EXPECT(needCurrentOrClosed(request)); - } { org::xrpl::rpc::v1::GetLedgerRequest request; @@ -904,18 +885,6 @@ class ReportingETL_test : public beast::unit_test::suite org::xrpl::rpc::v1::LedgerSpecifier::SHORTCUT_CURRENT); BEAST_EXPECT(needCurrentOrClosed(request)); } - - { - org::xrpl::rpc::v1::GetFeeRequest feeRequest; - BEAST_EXPECT(!needCurrentOrClosed(feeRequest)); - - org::xrpl::rpc::v1::GetAccountTransactionHistoryRequest - accountTxRequest; - BEAST_EXPECT(!needCurrentOrClosed(accountTxRequest)); - - org::xrpl::rpc::v1::GetTransactionRequest txRequest; - BEAST_EXPECT(!needCurrentOrClosed(txRequest)); - } } void diff --git a/src/test/rpc/Submit_test.cpp b/src/test/rpc/Submit_test.cpp deleted file mode 100644 index 252e1c32a8a..00000000000 --- a/src/test/rpc/Submit_test.cpp +++ /dev/null @@ -1,276 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -namespace ripple { -namespace test { - -class Submit_test : public beast::unit_test::suite -{ -public: - class SubmitClient : public GRPCTestClientBase - { - public: - org::xrpl::rpc::v1::SubmitTransactionRequest request; - org::xrpl::rpc::v1::SubmitTransactionResponse reply; - - explicit SubmitClient(std::string const& port) - : GRPCTestClientBase(port) - { - } - - void - SubmitTransaction() - { - status = stub_->SubmitTransaction(&context, request, &reply); - } - }; - - struct TestData - { - std::string xrpTxBlob; - std::string xrpTxHash; - std::string usdTxBlob; - std::string usdTxHash; - const static int fund = 10000; - } testData; - - void - fillTestData() - { - testcase("fill test data"); - - using namespace jtx; - Env env(*this, envconfig(addGrpcConfig)); - auto const alice = Account("alice"); - auto const bob = Account("bob"); - env.fund(XRP(TestData::fund), "alice", "bob"); - env.trust(bob["USD"](TestData::fund), alice); - env.close(); - - auto toBinary = [this](std::string const& text) { - auto blob = strUnHex(text); - BEAST_EXPECT(blob); - return std::string{ - reinterpret_cast(blob->data()), blob->size()}; - }; - - // use a websocket client to fill transaction blobs - auto wsc = makeWSClient(env.app().config()); - { - Json::Value jrequestXrp; - jrequestXrp[jss::secret] = toBase58(generateSeed("alice")); - jrequestXrp[jss::tx_json] = - pay("alice", "bob", XRP(TestData::fund / 2)); - Json::Value jreply_xrp = wsc->invoke("sign", jrequestXrp); - - if (!BEAST_EXPECT(jreply_xrp.isMember(jss::result))) - return; - if (!BEAST_EXPECT(jreply_xrp[jss::result].isMember(jss::tx_blob))) - return; - testData.xrpTxBlob = - toBinary(jreply_xrp[jss::result][jss::tx_blob].asString()); - if (!BEAST_EXPECT(jreply_xrp[jss::result].isMember(jss::tx_json))) - return; - if (!BEAST_EXPECT( - jreply_xrp[jss::result][jss::tx_json].isMember(jss::hash))) - return; - testData.xrpTxHash = toBinary( - jreply_xrp[jss::result][jss::tx_json][jss::hash].asString()); - } - { - Json::Value jrequestUsd; - jrequestUsd[jss::secret] = toBase58(generateSeed("bob")); - jrequestUsd[jss::tx_json] = - pay("bob", "alice", bob["USD"](TestData::fund / 2)); - Json::Value jreply_usd = wsc->invoke("sign", jrequestUsd); - - if (!BEAST_EXPECT(jreply_usd.isMember(jss::result))) - return; - if (!BEAST_EXPECT(jreply_usd[jss::result].isMember(jss::tx_blob))) - return; - testData.usdTxBlob = - toBinary(jreply_usd[jss::result][jss::tx_blob].asString()); - if (!BEAST_EXPECT(jreply_usd[jss::result].isMember(jss::tx_json))) - return; - if (!BEAST_EXPECT( - jreply_usd[jss::result][jss::tx_json].isMember(jss::hash))) - return; - testData.usdTxHash = toBinary( - jreply_usd[jss::result][jss::tx_json][jss::hash].asString()); - } - } - - void - testSubmitGoodBlobGrpc() - { - testcase("Submit good blobs, XRP, USD, and same transaction twice"); - - using namespace jtx; - std::unique_ptr config = envconfig(addGrpcConfig); - std::string grpcPort = *(*config)["port_grpc"].get("port"); - Env env(*this, std::move(config)); - auto const alice = Account("alice"); - auto const bob = Account("bob"); - env.fund(XRP(TestData::fund), "alice", "bob"); - env.trust(bob["USD"](TestData::fund), alice); - env.close(); - - auto getClient = [&grpcPort]() { return SubmitClient(grpcPort); }; - - // XRP - { - auto client = getClient(); - 
client.request.set_signed_transaction(testData.xrpTxBlob); - client.SubmitTransaction(); - if (!BEAST_EXPECT(client.status.ok())) - { - return; - } - BEAST_EXPECT(client.reply.engine_result().result() == "tesSUCCESS"); - BEAST_EXPECT(client.reply.engine_result_code() == 0); - BEAST_EXPECT(client.reply.hash() == testData.xrpTxHash); - } - // USD - { - auto client = getClient(); - client.request.set_signed_transaction(testData.usdTxBlob); - client.SubmitTransaction(); - if (!BEAST_EXPECT(client.status.ok())) - { - return; - } - BEAST_EXPECT(client.reply.engine_result().result() == "tesSUCCESS"); - BEAST_EXPECT(client.reply.engine_result_code() == 0); - BEAST_EXPECT(client.reply.hash() == testData.usdTxHash); - } - // USD, error, same transaction again - { - auto client = getClient(); - client.request.set_signed_transaction(testData.usdTxBlob); - client.SubmitTransaction(); - if (!BEAST_EXPECT(client.status.ok())) - { - return; - } - BEAST_EXPECT( - client.reply.engine_result().result() == "tefPAST_SEQ"); - BEAST_EXPECT(client.reply.engine_result_code() == -190); - } - } - - void - testSubmitErrorBlobGrpc() - { - testcase("Submit error, bad blob, no account"); - - using namespace jtx; - std::unique_ptr config = envconfig(addGrpcConfig); - std::string grpcPort = *(*config)["port_grpc"].get("port"); - Env env(*this, std::move(config)); - - auto getClient = [&grpcPort]() { return SubmitClient(grpcPort); }; - - // short transaction blob, cannot parse - { - auto client = getClient(); - client.request.set_signed_transaction("deadbeef"); - client.SubmitTransaction(); - BEAST_EXPECT(!client.status.ok()); - } - // bad blob with correct length, cannot parse - { - auto client = getClient(); - auto xrpTxBlobCopy(testData.xrpTxBlob); - std::reverse(xrpTxBlobCopy.begin(), xrpTxBlobCopy.end()); - client.request.set_signed_transaction(xrpTxBlobCopy); - client.SubmitTransaction(); - BEAST_EXPECT(!client.status.ok()); - } - // good blob, can parse but no account - { - auto client = getClient(); - client.request.set_signed_transaction(testData.xrpTxBlob); - client.SubmitTransaction(); - if (!BEAST_EXPECT(client.status.ok())) - { - return; - } - BEAST_EXPECT( - client.reply.engine_result().result() == "terNO_ACCOUNT"); - BEAST_EXPECT(client.reply.engine_result_code() == -96); - } - } - - void - testSubmitInsufficientFundsGrpc() - { - testcase("Submit good blobs but insufficient funds"); - - using namespace jtx; - std::unique_ptr config = envconfig(addGrpcConfig); - std::string grpcPort = *(*config)["port_grpc"].get("port"); - Env env(*this, std::move(config)); - - auto const alice = Account("alice"); - auto const bob = Account("bob"); - // fund 1000 (TestData::fund/10) XRP, the transaction sends 5000 - // (TestData::fund/2) XRP, so insufficient funds - env.fund(XRP(TestData::fund / 10), "alice", "bob"); - env.trust(bob["USD"](TestData::fund), alice); - env.close(); - - { - SubmitClient client(grpcPort); - client.request.set_signed_transaction(testData.xrpTxBlob); - client.SubmitTransaction(); - if (!BEAST_EXPECT(client.status.ok())) - { - return; - } - BEAST_EXPECT( - client.reply.engine_result().result() == "tecUNFUNDED_PAYMENT"); - BEAST_EXPECT(client.reply.engine_result_code() == 104); - } - } - - void - run() override - { - fillTestData(); - testSubmitGoodBlobGrpc(); - testSubmitErrorBlobGrpc(); - testSubmitInsufficientFundsGrpc(); - } -}; - -BEAST_DEFINE_TESTSUITE(Submit, app, ripple); - -} // namespace test -} // namespace ripple diff --git a/src/test/rpc/Tx_test.cpp b/src/test/rpc/Tx_test.cpp deleted file 
mode 100644 index 8cfd36c5d86..00000000000 --- a/src/test/rpc/Tx_test.cpp +++ /dev/null @@ -1,829 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include - -namespace ripple { -namespace test { - -class Tx_test : public beast::unit_test::suite -{ - template - std::string - toByteString(T const& data) - { - const char* bytes = reinterpret_cast(data.data()); - return {bytes, data.size()}; - } - - void - cmpAmount( - const org::xrpl::rpc::v1::CurrencyAmount& proto_amount, - STAmount amount) - { - if (amount.native()) - { - if (!BEAST_EXPECT(proto_amount.has_xrp_amount())) - return; - BEAST_EXPECT( - proto_amount.xrp_amount().drops() == amount.xrp().drops()); - } - else - { - if (!BEAST_EXPECT(proto_amount.has_issued_currency_amount())) - return; - - org::xrpl::rpc::v1::IssuedCurrencyAmount issuedCurrency = - proto_amount.issued_currency_amount(); - Issue const& issue = amount.issue(); - Currency currency = issue.currency; - BEAST_EXPECT( - issuedCurrency.currency().name() == to_string(currency)); - BEAST_EXPECT( - issuedCurrency.currency().code() == toByteString(currency)); - BEAST_EXPECT(issuedCurrency.value() == to_string(amount.iou())); - BEAST_EXPECT( - issuedCurrency.issuer().address() == toBase58(issue.account)); - } - } - - void - cmpPaymentTx( - const org::xrpl::rpc::v1::Transaction& proto, - std::shared_ptr txnSt) - { - if (!BEAST_EXPECT(proto.has_payment())) - return; - - if (!BEAST_EXPECT( - safe_cast(txnSt->getFieldU16(sfTransactionType)) == - TxType::ttPAYMENT)) - return; - - AccountID account = txnSt->getAccountID(sfAccount); - - if (!BEAST_EXPECT(proto.has_account())) - return; - BEAST_EXPECT(proto.account().value().address() == toBase58(account)); - - STAmount amount = txnSt->getFieldAmount(sfAmount); - if (!BEAST_EXPECT(proto.payment().has_amount())) - return; - cmpAmount(proto.payment().amount().value(), amount); - - AccountID accountDest = txnSt->getAccountID(sfDestination); - if (!BEAST_EXPECT(proto.payment().has_destination())) - return; - BEAST_EXPECT( - proto.payment().destination().value().address() == - toBase58(accountDest)); - - STAmount fee = txnSt->getFieldAmount(sfFee); - if (!BEAST_EXPECT(proto.has_fee())) - return; - BEAST_EXPECT(proto.fee().drops() == fee.xrp().drops()); - - if (!BEAST_EXPECT(proto.has_sequence())) - return; - BEAST_EXPECT( - proto.sequence().value() == txnSt->getFieldU32(sfSequence)); - - if (!BEAST_EXPECT(proto.has_signing_public_key())) - return; - - Blob 
signingPubKey = txnSt->getFieldVL(sfSigningPubKey); - BEAST_EXPECT( - proto.signing_public_key().value() == toByteString(signingPubKey)); - - if (txnSt->isFieldPresent(sfFlags)) - { - if (!BEAST_EXPECT(proto.has_flags())) - return; - BEAST_EXPECT(proto.flags().value() == txnSt->getFieldU32(sfFlags)); - } - else - { - BEAST_EXPECT(!proto.has_flags()); - } - - if (txnSt->isFieldPresent(sfLastLedgerSequence)) - { - if (!BEAST_EXPECT(proto.has_last_ledger_sequence())) - return; - - BEAST_EXPECT( - proto.last_ledger_sequence().value() == - txnSt->getFieldU32(sfLastLedgerSequence)); - } - else - { - BEAST_EXPECT(!proto.has_last_ledger_sequence()); - } - - if (txnSt->isFieldPresent(sfTxnSignature)) - { - if (!BEAST_EXPECT(proto.has_transaction_signature())) - return; - - Blob blob = txnSt->getFieldVL(sfTxnSignature); - BEAST_EXPECT( - proto.transaction_signature().value() == toByteString(blob)); - } - - if (txnSt->isFieldPresent(sfSendMax)) - { - if (!BEAST_EXPECT(proto.payment().has_send_max())) - return; - STAmount const& send_max = txnSt->getFieldAmount(sfSendMax); - cmpAmount(proto.payment().send_max().value(), send_max); - } - else - { - BEAST_EXPECT(!proto.payment().has_send_max()); - } - - if (txnSt->isFieldPresent(sfAccountTxnID)) - { - if (!BEAST_EXPECT(proto.has_account_transaction_id())) - return; - auto field = txnSt->getFieldH256(sfAccountTxnID); - BEAST_EXPECT( - proto.account_transaction_id().value() == toByteString(field)); - } - else - { - BEAST_EXPECT(!proto.has_account_transaction_id()); - } - - if (txnSt->isFieldPresent(sfSourceTag)) - { - if (!BEAST_EXPECT(proto.has_source_tag())) - return; - BEAST_EXPECT( - proto.source_tag().value() == txnSt->getFieldU32(sfSourceTag)); - } - else - { - BEAST_EXPECT(!proto.has_source_tag()); - } - - if (txnSt->isFieldPresent(sfDestinationTag)) - { - if (!BEAST_EXPECT(proto.payment().has_destination_tag())) - return; - - BEAST_EXPECT( - proto.payment().destination_tag().value() == - txnSt->getFieldU32(sfDestinationTag)); - } - else - { - BEAST_EXPECT(!proto.payment().has_destination_tag()); - } - - if (txnSt->isFieldPresent(sfInvoiceID)) - { - if (!BEAST_EXPECT(proto.payment().has_invoice_id())) - return; - - auto field = txnSt->getFieldH256(sfInvoiceID); - BEAST_EXPECT( - proto.payment().invoice_id().value() == toByteString(field)); - } - else - { - BEAST_EXPECT(!proto.payment().has_invoice_id()); - } - - if (txnSt->isFieldPresent(sfDeliverMin)) - { - if (!BEAST_EXPECT(proto.payment().has_deliver_min())) - return; - STAmount const& deliverMin = txnSt->getFieldAmount(sfDeliverMin); - cmpAmount(proto.payment().deliver_min().value(), deliverMin); - } - else - { - BEAST_EXPECT(!proto.payment().has_deliver_min()); - } - - STPathSet const& pathset = txnSt->getFieldPathSet(sfPaths); - if (!BEAST_EXPECT(pathset.size() == proto.payment().paths_size())) - return; - - int ind = 0; - for (auto it = pathset.begin(); it < pathset.end(); ++it) - { - STPath const& path = *it; - - const org::xrpl::rpc::v1::Payment_Path& protoPath = - proto.payment().paths(ind++); - if (!BEAST_EXPECT(protoPath.elements_size() == path.size())) - continue; - - int ind2 = 0; - for (auto it2 = path.begin(); it2 != path.end(); ++it2) - { - const org::xrpl::rpc::v1::Payment_PathElement& protoElement = - protoPath.elements(ind2++); - STPathElement const& elt = *it2; - - if (elt.isOffer()) - { - if (elt.hasCurrency()) - { - Currency const& currency = elt.getCurrency(); - if (BEAST_EXPECT(protoElement.has_currency())) - { - BEAST_EXPECT( - protoElement.currency().name() == - 
to_string(currency)); - } - } - else - { - BEAST_EXPECT(!protoElement.has_currency()); - } - if (elt.hasIssuer()) - { - AccountID const& issuer = elt.getIssuerID(); - if (BEAST_EXPECT(protoElement.has_issuer())) - { - BEAST_EXPECT( - protoElement.issuer().address() == - toBase58(issuer)); - } - } - else - { - BEAST_EXPECT(!protoElement.has_issuer()); - } - } - else - { - if (BEAST_EXPECT(protoElement.has_account())) - { - AccountID const& path_account = elt.getAccountID(); - BEAST_EXPECT( - protoElement.account().address() == - toBase58(path_account)); - } - else - { - BEAST_EXPECT(!protoElement.has_account()); - } - - BEAST_EXPECT(!protoElement.has_issuer()); - BEAST_EXPECT(!protoElement.has_currency()); - } - } - } - - if (txnSt->isFieldPresent(sfMemos)) - { - auto arr = txnSt->getFieldArray(sfMemos); - if (BEAST_EXPECT(proto.memos_size() == arr.size())) - { - for (size_t i = 0; i < arr.size(); ++i) - { - auto protoMemo = proto.memos(i); - auto stMemo = arr[i]; - - if (stMemo.isFieldPresent(sfMemoData)) - { - if (BEAST_EXPECT(protoMemo.has_memo_data())) - { - BEAST_EXPECT( - protoMemo.memo_data().value() == - toByteString(stMemo.getFieldVL(sfMemoData))); - } - } - else - { - BEAST_EXPECT(!protoMemo.has_memo_data()); - } - - if (stMemo.isFieldPresent(sfMemoType)) - { - if (BEAST_EXPECT(protoMemo.has_memo_type())) - { - BEAST_EXPECT( - protoMemo.memo_type().value() == - toByteString(stMemo.getFieldVL(sfMemoType))); - } - } - else - { - BEAST_EXPECT(!protoMemo.has_memo_type()); - } - - if (stMemo.isFieldPresent(sfMemoFormat)) - { - if (BEAST_EXPECT(protoMemo.has_memo_format())) - { - BEAST_EXPECT( - protoMemo.memo_format().value() == - toByteString(stMemo.getFieldVL(sfMemoFormat))); - } - } - else - { - BEAST_EXPECT(!protoMemo.has_memo_format()); - } - } - } - } - else - { - BEAST_EXPECT(proto.memos_size() == 0); - } - - if (txnSt->isFieldPresent(sfSigners)) - { - auto arr = txnSt->getFieldArray(sfSigners); - if (BEAST_EXPECT(proto.signers_size() == arr.size())) - { - for (size_t i = 0; i < arr.size(); ++i) - { - auto protoSigner = proto.signers(i); - auto stSigner = arr[i]; - - if (stSigner.isFieldPresent(sfAccount)) - { - if (BEAST_EXPECT(protoSigner.has_account())) - { - BEAST_EXPECT( - protoSigner.account().value().address() == - toBase58(stSigner.getAccountID(sfAccount))); - } - } - else - { - BEAST_EXPECT(!protoSigner.has_account()); - } - - if (stSigner.isFieldPresent(sfTxnSignature)) - { - if (BEAST_EXPECT( - protoSigner.has_transaction_signature())) - { - Blob blob = stSigner.getFieldVL(sfTxnSignature); - BEAST_EXPECT( - protoSigner.transaction_signature().value() == - toByteString(blob)); - } - } - else - { - BEAST_EXPECT(!protoSigner.has_transaction_signature()); - } - - if (stSigner.isFieldPresent(sfSigningPubKey)) - { - if (BEAST_EXPECT(protoSigner.has_signing_public_key())) - { - Blob signingPubKey = - stSigner.getFieldVL(sfSigningPubKey); - BEAST_EXPECT( - protoSigner.signing_public_key().value() == - toByteString(signingPubKey)); - } - } - else - { - BEAST_EXPECT(!protoSigner.has_signing_public_key()); - } - } - } - } - else - { - BEAST_EXPECT(proto.signers_size() == 0); - } - } - - void - cmpMeta( - const org::xrpl::rpc::v1::Meta& proto, - std::shared_ptr txMeta) - { - BEAST_EXPECT(proto.transaction_index() == txMeta->getIndex()); - BEAST_EXPECT( - proto.transaction_result().result() == - transToken(txMeta->getResultTER())); - - org::xrpl::rpc::v1::TransactionResult r; - - RPC::convert(r, txMeta->getResultTER()); - - BEAST_EXPECT( - proto.transaction_result().result_type() 
== r.result_type()); - } - - void - cmpDeliveredAmount( - const org::xrpl::rpc::v1::Meta& meta, - const org::xrpl::rpc::v1::Transaction& txn, - const std::shared_ptr expMeta, - const std::shared_ptr expTxn, - bool checkAmount = true) - { - if (expMeta->hasDeliveredAmount()) - { - if (!BEAST_EXPECT(meta.has_delivered_amount())) - return; - cmpAmount( - meta.delivered_amount().value(), expMeta->getDeliveredAmount()); - } - else - { - if (expTxn->isFieldPresent(sfAmount)) - { - using namespace std::chrono_literals; - if (checkAmount) - { - cmpAmount( - meta.delivered_amount().value(), - expTxn->getFieldAmount(sfAmount)); - } - } - else - { - BEAST_EXPECT(!meta.has_delivered_amount()); - } - } - } - - // gRPC stuff - class GrpcTxClient : public GRPCTestClientBase - { - public: - org::xrpl::rpc::v1::GetTransactionRequest request; - org::xrpl::rpc::v1::GetTransactionResponse reply; - - explicit GrpcTxClient(std::string const& port) - : GRPCTestClientBase(port) - { - } - - void - Tx() - { - status = stub_->GetTransaction(&context, request, &reply); - } - }; - - class GrpcAccountTxClient : public GRPCTestClientBase - { - public: - org::xrpl::rpc::v1::GetAccountTransactionHistoryRequest request; - org::xrpl::rpc::v1::GetAccountTransactionHistoryResponse reply; - - explicit GrpcAccountTxClient(std::string const& port) - : GRPCTestClientBase(port) - { - } - - void - AccountTx() - { - status = - stub_->GetAccountTransactionHistory(&context, request, &reply); - } - }; - - void - testTxGrpc() - { - testcase("Test Tx Grpc"); - - using namespace test::jtx; - std::unique_ptr config = envconfig(addGrpcConfig); - std::string grpcPort = *(*config)["port_grpc"].get("port"); - Env env(*this, std::move(config)); - - using namespace std::chrono_literals; - // Set time to this value (or greater) to get delivered_amount in meta - env.timeKeeper().set(NetClock::time_point{446000001s}); - - auto grpcTx = [&grpcPort](auto hash, auto binary) { - GrpcTxClient client(grpcPort); - client.request.set_hash(&hash, sizeof(hash)); - client.request.set_binary(binary); - client.Tx(); - return std::pair( - client.status.ok(), client.reply); - }; - - Account A1{"A1"}; - Account A2{"A2"}; - Account A3{"A3"}; - env.fund(XRP(10000), A1); - env.fund(XRP(10000), A2); - env.close(); - env.trust(A2["USD"](1000), A1); - env.close(); - env(fset(A2, 5)); // set asfAccountTxnID flag - - // SignerListSet - env(signers(A2, 1, {{"bogie", 1}, {"demon", 1}, {A1, 1}, {A3, 1}}), - sig(A2)); - env.close(); - std::vector> txns; - auto const startLegSeq = env.current()->info().seq; - - uint256 prevHash; - for (int i = 0; i < 14; ++i) - { - auto const baseFee = env.current()->fees().base; - auto txfee = fee(i + (2 * baseFee)); - auto lls = last_ledger_seq(i + startLegSeq + 20); - auto dsttag = dtag(i * 456); - auto srctag = stag(i * 321); - auto sm = sendmax(A2["USD"](1000)); - auto dm = delivermin(A2["USD"](50)); - auto txf = txflags(131072); // partial payment flag - auto txnid = account_txn_id(prevHash); - auto inv = invoice_id(prevHash); - auto mem1 = memo("foo", "bar", "baz"); - auto mem2 = memo("dragons", "elves", "goblins"); - - if (i & 1) - { - if (i & 2) - { - env(pay(A2, A1, A2["USD"](100)), - txfee, - srctag, - dsttag, - lls, - sm, - dm, - txf, - txnid, - inv, - mem1, - mem2, - sig(A2)); - } - else - { - env(pay(A2, A1, A2["USD"](100)), - txfee, - srctag, - dsttag, - lls, - sm, - dm, - txf, - txnid, - inv, - mem1, - mem2, - msig(A3)); - } - } - else - { - if (i & 2) - { - env(pay(A2, A1, A2["XRP"](200)), - txfee, - srctag, - dsttag, - lls, - 
txnid, - inv, - mem1, - mem2, - sig(A2)); - } - else - { - env(pay(A2, A1, A2["XRP"](200)), - txfee, - srctag, - dsttag, - lls, - txnid, - inv, - mem1, - mem2, - msig(A3)); - } - } - txns.emplace_back(env.tx()); - prevHash = txns.back()->getTransactionID(); - env.close(); - } - - // Payment with Paths - auto const gw = Account("gateway"); - auto const USD = gw["USD"]; - env.fund(XRP(10000), "alice", "bob", gw); - env.trust(USD(600), "alice"); - env.trust(USD(700), "bob"); - env(pay(gw, "alice", USD(70))); - txns.emplace_back(env.tx()); - env.close(); - env(pay(gw, "bob", USD(50))); - txns.emplace_back(env.tx()); - env.close(); - env(pay("alice", "bob", Account("bob")["USD"](5)), path(gw)); - txns.emplace_back(env.tx()); - env.close(); - - auto const endLegSeq = env.closed()->info().seq; - - // Find the existing transactions - auto& ledgerMaster = env.app().getLedgerMaster(); - int index = startLegSeq; - for (auto&& tx : txns) - { - auto id = tx->getTransactionID(); - auto ledger = ledgerMaster.getLedgerBySeq(index); - - for (bool b : {false, true}) - { - auto const result = grpcTx(id, b); - - BEAST_EXPECT(result.first == true); - BEAST_EXPECT(result.second.ledger_index() == index); - BEAST_EXPECT(result.second.validated() == true); - if (b) - { - Serializer s = tx->getSerializer(); - BEAST_EXPECT( - result.second.transaction_binary() == toByteString(s)); - } - else - { - cmpPaymentTx(result.second.transaction(), tx); - } - - if (!ledger || b) - continue; - - auto rawMeta = ledger->txRead(id).second; - if (!rawMeta) - continue; - - auto txMeta = - std::make_shared(id, ledger->seq(), *rawMeta); - - cmpMeta(result.second.meta(), txMeta); - cmpDeliveredAmount( - result.second.meta(), - result.second.transaction(), - txMeta, - tx); - - auto grpcAccountTx = [&grpcPort]( - uint256 const& id, - bool binary, - AccountID const& account) - -> std:: - pair { - GrpcAccountTxClient client(grpcPort); - client.request.set_binary(binary); - client.request.mutable_account()->set_address( - toBase58(account)); - client.AccountTx(); - org::xrpl::rpc::v1::GetTransactionResponse res; - - for (auto const& tx : client.reply.transactions()) - { - if (uint256::fromVoid(tx.hash().data()) == id) - { - return {client.status.ok(), tx}; - } - } - return {false, res}; - }; - - // Compare result to result from account_tx - auto mentioned = tx->getMentionedAccounts(); - - if (!BEAST_EXPECT(mentioned.size())) - continue; - - auto account = *mentioned.begin(); - auto const accountTxResult = grpcAccountTx(id, b, account); - - if (!BEAST_EXPECT(accountTxResult.first)) - continue; - - cmpPaymentTx(accountTxResult.second.transaction(), tx); - cmpMeta(accountTxResult.second.meta(), txMeta); - cmpDeliveredAmount( - accountTxResult.second.meta(), - accountTxResult.second.transaction(), - txMeta, - tx); - } - index++; - } - - // Find not existing transaction - auto const tx = env.jt(noop(A1), seq(env.seq(A1))).stx; - for (bool b : {false, true}) - { - auto const result = grpcTx(tx->getTransactionID(), b); - - BEAST_EXPECT(result.first == false); - } - - // Delete one transaction - const auto deletedLedger = (startLegSeq + endLegSeq) / 2; - { - // Remove one of the ledgers from the database directly - dynamic_cast(&env.app().getRelationalDatabase()) - ->deleteTransactionByLedgerSeq(deletedLedger); - } - - for (bool b : {false, true}) - { - auto const result = grpcTx(tx->getTransactionID(), b); - - BEAST_EXPECT(result.first == false); - } - - // non final transaction - env(pay(A2, A1, A2["XRP"](200))); - auto res = 
grpcTx(env.tx()->getTransactionID(), false); - BEAST_EXPECT(res.first); - BEAST_EXPECT(res.second.has_transaction()); - if (!BEAST_EXPECT(res.second.has_meta())) - return; - if (!BEAST_EXPECT(res.second.meta().has_transaction_result())) - return; - - BEAST_EXPECT( - res.second.meta().transaction_result().result() == "tesSUCCESS"); - BEAST_EXPECT( - res.second.meta().transaction_result().result_type() == - org::xrpl::rpc::v1::TransactionResult::RESULT_TYPE_TES); - BEAST_EXPECT(!res.second.validated()); - BEAST_EXPECT(!res.second.meta().has_delivered_amount()); - env.close(); - - res = grpcTx(env.tx()->getTransactionID(), false); - BEAST_EXPECT(res.first); - BEAST_EXPECT(res.second.has_transaction()); - if (!BEAST_EXPECT(res.second.has_meta())) - return; - if (!BEAST_EXPECT(res.second.meta().has_transaction_result())) - return; - - BEAST_EXPECT( - res.second.meta().transaction_result().result() == "tesSUCCESS"); - BEAST_EXPECT( - res.second.meta().transaction_result().result_type() == - org::xrpl::rpc::v1::TransactionResult::RESULT_TYPE_TES); - BEAST_EXPECT(res.second.validated()); - BEAST_EXPECT(res.second.meta().has_delivered_amount()); - } - -public: - void - run() override - { - testTxGrpc(); - } -}; - -BEAST_DEFINE_TESTSUITE(Tx, app, ripple); -} // namespace test -} // namespace ripple From 5d38e4cfbfe457999f942ba09bff789e06bb81f9 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Thu, 26 May 2022 18:20:50 -0400 Subject: [PATCH 055/508] Work around soci build issues --- Builds/CMake/SociConfig.cmake.patched | 106 ++++++++++++++++++++++++++ Builds/CMake/deps/Soci.cmake | 3 +- Builds/CMake/soci_patch.cmake | 10 +++ 3 files changed, 118 insertions(+), 1 deletion(-) create mode 100644 Builds/CMake/SociConfig.cmake.patched diff --git a/Builds/CMake/SociConfig.cmake.patched b/Builds/CMake/SociConfig.cmake.patched new file mode 100644 index 00000000000..11bcd1f3d0f --- /dev/null +++ b/Builds/CMake/SociConfig.cmake.patched @@ -0,0 +1,106 @@ +################################################################################ +# SociConfig.cmake - CMake build configuration of SOCI library +################################################################################ +# Copyright (C) 2010 Mateusz Loskot +# +# Distributed under the Boost Software License, Version 1.0. 
+# (See accompanying file LICENSE_1_0.txt or copy at +# http://www.boost.org/LICENSE_1_0.txt) +################################################################################ + +include(CheckCXXSymbolExists) + +if(WIN32) + check_cxx_symbol_exists("_M_AMD64" "" SOCI_TARGET_ARCH_X64) + if(NOT RTC_ARCH_X64) + check_cxx_symbol_exists("_M_IX86" "" SOCI_TARGET_ARCH_X86) + endif(NOT RTC_ARCH_X64) + # add check for arm here + # see http://msdn.microsoft.com/en-us/library/b0084kay.aspx +else(WIN32) + check_cxx_symbol_exists("__i386__" "" SOCI_TARGET_ARCH_X86) + check_cxx_symbol_exists("__x86_64__" "" SOCI_TARGET_ARCH_X64) + check_cxx_symbol_exists("__arm__" "" SOCI_TARGET_ARCH_ARM) +endif(WIN32) + +if(NOT DEFINED LIB_SUFFIX) + if(SOCI_TARGET_ARCH_X64) + set(_lib_suffix "64") + else() + set(_lib_suffix "") + endif() + set(LIB_SUFFIX ${_lib_suffix} CACHE STRING "Specifies suffix for the lib directory") +endif() + +# +# C++11 Option +# + +if(NOT SOCI_CXX_C11) + set (SOCI_CXX_C11 OFF CACHE BOOL "Build to the C++11 standard") +endif() + +# +# Force compilation flags and set desired warnings level +# + +if (MSVC) + add_definitions(-D_CRT_SECURE_NO_DEPRECATE) + add_definitions(-D_CRT_SECURE_NO_WARNINGS) + add_definitions(-D_CRT_NONSTDC_NO_WARNING) + add_definitions(-D_SCL_SECURE_NO_WARNINGS) + + if(CMAKE_CXX_FLAGS MATCHES "/W[0-4]") + string(REGEX REPLACE "/W[0-4]" "/W4" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + else() + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4 /we4266") + endif() + +else() + + set(SOCI_GCC_CLANG_COMMON_FLAGS "") + # "-pedantic -Werror -Wno-error=parentheses -Wall -Wextra -Wpointer-arith -Wcast-align -Wcast-qual -Wfloat-equal -Woverloaded-virtual -Wredundant-decls -Wno-long-long") + + + if (SOCI_CXX_C11) + set(SOCI_CXX_VERSION_FLAGS "-std=c++11") + else() + set(SOCI_CXX_VERSION_FLAGS "-std=gnu++98") + endif() + + if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" OR "${CMAKE_CXX_COMPILER}" MATCHES "clang") + + if(NOT CMAKE_CXX_COMPILER_VERSION LESS 3.1 AND SOCI_ASAN) + set(SOCI_GCC_CLANG_COMMON_FLAGS "${SOCI_GCC_CLANG_COMMON_FLAGS} -fsanitize=address") + endif() + + # enforce C++11 for Clang + set(SOCI_CXX_C11 ON) + set(SOCI_CXX_VERSION_FLAGS "-std=c++11") + add_definitions(-DCATCH_CONFIG_CPP11_NO_IS_ENUM) + + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SOCI_GCC_CLANG_COMMON_FLAGS} ${SOCI_CXX_VERSION_FLAGS}") + + elseif(CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX) + + if(NOT CMAKE_CXX_COMPILER_VERSION LESS 4.8 AND SOCI_ASAN) + set(SOCI_GCC_CLANG_COMMON_FLAGS "${SOCI_GCC_CLANG_COMMON_FLAGS} -fsanitize=address") + endif() + + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SOCI_GCC_CLANG_COMMON_FLAGS} ${SOCI_CXX_VERSION_FLAGS} ") + if (CMAKE_COMPILER_IS_GNUCXX) + if (CMAKE_SYSTEM_NAME MATCHES "FreeBSD") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + else() + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-variadic-macros") + endif() + endif() + + else() + message(WARNING "Unknown toolset - using default flags to build SOCI") + endif() + +endif() + +# Set SOCI_HAVE_* variables for soci-config.h generator +set(SOCI_HAVE_CXX_C11 ${SOCI_CXX_C11} CACHE INTERNAL "Enables C++11 support") diff --git a/Builds/CMake/deps/Soci.cmake b/Builds/CMake/deps/Soci.cmake index fa05a115756..d165d6e1f84 100644 --- a/Builds/CMake/deps/Soci.cmake +++ b/Builds/CMake/deps/Soci.cmake @@ -51,7 +51,8 @@ else() # This patch process is likely fragile and should be reviewed carefully # whenever we update the GIT_TAG above. 
PATCH_COMMAND - ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/soci_patch.cmake + ${CMAKE_COMMAND} -D RIPPLED_SOURCE=${CMAKE_CURRENT_SOURCE_DIR} + -P ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/soci_patch.cmake CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} diff --git a/Builds/CMake/soci_patch.cmake b/Builds/CMake/soci_patch.cmake index 57c46e58219..0c2a75c0dff 100644 --- a/Builds/CMake/soci_patch.cmake +++ b/Builds/CMake/soci_patch.cmake @@ -2,6 +2,16 @@ # so as to remove type range check exceptions that cause # us trouble when using boost::optional to select int values +# Soci's CMake setup leaves flags in place that will cause warnings to +# be treated as errors, but some compiler versions throw "new" warnings +# that then cause the build to fail. Simplify that until soci fixes +# those warnings. +if (RIPPLED_SOURCE) + execute_process( COMMAND ${CMAKE_COMMAND} -E copy_if_different + ${RIPPLED_SOURCE}/Builds/CMake/SociConfig.cmake.patched + cmake/SociConfig.cmake ) +endif () + # Some versions of CMake erroneously patch external projects on every build. # If the patch makes no changes, skip it. This workaround can be # removed once we stop supporting vulnerable versions of CMake. From e37dc710cff103dbac5f35271069ab14dde2e267 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Mon, 29 Aug 2022 11:38:20 -0700 Subject: [PATCH 056/508] Improve timing of NodeToShardRPC unit test to reduce false failures --- src/test/rpc/NodeToShardRPC_test.cpp | 100 +++++++++++++++------------ 1 file changed, 55 insertions(+), 45 deletions(-) diff --git a/src/test/rpc/NodeToShardRPC_test.cpp b/src/test/rpc/NodeToShardRPC_test.cpp index 07d8d8953dd..867f2cfe364 100644 --- a/src/test/rpc/NodeToShardRPC_test.cpp +++ b/src/test/rpc/NodeToShardRPC_test.cpp @@ -194,64 +194,74 @@ class NodeToShardRPC_test : public beast::unit_test::suite result[jss::status] == "success" || importCompleted(shardStore, numberOfShards, result)); - std::chrono::seconds const maxWait{60}; - auto const start = std::chrono::system_clock::now(); + std::chrono::seconds const maxWait{180}; - while (true) { - // Verify that the status object accurately - // reflects import progress. - - auto const completeShards = - shardStore->getShardInfo()->finalized(); - - if (!completeShards.empty()) + auto const start = std::chrono::system_clock::now(); + while (true) { - auto const result = env.rpc( - "json", - "node_to_shard", - to_string(jvParams))[jss::result]; + // Verify that the status object accurately + // reflects import progress. 
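The loops being reworked in this hunk all follow the same wait-with-timeout idiom: poll a condition, sleep briefly between checks, and only report failure once a generous deadline has passed. A minimal, self-contained sketch of that idiom is below; the `waitFor` helper, its durations, and the `conditionMet` stand-in are illustrative assumptions, not rippled code.

```
#include <chrono>
#include <functional>
#include <iostream>
#include <thread>

// Illustrative helper (hypothetical name): poll `condition` every `interval`
// until it returns true or `maxWait` elapses. Returns whether the condition
// was observed before the deadline.
bool
waitFor(
    std::function<bool()> condition,
    std::chrono::milliseconds interval = std::chrono::milliseconds{100},
    std::chrono::seconds maxWait = std::chrono::seconds{180})
{
    auto const start = std::chrono::system_clock::now();
    while (!condition())
    {
        if (std::chrono::system_clock::now() - start > maxWait)
            return false;  // timed out; the caller decides how to report it
        std::this_thread::sleep_for(interval);
    }
    return true;
}

int
main()
{
    int counter = 0;
    // Stand-in for "the shard import finished": becomes true after a while.
    bool const ok = waitFor([&counter] { return ++counter > 25; });
    std::cout << (ok ? "condition met\n" : "timed out\n");
}
```

Raising the bound (60s to 180s here, 10s to 30s later in the file) trades a slower worst-case failure for fewer false negatives on slow CI machines.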
+ + auto const completeShards = + shardStore->getShardInfo()->finalized(); - if (!importCompleted(shardStore, numberOfShards, result)) + if (!completeShards.empty()) { - BEAST_EXPECT(result[jss::firstShardIndex] == 1); - BEAST_EXPECT(result[jss::lastShardIndex] == 10); + auto const result = env.rpc( + "json", + "node_to_shard", + to_string(jvParams))[jss::result]; + + if (!importCompleted( + shardStore, numberOfShards, result)) + { + BEAST_EXPECT(result[jss::firstShardIndex] == 1); + BEAST_EXPECT(result[jss::lastShardIndex] == 10); + } } - } - if (boost::icl::contains(completeShards, 1)) - { - auto const result = env.rpc( - "json", - "node_to_shard", - to_string(jvParams))[jss::result]; + if (boost::icl::contains(completeShards, 1)) + { + auto const result = env.rpc( + "json", + "node_to_shard", + to_string(jvParams))[jss::result]; - BEAST_EXPECT( - result[jss::currentShardIndex] >= 1 || - importCompleted(shardStore, numberOfShards, result)); + BEAST_EXPECT( + result[jss::currentShardIndex] >= 1 || + importCompleted( + shardStore, numberOfShards, result)); - break; - } + break; + } - if (std::this_thread::sleep_for(std::chrono::milliseconds{100}); - std::chrono::system_clock::now() - start > maxWait) - { - BEAST_EXPECTS( - false, "Import timeout: could just be a slow machine."); - break; + if (std::this_thread::sleep_for( + std::chrono::milliseconds{100}); + std::chrono::system_clock::now() - start > maxWait) + { + BEAST_EXPECTS( + false, + "Import timeout: could just be a slow machine."); + break; + } } } - // Wait for the import to complete - while (!boost::icl::contains( - shardStore->getShardInfo()->finalized(), 10)) { - if (std::this_thread::sleep_for(std::chrono::milliseconds{100}); - std::chrono::system_clock::now() - start > maxWait) + // Wait for the import to complete + auto const start = std::chrono::system_clock::now(); + while (!boost::icl::contains( + shardStore->getShardInfo()->finalized(), 10)) { - BEAST_EXPECT( - importCompleted(shardStore, numberOfShards, result)); - break; + if (std::this_thread::sleep_for( + std::chrono::milliseconds{100}); + std::chrono::system_clock::now() - start > maxWait) + { + BEAST_EXPECT(importCompleted( + shardStore, numberOfShards, result)); + break; + } } } } @@ -323,7 +333,7 @@ class NodeToShardRPC_test : public beast::unit_test::suite result[jss::status] == "success" || importCompleted(shardStore, numberOfShards, result)); - std::chrono::seconds const maxWait{10}; + std::chrono::seconds const maxWait{30}; auto const start = std::chrono::system_clock::now(); while (shardStore->getShardInfo()->finalized().empty()) @@ -352,7 +362,7 @@ class NodeToShardRPC_test : public beast::unit_test::suite importCompleted(shardStore, numberOfShards, result)); } - std::chrono::seconds const maxWait{10}; + std::chrono::seconds const maxWait{30}; auto const start = std::chrono::system_clock::now(); while (true) From 3726f8bf31b3eab8bab39dce139656fd705ae9a0 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 18 Jul 2022 11:31:36 -0400 Subject: [PATCH 057/508] Release TaggedCache object memory outside the lock --- src/ripple/basics/TaggedCache.h | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/src/ripple/basics/TaggedCache.h b/src/ripple/basics/TaggedCache.h index d9a1b542ed9..6765ff16bee 100644 --- a/src/ripple/basics/TaggedCache.h +++ b/src/ripple/basics/TaggedCache.h @@ -196,14 +196,17 @@ class TaggedCache return true; } + using SweptPointersVector = std::pair< + std::vector>, + std::vector>>; + void sweep() { // Keep 
references to all the stuff we sweep // For performance, each worker thread should exit before the swept data // is destroyed but still within the main cache lock. - std::vector>> allStuffToSweep( - m_cache.partitions()); + std::vector allStuffToSweep(m_cache.partitions()); clock_type::time_point const now(m_clock.now()); clock_type::time_point when_expire; @@ -652,9 +655,9 @@ class TaggedCache clock_type::time_point const& when_expire, [[maybe_unused]] clock_type::time_point const& now, typename KeyValueCacheType::map_type& partition, - std::vector>& stuffToSweep, + SweptPointersVector& stuffToSweep, std::atomic& allRemovals, - std::lock_guard const& lock) + std::lock_guard const&) { return std::thread([&, this]() { int cacheRemovals = 0; @@ -662,7 +665,8 @@ class TaggedCache // Keep references to all the stuff we sweep // so that we can destroy them outside the lock. - stuffToSweep.reserve(partition.size()); + stuffToSweep.first.reserve(partition.size()); + stuffToSweep.second.reserve(partition.size()); { auto cit = partition.begin(); while (cit != partition.end()) @@ -672,6 +676,8 @@ class TaggedCache // weak if (cit->second.isExpired()) { + stuffToSweep.second.push_back( + std::move(cit->second.weak_ptr)); ++mapRemovals; cit = partition.erase(cit); } @@ -686,7 +692,8 @@ class TaggedCache ++cacheRemovals; if (cit->second.ptr.use_count() == 1) { - stuffToSweep.push_back(cit->second.ptr); + stuffToSweep.first.push_back( + std::move(cit->second.ptr)); ++mapRemovals; cit = partition.erase(cit); } @@ -722,9 +729,9 @@ class TaggedCache clock_type::time_point const& when_expire, clock_type::time_point const& now, typename KeyOnlyCacheType::map_type& partition, - std::vector>& stuffToSweep, + SweptPointersVector&, std::atomic& allRemovals, - std::lock_guard const& lock) + std::lock_guard const&) { return std::thread([&, this]() { int cacheRemovals = 0; @@ -732,7 +739,6 @@ class TaggedCache // Keep references to all the stuff we sweep // so that we can destroy them outside the lock. 
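That comment describes the whole point of the patch: move the doomed smart pointers into a local container while the mutex is held, then let that container be destroyed after the lock is released so the (potentially expensive) object teardown never blocks other cache users. A compilable sketch of the pattern under simplified assumptions — a plain `std::map` cache and an invented `Item` type rather than the real `TaggedCache` internals:

```
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <vector>

struct Item
{
    std::string payload;  // pretend this is expensive to destroy
};

std::mutex cacheMutex;
std::map<int, std::shared_ptr<Item>> cache;  // illustrative cache

void
sweep()
{
    // Swept entries are *moved* here so they leave the map while the lock
    // is held, but the actual destruction happens when this vector goes out
    // of scope -- after the lock_guard below has been released.
    std::vector<std::shared_ptr<Item>> sweptOutsideLock;

    {
        std::lock_guard lock(cacheMutex);
        sweptOutsideLock.reserve(cache.size());
        for (auto it = cache.begin(); it != cache.end();)
        {
            if (it->second.use_count() == 1)  // only the cache holds it
            {
                sweptOutsideLock.push_back(std::move(it->second));
                it = cache.erase(it);
            }
            else
                ++it;
        }
    }  // mutex released here

    // sweptOutsideLock is destroyed at end of scope, freeing the Items
    // without blocking other threads that need cacheMutex.
}

int
main()
{
    cache.emplace(1, std::make_shared<Item>(Item{std::string(1024, 'x')}));
    sweep();
}
```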
- stuffToSweep.reserve(partition.size()); { auto cit = partition.begin(); while (cit != partition.end()) From 7e9e9104eabbf0391a0837de5630af17a788e233 Mon Sep 17 00:00:00 2001 From: Scott Schurr Date: Mon, 2 May 2022 18:09:29 -0700 Subject: [PATCH 058/508] Rename SHAMapStoreImp::stopping() to healthWait() --- src/ripple/app/misc/SHAMapStoreImp.cpp | 32 +++++++++++++------------- src/ripple/app/misc/SHAMapStoreImp.h | 16 ++++++------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/src/ripple/app/misc/SHAMapStoreImp.cpp b/src/ripple/app/misc/SHAMapStoreImp.cpp index af568d0274e..d5cb07792dc 100644 --- a/src/ripple/app/misc/SHAMapStoreImp.cpp +++ b/src/ripple/app/misc/SHAMapStoreImp.cpp @@ -269,7 +269,7 @@ SHAMapStoreImp::copyNode(std::uint64_t& nodeCount, SHAMapTreeNode const& node) true); if (!(++nodeCount % checkHealthInterval_)) { - if (stopping()) + if (healthWait() == stopping) return false; } @@ -327,7 +327,7 @@ SHAMapStoreImp::run() bool const readyToRotate = validatedSeq >= lastRotated + deleteInterval_ && - canDelete_ >= lastRotated - 1 && !stopping(); + canDelete_ >= lastRotated - 1 && healthWait() == keepGoing; // Make sure we don't delete ledgers currently being // imported into the ShardStore @@ -359,7 +359,7 @@ SHAMapStoreImp::run() << ledgerMaster_->getValidatedLedgerAge().count() << 's'; clearPrior(lastRotated); - if (stopping()) + if (healthWait() == stopping) return; JLOG(journal_.debug()) << "copying ledger " << validatedSeq; @@ -382,7 +382,7 @@ SHAMapStoreImp::run() continue; } - if (stopping()) + if (healthWait() == stopping) return; // Only log if we completed without a "health" abort JLOG(journal_.debug()) << "copied ledger " << validatedSeq @@ -390,7 +390,7 @@ SHAMapStoreImp::run() JLOG(journal_.debug()) << "freshening caches"; freshenCaches(); - if (stopping()) + if (healthWait() == stopping) return; // Only log if we completed without a "health" abort JLOG(journal_.debug()) << validatedSeq << " freshened caches"; @@ -401,7 +401,7 @@ SHAMapStoreImp::run() << validatedSeq << " new backend " << newBackend->getName(); clearCaches(validatedSeq); - if (stopping()) + if (healthWait() == stopping) return; lastRotated = validatedSeq; @@ -566,7 +566,7 @@ SHAMapStoreImp::clearSql( min = *m; } - if (min > lastRotated || stopping()) + if (min > lastRotated || healthWait() == stopping) return; if (min == lastRotated) { @@ -587,11 +587,11 @@ SHAMapStoreImp::clearSql( JLOG(journal_.trace()) << "End: Delete up to " << deleteBatch_ << " rows with LedgerSeq < " << min << " from: " << TableName; - if (stopping()) + if (healthWait() == stopping) return; if (min < lastRotated) std::this_thread::sleep_for(backOff_); - if (stopping()) + if (healthWait() == stopping) return; } JLOG(journal_.debug()) << "finished deleting from: " << TableName; @@ -631,7 +631,7 @@ SHAMapStoreImp::clearPrior(LedgerIndex lastRotated) ledgerMaster_->clearPriorLedgers(lastRotated); JLOG(journal_.trace()) << "End: Clear internal ledgers up to " << lastRotated; - if (stopping()) + if (healthWait() == stopping) return; SQLiteDatabase* const db = @@ -645,7 +645,7 @@ SHAMapStoreImp::clearPrior(LedgerIndex lastRotated) "Ledgers", [db]() -> std::optional { return db->getMinLedgerSeq(); }, [db](LedgerIndex min) -> void { db->deleteBeforeLedgerSeq(min); }); - if (stopping()) + if (healthWait() == stopping) return; if (!app_.config().useTxTables()) @@ -660,7 +660,7 @@ SHAMapStoreImp::clearPrior(LedgerIndex lastRotated) [&db](LedgerIndex min) -> void { db->deleteTransactionsBeforeLedgerSeq(min); }); - if 
(stopping()) + if (healthWait() == stopping) return; clearSql( @@ -672,12 +672,12 @@ SHAMapStoreImp::clearPrior(LedgerIndex lastRotated) [&db](LedgerIndex min) -> void { db->deleteAccountTransactionsBeforeLedgerSeq(min); }); - if (stopping()) + if (healthWait() == stopping) return; } -bool -SHAMapStoreImp::stopping() +SHAMapStoreImp::HealthResult +SHAMapStoreImp::healthWait() { auto age = ledgerMaster_->getValidatedLedgerAge(); OperatingMode mode = netOPs_->getOperatingMode(); @@ -695,7 +695,7 @@ SHAMapStoreImp::stopping() lock.lock(); } - return stop_; + return stop_ ? stopping : keepGoing; } void diff --git a/src/ripple/app/misc/SHAMapStoreImp.h b/src/ripple/app/misc/SHAMapStoreImp.h index a2d7b3006a8..995ee0267bb 100644 --- a/src/ripple/app/misc/SHAMapStoreImp.h +++ b/src/ripple/app/misc/SHAMapStoreImp.h @@ -104,9 +104,8 @@ class SHAMapStoreImp : public SHAMapStore std::uint32_t deleteBatch_ = 100; std::chrono::milliseconds backOff_{100}; std::chrono::seconds ageThreshold_{60}; - /// If the node is out of sync during an - /// online_delete health check, sleep the thread - /// for this time, and continue checking until + /// If the node is out of sync during an online_delete healthWait() + /// call, sleep the thread for this time, and continue checking until /// recovery. /// See also: "recovery_wait_seconds" in rippled-example.cfg std::chrono::seconds recoveryWaitTime_{5}; @@ -199,7 +198,7 @@ class SHAMapStoreImp : public SHAMapStore { dbRotating_->fetchNodeObject( key, 0, NodeStore::FetchType::synchronous, true); - if (!(++check % checkHealthInterval_) && stopping()) + if (!(++check % checkHealthInterval_) && healthWait() == stopping) return true; } @@ -225,13 +224,14 @@ class SHAMapStoreImp : public SHAMapStore /** * This is a health check for online deletion that waits until rippled is - * stable until returning. If the server is stopping, then it returns - * "true" to inform the caller to allow the server to stop. + * stable before returning. It returns an indication of whether the server + * is stopping. * * @return Whether the server is stopping. */ - bool - stopping(); + enum HealthResult { stopping, keepGoing }; + [[nodiscard]] HealthResult + healthWait(); public: void From 649ab872fff2204683960715d6cb26866dcee7a8 Mon Sep 17 00:00:00 2001 From: Scott Schurr Date: Fri, 1 Apr 2022 15:59:20 -0700 Subject: [PATCH 059/508] Add featureImmediateOfferKilled for tfImmediateOrCancel offers: Fixes #4115; https://github.com/ripple/rippled/issues/4115 --- src/ripple/app/tx/impl/CreateOffer.cpp | 6 ++++++ src/ripple/protocol/Feature.h | 3 ++- src/ripple/protocol/impl/Feature.cpp | 1 + src/ripple/protocol/impl/TER.cpp | 2 +- src/test/app/Offer_test.cpp | 27 ++++++++++++++++---------- 5 files changed, 27 insertions(+), 12 deletions(-) diff --git a/src/ripple/app/tx/impl/CreateOffer.cpp b/src/ripple/app/tx/impl/CreateOffer.cpp index 9bb7532852f..4f1d9108bca 100644 --- a/src/ripple/app/tx/impl/CreateOffer.cpp +++ b/src/ripple/app/tx/impl/CreateOffer.cpp @@ -1107,6 +1107,12 @@ CreateOffer::applyGuts(Sandbox& sb, Sandbox& sbCancel) if (bImmediateOrCancel) { JLOG(j_.trace()) << "Immediate or cancel: offer canceled"; + if (!crossed && sb.rules().enabled(featureImmediateOfferKilled)) + // If the ImmediateOfferKilled amendment is enabled, any + // ImmediateOrCancel offer that transfers absolutely no funds + // returns tecKILLED rather than tesSUCCESS. 
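In other words, this is the usual amendment-gating idiom: the stricter result code is produced only when the ledger's rules report the amendment as active, so validators on either side of the cutover keep computing identical transaction results. A stripped-down sketch of that control flow, using invented stand-ins for the rules object, the feature identifier, and the result codes rather than rippled's actual types:

```
#include <cstdint>
#include <iostream>
#include <set>

// Hypothetical stand-ins for the amendment machinery.
using FeatureID = std::uint32_t;
constexpr FeatureID kImmediateOfferKilled = 42;

struct Rules
{
    std::set<FeatureID> enabledAmendments;
    bool
    enabled(FeatureID f) const
    {
        return enabledAmendments.count(f) != 0;
    }
};

enum class Result { tesSUCCESS, tecKILLED };

// Amendment-gated logic: the new, stricter result is only returned once the
// amendment is active in the ledger the transaction is applied against.
Result
applyImmediateOrCancel(Rules const& rules, bool crossedAnyOffers)
{
    if (!crossedAnyOffers && rules.enabled(kImmediateOfferKilled))
        return Result::tecKILLED;  // post-amendment: no funds moved -> killed
    return Result::tesSUCCESS;     // pre-amendment, or something crossed
}

int
main()
{
    Rules pre;                             // amendment off
    Rules post{{kImmediateOfferKilled}};   // amendment on
    std::cout
        << (applyImmediateOrCancel(pre, false) == Result::tesSUCCESS)
        << (applyImmediateOrCancel(post, false) == Result::tecKILLED)
        << '\n';  // prints "11"
}
```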
Motivation for the + // change is here: https://github.com/ripple/rippled/issues/4115 + return {tecKILLED, false}; return {tesSUCCESS, true}; } diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index 8896d86e4b2..fac54c2fa71 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -74,7 +74,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. A LogicError on startup will verify this. -static constexpr std::size_t numFeatures = 51; +static constexpr std::size_t numFeatures = 53; /** Amendments that this server supports and the default voting behavior. Whether they are enabled depends on the Rules defined in the validated @@ -339,6 +339,7 @@ extern uint256 const fixNFTokenNegOffer; extern uint256 const featureNonFungibleTokensV1_1; extern uint256 const fixTrustLinesToSelf; extern uint256 const fixRemoveNFTokenAutoTrustLine; +extern uint256 const featureImmediateOfferKilled; } // namespace ripple diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index 51e7360816d..fa0d167ef09 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -449,6 +449,7 @@ REGISTER_FIX (fixNFTokenNegOffer, Supported::yes, DefaultVote::no) REGISTER_FEATURE(NonFungibleTokensV1_1, Supported::yes, DefaultVote::no); REGISTER_FIX (fixTrustLinesToSelf, Supported::yes, DefaultVote::no); REGISTER_FIX (fixRemoveNFTokenAutoTrustLine, Supported::yes, DefaultVote::yes); +REGISTER_FEATURE(ImmediateOfferKilled, Supported::yes, DefaultVote::no); // The following amendments have been active for at least two years. Their // pre-amendment code has been removed and the identifiers are deprecated. diff --git a/src/ripple/protocol/impl/TER.cpp b/src/ripple/protocol/impl/TER.cpp index c660b1cea3f..a845bdaeebc 100644 --- a/src/ripple/protocol/impl/TER.cpp +++ b/src/ripple/protocol/impl/TER.cpp @@ -77,7 +77,7 @@ transResults() MAKE_ERROR(tecINVARIANT_FAILED, "One or more invariants for the transaction were not satisfied."), MAKE_ERROR(tecEXPIRED, "Expiration time is passed."), MAKE_ERROR(tecDUPLICATE, "Ledger object already exists."), - MAKE_ERROR(tecKILLED, "FillOrKill offer killed."), + MAKE_ERROR(tecKILLED, "No funds transferred and no offer created."), MAKE_ERROR(tecHAS_OBLIGATIONS, "The account cannot be deleted since it has obligations."), MAKE_ERROR(tecTOO_SOON, "It is too early to attempt the requested operation. Please wait."), MAKE_ERROR(tecMAX_SEQUENCE_REACHED, "The maximum sequence number was reached."), diff --git a/src/test/app/Offer_test.cpp b/src/test/app/Offer_test.cpp index 200e1c4aa51..9f6e165bc16 100644 --- a/src/test/app/Offer_test.cpp +++ b/src/test/app/Offer_test.cpp @@ -950,9 +950,14 @@ class Offer_test : public beast::unit_test::suite env(pay(gw, alice, USD(1000)), ter(tesSUCCESS)); // No cross: - env(offer(alice, XRP(1000), USD(1000)), - txflags(tfImmediateOrCancel), - ter(tesSUCCESS)); + { + TER const expectedCode = features[featureImmediateOfferKilled] + ? 
static_cast(tecKILLED) + : static_cast(tesSUCCESS); + env(offer(alice, XRP(1000), USD(1000)), + txflags(tfImmediateOrCancel), + ter(expectedCode)); + } env.require( balance(alice, startBalance - f - f), @@ -5165,11 +5170,12 @@ class Offer_test : public beast::unit_test::suite FeatureBitset const flowCross{featureFlowCross}; FeatureBitset const takerDryOffer{fixTakerDryOfferRemoval}; FeatureBitset const rmSmallIncreasedQOffers{fixRmSmallIncreasedQOffers}; + FeatureBitset const immediateOfferKilled{featureImmediateOfferKilled}; - testAll(all - takerDryOffer); - testAll(all - flowCross - takerDryOffer); - testAll(all - flowCross); - testAll(all - rmSmallIncreasedQOffers); + testAll(all - takerDryOffer - immediateOfferKilled); + testAll(all - flowCross - takerDryOffer - immediateOfferKilled); + testAll(all - flowCross - immediateOfferKilled); + testAll(all - rmSmallIncreasedQOffers - immediateOfferKilled); testAll(all); testFalseAssert(); } @@ -5184,11 +5190,12 @@ class Offer_manual_test : public Offer_test FeatureBitset const all{supported_amendments()}; FeatureBitset const flowCross{featureFlowCross}; FeatureBitset const f1513{fix1513}; + FeatureBitset const immediateOfferKilled{featureImmediateOfferKilled}; FeatureBitset const takerDryOffer{fixTakerDryOfferRemoval}; - testAll(all - flowCross - f1513); - testAll(all - flowCross); - testAll(all - f1513); + testAll(all - flowCross - f1513 - immediateOfferKilled); + testAll(all - flowCross - immediateOfferKilled); + testAll(all - immediateOfferKilled); testAll(all); testAll(all - flowCross - takerDryOffer); From ebbf4b64a57d2673d54a6b25def7726601d99bf9 Mon Sep 17 00:00:00 2001 From: manojsdoshi Date: Tue, 11 Oct 2022 17:13:13 -0700 Subject: [PATCH 060/508] Set version to 1.10.0-b2 --- src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index 9a28a55cb21..d3f5ff34537 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.9.4" +char const* const versionString = "1.10.0-b2" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 004ec2d2014f248280c715b55b9025388a819075 Mon Sep 17 00:00:00 2001 From: "J. Scott Branson" <18340247+crypticrabbit@users.noreply.github.com> Date: Mon, 28 Nov 2022 16:49:38 -0500 Subject: [PATCH 061/508] Update Linux Build Instructions (#4018) * Removed a reference to the default number of workers varying based on whether a node has validation enabled. Workers default to the number of processor cores + 2: https://github.com/ripple/rippled/blob/develop/src/ripple/core/impl/JobQueue.cpp#L166 * Protobuf v2 and Ubuntu 16.04 are no longer supported. * Updated protobuf version as v3 is now supported, fixed typos, automatically sent number of processors when building boost & rippled. --- Builds/linux/README.md | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/Builds/linux/README.md b/Builds/linux/README.md index 001a3705c73..15a84a33e43 100644 --- a/Builds/linux/README.md +++ b/Builds/linux/README.md @@ -7,10 +7,11 @@ the [rippled-package-builder](https://github.com/ripple/rippled-package-builder) repository. 
Note: Ubuntu 16.04 users may need to update their compiler (see the dependencies -section). For non Ubuntu distributions, the steps below should work be +section). For non Ubuntu distributions, the steps below should work by installing the appropriate dependencies using that distribution's package management tools. + ## Dependencies gcc-8 or later is required. @@ -28,8 +29,6 @@ $ apt-get install -y autoconf flex bison ``` Advanced users can choose to install newer versions of gcc, or the clang compiler. -At this time, rippled only supports protobuf version 2. Using version 3 of -protobuf will give errors. ### Build Boost @@ -42,7 +41,7 @@ $ tar -xzf boost_1_70_0.tar.gz $ cd boost_1_70_0 $ ./bootstrap.sh $ ./b2 headers -$ ./b2 -j +$ ./b2 -j $(echo $(nproc)-2 | bc) ``` ### (Optional) Dependencies for Building Source Documentation @@ -88,8 +87,8 @@ git checkout develop If you didn't persistently set the `BOOST_ROOT` environment variable to the directory in which you compiled boost, then you should set it temporarily. -For example, you built Boost in your home directory `~/boost_1_70_0`, you -would do for any shell in which you want to build: +For example, if you built Boost in your home directory `~/boost_1_70_0`, you +would run the following shell command: ``` export BOOST_ROOT=~/boost_1_70_0 @@ -104,8 +103,8 @@ All builds should be done in a separate directory from the source tree root (a subdirectory is fine). For example, from the root of the ripple source tree: ``` -mkdir my_build -cd my_build +mkdir build +cd build ``` followed by: @@ -153,7 +152,7 @@ Several other infrequently used options are available - run `ccmake` or Once you have generated the build system, you can run the build via cmake: ``` -cmake --build . -- -j +cmake --build . -- -j $(echo $(nproc)-2 | bc) ``` the `-j` parameter in this example tells the build tool to compile several @@ -174,7 +173,7 @@ building, e.g.: ``` cmake -DCMAKE_BUILD_TYPE=Debug -DCMAKE_INSTALL_PREFIX=/opt/local .. -cmake --build . --target install -- -j +cmake --build . --target install -- -j $(echo $(nproc)-2 | bc) ``` We recommend specifying `CMAKE_INSTALL_PREFIX` when configuring in order to @@ -184,7 +183,7 @@ the installation by specifying the `DESTDIR` env variable during the install pha e.g.: ``` -DESTDIR=~/mylibs cmake --build . --target install -- -j +DESTDIR=~/mylibs cmake --build . --target install -- -j $(echo $(nproc)-2 | bc) ``` in which case, the files would be installed in the `CMAKE_INSTALL_PREFIX` within @@ -213,13 +212,13 @@ git submodule add -b master https://github.com/ripple/rippled.git vendor/rippled change the `vendor/rippled` path as desired for your repo layout. Furthermore, change the branch name if you want to track a different rippled branch, such as `develop`. 
- + Second, to bring this submodule into your project, just add the rippled subdirectory: ``` add_subdirectory (vendor/rippled) ``` - + ##### Option 2: installed rippled + find_package First, follow the "Optional Installation" instructions above to From b7ac73c8e40df6dc0a7c9799fb79ce974bdbcbea Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Fri, 18 Nov 2022 18:04:39 -0500 Subject: [PATCH 062/508] Don't try to read SLE with key 0 from the ledger: * May resolve #4341 --- src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp | 3 +++ src/test/app/NFToken_test.cpp | 12 ++++++++++++ 2 files changed, 15 insertions(+) diff --git a/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp b/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp index fb5f51c7251..07fe9957a76 100644 --- a/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp +++ b/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp @@ -67,6 +67,9 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) -> std::pair, TER> { if (id) { + if (id->isZero()) + return {nullptr, tecOBJECT_NOT_FOUND}; + auto offerSLE = ctx.view.read(keylet::nftoffer(*id)); if (!offerSLE) diff --git a/src/test/app/NFToken_test.cpp b/src/test/app/NFToken_test.cpp index ac86bf74988..2fb27f8a352 100644 --- a/src/test/app/NFToken_test.cpp +++ b/src/test/app/NFToken_test.cpp @@ -1130,6 +1130,12 @@ class NFToken_test : public beast::unit_test::suite //---------------------------------------------------------------------- // preclaim + // The buy offer must be non-zero. + env(token::acceptBuyOffer(buyer, beast::zero), + ter(tecOBJECT_NOT_FOUND)); + env.close(); + BEAST_EXPECT(ownerCount(env, buyer) == 0); + // The buy offer must be present in the ledger. uint256 const missingOfferIndex = keylet::nftoffer(alice, 1).key; env(token::acceptBuyOffer(buyer, missingOfferIndex), @@ -1142,6 +1148,12 @@ class NFToken_test : public beast::unit_test::suite env.close(); BEAST_EXPECT(ownerCount(env, buyer) == 0); + // The sell offer must be non-zero. + env(token::acceptSellOffer(buyer, beast::zero), + ter(tecOBJECT_NOT_FOUND)); + env.close(); + BEAST_EXPECT(ownerCount(env, buyer) == 0); + // The sell offer must be present in the ledger. env(token::acceptSellOffer(buyer, missingOfferIndex), ter(tecOBJECT_NOT_FOUND)); From 093055c03959d9fa90394ed244462a74e2ec9c64 Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Mon, 28 Nov 2022 13:57:38 -0800 Subject: [PATCH 063/508] README - add link to ripple-server mailing list (#3890) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 20ce719d79a..267ecfb258b 100644 --- a/README.md +++ b/README.md @@ -56,4 +56,5 @@ git-subtree. See those directories' README files for more details. * [XRP Ledger Dev Portal](https://xrpl.org/) * [Setup and Installation](https://xrpl.org/install-rippled.html) * [Source Documentation (Doxygen)](https://xrplf.github.io/rippled/) +* [Mailing List for Release Announcements](https://groups.google.com/g/ripple-server) * [Learn more about the XRP Ledger (YouTube)](https://www.youtube.com/playlist?list=PLJQ55Tj1hIVZtJ_JdTvSum2qMTsedWkNi) From 518fb6d2087d4bc666a2f220441863d742c5f3e4 Mon Sep 17 00:00:00 2001 From: Scott Determan Date: Mon, 28 Nov 2022 17:01:54 -0500 Subject: [PATCH 064/508] Improve move semantics in Expected (#4326) * Improve move semantics in Expected: This patch unconditionally moves an `Unexpected` value parameter as long as `U` is not a reference. If `U` is a reference the code should not compile. An error type that holds a reference is a strange use-case, and an overload is not provided. 
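For context, `Expected<T, E>` (rippled's is built on `boost::outcome`, as the diff below shows) carries either a value or an error, and `Unexpected` is the wrapper callers use to hand back the error; the converting constructor taking `Unexpected` is exactly the path this commit switches from copy to move. A rough, self-contained illustration of the calling convention follows — note this simplified `std::variant`-based stand-in is not the real implementation:

```
#include <iostream>
#include <string>
#include <utility>
#include <variant>

// Simplified stand-ins (NOT rippled's Expected) to show the usage pattern:
// a function returns either a T or an Unexpected<E>.
template <class E>
struct Unexpected
{
    E error;
};

template <class T, class E>
class Expected
{
    std::variant<T, E> v_;

public:
    Expected(T t) : v_(std::move(t)) {}                    // value case
    Expected(Unexpected<E> e) : v_(std::move(e.error)) {}  // error case, moved in
    explicit operator bool() const { return v_.index() == 0; }
    T const& value() const { return std::get<0>(v_); }
    E const& error() const { return std::get<1>(v_); }
};

Expected<int, std::string>
parsePort(std::string const& s)
{
    if (s.empty() || s.find_first_not_of("0123456789") != std::string::npos)
        return Unexpected<std::string>{"not a number: " + s};
    return std::stoi(s);
}

int
main()
{
    auto ok = parsePort("5005");
    auto bad = parsePort("x");
    std::cout << (ok ? std::to_string(ok.value()) : ok.error()) << '\n';
    std::cout << (bad ? std::to_string(bad.value()) : bad.error()) << '\n';
}
```

Taking the `Unexpected` parameter by value and moving its payload, as the patch now does, avoids an extra copy of the error object when callers construct it in place.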
If it is required in the future it can be added. The `Expected(U r)` overload should take a forwarding ref. * Replace enable_if with concepts in Expected --- src/ripple/basics/Expected.h | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/src/ripple/basics/Expected.h b/src/ripple/basics/Expected.h index 09d2bdc5042..8dc368eefd7 100644 --- a/src/ripple/basics/Expected.h +++ b/src/ripple/basics/Expected.h @@ -21,7 +21,10 @@ #define RIPPLE_BASICS_EXPECTED_H_INCLUDED #include + #include + +#include #include #include @@ -132,17 +135,16 @@ class [[nodiscard]] Expected using Base = boost::outcome_v2::result; public: - template < - typename U, - typename = std::enable_if_t>> - constexpr Expected(U r) : Base(T{std::forward(r)}) + template + requires std::convertible_to constexpr Expected(U && r) + : Base(T{std::forward(r)}) { } - template < - typename U, - typename = std::enable_if_t>> - constexpr Expected(Unexpected e) : Base(E{std::forward(e.value())}) + template + requires std::convertible_to && + (!std::is_reference_v)constexpr Expected(Unexpected e) + : Base(E{std::move(e.value())}) { } @@ -215,10 +217,10 @@ class [[nodiscard]] Expected { } - template < - typename U, - typename = std::enable_if_t>> - constexpr Expected(Unexpected e) : Base(E{std::forward(e.value())}) + template + requires std::convertible_to && + (!std::is_reference_v)constexpr Expected(Unexpected e) + : Base(E{std::move(e.value())}) { } From ffd453f7dd091b0499fd6ab964880c8268deead4 Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Mon, 28 Nov 2022 14:12:16 -0800 Subject: [PATCH 065/508] Set version to 1.10.0-rc1 --- src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index d3f5ff34537..22f2f17be02 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.10.0-b2" +char const* const versionString = "1.10.0-rc1" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 4a5ad4cfacd1a7d5329bbd15129234471d2f9fe2 Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Thu, 8 Dec 2022 19:30:05 -0500 Subject: [PATCH 066/508] Update affiliation: Nikolaos D Bougalis (#4360) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 44530b4f7d7..06b9d622cbb 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -58,7 +58,7 @@ Existing maintainers can resign, or be subject to a vote for removal at the behe * [JoelKatz](https://github.com/JoelKatz) (Ripple) * [Manojsdoshi](https://github.com/manojsdoshi) (Ripple) * [N3tc4t](https://github.com/n3tc4t) (XRPL Labs) -* [Nikolaos D Bougalis](https://github.com/nbougalis) (Ripple) +* [Nikolaos D Bougalis](https://github.com/nbougalis) * [Nixer89](https://github.com/nixer89) (XRP Ledger Foundation) * [RichardAH](https://github.com/RichardAH) (XRPL Labs + XRP Ledger Foundation) * [Seelabs](https://github.com/seelabs) (Ripple) From c1e7fe2d93625a354704c9312c4bad7e97129b6e Mon Sep 17 00:00:00 2001 From: Gregory Popovitch Date: Thu, 8 Dec 2022 23:15:21 -0500 Subject: [PATCH 067/508] Prevent unnecessary `shared_ptr` copies by accepting a value in `SHAMapInnerNode::setChild` (#4266) * Do a move 
instead of a copy in `SHAMapInnerNode::setChild` * Create the value directly in the call --- src/ripple/shamap/SHAMapInnerNode.h | 2 +- src/ripple/shamap/impl/SHAMap.cpp | 7 +++---- src/ripple/shamap/impl/SHAMapInnerNode.cpp | 4 ++-- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/src/ripple/shamap/SHAMapInnerNode.h b/src/ripple/shamap/SHAMapInnerNode.h index 5f0765e9c26..c85cdcbbc85 100644 --- a/src/ripple/shamap/SHAMapInnerNode.h +++ b/src/ripple/shamap/SHAMapInnerNode.h @@ -147,7 +147,7 @@ class SHAMapInnerNode final : public SHAMapTreeNode, getChildHash(int m) const; void - setChild(int m, std::shared_ptr const& child); + setChild(int m, std::shared_ptr child); void shareChild(int m, std::shared_ptr const& child); diff --git a/src/ripple/shamap/impl/SHAMap.cpp b/src/ripple/shamap/impl/SHAMap.cpp index 6f6acb9a7e1..1a5a283dd3c 100644 --- a/src/ripple/shamap/impl/SHAMap.cpp +++ b/src/ripple/shamap/impl/SHAMap.cpp @@ -118,7 +118,7 @@ SHAMap::dirtyUp( assert(branch >= 0); node = unshareNode(std::move(node), nodeID); - node->setChild(branch, child); + node->setChild(branch, std::move(child)); child = std::move(node); } @@ -718,7 +718,7 @@ SHAMap::delItem(uint256 const& id) stack.pop(); node = unshareNode(std::move(node), nodeID); - node->setChild(selectBranch(nodeID, id), prevNode); + node->setChild(selectBranch(nodeID, id), std::move(prevNode)); if (!nodeID.isRoot()) { @@ -795,8 +795,7 @@ SHAMap::addGiveItem(SHAMapNodeType type, std::shared_ptr item) auto inner = std::static_pointer_cast(node); int branch = selectBranch(nodeID, tag); assert(inner->isEmptyBranch(branch)); - auto newNode = makeTypedLeaf(type, std::move(item), cowid_); - inner->setChild(branch, newNode); + inner->setChild(branch, makeTypedLeaf(type, std::move(item), cowid_)); } else { diff --git a/src/ripple/shamap/impl/SHAMapInnerNode.cpp b/src/ripple/shamap/impl/SHAMapInnerNode.cpp index d408fe195f1..6ea6f47eb37 100644 --- a/src/ripple/shamap/impl/SHAMapInnerNode.cpp +++ b/src/ripple/shamap/impl/SHAMapInnerNode.cpp @@ -284,7 +284,7 @@ SHAMapInnerNode::getString(const SHAMapNodeID& id) const // We are modifying an inner node void -SHAMapInnerNode::setChild(int m, std::shared_ptr const& child) +SHAMapInnerNode::setChild(int m, std::shared_ptr child) { assert((m >= 0) && (m < branchFactor)); assert(cowid_ != 0); @@ -310,7 +310,7 @@ SHAMapInnerNode::setChild(int m, std::shared_ptr const& child) auto const childIndex = *getChildIndex(m); auto [_, hashes, children] = hashesAndChildren_.getHashesAndChildren(); hashes[childIndex].zero(); - children[childIndex] = child; + children[childIndex] = std::move(child); } hash_.zero(); From a79fa2026bad675d0146df5c734f70feddbbd5b8 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Thu, 8 Dec 2022 20:41:50 -0800 Subject: [PATCH 068/508] Update GitHub Actions to ubuntu-latest: (#4357) * Per actions/runner-images#6002, ubuntu-18.04 is being deprecated. If latest ever fails in the future, we'll need to fix the jobs anyway, so catch it early. 
* Use long option names * Force clang-format to ubuntu-20.04 because LLVM 10 is not available for 22.04 --- .github/workflows/clang-format.yml | 7 ++++--- .github/workflows/doxygen.yml | 2 +- .github/workflows/levelization.yml | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/clang-format.yml b/.github/workflows/clang-format.yml index 78ad98c7d9e..941789844b0 100644 --- a/.github/workflows/clang-format.yml +++ b/.github/workflows/clang-format.yml @@ -4,16 +4,17 @@ on: [push, pull_request] jobs: check: - runs-on: ubuntu-18.04 + runs-on: ubuntu-20.04 env: CLANG_VERSION: 10 steps: - uses: actions/checkout@v2 - name: Install clang-format run: | + codename=$( lsb_release --codename --short ) sudo tee /etc/apt/sources.list.d/llvm.list >/dev/null < Date: Tue, 13 Dec 2022 20:56:35 +0200 Subject: [PATCH 069/508] Reduce default reserves to 10/2 (#4329) Reduce the reserve requirements from 20/5 to 10/2 in line with the current network votes. The requirements of 10/2 have been on the network long enough that new nodes should not still have the old reserve amount. Co-authored-by: Richard Holland --- src/ripple/app/misc/FeeVote.h | 4 ++-- src/test/app/AccountDelete_test.cpp | 8 ++++---- src/test/app/FeeVote_test.cpp | 12 ++++++------ src/test/rpc/AccountTx_test.cpp | 2 +- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/src/ripple/app/misc/FeeVote.h b/src/ripple/app/misc/FeeVote.h index 543f4cdb634..d8948a150b3 100644 --- a/src/ripple/app/misc/FeeVote.h +++ b/src/ripple/app/misc/FeeVote.h @@ -46,10 +46,10 @@ class FeeVote static constexpr FeeUnit32 reference_fee_units{10}; /** The account reserve requirement in drops. */ - XRPAmount account_reserve{20 * DROPS_PER_XRP}; + XRPAmount account_reserve{10 * DROPS_PER_XRP}; /** The per-owned item reserve requirement in drops. */ - XRPAmount owner_reserve{5 * DROPS_PER_XRP}; + XRPAmount owner_reserve{2 * DROPS_PER_XRP}; }; virtual ~FeeVote() = default; diff --git a/src/test/app/AccountDelete_test.cpp b/src/test/app/AccountDelete_test.cpp index db2ac5799d0..73a0ccbf9e0 100644 --- a/src/test/app/AccountDelete_test.cpp +++ b/src/test/app/AccountDelete_test.cpp @@ -515,16 +515,16 @@ class AccountDelete_test : public beast::unit_test::suite // All it takes is a large enough XRP payment to resurrect // becky's account. Try too small a payment. - env(pay(alice, becky, XRP(19)), ter(tecNO_DST_INSUF_XRP)); + env(pay(alice, becky, XRP(9)), ter(tecNO_DST_INSUF_XRP)); env.close(); // Actually resurrect becky's account. - env(pay(alice, becky, XRP(20))); + env(pay(alice, becky, XRP(10))); env.close(); // becky's account root should be back. BEAST_EXPECT(env.closed()->exists(beckyAcctKey)); - BEAST_EXPECT(env.balance(becky) == XRP(20)); + BEAST_EXPECT(env.balance(becky) == XRP(10)); // becky's resurrected account can be the destination of alice's // PayChannel. 
@@ -541,7 +541,7 @@ class AccountDelete_test : public beast::unit_test::suite env(payChanClaim()); env.close(); - BEAST_EXPECT(env.balance(becky) == XRP(20) + payChanXRP); + BEAST_EXPECT(env.balance(becky) == XRP(10) + payChanXRP); } void diff --git a/src/test/app/FeeVote_test.cpp b/src/test/app/FeeVote_test.cpp index a2dd76fa47c..4c2acf6297d 100644 --- a/src/test/app/FeeVote_test.cpp +++ b/src/test/app/FeeVote_test.cpp @@ -34,8 +34,8 @@ class FeeVote_test : public beast::unit_test::suite Section config; auto setup = setup_FeeVote(config); BEAST_EXPECT(setup.reference_fee == 10); - BEAST_EXPECT(setup.account_reserve == 20 * DROPS_PER_XRP); - BEAST_EXPECT(setup.owner_reserve == 5 * DROPS_PER_XRP); + BEAST_EXPECT(setup.account_reserve == 10 * DROPS_PER_XRP); + BEAST_EXPECT(setup.owner_reserve == 2 * DROPS_PER_XRP); } { Section config; @@ -57,8 +57,8 @@ class FeeVote_test : public beast::unit_test::suite // Illegal values are ignored, and the defaults left unchanged auto setup = setup_FeeVote(config); BEAST_EXPECT(setup.reference_fee == 10); - BEAST_EXPECT(setup.account_reserve == 20 * DROPS_PER_XRP); - BEAST_EXPECT(setup.owner_reserve == 5 * DROPS_PER_XRP); + BEAST_EXPECT(setup.account_reserve == 10 * DROPS_PER_XRP); + BEAST_EXPECT(setup.owner_reserve == 2 * DROPS_PER_XRP); } { Section config; @@ -87,8 +87,8 @@ class FeeVote_test : public beast::unit_test::suite // Illegal values are ignored, and the defaults left unchanged auto setup = setup_FeeVote(config); BEAST_EXPECT(setup.reference_fee == 10); - BEAST_EXPECT(setup.account_reserve == 20 * DROPS_PER_XRP); - BEAST_EXPECT(setup.owner_reserve == 5 * DROPS_PER_XRP); + BEAST_EXPECT(setup.account_reserve == 10 * DROPS_PER_XRP); + BEAST_EXPECT(setup.owner_reserve == 2 * DROPS_PER_XRP); } } diff --git a/src/test/rpc/AccountTx_test.cpp b/src/test/rpc/AccountTx_test.cpp index f5709c28e87..1d537d47791 100644 --- a/src/test/rpc/AccountTx_test.cpp +++ b/src/test/rpc/AccountTx_test.cpp @@ -547,7 +547,7 @@ class AccountTx_test : public beast::unit_test::suite // All it takes is a large enough XRP payment to resurrect // becky's account. Try too small a payment. - env(pay(alice, becky, XRP(19)), ter(tecNO_DST_INSUF_XRP)); + env(pay(alice, becky, XRP(9)), ter(tecNO_DST_INSUF_XRP)); env.close(); // Actually resurrect becky's account. From 47ffc392d76333b8e5234a22eebfe7281f95473f Mon Sep 17 00:00:00 2001 From: Scott Determan Date: Tue, 13 Dec 2022 19:13:54 -0500 Subject: [PATCH 070/508] Work around gdb bug by changing a template parameter: (#4332) There's a bug in gdb where unsigned template parameters cause issues with RTTI. This patch changes a template parameter from `size_t` to `int` to work around this gdb bug. 
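To make the workaround above concrete, here is a minimal, self-contained sketch (not taken from the rippled sources; the names `BitString`, `bits`, and the aliases are placeholders) of a class template whose non-type parameter is declared as a signed `int` instead of `std::size_t`, with a `static_assert` preserving the "must be positive" invariant that the unsigned parameter used to express implicitly:

```
#include <array>
#include <cstddef>
#include <cstdint>

// Hypothetical illustration only: a signed non-type template parameter
// sidesteps the gdb RTTI lookup problem described in the commit message,
// while the static_assert rejects the values an unsigned parameter would
// have excluded by construction.
template <int Bits>  // was: template <std::size_t Bits>
class BitString
{
    static_assert(Bits > 0, "Number of bits must be positive");
    static_assert(Bits % 8 == 0, "Bits must be a multiple of 8");

    std::array<std::uint8_t, Bits / 8> data_{};

public:
    constexpr std::size_t
    size() const noexcept
    {
        // Widths are still treated as unsigned sizes internally.
        return static_cast<std::size_t>(Bits) / 8;
    }
};

// Instantiations mirror the STUInt128/160/256 aliases in the real header.
using BitString128 = BitString<128>;
using BitString256 = BitString<256>;

int
main()
{
    BitString256 h;
    return h.size() == 32 ? 0 : 1;
}
```

The only behavioral difference from the unsigned version is that a negative argument becomes representable, which the `static_assert` rejects at compile time; the diff below adds the equivalent check to `STBitString`.
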
--- src/ripple/protocol/SField.h | 2 +- src/ripple/protocol/STBitString.h | 34 ++++++++++++++++++------------- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/src/ripple/protocol/SField.h b/src/ripple/protocol/SField.h index 5039e4e0524..253d956408f 100644 --- a/src/ripple/protocol/SField.h +++ b/src/ripple/protocol/SField.h @@ -43,7 +43,7 @@ Some fields have a different meaning for their class STAccount; class STAmount; class STBlob; -template +template class STBitString; template class STInteger; diff --git a/src/ripple/protocol/STBitString.h b/src/ripple/protocol/STBitString.h index 1819d54d1cf..45d1a3d6f05 100644 --- a/src/ripple/protocol/STBitString.h +++ b/src/ripple/protocol/STBitString.h @@ -25,9 +25,15 @@ namespace ripple { -template +// The template parameter could be an unsigned type, however there's a bug in +// gdb (last checked in gdb 12.1) that prevents gdb from finding the RTTI +// information of a template parameterized by an unsigned type. This RTTI +// information is needed to write gdb pretty printers. +template class STBitString final : public STBase { + static_assert(Bits > 0, "Number of bits must be positive"); + public: using value_type = base_uint; @@ -79,36 +85,36 @@ using STUInt128 = STBitString<128>; using STUInt160 = STBitString<160>; using STUInt256 = STBitString<256>; -template +template inline STBitString::STBitString(SField const& n) : STBase(n) { } -template +template inline STBitString::STBitString(const value_type& v) : value_(v) { } -template +template inline STBitString::STBitString(SField const& n, const value_type& v) : STBase(n), value_(v) { } -template +template inline STBitString::STBitString(SerialIter& sit, SField const& name) : STBitString(name, sit.getBitString()) { } -template +template STBase* STBitString::copy(std::size_t n, void* buf) const { return emplace(n, buf, *this); } -template +template STBase* STBitString::move(std::size_t n, void* buf) { @@ -136,14 +142,14 @@ STUInt256::getSType() const return STI_UINT256; } -template +template std::string STBitString::getText() const { return to_string(value_); } -template +template bool STBitString::isEquivalent(const STBase& t) const { @@ -151,7 +157,7 @@ STBitString::isEquivalent(const STBase& t) const return v && (value_ == v->value_); } -template +template void STBitString::add(Serializer& s) const { @@ -160,7 +166,7 @@ STBitString::add(Serializer& s) const s.addBitString(value_); } -template +template template void STBitString::setValue(base_uint const& v) @@ -168,20 +174,20 @@ STBitString::setValue(base_uint const& v) value_ = v; } -template +template typename STBitString::value_type const& STBitString::value() const { return value_; } -template +template STBitString::operator value_type() const { return value_; } -template +template bool STBitString::isDefault() const { From 648d6c3e2f9ac399809709ad9ce319848a9a792e Mon Sep 17 00:00:00 2001 From: Scott Determan Date: Tue, 13 Dec 2022 19:21:22 -0500 Subject: [PATCH 071/508] Fix clang 15 warnings: (#4325) Clang warned about the code removed in this patch with the warning: ``` warning: out-of-line definition of constexpr static data member is redundant in C++17 and is deprecated [-Wdeprecated] ``` --- src/ripple/app/misc/impl/TxQ.cpp | 2 -- src/ripple/core/impl/Config.cpp | 2 -- src/ripple/rpc/impl/Status.cpp | 2 -- src/test/unit_test/multi_runner.cpp | 5 ----- 4 files changed, 11 deletions(-) diff --git a/src/ripple/app/misc/impl/TxQ.cpp b/src/ripple/app/misc/impl/TxQ.cpp index 0eeec8d62ae..59559cf24c6 100644 --- 
a/src/ripple/app/misc/impl/TxQ.cpp +++ b/src/ripple/app/misc/impl/TxQ.cpp @@ -78,8 +78,6 @@ increase(FeeLevel64 level, std::uint32_t increasePercent) ////////////////////////////////////////////////////////////////////////// -constexpr FeeLevel64 TxQ::baseLevel; - std::size_t TxQ::FeeMetrics::update( Application& app, diff --git a/src/ripple/core/impl/Config.cpp b/src/ripple/core/impl/Config.cpp index f8d8878a771..c2cfb14d21d 100644 --- a/src/ripple/core/impl/Config.cpp +++ b/src/ripple/core/impl/Config.cpp @@ -263,8 +263,6 @@ getEnvVar(char const* name) return value; } -constexpr FeeUnit32 Config::TRANSACTION_FEE_BASE; - Config::Config() : j_(beast::Journal::getNullSink()), ramSize_(detail::getMemorySize()) { diff --git a/src/ripple/rpc/impl/Status.cpp b/src/ripple/rpc/impl/Status.cpp index 0890daee6f9..e9e64da7ac0 100644 --- a/src/ripple/rpc/impl/Status.cpp +++ b/src/ripple/rpc/impl/Status.cpp @@ -23,8 +23,6 @@ namespace ripple { namespace RPC { -constexpr Status::Code Status::OK; - std::string Status::codeString() const { diff --git a/src/test/unit_test/multi_runner.cpp b/src/test/unit_test/multi_runner.cpp index 5cd69111b42..c1111e4cfeb 100644 --- a/src/test/unit_test/multi_runner.cpp +++ b/src/test/unit_test/multi_runner.cpp @@ -389,11 +389,6 @@ multi_runner_base::add_failures(std::size_t failures) any_failed(failures != 0); } -template -constexpr const char* multi_runner_base::shared_mem_name_; -template -constexpr const char* multi_runner_base::message_queue_name_; - } // namespace detail //------------------------------------------------------------------------------ From df1300fb379169d07969e52d7d2659be12231747 Mon Sep 17 00:00:00 2001 From: CJ Cobb <46455409+cjcobb23@users.noreply.github.com> Date: Tue, 13 Dec 2022 20:09:53 -0500 Subject: [PATCH 072/508] Catch transaction deserialization error in doLedgerGrpc (#4323) * Allow clio to extract ledgers with transactions that can no longer be deserialized. The problem transactions will be skipped. 
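The shape of the fix is the usual one of wrapping the per-ledger transaction loop in a try/catch so that one undeserializable transaction stops further expansion for that ledger but does not fail the whole request. Below is a hedged, self-contained sketch using placeholder types (`Tx`, `deserialize`, `collectTransactions`, and the ledger sequence value are all made up for illustration and are not the rippled or gRPC interfaces):

```
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

// Placeholder stand-in for a stored transaction; illustration only.
struct Tx
{
    std::string blob;
};

// Pretend deserializer that throws on a malformed blob.
std::string
deserialize(Tx const& tx)
{
    if (tx.blob.empty())
        throw std::runtime_error("malformed transaction");
    return tx.blob;
}

// Mirrors the structure of the patch: the loop is wrapped in a try/catch so
// a deserialization failure is logged and the transactions collected so far
// are still returned to the caller.
std::vector<std::string>
collectTransactions(std::vector<Tx> const& txs, std::uint32_t ledgerSeq)
{
    std::vector<std::string> out;
    try
    {
        for (auto const& tx : txs)
            out.push_back(deserialize(tx));
    }
    catch (std::exception const& e)
    {
        std::cerr << "Error deserializing transaction in ledger " << ledgerSeq
                  << ": " << e.what()
                  << "; skipping this and the following transactions\n";
    }
    return out;
}

int
main()
{
    std::vector<Tx> txs{{"payment"}, {""}, {"offer"}};
    auto const good = collectTransactions(txs, 12345);
    return good.size() == 1 ? 0 : 1;
}
```

As in the actual change, transactions after the first bad one are also skipped; a finer-grained variant could move the try/catch inside the loop to skip only the offending entry.
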
--- src/ripple/rpc/handlers/LedgerHandler.cpp | 43 ++++++++++++++--------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/src/ripple/rpc/handlers/LedgerHandler.cpp b/src/ripple/rpc/handlers/LedgerHandler.cpp index b3a441f48a0..6b4fc77367b 100644 --- a/src/ripple/rpc/handlers/LedgerHandler.cpp +++ b/src/ripple/rpc/handlers/LedgerHandler.cpp @@ -133,27 +133,38 @@ doLedgerGrpc(RPC::GRPCContext& context) if (request.transactions()) { - for (auto& i : ledger->txs) + try { - assert(i.first); - if (request.expand()) + for (auto& i : ledger->txs) { - auto txn = - response.mutable_transactions_list()->add_transactions(); - Serializer sTxn = i.first->getSerializer(); - txn->set_transaction_blob(sTxn.data(), sTxn.getLength()); - if (i.second) + assert(i.first); + if (request.expand()) { - Serializer sMeta = i.second->getSerializer(); - txn->set_metadata_blob(sMeta.data(), sMeta.getLength()); + auto txn = response.mutable_transactions_list() + ->add_transactions(); + Serializer sTxn = i.first->getSerializer(); + txn->set_transaction_blob(sTxn.data(), sTxn.getLength()); + if (i.second) + { + Serializer sMeta = i.second->getSerializer(); + txn->set_metadata_blob(sMeta.data(), sMeta.getLength()); + } + } + else + { + auto const& hash = i.first->getTransactionID(); + response.mutable_hashes_list()->add_hashes( + hash.data(), hash.size()); } } - else - { - auto const& hash = i.first->getTransactionID(); - response.mutable_hashes_list()->add_hashes( - hash.data(), hash.size()); - } + } + catch (std::exception const& e) + { + JLOG(context.j.error()) + << __func__ << " - Error deserializing transaction in ledger " + << ledger->info().seq + << " . skipping transaction and following transactions. You " + "should look into this further"; } } From c3a9f3dbf3b2a1430cbc26bb4630e9b84643b354 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Fri, 16 Dec 2022 12:46:22 -0600 Subject: [PATCH 073/508] Use the Conan package manager (#4367) Introduces a conanfile.py (and a Conan recipe for RocksDB) to enable building the package with Conan, choosing more recent default versions of dependencies. It removes almost all of the CMake build files related to dependencies, and the configurations for Travis CI and GitLab CI. A new set of cross-platform build instructions are written in BUILD.md. Includes example GitHub Actions workflow for each of Linux, macOS, Windows. * Test on macos-12 We use the library which was not added to Apple Clang until version 13.1.6. The default Clang on macos-11 (the sometimes current version of macos-latest) is 13.0.0, and the default Clang on macos-12 is 14.0.0. Closes #4223. 
--- .github/workflows/nix.yml | 95 + .github/workflows/windows.yml | 89 + .gitlab-ci.yml | 169 -- .travis.yml | 460 ----- BUILD.md | 351 ++++ Builds/CMake/FindRocksDB.cmake | 62 - Builds/CMake/README.md | 18 - Builds/CMake/RippledCore.cmake | 4 +- Builds/CMake/RippledDocs.cmake | 7 + Builds/CMake/RippledInstall.cmake | 12 - Builds/CMake/RippledInterface.cmake | 15 +- Builds/CMake/RippledMultiConfig.cmake | 2 +- Builds/CMake/RippledNIH.cmake | 33 - Builds/CMake/RippledRelease.cmake | 11 +- Builds/CMake/RippledSanity.cmake | 10 +- Builds/CMake/deps/Boost.cmake | 51 +- Builds/CMake/deps/Ed25519-donna.cmake | 28 - Builds/CMake/deps/FindBoost.cmake | 2170 --------------------- Builds/CMake/deps/Findjemalloc.cmake | 47 - Builds/CMake/deps/Findlibarchive_pc.cmake | 22 - Builds/CMake/deps/Findlz4.cmake | 24 - Builds/CMake/deps/Findsecp256k1.cmake | 24 - Builds/CMake/deps/Findsnappy.cmake | 24 - Builds/CMake/deps/Findsoci.cmake | 17 - Builds/CMake/deps/Findsqlite.cmake | 24 - Builds/CMake/deps/Libarchive.cmake | 163 -- Builds/CMake/deps/Lz4.cmake | 79 - Builds/CMake/deps/Nudb.cmake | 31 - Builds/CMake/deps/OpenSSL.cmake | 48 - Builds/CMake/deps/Postgres.cmake | 70 - Builds/CMake/deps/Protobuf.cmake | 167 +- Builds/CMake/deps/Rocksdb.cmake | 177 -- Builds/CMake/deps/Secp256k1.cmake | 58 - Builds/CMake/deps/Snappy.cmake | 77 - Builds/CMake/deps/Soci.cmake | 165 -- Builds/CMake/deps/Sqlite.cmake | 93 - Builds/CMake/deps/cassandra.cmake | 167 -- Builds/CMake/deps/date.cmake | 18 - Builds/CMake/deps/gRPC.cmake | 344 +--- Builds/CMake/rocks_thirdparty.inc | 15 - Builds/CMake/rocksdb_build_version.cc.in | 71 - Builds/CMake/soci_patch.cmake | 49 - CMakeLists.txt | 83 +- conanfile.py | 149 ++ external/rocksdb/conanfile.py | 193 ++ external/rocksdb/thirdparty.inc | 62 + src/ed25519-donna/CMakeLists.txt | 48 + src/ripple/protocol/impl/PublicKey.cpp | 3 +- src/ripple/protocol/impl/SecretKey.cpp | 2 +- src/ripple/rpc/handlers/WalletPropose.cpp | 2 +- src/secp256k1/CMakeLists.txt | 52 + src/test/core/SociDB_test.cpp | 13 +- 52 files changed, 1171 insertions(+), 4997 deletions(-) create mode 100644 .github/workflows/nix.yml create mode 100644 .github/workflows/windows.yml delete mode 100644 .gitlab-ci.yml delete mode 100644 .travis.yml create mode 100644 BUILD.md delete mode 100644 Builds/CMake/FindRocksDB.cmake delete mode 100644 Builds/CMake/README.md delete mode 100644 Builds/CMake/RippledNIH.cmake delete mode 100644 Builds/CMake/deps/Ed25519-donna.cmake delete mode 100644 Builds/CMake/deps/FindBoost.cmake delete mode 100644 Builds/CMake/deps/Findjemalloc.cmake delete mode 100644 Builds/CMake/deps/Findlibarchive_pc.cmake delete mode 100644 Builds/CMake/deps/Findlz4.cmake delete mode 100644 Builds/CMake/deps/Findsecp256k1.cmake delete mode 100644 Builds/CMake/deps/Findsnappy.cmake delete mode 100644 Builds/CMake/deps/Findsoci.cmake delete mode 100644 Builds/CMake/deps/Findsqlite.cmake delete mode 100644 Builds/CMake/deps/Libarchive.cmake delete mode 100644 Builds/CMake/deps/Lz4.cmake delete mode 100644 Builds/CMake/deps/Nudb.cmake delete mode 100644 Builds/CMake/deps/OpenSSL.cmake delete mode 100644 Builds/CMake/deps/Postgres.cmake delete mode 100644 Builds/CMake/deps/Rocksdb.cmake delete mode 100644 Builds/CMake/deps/Secp256k1.cmake delete mode 100644 Builds/CMake/deps/Snappy.cmake delete mode 100644 Builds/CMake/deps/Soci.cmake delete mode 100644 Builds/CMake/deps/Sqlite.cmake delete mode 100644 Builds/CMake/deps/cassandra.cmake delete mode 100644 Builds/CMake/deps/date.cmake delete mode 100644 
Builds/CMake/rocks_thirdparty.inc delete mode 100644 Builds/CMake/rocksdb_build_version.cc.in delete mode 100644 Builds/CMake/soci_patch.cmake create mode 100644 conanfile.py create mode 100644 external/rocksdb/conanfile.py create mode 100644 external/rocksdb/thirdparty.inc create mode 100644 src/ed25519-donna/CMakeLists.txt create mode 100644 src/secp256k1/CMakeLists.txt diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml new file mode 100644 index 00000000000..a37ea9f132a --- /dev/null +++ b/.github/workflows/nix.yml @@ -0,0 +1,95 @@ +name: nix +on: [push, pull_request] + +jobs: + + test: + strategy: + matrix: + platform: + - ubuntu-latest + - macos-12 + generator: + - Ninja + configuration: + - Release + runs-on: ${{ matrix.platform }} + env: + build_dir: .build + steps: + - name: checkout + uses: actions/checkout@v3 + - name: install Ninja on Linux + if: matrix.generator == 'Ninja' && runner.os == 'Linux' + run: sudo apt install ninja-build + - name: install Ninja on OSX + if: matrix.generator == 'Ninja' && runner.os == 'macOS' + run: brew install ninja + - name: install nproc on OSX + if: runner.os == 'macOS' + run: brew install coreutils + - name: choose Python + uses: actions/setup-python@v3 + with: + python-version: 3.9 + - name: learn Python cache directory + id: pip-cache + run: | + sudo pip install --upgrade pip + echo "::set-output name=dir::$(pip cache dir)" + - name: restore Python cache directory + uses: actions/cache@v2 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-${{ hashFiles('.github/workflows/nix.yml') }} + - name: install Conan + run: pip install wheel 'conan>=1.52.0' + - name: check environment + run: | + echo ${PATH} | tr ':' '\n' + python --version + conan --version + cmake --version + env + - name: configure Conan + run: | + conan profile new default --detect + conan profile update settings.compiler.cppstd=20 default + - name: configure Conan on Linux + if: runner.os == 'Linux' + run: | + conan profile update settings.compiler.libcxx=libstdc++11 default + - name: learn Conan cache directory + id: conan-cache + run: | + echo "::set-output name=dir::$(conan config get storage.path)" + - name: restore Conan cache directory + uses: actions/cache@v2 + with: + path: ${{ steps.conan-cache.outputs.dir }} + key: ${{ hashFiles('~/.conan/profiles/default', 'conanfile.py', 'external/rocksdb/*', '.github/workflows/nix.yml') }} + - name: export RocksDB + run: conan export external/rocksdb + - name: install dependencies + run: | + mkdir ${build_dir} + cd ${build_dir} + conan install .. --build missing --settings build_type=${{ matrix.configuration }} --profile:build default --profile:host default + - name: configure + run: | + cd ${build_dir} + cmake \ + -G ${{ matrix.generator }} \ + -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \ + -DCMAKE_BUILD_TYPE=${{ matrix.configuration }} \ + -Dassert=ON \ + -Dcoverage=OFF \ + -Dreporting=OFF \ + -Dunity=OFF \ + .. 
+ - name: build + run: | + cmake --build ${build_dir} --target rippled --parallel $(nproc) + - name: test + run: | + ${build_dir}/rippled --unittest --unittest-jobs $(nproc) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml new file mode 100644 index 00000000000..1cc6bd5ef8e --- /dev/null +++ b/.github/workflows/windows.yml @@ -0,0 +1,89 @@ +name: windows +# We have disabled this workflow because it fails in our CI Windows +# environment, but we cannot replicate the failure in our personal Windows +# test environments, nor have we gone through the trouble of setting up an +# interactive CI Windows environment. +# We welcome contributions to diagnose or debug the problems on Windows. Until +# then, we leave this tombstone as a reminder that we have tried (but failed) +# to write a reliable test for Windows. +# on: [push, pull_request] + +jobs: + + test: + strategy: + matrix: + generator: + - Visual Studio 16 2019 + configuration: + - Release + runs-on: windows-2019 + env: + build_dir: .build + steps: + - name: checkout + uses: actions/checkout@v3 + - name: choose Python + uses: actions/setup-python@v3 + with: + python-version: 3.9 + - name: learn Python cache directory + id: pip-cache + run: | + pip install --upgrade pip + echo "::set-output name=dir::$(pip cache dir)" + - name: restore Python cache directory + uses: actions/cache@v2 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-${{ hashFiles('.github/workflows/windows.yml') }} + - name: install Conan + run: pip install wheel 'conan>=1.52.0' + - name: check environment + run: | + $env:PATH -split ';' + python --version + conan --version + cmake --version + dir env: + - name: configure Conan + run: | + conan profile new default --detect + conan profile update settings.compiler.cppstd=20 default + conan profile update settings.compiler.runtime=MT default + conan profile update settings.compiler.toolset=v141 default + - name: learn Conan cache directory + id: conan-cache + run: | + echo "::set-output name=dir::$(conan config get storage.path)" + - name: restore Conan cache directory + uses: actions/cache@v2 + with: + path: ${{ steps.conan-cache.outputs.dir }} + key: ${{ hashFiles('~/.conan/profiles/default', 'conanfile.py', 'external/rocksdb/*', '.github/workflows/windows.yml') }} + - name: export RocksDB + run: conan export external/rocksdb + - name: install dependencies + run: | + mkdir $env:build_dir + cd $env:build_dir + conan install .. --build missing --settings build_type=${{ matrix.configuration }} + - name: configure + run: | + $env:build_dir + cd $env:build_dir + pwd + ls + cmake ` + -G "${{ matrix.generator }}" ` + -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake ` + -Dassert=ON ` + -Dreporting=OFF ` + -Dunity=OFF ` + .. + - name: build + run: | + cmake --build $env:build_dir --target rippled --config ${{ matrix.configuration }} --parallel $env:NUMBER_OF_PROCESSORS + - name: test + run: | + & "$env:build_dir\${{ matrix.configuration }}\rippled.exe" --unittest diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml deleted file mode 100644 index 02475adf0ff..00000000000 --- a/.gitlab-ci.yml +++ /dev/null @@ -1,169 +0,0 @@ -# I don't know what the minimum size is, but we cannot build on t3.micro. - -# TODO: Factor common builds between different tests. - -# The parameters for our job matrix: -# -# 1. Generator (Make, Ninja, MSBuild) -# 2. Compiler (GCC, Clang, MSVC) -# 3. Build type (Debug, Release) -# 4. Definitions (-Dunity=OFF, -Dassert=ON, ...) 
- - -.job_linux_build_test: - only: - variables: - - $CI_PROJECT_URL =~ /^https?:\/\/gitlab.com\// - stage: build - tags: - - linux - - c5.2xlarge - image: thejohnfreeman/rippled-build-ubuntu:4b73694e07f0 - script: - - bin/ci/build.sh - - bin/ci/test.sh - cache: - # Use a different key for each unique combination of (generator, compiler, - # build type). Caches are stored as `.zip` files; they are not merged. - # Generate a new key whenever you want to bust the cache, e.g. when the - # dependency versions have been bumped. - # By default, jobs pull the cache. Only a few specially chosen jobs update - # the cache (with policy `pull-push`); one for each unique combination of - # (generator, compiler, build type). - policy: pull - paths: - - .nih_c/ - -'build+test Make GCC Debug': - extends: .job_linux_build_test - variables: - GENERATOR: Unix Makefiles - COMPILER: gcc - BUILD_TYPE: Debug - cache: - key: 62ada41c-fc9e-4949-9533-736d4d6512b6 - policy: pull-push - -'build+test Ninja GCC Debug': - extends: .job_linux_build_test - variables: - GENERATOR: Ninja - COMPILER: gcc - BUILD_TYPE: Debug - cache: - key: 1665d3eb-6233-4eef-9f57-172636899faa - policy: pull-push - -'build+test Ninja GCC Debug -Dstatic=OFF': - extends: .job_linux_build_test - variables: - GENERATOR: Ninja - COMPILER: gcc - BUILD_TYPE: Debug - CMAKE_ARGS: '-Dstatic=OFF' - cache: - key: 1665d3eb-6233-4eef-9f57-172636899faa - -'build+test Ninja GCC Debug -Dstatic=OFF -DBUILD_SHARED_LIBS=ON': - extends: .job_linux_build_test - variables: - GENERATOR: Ninja - COMPILER: gcc - BUILD_TYPE: Debug - CMAKE_ARGS: '-Dstatic=OFF -DBUILD_SHARED_LIBS=ON' - cache: - key: 1665d3eb-6233-4eef-9f57-172636899faa - -'build+test Ninja GCC Debug -Dunity=OFF': - extends: .job_linux_build_test - variables: - GENERATOR: Ninja - COMPILER: gcc - BUILD_TYPE: Debug - CMAKE_ARGS: '-Dunity=OFF' - cache: - key: 1665d3eb-6233-4eef-9f57-172636899faa - -'build+test Ninja GCC Release -Dassert=ON': - extends: .job_linux_build_test - variables: - GENERATOR: Ninja - COMPILER: gcc - BUILD_TYPE: Release - CMAKE_ARGS: '-Dassert=ON' - cache: - key: c45ec125-9625-4c19-acf7-4e889d5f90bd - policy: pull-push - -'build+test(manual) Ninja GCC Release -Dassert=ON': - extends: .job_linux_build_test - variables: - GENERATOR: Ninja - COMPILER: gcc - BUILD_TYPE: Release - CMAKE_ARGS: '-Dassert=ON' - MANUAL_TEST: 'true' - cache: - key: c45ec125-9625-4c19-acf7-4e889d5f90bd - -'build+test Make clang Debug': - extends: .job_linux_build_test - variables: - GENERATOR: Unix Makefiles - COMPILER: clang - BUILD_TYPE: Debug - cache: - key: bf578dc2-5277-4580-8de5-6b9523118b19 - policy: pull-push - -'build+test Ninja clang Debug': - extends: .job_linux_build_test - variables: - GENERATOR: Ninja - COMPILER: clang - BUILD_TYPE: Debug - cache: - key: 762514c5-3d4c-4c7c-8da2-2df9d8839cbe - policy: pull-push - -'build+test Ninja clang Debug -Dunity=OFF': - extends: .job_linux_build_test - variables: - GENERATOR: Ninja - COMPILER: clang - BUILD_TYPE: Debug - CMAKE_ARGS: '-Dunity=OFF' - cache: - key: 762514c5-3d4c-4c7c-8da2-2df9d8839cbe - -'build+test Ninja clang Debug -Dunity=OFF -Dsan=address': - extends: .job_linux_build_test - variables: - GENERATOR: Ninja - COMPILER: clang - BUILD_TYPE: Debug - CMAKE_ARGS: '-Dunity=OFF -Dsan=address' - CONCURRENT_TESTS: 1 - cache: - key: 762514c5-3d4c-4c7c-8da2-2df9d8839cbe - -'build+test Ninja clang Debug -Dunity=OFF -Dsan=undefined': - extends: .job_linux_build_test - variables: - GENERATOR: Ninja - COMPILER: clang - BUILD_TYPE: Debug - CMAKE_ARGS: 
'-Dunity=OFF -Dsan=undefined' - cache: - key: 762514c5-3d4c-4c7c-8da2-2df9d8839cbe - -'build+test Ninja clang Release -Dassert=ON': - extends: .job_linux_build_test - variables: - GENERATOR: Ninja - COMPILER: clang - BUILD_TYPE: Release - CMAKE_ARGS: '-Dassert=ON' - cache: - key: 7751be37-2358-4f08-b1d0-7e72e0ad266d - policy: pull-push diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index d8cbf43448a..00000000000 --- a/.travis.yml +++ /dev/null @@ -1,460 +0,0 @@ -# There is a known issue where Travis will have trouble fetching the cache, -# particularly on non-linux builds. Try restarting the individual build -# (probably will not be necessary in the "windep" stages) if the end of the -# log looks like: -# -#--------------------------------------- -# attempting to download cache archive -# fetching travisorder/cache--windows-1809-containers-f2bf1c76c7fb4095c897a4999bd7c9b3fb830414dfe91f33d665443b52416d39--compiler-gpp.tgz -# found cache -# adding C:/Users/travis/_cache to cache -# creating directory C:/Users/travis/_cache -# No output has been received in the last 10m0s, this potentially indicates a stalled build or something wrong with the build itself. -# Check the details on how to adjust your build configuration on: https://docs.travis-ci.com/user/common-build-problems/#build-times-out-because-no-output-was-received -# The build has been terminated -#--------------------------------------- - -language: cpp -dist: bionic - -services: - - docker - -stages: - - windep-vcpkg - - windep-boost - - build - -env: - global: - - DOCKER_IMAGE="rippleci/rippled-ci-builder:2020-01-08" - - CMAKE_EXTRA_ARGS="-Dwerr=ON -Dwextra=ON" - - NINJA_BUILD=true - # change this if we get more VM capacity - - MAX_TIME_MIN=80 - - CACHE_DIR=${TRAVIS_HOME}/_cache - - NIH_CACHE_ROOT=${CACHE_DIR}/nih_c - - PARALLEL_TESTS=true - # this is NOT used by linux container based builds (which already have boost installed) - - BOOST_URL='https://boostorg.jfrog.io/artifactory/main/release/1.75.0/source/boost_1_75_0.tar.gz' - # Alternate dowload location - - BOOST_URL2='https://downloads.sourceforge.net/project/boost/boost/1.75.0/boost_1_75_0.tar.bz2?r=&ts=1594393912&use_mirror=newcontinuum' - # Travis downloader doesn't seem to have updated certs. Using this option - # introduces obvious security risks, but they're Travis's risks. - # Note that this option is only used if the "normal" build fails. - - BOOST_WGET_OPTIONS='--no-check-certificate' - - VCPKG_DIR=${CACHE_DIR}/vcpkg - - USE_CCACHE=true - - CCACHE_BASEDIR=${TRAVIS_HOME}" - - CCACHE_NOHASHDIR=true - - CCACHE_DIR=${CACHE_DIR}/ccache - -before_install: - - export NUM_PROCESSORS=$(nproc) - - echo "NUM PROC is ${NUM_PROCESSORS}" - - if [ "$(uname)" = "Linux" ] ; then docker pull ${DOCKER_IMAGE}; fi - - if [ "${MATRIX_EVAL}" != "" ] ; then eval "${MATRIX_EVAL}"; fi - - if [ "${CMAKE_ADD}" != "" ] ; then export CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} ${CMAKE_ADD}"; fi - - bin/ci/ubuntu/travis-cache-start.sh - -matrix: - fast_finish: true - allow_failures: - # TODO these need more investigation - # - # there are a number of UBs caught currently that need triage - - name: ubsan, clang-8 - # this one often runs out of memory: - - name: manual tests, gcc-8, release - # The Windows build may fail if any of the dependencies fail, but - # allow the rest of the builds to continue. They may succeed if the - # dependency is already cached. These do not need to be retried if - # _any_ of the Windows builds succeed. 
- - stage: windep-vcpkg - - stage: windep-boost - - # https://docs.travis-ci.com/user/build-config-yaml#usage-of-yaml-anchors-and-aliases - include: - # debug builds - - &linux - stage: build - if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_linux/ - compiler: gcc-8 - name: gcc-8, debug - env: - - MATRIX_EVAL="CC=gcc-8 && CXX=g++-8" - - BUILD_TYPE=Debug - script: - - sudo chmod -R a+rw ${CACHE_DIR} - - ccache -s - - travis_wait ${MAX_TIME_MIN} bin/ci/ubuntu/build-in-docker.sh - - ccache -s - - <<: *linux - compiler: clang-8 - name: clang-8, debug - env: - - MATRIX_EVAL="CC=clang-8 && CXX=clang++-8" - - BUILD_TYPE=Debug - - <<: *linux - compiler: clang-8 - name: reporting, clang-8, debug - env: - - MATRIX_EVAL="CC=clang-8 && CXX=clang++-8" - - BUILD_TYPE=Debug - - CMAKE_ADD="-Dreporting=ON" - # coverage builds - - <<: *linux - if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_linux/ OR commit_message =~ /travis_run_cov/ - compiler: gcc-8 - name: coverage, gcc-8 - env: - - MATRIX_EVAL="CC=gcc-8 && CXX=g++-8" - - BUILD_TYPE=Debug - - CMAKE_ADD="-Dcoverage=ON" - - TARGET=coverage_report - - SKIP_TESTS=true - - <<: *linux - if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_linux/ OR commit_message =~ /travis_run_cov/ - compiler: clang-8 - name: coverage, clang-8 - env: - - MATRIX_EVAL="CC=clang-8 && CXX=clang++-8" - - BUILD_TYPE=Debug - - CMAKE_ADD="-Dcoverage=ON" - - TARGET=coverage_report - - SKIP_TESTS=true - # test-free builds - - <<: *linux - if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_linux/ - compiler: gcc-8 - name: no-tests-unity, gcc-8 - env: - - MATRIX_EVAL="CC=gcc-8 && CXX=g++-8" - - BUILD_TYPE=Debug - - CMAKE_ADD="-Dtests=OFF" - - SKIP_TESTS=true - - <<: *linux - if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_linux/ - compiler: clang-8 - name: no-tests-non-unity, clang-8 - env: - - MATRIX_EVAL="CC=clang-8 && CXX=clang++-8" - - BUILD_TYPE=Debug - - CMAKE_ADD="-Dtests=OFF -Dunity=OFF" - - SKIP_TESTS=true - # nounity - - <<: *linux - if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_linux/ OR commit_message =~ /travis_run_nounity/ - compiler: gcc-8 - name: non-unity, gcc-8 - env: - - MATRIX_EVAL="CC=gcc-8 && CXX=g++-8" - - BUILD_TYPE=Debug - - CMAKE_ADD="-Dunity=OFF" - - <<: *linux - if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_linux/ OR commit_message =~ /travis_run_nounity/ - compiler: clang-8 - name: non-unity, clang-8 - env: - - MATRIX_EVAL="CC=clang-8 && CXX=clang++-8" - - BUILD_TYPE=Debug - - CMAKE_ADD="-Dunity=OFF" - # manual tests - - <<: *linux - if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_linux/ OR commit_message =~ /travis_run_man/ - compiler: gcc-8 - name: manual tests, gcc-8, debug - env: - - MATRIX_EVAL="CC=gcc-8 && CXX=g++-8" - - BUILD_TYPE=Debug - - MANUAL_TESTS=true - # manual tests - - <<: *linux - if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_linux/ OR commit_message =~ /travis_run_man/ - compiler: gcc-8 - name: manual tests, gcc-8, release - env: - - MATRIX_EVAL="CC=gcc-8 && CXX=g++-8" - - BUILD_TYPE=Release - - CMAKE_ADD="-Dassert=ON -Dunity=OFF" - - MANUAL_TESTS=true - # release builds - - <<: *linux - if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_linux/ OR commit_message =~ /travis_run_release/ - compiler: gcc-8 - name: gcc-8, release - env: - - MATRIX_EVAL="CC=gcc-8 && CXX=g++-8" - - BUILD_TYPE=Release - - CMAKE_ADD="-Dassert=ON -Dunity=OFF" - - 
<<: *linux - if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_linux/ OR commit_message =~ /travis_run_release/ - compiler: clang-8 - name: clang-8, release - env: - - MATRIX_EVAL="CC=clang-8 && CXX=clang++-8" - - BUILD_TYPE=Release - - CMAKE_ADD="-Dassert=ON" - # asan - - <<: *linux - if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_linux/ OR commit_message =~ /travis_run_san/ - compiler: clang-8 - name: asan, clang-8 - env: - - MATRIX_EVAL="CC=clang-8 && CXX=clang++-8" - - BUILD_TYPE=Release - - CMAKE_ADD="-Dsan=address" - - ASAN_OPTIONS="print_stats=true:atexit=true" - #- LSAN_OPTIONS="verbosity=1:log_threads=1" - - PARALLEL_TESTS=false - # ubsan - - <<: *linux - if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_linux/ OR commit_message =~ /travis_run_san/ - compiler: clang-8 - name: ubsan, clang-8 - env: - - MATRIX_EVAL="CC=clang-8 && CXX=clang++-8" - - BUILD_TYPE=Release - - CMAKE_ADD="-Dsan=undefined" - # once we can run clean under ubsan, add halt_on_error=1 to options below - - UBSAN_OPTIONS="print_stacktrace=1:report_error_type=1" - - PARALLEL_TESTS=false - # tsan - # current tsan failure *might* be related to: - # https://github.com/google/sanitizers/issues/1104 - # but we can't get it to run, so leave it disabled for now - # - <<: *linux - # if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_linux/ OR commit_message =~ /travis_run_san/ - # compiler: clang-8 - # name: tsan, clang-8 - # env: - # - MATRIX_EVAL="CC=clang-8 && CXX=clang++-8" - # - BUILD_TYPE=Release - # - CMAKE_ADD="-Dsan=thread" - # - TSAN_OPTIONS="history_size=3 external_symbolizer_path=/usr/bin/llvm-symbolizer verbosity=1" - # - PARALLEL_TESTS=false - # dynamic lib builds - - <<: *linux - compiler: gcc-8 - name: non-static, gcc-8 - env: - - MATRIX_EVAL="CC=gcc-8 && CXX=g++-8" - - BUILD_TYPE=Debug - - CMAKE_ADD="-Dstatic=OFF" - - <<: *linux - compiler: gcc-8 - name: non-static + BUILD_SHARED_LIBS, gcc-8 - env: - - MATRIX_EVAL="CC=gcc-8 && CXX=g++-8" - - BUILD_TYPE=Debug - - CMAKE_ADD="-Dstatic=OFF -DBUILD_SHARED_LIBS=ON" - # makefile - - <<: *linux - compiler: gcc-8 - name: makefile generator, gcc-8 - env: - - MATRIX_EVAL="CC=gcc-8 && CXX=g++-8" - - BUILD_TYPE=Debug - - NINJA_BUILD=false - # misc alternative compilers - - <<: *linux - compiler: gcc-9 - name: gcc-9 - env: - - MATRIX_EVAL="CC=gcc-9 && CXX=g++-9" - - BUILD_TYPE=Debug - - <<: *linux - compiler: clang-9 - name: clang-9, debug - env: - - MATRIX_EVAL="CC=clang-9 && CXX=clang++-9" - - BUILD_TYPE=Debug - - <<: *linux - compiler: clang-9 - name: clang-9, release - env: - - MATRIX_EVAL="CC=clang-9 && CXX=clang++-9" - - BUILD_TYPE=Release - # verify build with min version of cmake - - <<: *linux - compiler: gcc-8 - name: min cmake version - env: - - MATRIX_EVAL="CC=gcc-8 && CXX=g++-8" - - BUILD_TYPE=Debug - - CMAKE_EXE=/opt/local/cmake/bin/cmake - - SKIP_TESTS=true - # validator keys project as subproj of rippled - - <<: *linux - if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_vkeys/ - compiler: gcc-8 - name: validator-keys - env: - - MATRIX_EVAL="CC=gcc-8 && CXX=g++-8" - - BUILD_TYPE=Debug - - CMAKE_ADD="-Dvalidator_keys=ON" - - TARGET=validator-keys - # macos - - &macos - if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_mac/ - stage: build - os: osx - osx_image: xcode13.1 - name: xcode13.1, debug - env: - # put NIH in non-cache location since it seems to - # cause failures when homebrew updates - - NIH_CACHE_ROOT=${TRAVIS_BUILD_DIR}/nih_c - - 
BLD_CONFIG=Debug - - TEST_EXTRA_ARGS="" - - BOOST_ROOT=${CACHE_DIR}/boost_1_75_0 - - >- - CMAKE_ADD=" - -DBOOST_ROOT=${BOOST_ROOT}/_INSTALLED_ - -DBoost_ARCHITECTURE=-x64 - -DBoost_NO_SYSTEM_PATHS=ON - -DCMAKE_VERBOSE_MAKEFILE=ON" - addons: - homebrew: - packages: - - protobuf - - grpc - - pkg-config - - bash - - ninja - - cmake - - wget - - zstd - - libarchive - - openssl@1.1 - update: true - install: - - export OPENSSL_ROOT=$(brew --prefix openssl@1.1) - - travis_wait ${MAX_TIME_MIN} Builds/containers/shared/install_boost.sh - - brew uninstall --ignore-dependencies boost - script: - - mkdir -p build.macos && cd build.macos - - cmake -G Ninja ${CMAKE_EXTRA_ARGS} -DCMAKE_BUILD_TYPE=${BLD_CONFIG} .. - - travis_wait ${MAX_TIME_MIN} cmake --build . --parallel --verbose - - ./rippled --unittest --quiet --unittest-log --unittest-jobs ${NUM_PROCESSORS} ${TEST_EXTRA_ARGS} - - <<: *macos - name: xcode13.1, release - before_script: - - export BLD_CONFIG=Release - - export CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -Dassert=ON" - - <<: *macos - name: ipv6 (macos) - before_script: - - export TEST_EXTRA_ARGS="--unittest-ipv6" - - <<: *macos - osx_image: xcode13.1 - name: xcode13.1, debug - # windows - - &windows - if: commit_message !~ /travis_run_/ OR commit_message =~ /travis_run_win/ - os: windows - env: - # put NIH in a non-cached location until - # we come up with a way to stabilize that - # cache on windows (minimize incremental changes) - - CACHE_NAME=win_01 - - NIH_CACHE_ROOT=${TRAVIS_BUILD_DIR}/nih_c - - VCPKG_DEFAULT_TRIPLET="x64-windows-static" - - MATRIX_EVAL="CC=cl.exe && CXX=cl.exe" - - BOOST_ROOT=${CACHE_DIR}/boost_1_75 - - >- - CMAKE_ADD=" - -DCMAKE_PREFIX_PATH=${BOOST_ROOT}/_INSTALLED_ - -DBOOST_ROOT=${BOOST_ROOT}/_INSTALLED_ - -DBoost_ROOT=${BOOST_ROOT}/_INSTALLED_ - -DBoost_DIR=${BOOST_ROOT}/_INSTALLED_/lib/cmake/Boost-1.75.0 - -DBoost_COMPILER=vc141 - -DCMAKE_VERBOSE_MAKEFILE=ON - -DCMAKE_TOOLCHAIN_FILE=${VCPKG_DIR}/scripts/buildsystems/vcpkg.cmake - -DVCPKG_TARGET_TRIPLET=x64-windows-static" - stage: windep-vcpkg - name: prereq-vcpkg - install: - - choco upgrade cmake.install - - choco install ninja visualstudio2017-workload-vctools -y - script: - - df -h - - env - - travis_wait ${MAX_TIME_MIN} bin/sh/install-vcpkg.sh openssl - - travis_wait ${MAX_TIME_MIN} bin/sh/install-vcpkg.sh grpc - - travis_wait ${MAX_TIME_MIN} bin/sh/install-vcpkg.sh libarchive[lz4] - # TBD consider rocksdb via vcpkg if/when we can build with the - # vcpkg version - # - travis_wait ${MAX_TIME_MIN} bin/sh/install-vcpkg.sh rocksdb[snappy,lz4,zlib] - - <<: *windows - stage: windep-boost - name: prereq-keep-boost - install: - - choco upgrade cmake.install - - choco install ninja visualstudio2017-workload-vctools -y - - choco install visualstudio2019buildtools visualstudio2019community visualstudio2019-workload-vctools -y - script: - - export BOOST_TOOLSET=msvc-14.1 - - travis_wait ${MAX_TIME_MIN} Builds/containers/shared/install_boost.sh - - &windows-bld - <<: *windows - stage: build - name: windows, debug - before_script: - - export BLD_CONFIG=Debug - script: - - df -h - - . ./bin/sh/setup-msvc.sh - - mkdir -p build.ms && cd build.ms - - cmake -G Ninja ${CMAKE_EXTRA_ARGS} -DCMAKE_BUILD_TYPE=${BLD_CONFIG} .. - - travis_wait ${MAX_TIME_MIN} cmake --build . 
--parallel --verbose - # override num procs to force fewer unit test jobs - - export NUM_PROCESSORS=2 - - travis_wait ${MAX_TIME_MIN} ./rippled.exe --unittest --quiet --unittest-log --unittest-jobs ${NUM_PROCESSORS} - - <<: *windows-bld - name: windows, release - before_script: - - export BLD_CONFIG=Release - - <<: *windows-bld - name: windows, visual studio, debug - script: - - mkdir -p build.ms && cd build.ms - - export CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DCMAKE_GENERATOR_TOOLSET=host=x64" - - cmake -G "Visual Studio 15 2017 Win64" ${CMAKE_EXTRA_ARGS} .. - - export DESTDIR=${PWD}/_installed_ - - travis_wait ${MAX_TIME_MIN} cmake --build . --parallel --verbose --config ${BLD_CONFIG} --target install - # override num procs to force fewer unit test jobs - - export NUM_PROCESSORS=2 - - >- - travis_wait ${MAX_TIME_MIN} "./_installed_/Program Files/rippled/bin/rippled.exe" --unittest --quiet --unittest-log --unittest-jobs ${NUM_PROCESSORS} - - <<: *windows-bld - name: windows, vc2019 - install: - - choco upgrade cmake.install - - choco install ninja -y - - choco install visualstudio2019buildtools visualstudio2019community visualstudio2019-workload-vctools -y - before_script: - - export BLD_CONFIG=Release - # we want to use the boost build from cache, which was built using the - # vs2017 compiler so we need to specify the Boost_COMPILER. BUT, we - # can't use the cmake config files generated by boost b/c they are - # broken for Boost_COMPILER override, so we need to specify both - # Boost_NO_BOOST_CMAKE and a slightly different Boost_COMPILER string - # to make the legacy find module work for us. If the cmake configs are - # fixed in the future, it should be possible to remove these - # workarounds. - - export CMAKE_EXTRA_ARGS="${CMAKE_EXTRA_ARGS} -DBoost_NO_BOOST_CMAKE=ON -DBoost_COMPILER=-vc141" - -before_cache: - - if [ $(uname) = "Linux" ] ; then SUDO="sudo"; else SUDO=""; fi - - cd ${TRAVIS_HOME} - - if [ -f cache_ignore.tar ] ; then $SUDO tar xvf cache_ignore.tar; fi - - cd ${TRAVIS_BUILD_DIR} - -cache: - timeout: 900 - directories: - - $CACHE_DIR - -notifications: - email: false diff --git a/BUILD.md b/BUILD.md new file mode 100644 index 00000000000..c686c0a51b4 --- /dev/null +++ b/BUILD.md @@ -0,0 +1,351 @@ +## Branches + +For a stable release, choose the `master` branch or one of the [tagged +releases](https://github.com/ripple/rippled/releases). + +``` +git checkout master +``` + +For the latest release candidate, choose the `release` branch. + +``` +git checkout release +``` + +If you are contributing or want the latest set of untested features, +then use the `develop` branch. + +``` +git checkout develop +``` + + +## Platforms + +We do not recommend Windows for rippled production use at this time. Currently, +the Ubuntu platform has received the highest level of quality assurance, +testing, and support. Additionally, 32-bit Windows development is not supported. + +Visual Studio 2022 is not yet supported. +This is because rippled is not compatible with [Boost][] versions 1.78 or 1.79, +but Conan cannot build Boost versions released earlier than them with VS 2022. +We expect that rippled will be compatible with Boost 1.80, which should be +released in August 2022. +Until then, we advise Windows developers to use Visual Studio 2019. + +[Boost]: https://www.boost.org/ + + +## Prerequisites + +To build this package, you will need Python (>= 3.7), +[Conan][] (>= 1.52), and [CMake][] (>= 3.16). +If you are unfamiliar with Conan, +there is a crash course at the end of this document. 
+ +[Conan]: https://conan.io/downloads.html +[CMake]: https://cmake.org/download/ + +You'll need to compile in the C++20 dialect: + +``` +conan profile update settings.cppstd=20 default +``` + +Linux developers will commonly have a default Conan [profile][] that compiles +with GCC and links with libstdc++. +If you are linking with libstdc++ (see profile setting `compiler.libcxx`), +then you will need to choose the `libstdc++11` ABI: + +``` +conan profile update settings.compiler.libcxx=libstdc++11 default +``` + +We find it necessary to use the x64 native build tools on Windows. +An easy way to do that is to run the shortcut "x64 Native Tools Command +Prompt" for the version of Visual Studio that you have installed. + +Windows developers must build rippled and its dependencies for the x64 +architecture: + +``` +conan profile update settings.arch=x86_64 default +``` + + +## How to build and test + +Let's start with a couple of examples of common workflows. +The first is for a single-configuration generator (e.g. Unix Makefiles) on +Linux or MacOS: + +``` +conan export external/rocksdb +mkdir .build +cd .build +conan install .. --output-folder . --build missing --settings build_type=Release +cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release .. +cmake --build . +./rippled --unittest +``` + +The second is for a multi-configuration generator (e.g. Visual Studio) on +Windows: + +``` +conan export external/rocksdb +mkdir .build +cd .build +conan install .. --output-folder . --build missing --settings build_type=Release --settings compiler.runtime=MT +conan install .. --output-folder . --build missing --settings build_type=Debug --settings compiler.runtime=MTd +cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake .. +cmake --build . --config Release +cmake --build . --config Debug +./Release/rippled --unittest +./Debug/rippled --unittest +``` + +Here we explain the individual steps: + +1. Export our [Conan recipe for RocksDB](./external/rocksdb). + + It builds version 6.27.3, which, as of July 8, 2022, + is not available in [Conan Center](https://conan.io/center/rocksdb). + +1. Create a build directory (and move into it). + + You can choose any name you want. + + Conan will generate some files in what it calls the "install folder". + These files are implementation details that you don't need to worry about. + By default, the install folder is your current working directory. + If you don't move into your build directory before calling Conan, + then you may be annoyed to see it polluting your project root directory + with these files. + To make Conan put them in your build directory, + you'll have to add the option + `--install-folder` or `-if` to every `conan install` command. + +1. Generate CMake files for every configuration you want to build. + + For a single-configuration generator, e.g. `Unix Makefiles` or `Ninja`, + you only need to run this command once. + For a multi-configuration generator, e.g. `Visual Studio`, you may want to + run it more than once. + + Each of these commands should have a different `build_type` setting. + A second command with the same `build_type` setting will just overwrite + the files generated by the first. + You can pass the build type on the command line with `--settings + build_type=$BUILD_TYPE` or in the profile itself, under the section + `[settings]`, with the key `build_type`. 
+ + If you are using a Microsoft Visual C++ compiler, then you will need to + ensure consistency between the `build_type` setting and the + `compiler.runtime` setting. + When `build_type` is `Release`, `compiler.runtime` should be `MT`. + When `build_type` is `Debug`, `compiler.runtime` should be `MTd`. + +1. Configure CMake once. + + For all choices of generator, pass the toolchain file generated by Conan. + It will be located at + `$OUTPUT_FOLDER/build/generators/conan_toolchain.cmake`. + If you are using a single-configuration generator, then pass the CMake + variable [`CMAKE_BUILD_TYPE`][build_type] and make sure it matches the + `build_type` setting you chose in the previous step. + + This step is where you may pass build options for rippled. + +1. Build rippled. + + For a multi-configuration generator, you must pass the option `--config` + to select the build configuration. + For a single-configuration generator, it will build whatever configuration + you passed for `CMAKE_BUILD_TYPE`. + +5. Test rippled. + + The exact location of rippled in your build directory + depends on your choice of CMake generator. + You can run unit tests by passing `--unittest`. + Pass `--help` to see the rest of the command line options. + + +### Options + +The `unity` option allows you to select between [unity][5] and non-unity +builds. +Unity builds may be faster for the first build (at the cost of much +more memory) since they concatenate sources into fewer translation +units. +Non-unity builds may be faster for incremental builds, and can be helpful for +detecting `#include` omissions. + +Below are the most commonly used options, +with their default values in parentheses. + +- `assert` (OFF): Enable assertions. +- `reporting` (OFF): Build the reporting mode feature. +- `tests` (ON): Build tests. +- `unity` (ON): Configure a [unity build][5]. + + +### Troubleshooting + +If you get a linker error like the one below suggesting that you recompile +Boost with position-independent code, the reason is most likely that Conan +downloaded a bad binary distribution of the dependency. +For now, this seems to be a [bug][1] in Conan just for Boost 1.77.0 compiled +with GCC for Linux. +The solution is to build the dependency locally by passing `--build boost` +when calling `conan install`. + +``` +/usr/bin/ld.gold: error: /home/username/.conan/data/boost/1.77.0/_/_/package/dc8aedd23a0f0a773a5fcdcfe1ae3e89c4205978/lib/libboost_container.a(alloc_lib.o): requires unsupported dynamic reloc 11; recompile with -fPIC +``` + + +## How to add a dependency + +If you want to experiment with a new package, here are the steps to get it +working: + +1. Search for the package on [Conan Center](https://conan.io/center/). +1. In [`conanfile.py`](./conanfile.py): + 1. Add a version of the package to the `requires` property. + 1. Change any default options for the package by adding them to the + `default_options` property (with syntax `'$package:$option': $value`) +1. In [`CMakeLists.txt`](./CMakeLists.txt): + 1. Add a call to `find_package($package REQUIRED)`. + 1. Link a library from the package to the target `ripple_libs` (search for + the existing call to `target_link_libraries(ripple_libs INTERFACE ...)`). +1. Start coding! Don't forget to include whatever headers you need from the + package. + + +## A crash course in CMake and Conan + +To better understand how to use Conan, +we should first understand _why_ we use Conan, +and to understand that, +we need to understand how we use CMake. 
+ + +### CMake + +Technically, you don't need CMake to build this project. +You could manually compile every translation unit into an object file, +using the right compiler options, +and then manually link all those objects together, +using the right linker options. +However, that is very tedious and error-prone, +which is why we lean on tools like CMake. + +We have written CMake configuration files +([`CMakeLists.txt`](./CMakeLists.txt) and friends) +for this project so that CMake can be used to correctly compile and link +all of the translation units in it. +Or rather, CMake will generate files for a separate build system +(e.g. Make, Ninja, Visual Studio, Xcode, etc.) +that compile and link all of the translation units. +Even then, CMake has parameters, some of which are platform-specific. +In CMake's parlance, parameters are specially-named **variables** like +[`CMAKE_BUILD_TYPE`][build_type] or +[`CMAKE_MSVC_RUNTIME_LIBRARY`][runtime]. +Parameters include: + +- what build system to generate files for +- where to find the compiler and linker +- where to find dependencies, e.g. libraries and headers +- how to link dependencies, e.g. any special compiler or linker flags that + need to be used with them, including preprocessor definitions +- how to compile translation units, e.g. with optimizations, debug symbols, + position-independent code, etc. +- on Windows, which runtime library to link with + +For some of these parameters, like the build system and compiler, +CMake goes through a complicated search process to choose default values. +For others, like the dependencies, +_we_ had written in the CMake configuration files of this project +our own complicated process to choose defaults. +For most developers, things "just worked"... until they didn't, and then +you were left trying to debug one of these complicated processes, instead of +choosing and manually passing the parameter values yourself. + +You can pass every parameter to CMake on the command line, +but writing out these parameters every time we want to configure CMake is +a pain. +Most humans prefer to put them into a configuration file, once, that +CMake can read every time it is configured. +For CMake, that file is a [toolchain file][toolchain]. + + +### Conan + +These next few paragraphs on Conan are going to read much like the ones above +for CMake. + +Technically, you don't need Conan to build this project. +You could manually download, configure, build, and install all of the +dependencies yourself, and then pass all of the parameters necessary for +CMake to link to those dependencies. +To guarantee ABI compatibility, you must be sure to use the same set of +compiler and linker options for all dependencies _and_ this project. +However, that is very tedious and error-prone, which is why we lean on tools +like Conan. + +We have written a Conan configuration file ([`conanfile.py`](./conanfile.py)) +so that Conan can be used to correctly download, configure, build, and install +all of the dependencies for this project, +using a single set of compiler and linker options for all of them. +It generates files that contain almost all of the parameters that CMake +expects. +Those files include: + +- A single toolchain file. +- For every dependency, a CMake [package configuration file][pcf], + [package version file][pvf], and for every build type, a package + targets file. + Together, these files implement version checking and define `IMPORTED` + targets for the dependencies. 
+ +The toolchain file itself amends the search path +([`CMAKE_PREFIX_PATH`][prefix_path]) so that [`find_package()`][find_package] +will [discover][search] the generated package configuration files. + +**Nearly all we must do to properly configure CMake is pass the toolchain +file.** +What CMake parameters are left out? +You'll still need to pick a build system generator, +and if you choose a single-configuration generator, +you'll need to pass the `CMAKE_BUILD_TYPE`, +which should match the `build_type` setting you gave to Conan. + +Even then, Conan has parameters, some of which are platform-specific. +In Conan's parlance, parameters are either settings or options. +**Settings** are shared by all packages, e.g. the build type. +**Options** are specific to a given package, e.g. whether to build and link +OpenSSL as a shared library. + +For settings, Conan goes through a complicated search process to choose +defaults. +For options, each package recipe defines its own defaults. + +You can pass every parameter to Conan on the command line, +but it is more convenient to put them in a [profile][profile]. +**All we must do to properly configure Conan is edit and pass the profile.** + + +[1]: https://github.com/conan-io/conan-center-index/issues/13168 +[5]: https://en.wikipedia.org/wiki/Unity_build +[build_type]: https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html +[runtime]: https://cmake.org/cmake/help/latest/variable/CMAKE_MSVC_RUNTIME_LIBRARY.html +[toolchain]: https://cmake.org/cmake/help/latest/manual/cmake-toolchains.7.html +[pcf]: https://cmake.org/cmake/help/latest/manual/cmake-packages.7.html#package-configuration-file +[pvf]: https://cmake.org/cmake/help/latest/manual/cmake-packages.7.html#package-version-file +[find_package]: https://cmake.org/cmake/help/latest/command/find_package.html +[search]: https://cmake.org/cmake/help/latest/command/find_package.html#search-procedure +[prefix_path]: https://cmake.org/cmake/help/latest/variable/CMAKE_PREFIX_PATH.html +[profile]: https://docs.conan.io/en/latest/reference/profiles.html diff --git a/Builds/CMake/FindRocksDB.cmake b/Builds/CMake/FindRocksDB.cmake deleted file mode 100644 index e0d35706b67..00000000000 --- a/Builds/CMake/FindRocksDB.cmake +++ /dev/null @@ -1,62 +0,0 @@ -set (RocksDB_DIR "" CACHE PATH "Root directory of RocksDB distribution") - -find_path (RocksDB_INCLUDE_DIR - rocksdb/db.h - PATHS ${RocksDB_DIR}) - -set (RocksDB_VERSION "") -find_file (RocksDB_VERSION_FILE - rocksdb/version.h - PATHS ${RocksDB_DIR}) -if (RocksDB_VERSION_FILE) - file (READ ${RocksDB_VERSION_FILE} _verfile) - if ("${_verfile}" MATCHES "#define[ \\t]+ROCKSDB_MAJOR[ \\t]+([0-9]+)") - string (APPEND RocksDB_VERSION "${CMAKE_MATCH_1}") - else () - string (APPEND RocksDB_VERSION "0") - endif() - if ("${_verfile}" MATCHES "#define[ \\t]+ROCKSDB_MINOR[ \\t]+([0-9]+)") - string (APPEND RocksDB_VERSION ".${CMAKE_MATCH_1}") - else () - string (APPEND RocksDB_VERSION ".0") - endif() - if ("${_verfile}" MATCHES "#define[ \\t]+ROCKSDB_PATCH[ \\t]+([0-9]+)") - string (APPEND RocksDB_VERSION ".${CMAKE_MATCH_1}") - else () - string (APPEND RocksDB_VERSION ".0") - endif() -endif () - -if (RocksDB_USE_STATIC) - list (APPEND RocksDB_NAMES - "${CMAKE_STATIC_LIBRARY_PREFIX}rocksdb${CMAKE_STATIC_LIBRARY_SUFFIX}" - "${CMAKE_STATIC_LIBRARY_PREFIX}rocksdblib${CMAKE_STATIC_LIBRARY_SUFFIX}") -endif () - -list (APPEND RocksDB_NAMES rocksdb) - -find_library (RocksDB_LIBRARY NAMES ${RocksDB_NAMES} - PATHS - ${RocksDB_DIR} - ${RocksDB_DIR}/bin/Release - 
${RocksDB_DIR}/bin64_vs2013/Release - PATH_SUFFIXES lib lib64) - -foreach (_n RocksDB_NAMES) - list (APPEND RocksDB_NAMES_DBG "${_n}_d" "${_n}d") -endforeach () -find_library (RocksDB_LIBRARY_DEBUG NAMES ${RocksDB_NAMES_DBG} - PATHS - ${RocksDB_DIR} - ${RocksDB_DIR}/bin/Debug - ${RocksDB_DIR}/bin64_vs2013/Debug - PATH_SUFFIXES lib lib64) - -include (FindPackageHandleStandardArgs) -find_package_handle_standard_args (RocksDB - REQUIRED_VARS RocksDB_LIBRARY RocksDB_INCLUDE_DIR - VERSION_VAR RocksDB_VERSION) - -mark_as_advanced (RocksDB_INCLUDE_DIR RocksDB_LIBRARY) -set (RocksDB_INCLUDE_DIRS ${RocksDB_INCLUDE_DIR}) -set (RocksDB_LIBRARIES ${RocksDB_LIBRARY}) diff --git a/Builds/CMake/README.md b/Builds/CMake/README.md deleted file mode 100644 index 77d6813b653..00000000000 --- a/Builds/CMake/README.md +++ /dev/null @@ -1,18 +0,0 @@ - -These are modules and sources that support our CMake build. - -== FindBoost.cmake == - -In order to facilitate updating to latest releases of boost, we've made a local -copy of the FindBoost cmake module in our repo. The latest official version can -generally be obtained -[here](https://github.com/Kitware/CMake/blob/master/Modules/FindBoost.cmake). - -The latest version provided by Kitware can be tailored for use with the -version of CMake that it ships with (typically the next upcoming CMake -release). As such, the latest version from the repository might not work -perfectly with older versions of CMake - for instance, the latest version -might use features or properties only available in the version of CMake that -it ships with. Given this, it's best to test any updates to this module with a few -different versions of cmake. - diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 82a57995a4c..710ebb00aab 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -135,8 +135,8 @@ target_link_libraries (xrpl_core OpenSSL::Crypto Ripple::boost Ripple::syslibs - NIH::secp256k1 - NIH::ed25519-donna + secp256k1::secp256k1 + ed25519::ed25519 date::date Ripple::opts) #[=================================[ diff --git a/Builds/CMake/RippledDocs.cmake b/Builds/CMake/RippledDocs.cmake index 6cb8f730dd8..883fd616256 100644 --- a/Builds/CMake/RippledDocs.cmake +++ b/Builds/CMake/RippledDocs.cmake @@ -1,6 +1,13 @@ #[===================================================================[ docs target (optional) #]===================================================================] + +# Early return if the `docs` directory is missing, +# e.g. when we are building a Conan package. 
+if(NOT EXISTS docs) + return() +endif() + if (tests) find_package (Doxygen) if (NOT TARGET Doxygen::doxygen) diff --git a/Builds/CMake/RippledInstall.cmake b/Builds/CMake/RippledInstall.cmake index 3cdeca9e6fb..eef90c14615 100644 --- a/Builds/CMake/RippledInstall.cmake +++ b/Builds/CMake/RippledInstall.cmake @@ -4,7 +4,6 @@ install ( TARGETS - ed25519-donna common opts ripple_syslibs @@ -16,17 +15,6 @@ install ( RUNTIME DESTINATION bin INCLUDES DESTINATION include) -if(${INSTALL_SECP256K1}) -install ( - TARGETS - secp256k1 - EXPORT RippleExports - LIBRARY DESTINATION lib - ARCHIVE DESTINATION lib - RUNTIME DESTINATION bin - INCLUDES DESTINATION include) -endif() - install (EXPORT RippleExports FILE RippleTargets.cmake NAMESPACE Ripple:: diff --git a/Builds/CMake/RippledInterface.cmake b/Builds/CMake/RippledInterface.cmake index 28a531246fe..dfb57a52f46 100644 --- a/Builds/CMake/RippledInterface.cmake +++ b/Builds/CMake/RippledInterface.cmake @@ -35,17 +35,10 @@ target_link_libraries (opts $<$:-pg> $<$,$>:-p>) -if (jemalloc) - if (static) - set(JEMALLOC_USE_STATIC ON CACHE BOOL "" FORCE) - endif () - find_package (jemalloc REQUIRED) - target_compile_definitions (opts INTERFACE PROFILE_JEMALLOC) - target_include_directories (opts SYSTEM INTERFACE ${JEMALLOC_INCLUDE_DIRS}) - target_link_libraries (opts INTERFACE ${JEMALLOC_LIBRARIES}) - get_filename_component (JEMALLOC_LIB_PATH ${JEMALLOC_LIBRARIES} DIRECTORY) - ## TODO see if we can use the BUILD_RPATH target property (is it transitive?) - set (CMAKE_BUILD_RPATH ${CMAKE_BUILD_RPATH} ${JEMALLOC_LIB_PATH}) +if(jemalloc) + find_package(jemalloc REQUIRED) + target_compile_definitions(opts INTERFACE PROFILE_JEMALLOC) + target_link_libraries(opts INTERFACE jemalloc::jemalloc) endif () if (san) diff --git a/Builds/CMake/RippledMultiConfig.cmake b/Builds/CMake/RippledMultiConfig.cmake index 3bc500b53bc..ae9b182a3fc 100644 --- a/Builds/CMake/RippledMultiConfig.cmake +++ b/Builds/CMake/RippledMultiConfig.cmake @@ -14,7 +14,7 @@ if (is_multiconfig) file(GLOB md_files RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} CONFIGURE_DEPENDS *.md) LIST(APPEND all_sources ${md_files}) - foreach (_target secp256k1 ed25519-donna pbufs xrpl_core rippled) + foreach (_target secp256k1::secp256k1 ed25519::ed25519 pbufs xrpl_core rippled) get_target_property (_type ${_target} TYPE) if(_type STREQUAL "INTERFACE_LIBRARY") continue() diff --git a/Builds/CMake/RippledNIH.cmake b/Builds/CMake/RippledNIH.cmake deleted file mode 100644 index 60ab3e4bf85..00000000000 --- a/Builds/CMake/RippledNIH.cmake +++ /dev/null @@ -1,33 +0,0 @@ -#[===================================================================[ - NIH prefix path..this is where we will download - and build any ExternalProjects, and they will hopefully - survive across build directory deletion (manual cleans) -#]===================================================================] - -string (REGEX REPLACE "[ \\/%]+" "_" gen_for_path ${CMAKE_GENERATOR}) -string (TOLOWER ${gen_for_path} gen_for_path) -# HACK: trying to shorten paths for windows CI (which hits 260 MAXPATH easily) -# @see: https://issues.jenkins-ci.org/browse/JENKINS-38706?focusedCommentId=339847 -string (REPLACE "visual_studio" "vs" gen_for_path ${gen_for_path}) -if (NOT DEFINED NIH_CACHE_ROOT) - if (DEFINED ENV{NIH_CACHE_ROOT}) - set (NIH_CACHE_ROOT $ENV{NIH_CACHE_ROOT}) - else () - set (NIH_CACHE_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/.nih_c") - endif () -endif () -set (nih_cache_path - 
"${NIH_CACHE_ROOT}/${gen_for_path}/${CMAKE_CXX_COMPILER_ID}_${CMAKE_CXX_COMPILER_VERSION}") -if (NOT is_multiconfig) - set (nih_cache_path "${nih_cache_path}/${CMAKE_BUILD_TYPE}") -endif () -file(TO_CMAKE_PATH "${nih_cache_path}" nih_cache_path) -message (STATUS "NIH-EP cache path: ${nih_cache_path}") -## two convenience variables: -set (ep_lib_prefix ${CMAKE_STATIC_LIBRARY_PREFIX}) -set (ep_lib_suffix ${CMAKE_STATIC_LIBRARY_SUFFIX}) - -# this is a setting for FetchContent and needs to be -# a cache variable -# https://cmake.org/cmake/help/latest/module/FetchContent.html#populating-the-content -set (FETCHCONTENT_BASE_DIR ${nih_cache_path} CACHE STRING "" FORCE) diff --git a/Builds/CMake/RippledRelease.cmake b/Builds/CMake/RippledRelease.cmake index 3be93658255..2b1cf3666fc 100644 --- a/Builds/CMake/RippledRelease.cmake +++ b/Builds/CMake/RippledRelease.cmake @@ -2,6 +2,12 @@ package/container targets - (optional) #]===================================================================] +# Early return if the `containers` directory is missing, +# e.g. when we are building a Conan package. +if(NOT EXISTS containers) + return() +endif() + if (is_root_project) if (NOT DOCKER) find_program (DOCKER docker) @@ -16,7 +22,6 @@ if (is_root_project) message (STATUS "using [${container_label}] as build container tag...") file (MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/packages) - file (MAKE_DIRECTORY ${NIH_CACHE_ROOT}/pkgbuild) if (is_linux) execute_process (COMMAND id -u OUTPUT_VARIABLE DOCKER_USER_ID @@ -62,8 +67,6 @@ if (is_root_project) exclude_from_default (rpm_container) add_custom_target (rpm docker run - -e NIH_CACHE_ROOT=/opt/rippled_bld/pkg/.nih_c - -v ${NIH_CACHE_ROOT}/pkgbuild:/opt/rippled_bld/pkg/.nih_c -v ${CMAKE_CURRENT_SOURCE_DIR}:/opt/rippled_bld/pkg/rippled -v ${CMAKE_CURRENT_BINARY_DIR}/packages:/opt/rippled_bld/pkg/out "$<$:--volume=/etc/passwd:/etc/passwd;--volume=/etc/group:/etc/group;--user=${DOCKER_USER_ID}:${DOCKER_GROUP_ID}>" @@ -137,8 +140,6 @@ if (is_root_project) exclude_from_default (dpkg_container) add_custom_target (dpkg docker run - -e NIH_CACHE_ROOT=/opt/rippled_bld/pkg/.nih_c - -v ${NIH_CACHE_ROOT}/pkgbuild:/opt/rippled_bld/pkg/.nih_c -v ${CMAKE_CURRENT_SOURCE_DIR}:/opt/rippled_bld/pkg/rippled -v ${CMAKE_CURRENT_BINARY_DIR}/packages:/opt/rippled_bld/pkg/out "$<$:--volume=/etc/passwd:/etc/passwd;--volume=/etc/group:/etc/group;--user=${DOCKER_USER_ID}:${DOCKER_GROUP_ID}>" diff --git a/Builds/CMake/RippledSanity.cmake b/Builds/CMake/RippledSanity.cmake index 9e7fd113afd..1d217196e75 100644 --- a/Builds/CMake/RippledSanity.cmake +++ b/Builds/CMake/RippledSanity.cmake @@ -10,12 +10,7 @@ if (NOT ep_procs) message (STATUS "Using ${ep_procs} cores for ExternalProject builds.") endif () endif () -get_property (is_multiconfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) -if (is_multiconfig STREQUAL "NOTFOUND") - if (${CMAKE_GENERATOR} STREQUAL "Xcode" OR ${CMAKE_GENERATOR} MATCHES "^Visual Studio") - set (is_multiconfig TRUE) - endif () -endif () +get_property(is_multiconfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) set (CMAKE_CONFIGURATION_TYPES "Debug;Release" CACHE STRING "" FORCE) if (NOT is_multiconfig) @@ -49,9 +44,6 @@ elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") message (FATAL_ERROR "This project requires GCC 8 or later") endif () endif () -if (CMAKE_GENERATOR STREQUAL "Xcode") - set (is_xcode TRUE) -endif () if (CMAKE_SYSTEM_NAME STREQUAL "Linux") set (is_linux TRUE) diff --git a/Builds/CMake/deps/Boost.cmake b/Builds/CMake/deps/Boost.cmake index 
ba3086c5c8f..23ea5e549cc 100644 --- a/Builds/CMake/deps/Boost.cmake +++ b/Builds/CMake/deps/Boost.cmake @@ -1,49 +1,3 @@ -#[===================================================================[ - NIH dep: boost -#]===================================================================] - -if((NOT DEFINED BOOST_ROOT) AND(DEFINED ENV{BOOST_ROOT})) - set(BOOST_ROOT $ENV{BOOST_ROOT}) -endif() -file(TO_CMAKE_PATH "${BOOST_ROOT}" BOOST_ROOT) -if(WIN32 OR CYGWIN) - # Workaround for MSVC having two boost versions - x86 and x64 on same PC in stage folders - if(DEFINED BOOST_ROOT) - if(IS_DIRECTORY ${BOOST_ROOT}/stage64/lib) - set(BOOST_LIBRARYDIR ${BOOST_ROOT}/stage64/lib) - elseif(IS_DIRECTORY ${BOOST_ROOT}/stage/lib) - set(BOOST_LIBRARYDIR ${BOOST_ROOT}/stage/lib) - elseif(IS_DIRECTORY ${BOOST_ROOT}/lib) - set(BOOST_LIBRARYDIR ${BOOST_ROOT}/lib) - else() - message(WARNING "Did not find expected boost library dir. " - "Defaulting to ${BOOST_ROOT}") - set(BOOST_LIBRARYDIR ${BOOST_ROOT}) - endif() - endif() -endif() -message(STATUS "BOOST_ROOT: ${BOOST_ROOT}") -message(STATUS "BOOST_LIBRARYDIR: ${BOOST_LIBRARYDIR}") - -# uncomment the following as needed to debug FindBoost issues: -#set(Boost_DEBUG ON) - -#[=========================================================[ - boost dynamic libraries don't trivially support @rpath - linking right now (cmake's default), so just force - static linking for macos, or if requested on linux by flag -#]=========================================================] -if(static) - set(Boost_USE_STATIC_LIBS ON) -endif() -set(Boost_USE_MULTITHREADED ON) -if(static AND NOT APPLE) - set(Boost_USE_STATIC_RUNTIME ON) -else() - set(Boost_USE_STATIC_RUNTIME OFF) -endif() -# TBD: -# Boost_USE_DEBUG_RUNTIME: When ON, uses Boost libraries linked against the find_package(Boost 1.70 REQUIRED COMPONENTS chrono @@ -55,11 +9,12 @@ find_package(Boost 1.70 REQUIRED program_options regex system - thread) + thread +) add_library(ripple_boost INTERFACE) add_library(Ripple::boost ALIAS ripple_boost) -if(is_xcode) +if(XCODE) target_include_directories(ripple_boost BEFORE INTERFACE ${Boost_INCLUDE_DIRS}) target_compile_options(ripple_boost INTERFACE --system-header-prefix="boost/") else() diff --git a/Builds/CMake/deps/Ed25519-donna.cmake b/Builds/CMake/deps/Ed25519-donna.cmake deleted file mode 100644 index 7f352423fdc..00000000000 --- a/Builds/CMake/deps/Ed25519-donna.cmake +++ /dev/null @@ -1,28 +0,0 @@ -#[===================================================================[ - NIH dep: ed25519-donna -#]===================================================================] - -add_library (ed25519-donna STATIC - src/ed25519-donna/ed25519.c) -target_include_directories (ed25519-donna - PUBLIC - $ - $ - PRIVATE - ${CMAKE_CURRENT_SOURCE_DIR}/src/ed25519-donna) -#[=========================================================[ - NOTE for macos: - https://github.com/floodyberry/ed25519-donna/issues/29 - our source for ed25519-donna-portable.h has been - patched to workaround this. 
-#]=========================================================] -target_link_libraries (ed25519-donna PUBLIC OpenSSL::SSL) -add_library (NIH::ed25519-donna ALIAS ed25519-donna) -target_link_libraries (ripple_libs INTERFACE NIH::ed25519-donna) -#[===========================[ - headers installation -#]===========================] -install ( - FILES - src/ed25519-donna/ed25519.h - DESTINATION include/ed25519-donna) diff --git a/Builds/CMake/deps/FindBoost.cmake b/Builds/CMake/deps/FindBoost.cmake deleted file mode 100644 index 121e7264145..00000000000 --- a/Builds/CMake/deps/FindBoost.cmake +++ /dev/null @@ -1,2170 +0,0 @@ -# Distributed under the OSI-approved BSD 3-Clause License. See accompanying -# file Copyright.txt or https://cmake.org/licensing for details. - -#[=======================================================================[.rst: -FindBoost ---------- - -Find Boost include dirs and libraries - -Use this module by invoking find_package with the form:: - - find_package(Boost - [version] [EXACT] # Minimum or EXACT version e.g. 1.67.0 - [REQUIRED] # Fail with error if Boost is not found - [COMPONENTS ...] # Boost libraries by their canonical name - # e.g. "date_time" for "libboost_date_time" - [OPTIONAL_COMPONENTS ...] - # Optional Boost libraries by their canonical name) - ) # e.g. "date_time" for "libboost_date_time" - -This module finds headers and requested component libraries OR a CMake -package configuration file provided by a "Boost CMake" build. For the -latter case skip to the "Boost CMake" section below. For the former -case results are reported in variables:: - - Boost_FOUND - True if headers and requested libraries were found - Boost_INCLUDE_DIRS - Boost include directories - Boost_LIBRARY_DIRS - Link directories for Boost libraries - Boost_LIBRARIES - Boost component libraries to be linked - Boost__FOUND - True if component was found ( is upper-case) - Boost__LIBRARY - Libraries to link for component (may include - target_link_libraries debug/optimized keywords) - Boost_VERSION_MACRO - BOOST_VERSION value from boost/version.hpp - Boost_VERSION_STRING - Boost version number in x.y.z format - Boost_VERSION - if CMP0093 NEW => same as Boost_VERSION_STRING - if CMP0093 OLD or unset => same as Boost_VERSION_MACRO - Boost_LIB_VERSION - Version string appended to library filenames - Boost_VERSION_MAJOR - Boost major version number (X in X.y.z) - alias: Boost_MAJOR_VERSION - Boost_VERSION_MINOR - Boost minor version number (Y in x.Y.z) - alias: Boost_MINOR_VERSION - Boost_VERSION_PATCH - Boost subminor version number (Z in x.y.Z) - alias: Boost_SUBMINOR_VERSION - Boost_VERSION_COUNT - Amount of version components (3) - Boost_LIB_DIAGNOSTIC_DEFINITIONS (Windows) - - Pass to add_definitions() to have diagnostic - information about Boost's automatic linking - displayed during compilation - -Note that Boost Python components require a Python version suffix -(Boost 1.67 and later), e.g. ``python36`` or ``python27`` for the -versions built against Python 3.6 and 2.7, respectively. This also -applies to additional components using Python including -``mpi_python`` and ``numpy``. Earlier Boost releases may use -distribution-specific suffixes such as ``2``, ``3`` or ``2.7``. -These may also be used as suffixes, but note that they are not -portable. - -This module reads hints about search locations from variables:: - - BOOST_ROOT - Preferred installation prefix - (or BOOSTROOT) - BOOST_INCLUDEDIR - Preferred include directory e.g. 
/include - BOOST_LIBRARYDIR - Preferred library directory e.g. /lib - Boost_NO_SYSTEM_PATHS - Set to ON to disable searching in locations not - specified by these hint variables. Default is OFF. - Boost_ADDITIONAL_VERSIONS - - List of Boost versions not known to this module - (Boost install locations may contain the version) - -and saves search results persistently in CMake cache entries:: - - Boost_INCLUDE_DIR - Directory containing Boost headers - Boost_LIBRARY_DIR_RELEASE - Directory containing release Boost libraries - Boost_LIBRARY_DIR_DEBUG - Directory containing debug Boost libraries - Boost__LIBRARY_DEBUG - Component library debug variant - Boost__LIBRARY_RELEASE - Component library release variant - -The following :prop_tgt:`IMPORTED` targets are also defined:: - - Boost::headers - Target for header-only dependencies - (Boost include directory) - alias: Boost::boost - Boost:: - Target for specific component dependency - (shared or static library); is lower- - case - Boost::diagnostic_definitions - interface target to enable diagnostic - information about Boost's automatic linking - during compilation (adds BOOST_LIB_DIAGNOSTIC) - Boost::disable_autolinking - interface target to disable automatic - linking with MSVC (adds BOOST_ALL_NO_LIB) - Boost::dynamic_linking - interface target to enable dynamic linking - linking with MSVC (adds BOOST_ALL_DYN_LINK) - -Implicit dependencies such as ``Boost::filesystem`` requiring -``Boost::system`` will be automatically detected and satisfied, even -if system is not specified when using :command:`find_package` and if -``Boost::system`` is not added to :command:`target_link_libraries`. If using -``Boost::thread``, then ``Threads::Threads`` will also be added automatically. - -It is important to note that the imported targets behave differently -than variables created by this module: multiple calls to -:command:`find_package(Boost)` in the same directory or sub-directories with -different options (e.g. static or shared) will not override the -values of the targets created by the first call. - -Users may set these hints or results as ``CACHE`` entries. Projects -should not read these entries directly but instead use the above -result variables. Note that some hint names start in upper-case -"BOOST". One may specify these as environment variables if they are -not specified as CMake variables or cache entries. - -This module first searches for the ``Boost`` header files using the above -hint variables (excluding ``BOOST_LIBRARYDIR``) and saves the result in -``Boost_INCLUDE_DIR``. Then it searches for requested component libraries -using the above hints (excluding ``BOOST_INCLUDEDIR`` and -``Boost_ADDITIONAL_VERSIONS``), "lib" directories near ``Boost_INCLUDE_DIR``, -and the library name configuration settings below. It saves the -library directories in ``Boost_LIBRARY_DIR_DEBUG`` and -``Boost_LIBRARY_DIR_RELEASE`` and individual library -locations in ``Boost__LIBRARY_DEBUG`` and ``Boost__LIBRARY_RELEASE``. -When one changes settings used by previous searches in the same build -tree (excluding environment variables) this module discards previous -search results affected by the changes and searches again. - -Boost libraries come in many variants encoded in their file name. -Users or projects may tell this module which variant to find by -setting variables:: - - Boost_USE_DEBUG_LIBS - Set to ON or OFF to specify whether to search - and use the debug libraries. Default is ON. 
- Boost_USE_RELEASE_LIBS - Set to ON or OFF to specify whether to search - and use the release libraries. Default is ON. - Boost_USE_MULTITHREADED - Set to OFF to use the non-multithreaded - libraries ('mt' tag). Default is ON. - Boost_USE_STATIC_LIBS - Set to ON to force the use of the static - libraries. Default is OFF. - Boost_USE_STATIC_RUNTIME - Set to ON or OFF to specify whether to use - libraries linked statically to the C++ runtime - ('s' tag). Default is platform dependent. - Boost_USE_DEBUG_RUNTIME - Set to ON or OFF to specify whether to use - libraries linked to the MS debug C++ runtime - ('g' tag). Default is ON. - Boost_USE_DEBUG_PYTHON - Set to ON to use libraries compiled with a - debug Python build ('y' tag). Default is OFF. - Boost_USE_STLPORT - Set to ON to use libraries compiled with - STLPort ('p' tag). Default is OFF. - Boost_USE_STLPORT_DEPRECATED_NATIVE_IOSTREAMS - - Set to ON to use libraries compiled with - STLPort deprecated "native iostreams" - ('n' tag). Default is OFF. - Boost_COMPILER - Set to the compiler-specific library suffix - (e.g. "-gcc43"). Default is auto-computed - for the C++ compiler in use. A list may be - used if multiple compatible suffixes should - be tested for, in decreasing order of - preference. - Boost_ARCHITECTURE - Set to the architecture-specific library suffix - (e.g. "-x64"). Default is auto-computed for the - C++ compiler in use. - Boost_THREADAPI - Suffix for "thread" component library name, - such as "pthread" or "win32". Names with - and without this suffix will both be tried. - Boost_NAMESPACE - Alternate namespace used to build boost with - e.g. if set to "myboost", will search for - myboost_thread instead of boost_thread. - -Other variables one may set to control this module are:: - - Boost_DEBUG - Set to ON to enable debug output from FindBoost. - Please enable this before filing any bug report. - Boost_REALPATH - Set to ON to resolve symlinks for discovered - libraries to assist with packaging. For example, - the "system" component library may be resolved to - "/usr/lib/libboost_system.so.1.67.0" instead of - "/usr/lib/libboost_system.so". This does not - affect linking and should not be enabled unless - the user needs this information. - Boost_LIBRARY_DIR - Default value for Boost_LIBRARY_DIR_RELEASE and - Boost_LIBRARY_DIR_DEBUG. - -On Visual Studio and Borland compilers Boost headers request automatic -linking to corresponding libraries. This requires matching libraries -to be linked explicitly or available in the link library search path. -In this case setting ``Boost_USE_STATIC_LIBS`` to ``OFF`` may not achieve -dynamic linking. Boost automatic linking typically requests static -libraries with a few exceptions (such as ``Boost.Python``). Use:: - - add_definitions(${Boost_LIB_DIAGNOSTIC_DEFINITIONS}) - -to ask Boost to report information about automatic linking requests. 
- -Example to find Boost headers only:: - - find_package(Boost 1.36.0) - if(Boost_FOUND) - include_directories(${Boost_INCLUDE_DIRS}) - add_executable(foo foo.cc) - endif() - -Example to find Boost libraries and use imported targets:: - - find_package(Boost 1.56 REQUIRED COMPONENTS - date_time filesystem iostreams) - add_executable(foo foo.cc) - target_link_libraries(foo Boost::date_time Boost::filesystem - Boost::iostreams) - -Example to find Boost Python 3.6 libraries and use imported targets:: - - find_package(Boost 1.67 REQUIRED COMPONENTS - python36 numpy36) - add_executable(foo foo.cc) - target_link_libraries(foo Boost::python36 Boost::numpy36) - -Example to find Boost headers and some *static* (release only) libraries:: - - set(Boost_USE_STATIC_LIBS ON) # only find static libs - set(Boost_USE_DEBUG_LIBS OFF) # ignore debug libs and - set(Boost_USE_RELEASE_LIBS ON) # only find release libs - set(Boost_USE_MULTITHREADED ON) - set(Boost_USE_STATIC_RUNTIME OFF) - find_package(Boost 1.66.0 COMPONENTS date_time filesystem system ...) - if(Boost_FOUND) - include_directories(${Boost_INCLUDE_DIRS}) - add_executable(foo foo.cc) - target_link_libraries(foo ${Boost_LIBRARIES}) - endif() - -Boost CMake -^^^^^^^^^^^ - -If Boost was built using the boost-cmake project or from Boost 1.70.0 on -it provides a package configuration file for use with find_package's config mode. -This module looks for the package configuration file called -``BoostConfig.cmake`` or ``boost-config.cmake`` and stores the result in -``CACHE`` entry "Boost_DIR". If found, the package configuration file is loaded -and this module returns with no further action. See documentation of -the Boost CMake package configuration for details on what it provides. - -Set ``Boost_NO_BOOST_CMAKE`` to ``ON``, to disable the search for boost-cmake. -#]=======================================================================] - -# The FPHSA helper provides standard way of reporting final search results to -# the user including the version and component checks. -include(FindPackageHandleStandardArgs) - -# Save project's policies -cmake_policy(PUSH) -cmake_policy(SET CMP0057 NEW) # if IN_LIST - -#------------------------------------------------------------------------------- -# Before we go searching, check whether a boost cmake package is available, unless -# the user specifically asked NOT to search for one. -# -# If Boost_DIR is set, this behaves as any find_package call would. If not, -# it looks at BOOST_ROOT and BOOSTROOT to find Boost. -# -if (NOT Boost_NO_BOOST_CMAKE) - # If Boost_DIR is not set, look for BOOSTROOT and BOOST_ROOT as alternatives, - # since these are more conventional for Boost. - if ("$ENV{Boost_DIR}" STREQUAL "") - if (NOT "$ENV{BOOST_ROOT}" STREQUAL "") - set(ENV{Boost_DIR} $ENV{BOOST_ROOT}) - elseif (NOT "$ENV{BOOSTROOT}" STREQUAL "") - set(ENV{Boost_DIR} $ENV{BOOSTROOT}) - endif() - endif() - - # Do the same find_package call but look specifically for the CMake version. - # Note that args are passed in the Boost_FIND_xxxxx variables, so there is no - # need to delegate them to this find_package call. - find_package(Boost QUIET NO_MODULE) - mark_as_advanced(Boost_DIR) - - # If we found a boost cmake package, then we're done. Print out what we found. - # Otherwise let the rest of the module try to find it. 
- if(Boost_FOUND) - # Neither the legacy boost-cmake nor the new builtin BoostConfig (as in 1.70) - # report the found components in the standard variables, so we need to convert - # them here - if(Boost_FIND_COMPONENTS) - foreach(_comp IN LISTS Boost_FIND_COMPONENTS) - string(TOUPPER ${_comp} _uppercomp) - if(DEFINED Boost${_comp}_FOUND) - set(Boost_${_comp}_FOUND ${Boost${_comp}_FOUND}) - elseif(DEFINED Boost_${_uppercomp}_FOUND) - set(Boost_${_comp}_FOUND ${Boost_${_uppercomp}_FOUND}) - endif() - endforeach() - endif() - - find_package_handle_standard_args(Boost HANDLE_COMPONENTS CONFIG_MODE) - - # Restore project's policies - cmake_policy(POP) - return() - endif() -endif() - - -#------------------------------------------------------------------------------- -# FindBoost functions & macros -# - -# -# Print debug text if Boost_DEBUG is set. -# Call example: -# _Boost_DEBUG_PRINT("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "debug message") -# -function(_Boost_DEBUG_PRINT file line text) - if(Boost_DEBUG) - message(STATUS "[ ${file}:${line} ] ${text}") - endif() -endfunction() - -# -# _Boost_DEBUG_PRINT_VAR(file line variable_name [ENVIRONMENT] -# [SOURCE "short explanation of origin of var value"]) -# -# ENVIRONMENT - look up environment variable instead of CMake variable -# -# Print variable name and its value if Boost_DEBUG is set. -# Call example: -# _Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" BOOST_ROOT) -# -function(_Boost_DEBUG_PRINT_VAR file line name) - if(Boost_DEBUG) - cmake_parse_arguments(_args "ENVIRONMENT" "SOURCE" "" ${ARGN}) - - unset(source) - if(_args_SOURCE) - set(source " (${_args_SOURCE})") - endif() - - if(_args_ENVIRONMENT) - if(DEFINED ENV{${name}}) - set(value "\"$ENV{${name}}\"") - else() - set(value "") - endif() - set(_name "ENV{${name}}") - else() - if(DEFINED "${name}") - set(value "\"${${name}}\"") - else() - set(value "") - endif() - set(_name "${name}") - endif() - - _Boost_DEBUG_PRINT("${file}" "${line}" "${_name} = ${value}${source}") - endif() -endfunction() - -############################################ -# -# Check the existence of the libraries. -# -############################################ -# This macro was taken directly from the FindQt4.cmake file that is included -# with the CMake distribution. This is NOT my work. All work was done by the -# original authors of the FindQt4.cmake file. Only minor modifications were -# made to remove references to Qt and make this file more generally applicable -# And ELSE/ENDIF pairs were removed for readability. 
-######################################################################### - -macro(_Boost_ADJUST_LIB_VARS basename) - if(Boost_INCLUDE_DIR ) - if(Boost_${basename}_LIBRARY_DEBUG AND Boost_${basename}_LIBRARY_RELEASE) - # if the generator is multi-config or if CMAKE_BUILD_TYPE is set for - # single-config generators, set optimized and debug libraries - get_property(_isMultiConfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) - if(_isMultiConfig OR CMAKE_BUILD_TYPE) - set(Boost_${basename}_LIBRARY optimized ${Boost_${basename}_LIBRARY_RELEASE} debug ${Boost_${basename}_LIBRARY_DEBUG}) - else() - # For single-config generators where CMAKE_BUILD_TYPE has no value, - # just use the release libraries - set(Boost_${basename}_LIBRARY ${Boost_${basename}_LIBRARY_RELEASE} ) - endif() - # FIXME: This probably should be set for both cases - set(Boost_${basename}_LIBRARIES optimized ${Boost_${basename}_LIBRARY_RELEASE} debug ${Boost_${basename}_LIBRARY_DEBUG}) - endif() - - # if only the release version was found, set the debug variable also to the release version - if(Boost_${basename}_LIBRARY_RELEASE AND NOT Boost_${basename}_LIBRARY_DEBUG) - set(Boost_${basename}_LIBRARY_DEBUG ${Boost_${basename}_LIBRARY_RELEASE}) - set(Boost_${basename}_LIBRARY ${Boost_${basename}_LIBRARY_RELEASE}) - set(Boost_${basename}_LIBRARIES ${Boost_${basename}_LIBRARY_RELEASE}) - endif() - - # if only the debug version was found, set the release variable also to the debug version - if(Boost_${basename}_LIBRARY_DEBUG AND NOT Boost_${basename}_LIBRARY_RELEASE) - set(Boost_${basename}_LIBRARY_RELEASE ${Boost_${basename}_LIBRARY_DEBUG}) - set(Boost_${basename}_LIBRARY ${Boost_${basename}_LIBRARY_DEBUG}) - set(Boost_${basename}_LIBRARIES ${Boost_${basename}_LIBRARY_DEBUG}) - endif() - - # If the debug & release library ends up being the same, omit the keywords - if("${Boost_${basename}_LIBRARY_RELEASE}" STREQUAL "${Boost_${basename}_LIBRARY_DEBUG}") - set(Boost_${basename}_LIBRARY ${Boost_${basename}_LIBRARY_RELEASE} ) - set(Boost_${basename}_LIBRARIES ${Boost_${basename}_LIBRARY_RELEASE} ) - endif() - - if(Boost_${basename}_LIBRARY AND Boost_${basename}_HEADER) - set(Boost_${basename}_FOUND ON) - if("x${basename}" STREQUAL "xTHREAD" AND NOT TARGET Threads::Threads) - string(APPEND Boost_ERROR_REASON_THREAD " (missing dependency: Threads)") - set(Boost_THREAD_FOUND OFF) - endif() - endif() - - endif() - # Make variables changeable to the advanced user - mark_as_advanced( - Boost_${basename}_LIBRARY_RELEASE - Boost_${basename}_LIBRARY_DEBUG - ) -endmacro() - -# Detect changes in used variables. -# Compares the current variable value with the last one. -# In short form: -# v != v_LAST -> CHANGED = 1 -# v is defined, v_LAST not -> CHANGED = 1 -# v is not defined, but v_LAST is -> CHANGED = 1 -# otherwise -> CHANGED = 0 -# CHANGED is returned in variable named ${changed_var} -macro(_Boost_CHANGE_DETECT changed_var) - set(${changed_var} 0) - foreach(v ${ARGN}) - if(DEFINED _Boost_COMPONENTS_SEARCHED) - if(${v}) - if(_${v}_LAST) - string(COMPARE NOTEQUAL "${${v}}" "${_${v}_LAST}" _${v}_CHANGED) - else() - set(_${v}_CHANGED 1) - endif() - elseif(_${v}_LAST) - set(_${v}_CHANGED 1) - endif() - if(_${v}_CHANGED) - set(${changed_var} 1) - endif() - else() - set(_${v}_CHANGED 0) - endif() - endforeach() -endmacro() - -# -# Find the given library (var). 
-# Use 'build_type' to support different lib paths for RELEASE or DEBUG builds -# -macro(_Boost_FIND_LIBRARY var build_type) - - find_library(${var} ${ARGN}) - - if(${var}) - # If this is the first library found then save Boost_LIBRARY_DIR_[RELEASE,DEBUG]. - if(NOT Boost_LIBRARY_DIR_${build_type}) - get_filename_component(_dir "${${var}}" PATH) - set(Boost_LIBRARY_DIR_${build_type} "${_dir}" CACHE PATH "Boost library directory ${build_type}" FORCE) - endif() - elseif(_Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT) - # Try component-specific hints but do not save Boost_LIBRARY_DIR_[RELEASE,DEBUG]. - find_library(${var} HINTS ${_Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT} ${ARGN}) - endif() - - # If Boost_LIBRARY_DIR_[RELEASE,DEBUG] is known then search only there. - if(Boost_LIBRARY_DIR_${build_type}) - set(_boost_LIBRARY_SEARCH_DIRS_${build_type} ${Boost_LIBRARY_DIR_${build_type}} NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) - _Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" - "Boost_LIBRARY_DIR_${build_type}") - _Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" - "_boost_LIBRARY_SEARCH_DIRS_${build_type}") - endif() -endmacro() - -#------------------------------------------------------------------------------- - -# Convert CMAKE_CXX_COMPILER_VERSION to boost compiler suffix version. -function(_Boost_COMPILER_DUMPVERSION _OUTPUT_VERSION _OUTPUT_VERSION_MAJOR _OUTPUT_VERSION_MINOR) - string(REGEX REPLACE "([0-9]+)\\.([0-9]+)(\\.[0-9]+)?" "\\1" - _boost_COMPILER_VERSION_MAJOR "${CMAKE_CXX_COMPILER_VERSION}") - string(REGEX REPLACE "([0-9]+)\\.([0-9]+)(\\.[0-9]+)?" "\\2" - _boost_COMPILER_VERSION_MINOR "${CMAKE_CXX_COMPILER_VERSION}") - - set(_boost_COMPILER_VERSION "${_boost_COMPILER_VERSION_MAJOR}${_boost_COMPILER_VERSION_MINOR}") - - set(${_OUTPUT_VERSION} ${_boost_COMPILER_VERSION} PARENT_SCOPE) - set(${_OUTPUT_VERSION_MAJOR} ${_boost_COMPILER_VERSION_MAJOR} PARENT_SCOPE) - set(${_OUTPUT_VERSION_MINOR} ${_boost_COMPILER_VERSION_MINOR} PARENT_SCOPE) -endfunction() - -# -# Take a list of libraries with "thread" in it -# and prepend duplicates with "thread_${Boost_THREADAPI}" -# at the front of the list -# -function(_Boost_PREPEND_LIST_WITH_THREADAPI _output) - set(_orig_libnames ${ARGN}) - string(REPLACE "thread" "thread_${Boost_THREADAPI}" _threadapi_libnames "${_orig_libnames}") - set(${_output} ${_threadapi_libnames} ${_orig_libnames} PARENT_SCOPE) -endfunction() - -# -# If a library is found, replace its cache entry with its REALPATH -# -function(_Boost_SWAP_WITH_REALPATH _library _docstring) - if(${_library}) - get_filename_component(_boost_filepathreal ${${_library}} REALPATH) - unset(${_library} CACHE) - set(${_library} ${_boost_filepathreal} CACHE FILEPATH "${_docstring}") - endif() -endfunction() - -function(_Boost_CHECK_SPELLING _var) - if(${_var}) - string(TOUPPER ${_var} _var_UC) - message(FATAL_ERROR "ERROR: ${_var} is not the correct spelling. The proper spelling is ${_var_UC}.") - endif() -endfunction() - -# Guesses Boost's compiler prefix used in built library names -# Returns the guess by setting the variable pointed to by _ret -function(_Boost_GUESS_COMPILER_PREFIX _ret) - if("x${CMAKE_CXX_COMPILER_ID}" STREQUAL "xIntel") - if(WIN32) - set (_boost_COMPILER "-iw") - else() - set (_boost_COMPILER "-il") - endif() - elseif (GHSMULTI) - set(_boost_COMPILER "-ghs") - elseif("x${CMAKE_CXX_COMPILER_ID}" STREQUAL "xMSVC" OR "x${CMAKE_CXX_SIMULATE_ID}" STREQUAL "xMSVC") - if(MSVC_TOOLSET_VERSION GREATER_EQUAL 150) - # Not yet known. 
- set(_boost_COMPILER "") - elseif(MSVC_TOOLSET_VERSION GREATER_EQUAL 140) - # MSVC toolset 14.x versions are forward compatible. - set(_boost_COMPILER "") - foreach(v 9 8 7 6 5 4 3 2 1 0) - if(MSVC_TOOLSET_VERSION GREATER_EQUAL 14${v}) - list(APPEND _boost_COMPILER "-vc14${v}") - endif() - endforeach() - elseif(MSVC_TOOLSET_VERSION GREATER_EQUAL 80) - set(_boost_COMPILER "-vc${MSVC_TOOLSET_VERSION}") - elseif(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 13.10) - set(_boost_COMPILER "-vc71") - elseif(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 13) # Good luck! - set(_boost_COMPILER "-vc7") # yes, this is correct - else() # VS 6.0 Good luck! - set(_boost_COMPILER "-vc6") # yes, this is correct - endif() - - if("x${CMAKE_CXX_COMPILER_ID}" STREQUAL "xClang") - string(REPLACE "." ";" VERSION_LIST "${CMAKE_CXX_COMPILER_VERSION}") - list(GET VERSION_LIST 0 CLANG_VERSION_MAJOR) - set(_boost_COMPILER "-clangw${CLANG_VERSION_MAJOR};${_boost_COMPILER}") - endif() - elseif (BORLAND) - set(_boost_COMPILER "-bcb") - elseif(CMAKE_CXX_COMPILER_ID STREQUAL "SunPro") - set(_boost_COMPILER "-sw") - elseif(CMAKE_CXX_COMPILER_ID STREQUAL "XL") - set(_boost_COMPILER "-xlc") - elseif (MINGW) - if(Boost_VERSION_STRING VERSION_LESS 1.34) - set(_boost_COMPILER "-mgw") # no GCC version encoding prior to 1.34 - else() - _Boost_COMPILER_DUMPVERSION(_boost_COMPILER_VERSION _boost_COMPILER_VERSION_MAJOR _boost_COMPILER_VERSION_MINOR) - set(_boost_COMPILER "-mgw${_boost_COMPILER_VERSION}") - endif() - elseif (UNIX) - _Boost_COMPILER_DUMPVERSION(_boost_COMPILER_VERSION _boost_COMPILER_VERSION_MAJOR _boost_COMPILER_VERSION_MINOR) - if(NOT Boost_VERSION_STRING VERSION_LESS 1.69.0) - # From GCC 5 and clang 4, versioning changes and minor becomes patch. - # For those compilers, patch is exclude from compiler tag in Boost 1.69+ library naming. - if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND _boost_COMPILER_VERSION_MAJOR VERSION_GREATER 4) - set(_boost_COMPILER_VERSION "${_boost_COMPILER_VERSION_MAJOR}") - elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND _boost_COMPILER_VERSION_MAJOR VERSION_GREATER 3) - set(_boost_COMPILER_VERSION "${_boost_COMPILER_VERSION_MAJOR}") - endif() - endif() - - if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - if(Boost_VERSION_STRING VERSION_LESS 1.34) - set(_boost_COMPILER "-gcc") # no GCC version encoding prior to 1.34 - else() - # Determine which version of GCC we have. - if(APPLE) - if(Boost_VERSION_STRING VERSION_LESS 1.36.0) - # In Boost <= 1.35.0, there is no mangled compiler name for - # the macOS/Darwin version of GCC. - set(_boost_COMPILER "") - else() - # In Boost 1.36.0 and newer, the mangled compiler name used - # on macOS/Darwin is "xgcc". - set(_boost_COMPILER "-xgcc${_boost_COMPILER_VERSION}") - endif() - else() - set(_boost_COMPILER "-gcc${_boost_COMPILER_VERSION}") - endif() - endif() - elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") - # TODO: Find out any Boost version constraints vs clang support. - set(_boost_COMPILER "-clang${_boost_COMPILER_VERSION}") - endif() - else() - set(_boost_COMPILER "") - endif() - _Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" - "_boost_COMPILER" SOURCE "guessed") - set(${_ret} ${_boost_COMPILER} PARENT_SCOPE) -endfunction() - -# -# Get component dependencies. Requires the dependencies to have been -# defined for the Boost release version. 
-# -# component - the component to check -# _ret - list of library dependencies -# -function(_Boost_COMPONENT_DEPENDENCIES component _ret) - # Note: to add a new Boost release, run - # - # % cmake -DBOOST_DIR=/path/to/boost/source -P Utilities/Scripts/BoostScanDeps.cmake - # - # The output may be added in a new block below. If it's the same as - # the previous release, simply update the version range of the block - # for the previous release. Also check if any new components have - # been added, and add any new components to - # _Boost_COMPONENT_HEADERS. - # - # This information was originally generated by running - # BoostScanDeps.cmake against every boost release to date supported - # by FindBoost: - # - # % for version in /path/to/boost/sources/* - # do - # cmake -DBOOST_DIR=$version -P Utilities/Scripts/BoostScanDeps.cmake - # done - # - # The output was then updated by search and replace with these regexes: - # - # - Strip message(STATUS) prefix dashes - # s;^-- ;; - # - Indent - # s;^set(; set(;; - # - Add conditionals - # s;Scanning /path/to/boost/sources/boost_\(.*\)_\(.*\)_\(.*); elseif(NOT Boost_VERSION_STRING VERSION_LESS \1\.\2\.\3 AND Boost_VERSION_STRING VERSION_LESS xxxx); - # - # This results in the logic seen below, but will require the xxxx - # replacing with the following Boost release version (or the next - # minor version to be released, e.g. 1.59 was the latest at the time - # of writing, making 1.60 the next. Identical consecutive releases - # were then merged together by updating the end range of the first - # block and removing the following redundant blocks. - # - # Running the script against all historical releases should be - # required only if the BoostScanDeps.cmake script logic is changed. - # The addition of a new release should only require it to be run - # against the new release. 
- - # Handle Python version suffixes - if(component MATCHES "^(python|mpi_python|numpy)([0-9][0-9]?|[0-9]\\.[0-9])\$") - set(component "${CMAKE_MATCH_1}") - set(component_python_version "${CMAKE_MATCH_2}") - endif() - - set(_Boost_IMPORTED_TARGETS TRUE) - if(Boost_VERSION_STRING AND Boost_VERSION_STRING VERSION_LESS 1.33.0) - message(WARNING "Imported targets and dependency information not available for Boost version ${Boost_VERSION_STRING} (all versions older than 1.33)") - set(_Boost_IMPORTED_TARGETS FALSE) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.33.0 AND Boost_VERSION_STRING VERSION_LESS 1.35.0) - set(_Boost_IOSTREAMS_DEPENDENCIES regex thread) - set(_Boost_REGEX_DEPENDENCIES thread) - set(_Boost_WAVE_DEPENDENCIES filesystem thread) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.35.0 AND Boost_VERSION_STRING VERSION_LESS 1.36.0) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_WAVE_DEPENDENCIES filesystem system thread) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.36.0 AND Boost_VERSION_STRING VERSION_LESS 1.38.0) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_WAVE_DEPENDENCIES filesystem system thread) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.38.0 AND Boost_VERSION_STRING VERSION_LESS 1.43.0) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_THREAD_DEPENDENCIES date_time) - set(_Boost_WAVE_DEPENDENCIES filesystem system thread date_time) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.43.0 AND Boost_VERSION_STRING VERSION_LESS 1.44.0) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l random) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_THREAD_DEPENDENCIES date_time) - set(_Boost_WAVE_DEPENDENCIES filesystem system thread date_time) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.44.0 AND Boost_VERSION_STRING VERSION_LESS 1.45.0) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l random serialization) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_THREAD_DEPENDENCIES date_time) - set(_Boost_WAVE_DEPENDENCIES serialization filesystem system thread date_time) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) 
- elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.45.0 AND Boost_VERSION_STRING VERSION_LESS 1.47.0) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l random) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_THREAD_DEPENDENCIES date_time) - set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread date_time) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.47.0 AND Boost_VERSION_STRING VERSION_LESS 1.48.0) - set(_Boost_CHRONO_DEPENDENCIES system) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l random) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_THREAD_DEPENDENCIES date_time) - set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread date_time) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.48.0 AND Boost_VERSION_STRING VERSION_LESS 1.50.0) - set(_Boost_CHRONO_DEPENDENCIES system) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l random) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_THREAD_DEPENDENCIES date_time) - set(_Boost_TIMER_DEPENDENCIES chrono system) - set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread date_time) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.50.0 AND Boost_VERSION_STRING VERSION_LESS 1.53.0) - set(_Boost_CHRONO_DEPENDENCIES system) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l regex random) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_THREAD_DEPENDENCIES chrono system date_time) - set(_Boost_TIMER_DEPENDENCIES chrono system) - set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.53.0 AND Boost_VERSION_STRING VERSION_LESS 1.54.0) - set(_Boost_ATOMIC_DEPENDENCIES thread chrono system date_time) - set(_Boost_CHRONO_DEPENDENCIES system) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l regex random) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) - set(_Boost_TIMER_DEPENDENCIES chrono system) - set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.54.0 AND Boost_VERSION_STRING VERSION_LESS 1.55.0) - 
set(_Boost_ATOMIC_DEPENDENCIES thread chrono system date_time) - set(_Boost_CHRONO_DEPENDENCIES system) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_LOG_DEPENDENCIES log_setup date_time system filesystem thread regex chrono) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l regex random) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) - set(_Boost_TIMER_DEPENDENCIES chrono system) - set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time atomic) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.55.0 AND Boost_VERSION_STRING VERSION_LESS 1.56.0) - set(_Boost_CHRONO_DEPENDENCIES system) - set(_Boost_COROUTINE_DEPENDENCIES context system) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_LOG_DEPENDENCIES log_setup date_time system filesystem thread regex chrono) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l regex random) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) - set(_Boost_TIMER_DEPENDENCIES chrono system) - set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time atomic) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.56.0 AND Boost_VERSION_STRING VERSION_LESS 1.59.0) - set(_Boost_CHRONO_DEPENDENCIES system) - set(_Boost_COROUTINE_DEPENDENCIES context system) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_LOG_DEPENDENCIES log_setup date_time system filesystem thread regex chrono) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l atomic) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_RANDOM_DEPENDENCIES system) - set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) - set(_Boost_TIMER_DEPENDENCIES chrono system) - set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time atomic) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.59.0 AND Boost_VERSION_STRING VERSION_LESS 1.60.0) - set(_Boost_CHRONO_DEPENDENCIES system) - set(_Boost_COROUTINE_DEPENDENCIES context system) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_LOG_DEPENDENCIES log_setup date_time system filesystem thread regex chrono atomic) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l atomic) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_RANDOM_DEPENDENCIES system) - set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) - set(_Boost_TIMER_DEPENDENCIES chrono system) - set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time atomic) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.60.0 AND Boost_VERSION_STRING 
VERSION_LESS 1.61.0) - set(_Boost_CHRONO_DEPENDENCIES system) - set(_Boost_COROUTINE_DEPENDENCIES context system) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_LOG_DEPENDENCIES date_time log_setup system filesystem thread regex chrono atomic) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l atomic) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_RANDOM_DEPENDENCIES system) - set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) - set(_Boost_TIMER_DEPENDENCIES chrono system) - set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time atomic) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.61.0 AND Boost_VERSION_STRING VERSION_LESS 1.62.0) - set(_Boost_CHRONO_DEPENDENCIES system) - set(_Boost_CONTEXT_DEPENDENCIES thread chrono system date_time) - set(_Boost_COROUTINE_DEPENDENCIES context system) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_LOG_DEPENDENCIES date_time log_setup system filesystem thread regex chrono atomic) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l atomic) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_RANDOM_DEPENDENCIES system) - set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) - set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time atomic) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.62.0 AND Boost_VERSION_STRING VERSION_LESS 1.63.0) - set(_Boost_CHRONO_DEPENDENCIES system) - set(_Boost_CONTEXT_DEPENDENCIES thread chrono system date_time) - set(_Boost_COROUTINE_DEPENDENCIES context system) - set(_Boost_FIBER_DEPENDENCIES context thread chrono system date_time) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_LOG_DEPENDENCIES date_time log_setup system filesystem thread regex chrono atomic) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l atomic) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_RANDOM_DEPENDENCIES system) - set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) - set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time atomic) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.63.0 AND Boost_VERSION_STRING VERSION_LESS 1.65.0) - set(_Boost_CHRONO_DEPENDENCIES system) - set(_Boost_CONTEXT_DEPENDENCIES thread chrono system date_time) - set(_Boost_COROUTINE_DEPENDENCIES context system) - set(_Boost_COROUTINE2_DEPENDENCIES context fiber thread chrono system date_time) - set(_Boost_FIBER_DEPENDENCIES context thread chrono system date_time) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_LOG_DEPENDENCIES date_time log_setup system filesystem thread regex chrono atomic) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l atomic) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES 
python${component_python_version} mpi serialization) - set(_Boost_RANDOM_DEPENDENCIES system) - set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) - set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time atomic) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.65.0 AND Boost_VERSION_STRING VERSION_LESS 1.67.0) - set(_Boost_CHRONO_DEPENDENCIES system) - set(_Boost_CONTEXT_DEPENDENCIES thread chrono system date_time) - set(_Boost_COROUTINE_DEPENDENCIES context system) - set(_Boost_FIBER_DEPENDENCIES context thread chrono system date_time) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_LOG_DEPENDENCIES date_time log_setup system filesystem thread regex chrono atomic) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l atomic) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_NUMPY_DEPENDENCIES python${component_python_version}) - set(_Boost_RANDOM_DEPENDENCIES system) - set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) - set(_Boost_TIMER_DEPENDENCIES chrono system) - set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time atomic) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.67.0 AND Boost_VERSION_STRING VERSION_LESS 1.68.0) - set(_Boost_CHRONO_DEPENDENCIES system) - set(_Boost_CONTEXT_DEPENDENCIES thread chrono system date_time) - set(_Boost_COROUTINE_DEPENDENCIES context system) - set(_Boost_FIBER_DEPENDENCIES context thread chrono system date_time) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_LOG_DEPENDENCIES date_time log_setup system filesystem thread regex chrono atomic) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l atomic) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_NUMPY_DEPENDENCIES python${component_python_version}) - set(_Boost_RANDOM_DEPENDENCIES system) - set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) - set(_Boost_TIMER_DEPENDENCIES chrono system) - set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time atomic) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.68.0 AND Boost_VERSION_STRING VERSION_LESS 1.69.0) - set(_Boost_CHRONO_DEPENDENCIES system) - set(_Boost_CONTEXT_DEPENDENCIES thread chrono system date_time) - set(_Boost_CONTRACT_DEPENDENCIES thread chrono system date_time) - set(_Boost_COROUTINE_DEPENDENCIES context system) - set(_Boost_FIBER_DEPENDENCIES context thread chrono system date_time) - set(_Boost_FILESYSTEM_DEPENDENCIES system) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_LOG_DEPENDENCIES date_time log_setup system filesystem thread regex chrono atomic) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l atomic) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_NUMPY_DEPENDENCIES python${component_python_version}) - set(_Boost_RANDOM_DEPENDENCIES system) - set(_Boost_THREAD_DEPENDENCIES chrono system date_time atomic) - 
set(_Boost_TIMER_DEPENDENCIES chrono system) - set(_Boost_WAVE_DEPENDENCIES filesystem system serialization thread chrono date_time atomic) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - elseif(NOT Boost_VERSION_STRING VERSION_LESS 1.69.0 AND Boost_VERSION_STRING VERSION_LESS 1.70.0) - set(_Boost_CONTRACT_DEPENDENCIES thread chrono date_time) - set(_Boost_COROUTINE_DEPENDENCIES context) - set(_Boost_FIBER_DEPENDENCIES context) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_LOG_DEPENDENCIES date_time log_setup filesystem thread regex chrono atomic) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l atomic) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_NUMPY_DEPENDENCIES python${component_python_version}) - set(_Boost_THREAD_DEPENDENCIES chrono date_time atomic) - set(_Boost_TIMER_DEPENDENCIES chrono system) - set(_Boost_WAVE_DEPENDENCIES filesystem serialization thread chrono date_time atomic) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - else() - if(NOT Boost_VERSION_STRING VERSION_LESS 1.70.0) - set(_Boost_CONTRACT_DEPENDENCIES thread chrono date_time) - set(_Boost_COROUTINE_DEPENDENCIES context) - set(_Boost_FIBER_DEPENDENCIES context) - set(_Boost_IOSTREAMS_DEPENDENCIES regex) - set(_Boost_LOG_DEPENDENCIES date_time log_setup filesystem thread regex chrono atomic) - set(_Boost_MATH_DEPENDENCIES math_c99 math_c99f math_c99l math_tr1 math_tr1f math_tr1l atomic) - set(_Boost_MPI_DEPENDENCIES serialization) - set(_Boost_MPI_PYTHON_DEPENDENCIES python${component_python_version} mpi serialization) - set(_Boost_NUMPY_DEPENDENCIES python${component_python_version}) - set(_Boost_THREAD_DEPENDENCIES chrono date_time atomic) - set(_Boost_TIMER_DEPENDENCIES chrono system) - set(_Boost_WAVE_DEPENDENCIES filesystem serialization thread chrono date_time atomic) - set(_Boost_WSERIALIZATION_DEPENDENCIES serialization) - endif() - if(NOT Boost_VERSION_STRING VERSION_LESS 1.77.0) - message(WARNING "New Boost version may have incorrect or missing dependencies and imported targets") - endif() - endif() - - string(TOUPPER ${component} uppercomponent) - set(${_ret} ${_Boost_${uppercomponent}_DEPENDENCIES} PARENT_SCOPE) - set(_Boost_IMPORTED_TARGETS ${_Boost_IMPORTED_TARGETS} PARENT_SCOPE) - - string(REGEX REPLACE ";" " " _boost_DEPS_STRING "${_Boost_${uppercomponent}_DEPENDENCIES}") - if (NOT _boost_DEPS_STRING) - set(_boost_DEPS_STRING "(none)") - endif() - # message(STATUS "Dependencies for Boost::${component}: ${_boost_DEPS_STRING}") -endfunction() - -# -# Get component headers. This is the primary header (or headers) for -# a given component, and is used to check that the headers are present -# as well as the library itself as an extra sanity check of the build -# environment. -# -# component - the component to check -# _hdrs -# -function(_Boost_COMPONENT_HEADERS component _hdrs) - # Handle Python version suffixes - if(component MATCHES "^(python|mpi_python|numpy)([0-9][0-9]?|[0-9]\\.[0-9])\$") - set(component "${CMAKE_MATCH_1}") - set(component_python_version "${CMAKE_MATCH_2}") - endif() - - # Note: new boost components will require adding here. The header - # must be present in all versions of Boost providing a library. 
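For orientation, the per-version tables above feed a simple name-in, list-out lookup. A minimal hedged sketch of that lookup (the output variable name is hypothetical, and Boost 1.69 is assumed so the 1.69–1.70 branch applies):

_Boost_COMPONENT_DEPENDENCIES("log" _log_deps)
# With Boost 1.69, _log_deps now holds:
#   date_time;log_setup;filesystem;thread;regex;chrono;atomic
# which is why a later request for COMPONENTS log also resolves and links all
# of those components transitively.
message(STATUS "Boost::log depends on: ${_log_deps}")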
- set(_Boost_ATOMIC_HEADERS "boost/atomic.hpp") - set(_Boost_CHRONO_HEADERS "boost/chrono.hpp") - set(_Boost_CONTAINER_HEADERS "boost/container/container_fwd.hpp") - set(_Boost_CONTRACT_HEADERS "boost/contract.hpp") - if(Boost_VERSION_STRING VERSION_LESS 1.61.0) - set(_Boost_CONTEXT_HEADERS "boost/context/all.hpp") - else() - set(_Boost_CONTEXT_HEADERS "boost/context/detail/fcontext.hpp") - endif() - set(_Boost_COROUTINE_HEADERS "boost/coroutine/all.hpp") - set(_Boost_DATE_TIME_HEADERS "boost/date_time/date.hpp") - set(_Boost_EXCEPTION_HEADERS "boost/exception/exception.hpp") - set(_Boost_FIBER_HEADERS "boost/fiber/all.hpp") - set(_Boost_FILESYSTEM_HEADERS "boost/filesystem/path.hpp") - set(_Boost_GRAPH_HEADERS "boost/graph/adjacency_list.hpp") - set(_Boost_GRAPH_PARALLEL_HEADERS "boost/graph/adjacency_list.hpp") - set(_Boost_IOSTREAMS_HEADERS "boost/iostreams/stream.hpp") - set(_Boost_LOCALE_HEADERS "boost/locale.hpp") - set(_Boost_LOG_HEADERS "boost/log/core.hpp") - set(_Boost_LOG_SETUP_HEADERS "boost/log/detail/setup_config.hpp") - set(_Boost_MATH_HEADERS "boost/math_fwd.hpp") - set(_Boost_MATH_C99_HEADERS "boost/math/tr1.hpp") - set(_Boost_MATH_C99F_HEADERS "boost/math/tr1.hpp") - set(_Boost_MATH_C99L_HEADERS "boost/math/tr1.hpp") - set(_Boost_MATH_TR1_HEADERS "boost/math/tr1.hpp") - set(_Boost_MATH_TR1F_HEADERS "boost/math/tr1.hpp") - set(_Boost_MATH_TR1L_HEADERS "boost/math/tr1.hpp") - set(_Boost_MPI_HEADERS "boost/mpi.hpp") - set(_Boost_MPI_PYTHON_HEADERS "boost/mpi/python/config.hpp") - set(_Boost_NUMPY_HEADERS "boost/python/numpy.hpp") - set(_Boost_PRG_EXEC_MONITOR_HEADERS "boost/test/prg_exec_monitor.hpp") - set(_Boost_PROGRAM_OPTIONS_HEADERS "boost/program_options.hpp") - set(_Boost_PYTHON_HEADERS "boost/python.hpp") - set(_Boost_RANDOM_HEADERS "boost/random.hpp") - set(_Boost_REGEX_HEADERS "boost/regex.hpp") - set(_Boost_SERIALIZATION_HEADERS "boost/serialization/serialization.hpp") - set(_Boost_SIGNALS_HEADERS "boost/signals.hpp") - set(_Boost_STACKTRACE_ADDR2LINE_HEADERS "boost/stacktrace.hpp") - set(_Boost_STACKTRACE_BACKTRACE_HEADERS "boost/stacktrace.hpp") - set(_Boost_STACKTRACE_BASIC_HEADERS "boost/stacktrace.hpp") - set(_Boost_STACKTRACE_NOOP_HEADERS "boost/stacktrace.hpp") - set(_Boost_STACKTRACE_WINDBG_CACHED_HEADERS "boost/stacktrace.hpp") - set(_Boost_STACKTRACE_WINDBG_HEADERS "boost/stacktrace.hpp") - set(_Boost_SYSTEM_HEADERS "boost/system/config.hpp") - set(_Boost_TEST_EXEC_MONITOR_HEADERS "boost/test/test_exec_monitor.hpp") - set(_Boost_THREAD_HEADERS "boost/thread.hpp") - set(_Boost_TIMER_HEADERS "boost/timer.hpp") - set(_Boost_TYPE_ERASURE_HEADERS "boost/type_erasure/config.hpp") - set(_Boost_UNIT_TEST_FRAMEWORK_HEADERS "boost/test/framework.hpp") - set(_Boost_WAVE_HEADERS "boost/wave.hpp") - set(_Boost_WSERIALIZATION_HEADERS "boost/archive/text_wiarchive.hpp") - if(WIN32) - set(_Boost_BZIP2_HEADERS "boost/iostreams/filter/bzip2.hpp") - set(_Boost_ZLIB_HEADERS "boost/iostreams/filter/zlib.hpp") - endif() - - string(TOUPPER ${component} uppercomponent) - set(${_hdrs} ${_Boost_${uppercomponent}_HEADERS} PARENT_SCOPE) - - string(REGEX REPLACE ";" " " _boost_HDRS_STRING "${_Boost_${uppercomponent}_HEADERS}") - if (NOT _boost_HDRS_STRING) - set(_boost_HDRS_STRING "(none)") - endif() - # message(STATUS "Headers for Boost::${component}: ${_boost_HDRS_STRING}") -endfunction() - -# -# Determine if any missing dependencies require adding to the component list. 
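As a hedged illustration of how these header names are used later in the component loop (the variable name _fs_hdr is hypothetical):

_Boost_COMPONENT_HEADERS("filesystem" _fs_hdr)   # yields "boost/filesystem/path.hpp"
if(EXISTS "${Boost_INCLUDE_DIR}/${_fs_hdr}")
  # The primary header is present, so the headers and the library being
  # searched for are taken to belong to the same Boost installation.
endif()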
-# -# Sets _Boost_${COMPONENT}_DEPENDENCIES for each required component, -# plus _Boost_IMPORTED_TARGETS (TRUE if imported targets should be -# defined; FALSE if dependency information is unavailable). -# -# componentvar - the component list variable name -# extravar - the indirect dependency list variable name -# -# -function(_Boost_MISSING_DEPENDENCIES componentvar extravar) - # _boost_unprocessed_components - list of components requiring processing - # _boost_processed_components - components already processed (or currently being processed) - # _boost_new_components - new components discovered for future processing - # - list(APPEND _boost_unprocessed_components ${${componentvar}}) - - while(_boost_unprocessed_components) - list(APPEND _boost_processed_components ${_boost_unprocessed_components}) - foreach(component ${_boost_unprocessed_components}) - string(TOUPPER ${component} uppercomponent) - set(${_ret} ${_Boost_${uppercomponent}_DEPENDENCIES} PARENT_SCOPE) - _Boost_COMPONENT_DEPENDENCIES("${component}" _Boost_${uppercomponent}_DEPENDENCIES) - set(_Boost_${uppercomponent}_DEPENDENCIES ${_Boost_${uppercomponent}_DEPENDENCIES} PARENT_SCOPE) - set(_Boost_IMPORTED_TARGETS ${_Boost_IMPORTED_TARGETS} PARENT_SCOPE) - foreach(componentdep ${_Boost_${uppercomponent}_DEPENDENCIES}) - if (NOT ("${componentdep}" IN_LIST _boost_processed_components OR "${componentdep}" IN_LIST _boost_new_components)) - list(APPEND _boost_new_components ${componentdep}) - endif() - endforeach() - endforeach() - set(_boost_unprocessed_components ${_boost_new_components}) - unset(_boost_new_components) - endwhile() - set(_boost_extra_components ${_boost_processed_components}) - if(_boost_extra_components AND ${componentvar}) - list(REMOVE_ITEM _boost_extra_components ${${componentvar}}) - endif() - set(${componentvar} ${_boost_processed_components} PARENT_SCOPE) - set(${extravar} ${_boost_extra_components} PARENT_SCOPE) -endfunction() - -# -# Some boost libraries may require particular set of compler features. -# The very first one was `boost::fiber` introduced in Boost 1.62. -# One can check required compiler features of it in -# - `${Boost_ROOT}/libs/fiber/build/Jamfile.v2`; -# - `${Boost_ROOT}/libs/context/build/Jamfile.v2`. -# -# TODO (Re)Check compiler features on (every?) release ??? -# One may use the following command to get the files to check: -# -# $ find . -name Jamfile.v2 | grep build | xargs grep -l cxx1 -# -function(_Boost_COMPILER_FEATURES component _ret) - # Boost >= 1.62 - if(NOT Boost_VERSION_STRING VERSION_LESS 1.62.0) - set(_Boost_FIBER_COMPILER_FEATURES - cxx_alias_templates - cxx_auto_type - cxx_constexpr - cxx_defaulted_functions - cxx_final - cxx_lambdas - cxx_noexcept - cxx_nullptr - cxx_rvalue_references - cxx_thread_local - cxx_variadic_templates - ) - # Compiler feature for `context` same as for `fiber`. - set(_Boost_CONTEXT_COMPILER_FEATURES ${_Boost_FIBER_COMPILER_FEATURES}) - endif() - - # Boost Contract library available in >= 1.67 - if(NOT Boost_VERSION_STRING VERSION_LESS 1.67.0) - # From `libs/contract/build/boost_contract_build.jam` - set(_Boost_CONTRACT_COMPILER_FEATURES - cxx_lambdas - cxx_variadic_templates - ) - endif() - - string(TOUPPER ${component} uppercomponent) - set(${_ret} ${_Boost_${uppercomponent}_COMPILER_FEATURES} PARENT_SCOPE) -endfunction() - -# -# Update library search directory hint variable with paths used by prebuilt boost binaries. 
-# -# Prebuilt windows binaries (https://sourceforge.net/projects/boost/files/boost-binaries/) -# have library directories named using MSVC compiler version and architecture. -# This function would append corresponding directories if MSVC is a current compiler, -# so having `BOOST_ROOT` would be enough to specify to find everything. -# -function(_Boost_UPDATE_WINDOWS_LIBRARY_SEARCH_DIRS_WITH_PREBUILT_PATHS componentlibvar basedir) - if("x${CMAKE_CXX_COMPILER_ID}" STREQUAL "xMSVC") - if(CMAKE_SIZEOF_VOID_P EQUAL 8) - set(_arch_suffix 64) - else() - set(_arch_suffix 32) - endif() - if(MSVC_TOOLSET_VERSION GREATER_EQUAL 150) - # Not yet known. - elseif(MSVC_TOOLSET_VERSION GREATER_EQUAL 140) - # MSVC toolset 14.x versions are forward compatible. - foreach(v 9 8 7 6 5 4 3 2 1 0) - if(MSVC_TOOLSET_VERSION GREATER_EQUAL 14${v}) - list(APPEND ${componentlibvar} ${basedir}/lib${_arch_suffix}-msvc-14.${v}) - endif() - endforeach() - elseif(MSVC_TOOLSET_VERSION GREATER_EQUAL 80) - math(EXPR _toolset_major_version "${MSVC_TOOLSET_VERSION} / 10") - list(APPEND ${componentlibvar} ${basedir}/lib${_arch_suffix}-msvc-${_toolset_major_version}.0) - endif() - set(${componentlibvar} ${${componentlibvar}} PARENT_SCOPE) - endif() -endfunction() - -# -# End functions/macros -# -#------------------------------------------------------------------------------- - -#------------------------------------------------------------------------------- -# main. -#------------------------------------------------------------------------------- - - -# If the user sets Boost_LIBRARY_DIR, use it as the default for both -# configurations. -if(NOT Boost_LIBRARY_DIR_RELEASE AND Boost_LIBRARY_DIR) - set(Boost_LIBRARY_DIR_RELEASE "${Boost_LIBRARY_DIR}") -endif() -if(NOT Boost_LIBRARY_DIR_DEBUG AND Boost_LIBRARY_DIR) - set(Boost_LIBRARY_DIR_DEBUG "${Boost_LIBRARY_DIR}") -endif() - -if(NOT DEFINED Boost_USE_DEBUG_LIBS) - set(Boost_USE_DEBUG_LIBS TRUE) -endif() -if(NOT DEFINED Boost_USE_RELEASE_LIBS) - set(Boost_USE_RELEASE_LIBS TRUE) -endif() -if(NOT DEFINED Boost_USE_MULTITHREADED) - set(Boost_USE_MULTITHREADED TRUE) -endif() -if(NOT DEFINED Boost_USE_DEBUG_RUNTIME) - set(Boost_USE_DEBUG_RUNTIME TRUE) -endif() - -# Check the version of Boost against the requested version. -if(Boost_FIND_VERSION AND NOT Boost_FIND_VERSION_MINOR) - message(SEND_ERROR "When requesting a specific version of Boost, you must provide at least the major and minor version numbers, e.g., 1.34") -endif() - -if(Boost_FIND_VERSION_EXACT) - # The version may appear in a directory with or without the patch - # level, even when the patch level is non-zero. - set(_boost_TEST_VERSIONS - "${Boost_FIND_VERSION_MAJOR}.${Boost_FIND_VERSION_MINOR}.${Boost_FIND_VERSION_PATCH}" - "${Boost_FIND_VERSION_MAJOR}.${Boost_FIND_VERSION_MINOR}") -else() - # The user has not requested an exact version. Among known - # versions, find those that are acceptable to the user request. - # - # Note: When adding a new Boost release, also update the dependency - # information in _Boost_COMPONENT_DEPENDENCIES and - # _Boost_COMPONENT_HEADERS. See the instructions at the top of - # _Boost_COMPONENT_DEPENDENCIES. 
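To make the prebuilt-path helper defined earlier in this hunk concrete, a hedged example with assumed values (Visual Studio 2019, i.e. MSVC toolset 14.2, a 64-bit build, and a hypothetical install under C:/local):

set(BOOST_ROOT "C:/local/boost_1_70_0")  # assumed install location
# With MSVC_TOOLSET_VERSION 142 and CMAKE_SIZEOF_VOID_P 8, the helper appends
#   C:/local/boost_1_70_0/lib64-msvc-14.2
#   C:/local/boost_1_70_0/lib64-msvc-14.1
#   C:/local/boost_1_70_0/lib64-msvc-14.0
# to the library search hints, so setting BOOST_ROOT alone is enough for the
# official prebuilt binaries to be found.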
- set(_Boost_KNOWN_VERSIONS ${Boost_ADDITIONAL_VERSIONS} - "1.70.0" "1.70" "1.69.0" "1.69" - "1.68.0" "1.68" "1.67.0" "1.67" "1.66.0" "1.66" "1.65.1" "1.65.0" "1.65" - "1.64.0" "1.64" "1.63.0" "1.63" "1.62.0" "1.62" "1.61.0" "1.61" "1.60.0" "1.60" - "1.59.0" "1.59" "1.58.0" "1.58" "1.57.0" "1.57" "1.56.0" "1.56" "1.55.0" "1.55" - "1.54.0" "1.54" "1.53.0" "1.53" "1.52.0" "1.52" "1.51.0" "1.51" - "1.50.0" "1.50" "1.49.0" "1.49" "1.48.0" "1.48" "1.47.0" "1.47" "1.46.1" - "1.46.0" "1.46" "1.45.0" "1.45" "1.44.0" "1.44" "1.43.0" "1.43" "1.42.0" "1.42" - "1.41.0" "1.41" "1.40.0" "1.40" "1.39.0" "1.39" "1.38.0" "1.38" "1.37.0" "1.37" - "1.36.1" "1.36.0" "1.36" "1.35.1" "1.35.0" "1.35" "1.34.1" "1.34.0" - "1.34" "1.33.1" "1.33.0" "1.33") - - set(_boost_TEST_VERSIONS) - if(Boost_FIND_VERSION) - set(_Boost_FIND_VERSION_SHORT "${Boost_FIND_VERSION_MAJOR}.${Boost_FIND_VERSION_MINOR}") - # Select acceptable versions. - foreach(version ${_Boost_KNOWN_VERSIONS}) - if(NOT "${version}" VERSION_LESS "${Boost_FIND_VERSION}") - # This version is high enough. - list(APPEND _boost_TEST_VERSIONS "${version}") - elseif("${version}.99" VERSION_EQUAL "${_Boost_FIND_VERSION_SHORT}.99") - # This version is a short-form for the requested version with - # the patch level dropped. - list(APPEND _boost_TEST_VERSIONS "${version}") - endif() - endforeach() - else() - # Any version is acceptable. - set(_boost_TEST_VERSIONS "${_Boost_KNOWN_VERSIONS}") - endif() -endif() - -_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "_boost_TEST_VERSIONS") -_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "Boost_USE_MULTITHREADED") -_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "Boost_USE_STATIC_LIBS") -_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "Boost_USE_STATIC_RUNTIME") -_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "Boost_ADDITIONAL_VERSIONS") -_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "Boost_NO_SYSTEM_PATHS") - -# Supply Boost_LIB_DIAGNOSTIC_DEFINITIONS as a convenience target. It -# will only contain any interface definitions on WIN32, but is created -# on all platforms to keep end user code free from platform dependent -# code. Also provide convenience targets to disable autolinking and -# enable dynamic linking. -if(NOT TARGET Boost::diagnostic_definitions) - add_library(Boost::diagnostic_definitions INTERFACE IMPORTED) - add_library(Boost::disable_autolinking INTERFACE IMPORTED) - add_library(Boost::dynamic_linking INTERFACE IMPORTED) - set_target_properties(Boost::dynamic_linking PROPERTIES - INTERFACE_COMPILE_DEFINITIONS "BOOST_ALL_DYN_LINK") -endif() -if(WIN32) - # In windows, automatic linking is performed, so you do not have - # to specify the libraries. If you are linking to a dynamic - # runtime, then you can choose to link to either a static or a - # dynamic Boost library, the default is to do a static link. You - # can alter this for a specific library "whatever" by defining - # BOOST_WHATEVER_DYN_LINK to force Boost library "whatever" to be - # linked dynamically. Alternatively you can force all Boost - # libraries to dynamic link by defining BOOST_ALL_DYN_LINK. - - # This feature can be disabled for Boost library "whatever" by - # defining BOOST_WHATEVER_NO_LIB, or for all of Boost by defining - # BOOST_ALL_NO_LIB. 
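A hedged consumer-side sketch of the three convenience targets created above (their Windows-specific compile definitions are attached just below; the target name rippled is assumed for illustration):

target_link_libraries(rippled PRIVATE
  Boost::disable_autolinking   # defines BOOST_ALL_NO_LIB on Windows
  Boost::dynamic_linking)      # defines BOOST_ALL_DYN_LINK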
- - # If you want to observe which libraries are being linked against - # then defining BOOST_LIB_DIAGNOSTIC will cause the auto-linking - # code to emit a #pragma message each time a library is selected - # for linking. - set(Boost_LIB_DIAGNOSTIC_DEFINITIONS "-DBOOST_LIB_DIAGNOSTIC") - set_target_properties(Boost::diagnostic_definitions PROPERTIES - INTERFACE_COMPILE_DEFINITIONS "BOOST_LIB_DIAGNOSTIC") - set_target_properties(Boost::disable_autolinking PROPERTIES - INTERFACE_COMPILE_DEFINITIONS "BOOST_ALL_NO_LIB") -endif() - -if (POLICY CMP0074) - cmake_policy(GET CMP0074 _Boost_CMP0074) - if(NOT "x${_Boost_CMP0074}x" STREQUAL "xNEWx") - _Boost_CHECK_SPELLING(Boost_ROOT) - endif() - unset(_Boost_CMP0074) -endif () -_Boost_CHECK_SPELLING(Boost_LIBRARYDIR) -_Boost_CHECK_SPELLING(Boost_INCLUDEDIR) - -# Collect environment variable inputs as hints. Do not consider changes. -foreach(v BOOSTROOT BOOST_ROOT BOOST_INCLUDEDIR BOOST_LIBRARYDIR) - set(_env $ENV{${v}}) - if(_env) - file(TO_CMAKE_PATH "${_env}" _ENV_${v}) - else() - set(_ENV_${v} "") - endif() -endforeach() -if(NOT _ENV_BOOST_ROOT AND _ENV_BOOSTROOT) - set(_ENV_BOOST_ROOT "${_ENV_BOOSTROOT}") -endif() - -# Collect inputs and cached results. Detect changes since the last run. -if(NOT BOOST_ROOT AND BOOSTROOT) - set(BOOST_ROOT "${BOOSTROOT}") -endif() -set(_Boost_VARS_DIR - BOOST_ROOT - Boost_NO_SYSTEM_PATHS - ) - -_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "BOOST_ROOT") -_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "BOOST_ROOT" ENVIRONMENT) -_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "BOOST_INCLUDEDIR") -_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "BOOST_INCLUDEDIR" ENVIRONMENT) -_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "BOOST_LIBRARYDIR") -_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "BOOST_LIBRARYDIR" ENVIRONMENT) - -# ------------------------------------------------------------------------ -# Search for Boost include DIR -# ------------------------------------------------------------------------ - -set(_Boost_VARS_INC BOOST_INCLUDEDIR Boost_INCLUDE_DIR Boost_ADDITIONAL_VERSIONS) -_Boost_CHANGE_DETECT(_Boost_CHANGE_INCDIR ${_Boost_VARS_DIR} ${_Boost_VARS_INC}) -# Clear Boost_INCLUDE_DIR if it did not change but other input affecting the -# location did. We will find a new one based on the new inputs. -if(_Boost_CHANGE_INCDIR AND NOT _Boost_INCLUDE_DIR_CHANGED) - unset(Boost_INCLUDE_DIR CACHE) -endif() - -if(NOT Boost_INCLUDE_DIR) - set(_boost_INCLUDE_SEARCH_DIRS "") - if(BOOST_INCLUDEDIR) - list(APPEND _boost_INCLUDE_SEARCH_DIRS ${BOOST_INCLUDEDIR}) - elseif(_ENV_BOOST_INCLUDEDIR) - list(APPEND _boost_INCLUDE_SEARCH_DIRS ${_ENV_BOOST_INCLUDEDIR}) - endif() - - if( BOOST_ROOT ) - list(APPEND _boost_INCLUDE_SEARCH_DIRS ${BOOST_ROOT}/include ${BOOST_ROOT}) - elseif( _ENV_BOOST_ROOT ) - list(APPEND _boost_INCLUDE_SEARCH_DIRS ${_ENV_BOOST_ROOT}/include ${_ENV_BOOST_ROOT}) - endif() - - if( Boost_NO_SYSTEM_PATHS) - list(APPEND _boost_INCLUDE_SEARCH_DIRS NO_CMAKE_SYSTEM_PATH NO_SYSTEM_ENVIRONMENT_PATH) - else() - if("x${CMAKE_CXX_COMPILER_ID}" STREQUAL "xMSVC") - foreach(ver ${_boost_TEST_VERSIONS}) - string(REPLACE "." 
"_" ver "${ver}") - list(APPEND _boost_INCLUDE_SEARCH_DIRS PATHS "C:/local/boost_${ver}") - endforeach() - endif() - list(APPEND _boost_INCLUDE_SEARCH_DIRS PATHS - C:/boost/include - C:/boost - /sw/local/include - ) - endif() - - # Try to find Boost by stepping backwards through the Boost versions - # we know about. - # Build a list of path suffixes for each version. - set(_boost_PATH_SUFFIXES) - foreach(_boost_VER ${_boost_TEST_VERSIONS}) - # Add in a path suffix, based on the required version, ideally - # we could read this from version.hpp, but for that to work we'd - # need to know the include dir already - set(_boost_BOOSTIFIED_VERSION) - - # Transform 1.35 => 1_35 and 1.36.0 => 1_36_0 - if(_boost_VER MATCHES "([0-9]+)\\.([0-9]+)\\.([0-9]+)") - set(_boost_BOOSTIFIED_VERSION - "${CMAKE_MATCH_1}_${CMAKE_MATCH_2}_${CMAKE_MATCH_3}") - elseif(_boost_VER MATCHES "([0-9]+)\\.([0-9]+)") - set(_boost_BOOSTIFIED_VERSION - "${CMAKE_MATCH_1}_${CMAKE_MATCH_2}") - endif() - - list(APPEND _boost_PATH_SUFFIXES - "boost-${_boost_BOOSTIFIED_VERSION}" - "boost_${_boost_BOOSTIFIED_VERSION}" - "boost/boost-${_boost_BOOSTIFIED_VERSION}" - "boost/boost_${_boost_BOOSTIFIED_VERSION}" - ) - - endforeach() - - _Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "_boost_INCLUDE_SEARCH_DIRS") - _Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "_boost_PATH_SUFFIXES") - - # Look for a standard boost header file. - find_path(Boost_INCLUDE_DIR - NAMES boost/config.hpp - HINTS ${_boost_INCLUDE_SEARCH_DIRS} - PATH_SUFFIXES ${_boost_PATH_SUFFIXES} - ) -endif() - -# ------------------------------------------------------------------------ -# Extract version information from version.hpp -# ------------------------------------------------------------------------ - -if(Boost_INCLUDE_DIR) - _Boost_DEBUG_PRINT("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" - "location of version.hpp: ${Boost_INCLUDE_DIR}/boost/version.hpp") - - # Extract Boost_VERSION_MACRO and Boost_LIB_VERSION from version.hpp - set(Boost_VERSION_MACRO 0) - set(Boost_LIB_VERSION "") - file(STRINGS "${Boost_INCLUDE_DIR}/boost/version.hpp" _boost_VERSION_HPP_CONTENTS REGEX "#define BOOST_(LIB_)?VERSION ") - if("${_boost_VERSION_HPP_CONTENTS}" MATCHES "#define BOOST_VERSION ([0-9]+)") - set(Boost_VERSION_MACRO "${CMAKE_MATCH_1}") - endif() - if("${_boost_VERSION_HPP_CONTENTS}" MATCHES "#define BOOST_LIB_VERSION \"([0-9_]+)\"") - set(Boost_LIB_VERSION "${CMAKE_MATCH_1}") - endif() - unset(_boost_VERSION_HPP_CONTENTS) - - # Calculate version components - math(EXPR Boost_VERSION_MAJOR "${Boost_VERSION_MACRO} / 100000") - math(EXPR Boost_VERSION_MINOR "${Boost_VERSION_MACRO} / 100 % 1000") - math(EXPR Boost_VERSION_PATCH "${Boost_VERSION_MACRO} % 100") - set(Boost_VERSION_COUNT 3) - - # Define alias variables for backwards compat. 
- set(Boost_MAJOR_VERSION ${Boost_VERSION_MAJOR}) - set(Boost_MINOR_VERSION ${Boost_VERSION_MINOR}) - set(Boost_SUBMINOR_VERSION ${Boost_VERSION_PATCH}) - - # Define Boost version in x.y.z format - set(Boost_VERSION_STRING "${Boost_VERSION_MAJOR}.${Boost_VERSION_MINOR}.${Boost_VERSION_PATCH}") - - if (POLICY CMP0093) - # Define final Boost_VERSION - cmake_policy(GET CMP0093 _Boost_CMP0093 - PARENT_SCOPE # undocumented, do not use outside of CMake - ) - if("x${_Boost_CMP0093}x" STREQUAL "xNEWx") - set(Boost_VERSION ${Boost_VERSION_STRING}) - endif() - unset(_Boost_CMP0093) - else() - set(Boost_VERSION ${Boost_VERSION_MACRO}) - endif() - - _Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "Boost_VERSION") - _Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "Boost_VERSION_STRING") - _Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "Boost_VERSION_MACRO") - _Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "Boost_VERSION_MAJOR") - _Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "Boost_VERSION_MINOR") - _Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "Boost_VERSION_PATCH") - _Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "Boost_VERSION_COUNT") -endif() - -# ------------------------------------------------------------------------ -# Prefix initialization -# ------------------------------------------------------------------------ - -set(Boost_LIB_PREFIX "") -if ( (GHSMULTI AND Boost_USE_STATIC_LIBS) OR - (WIN32 AND Boost_USE_STATIC_LIBS AND NOT CYGWIN) ) - set(Boost_LIB_PREFIX "lib") -endif() - -if ( NOT Boost_NAMESPACE ) - set(Boost_NAMESPACE "boost") -endif() - -_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "Boost_LIB_PREFIX") -_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "Boost_NAMESPACE") - -# ------------------------------------------------------------------------ -# Suffix initialization and compiler suffix detection. -# ------------------------------------------------------------------------ - -set(_Boost_VARS_NAME - Boost_NAMESPACE - Boost_COMPILER - Boost_THREADAPI - Boost_USE_DEBUG_PYTHON - Boost_USE_MULTITHREADED - Boost_USE_STATIC_LIBS - Boost_USE_STATIC_RUNTIME - Boost_USE_STLPORT - Boost_USE_STLPORT_DEPRECATED_NATIVE_IOSTREAMS - ) -_Boost_CHANGE_DETECT(_Boost_CHANGE_LIBNAME ${_Boost_VARS_NAME}) - -# Setting some more suffixes for the library -if (Boost_COMPILER) - set(_boost_COMPILER ${Boost_COMPILER}) - _Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" - "_boost_COMPILER" SOURCE "user-specified via Boost_COMPILER") -else() - # Attempt to guess the compiler suffix - # NOTE: this is not perfect yet, if you experience any issues - # please report them and use the Boost_COMPILER variable - # to work around the problems. 
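A worked example of the version decoding earlier in this hunk, with an assumed BOOST_VERSION of 107000 (Boost 1.70.0):

# 107000 / 100000      = 1   -> Boost_VERSION_MAJOR
# 107000 / 100 % 1000  = 70  -> Boost_VERSION_MINOR
# 107000 % 100         = 0   -> Boost_VERSION_PATCH
# Boost_VERSION_STRING becomes "1.70.0"; with policy CMP0093 set to NEW,
# Boost_VERSION reports "1.70.0" as well, otherwise it keeps the raw 107000.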
- _Boost_GUESS_COMPILER_PREFIX(_boost_COMPILER) -endif() - -set (_boost_MULTITHREADED "-mt") -if( NOT Boost_USE_MULTITHREADED ) - set (_boost_MULTITHREADED "") -endif() -_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "_boost_MULTITHREADED") - -#====================== -# Systematically build up the Boost ABI tag for the 'tagged' and 'versioned' layouts -# http://boost.org/doc/libs/1_66_0/more/getting_started/windows.html#library-naming -# http://boost.org/doc/libs/1_66_0/boost/config/auto_link.hpp -# http://boost.org/doc/libs/1_66_0/tools/build/src/tools/common.jam -# http://boost.org/doc/libs/1_66_0/boostcpp.jam -set( _boost_RELEASE_ABI_TAG "-") -set( _boost_DEBUG_ABI_TAG "-") -# Key Use this library when: -# s linking statically to the C++ standard library and -# compiler runtime support libraries. -if(Boost_USE_STATIC_RUNTIME) - set( _boost_RELEASE_ABI_TAG "${_boost_RELEASE_ABI_TAG}s") - set( _boost_DEBUG_ABI_TAG "${_boost_DEBUG_ABI_TAG}s") -endif() -# g using debug versions of the standard and runtime -# support libraries -if(WIN32 AND Boost_USE_DEBUG_RUNTIME) - if("x${CMAKE_CXX_COMPILER_ID}" STREQUAL "xMSVC" - OR "x${CMAKE_CXX_COMPILER_ID}" STREQUAL "xClang" - OR "x${CMAKE_CXX_COMPILER_ID}" STREQUAL "xIntel") - string(APPEND _boost_DEBUG_ABI_TAG "g") - endif() -endif() -# y using special debug build of python -if(Boost_USE_DEBUG_PYTHON) - string(APPEND _boost_DEBUG_ABI_TAG "y") -endif() -# d using a debug version of your code -string(APPEND _boost_DEBUG_ABI_TAG "d") -# p using the STLport standard library rather than the -# default one supplied with your compiler -if(Boost_USE_STLPORT) - string(APPEND _boost_RELEASE_ABI_TAG "p") - string(APPEND _boost_DEBUG_ABI_TAG "p") -endif() -# n using the STLport deprecated "native iostreams" feature -# removed from the documentation in 1.43.0 but still present in -# boost/config/auto_link.hpp -if(Boost_USE_STLPORT_DEPRECATED_NATIVE_IOSTREAMS) - string(APPEND _boost_RELEASE_ABI_TAG "n") - string(APPEND _boost_DEBUG_ABI_TAG "n") -endif() - -# -x86 Architecture and address model tag -# First character is the architecture, then word-size, either 32 or 64 -# Only used in 'versioned' layout, added in Boost 1.66.0 -if(DEFINED Boost_ARCHITECTURE) - set(_boost_ARCHITECTURE_TAG "${Boost_ARCHITECTURE}") - _Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" - "_boost_ARCHITECTURE_TAG" SOURCE "user-specified via Boost_ARCHITECTURE") -else() - set(_boost_ARCHITECTURE_TAG "") - # {CMAKE_CXX_COMPILER_ARCHITECTURE_ID} is not currently set for all compilers - if(NOT "x${CMAKE_CXX_COMPILER_ARCHITECTURE_ID}" STREQUAL "x" AND NOT Boost_VERSION_STRING VERSION_LESS 1.66.0) - string(APPEND _boost_ARCHITECTURE_TAG "-") - # This needs to be kept in-sync with the section of CMakePlatformId.h.in - # inside 'defined(_WIN32) && defined(_MSC_VER)' - if(CMAKE_CXX_COMPILER_ARCHITECTURE_ID STREQUAL "IA64") - string(APPEND _boost_ARCHITECTURE_TAG "i") - elseif(CMAKE_CXX_COMPILER_ARCHITECTURE_ID STREQUAL "X86" - OR CMAKE_CXX_COMPILER_ARCHITECTURE_ID STREQUAL "x64") - string(APPEND _boost_ARCHITECTURE_TAG "x") - elseif(CMAKE_CXX_COMPILER_ARCHITECTURE_ID MATCHES "^ARM") - string(APPEND _boost_ARCHITECTURE_TAG "a") - elseif(CMAKE_CXX_COMPILER_ARCHITECTURE_ID STREQUAL "MIPS") - string(APPEND _boost_ARCHITECTURE_TAG "m") - endif() - - if(CMAKE_SIZEOF_VOID_P EQUAL 8) - string(APPEND _boost_ARCHITECTURE_TAG "64") - else() - string(APPEND _boost_ARCHITECTURE_TAG "32") - endif() - endif() - 
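Putting the tag machinery together, a hedged illustration of the file names that end up being searched for (assumed: MSVC 14.2, 64-bit, Boost 1.70, static libraries, all other options left at their defaults):

# release: libboost_thread-vc142-mt-x64-1_70.lib
# debug:   libboost_thread-vc142-mt-gd-x64-1_70.lib
# where "-vc142" is the guessed compiler tag, "-mt" the multithreading tag,
# "-gd" the debug ABI tag (debug runtime + debug code), "-x64" the
# architecture/address-model tag and "-1_70" the Boost_LIB_VERSION suffix.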
_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" - "_boost_ARCHITECTURE_TAG" SOURCE "detected") -endif() - -_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "_boost_RELEASE_ABI_TAG") -_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "_boost_DEBUG_ABI_TAG") - -# ------------------------------------------------------------------------ -# Begin finding boost libraries -# ------------------------------------------------------------------------ - -set(_Boost_VARS_LIB "") -foreach(c DEBUG RELEASE) - set(_Boost_VARS_LIB_${c} BOOST_LIBRARYDIR Boost_LIBRARY_DIR_${c}) - list(APPEND _Boost_VARS_LIB ${_Boost_VARS_LIB_${c}}) - _Boost_CHANGE_DETECT(_Boost_CHANGE_LIBDIR_${c} ${_Boost_VARS_DIR} ${_Boost_VARS_LIB_${c}} Boost_INCLUDE_DIR) - # Clear Boost_LIBRARY_DIR_${c} if it did not change but other input affecting the - # location did. We will find a new one based on the new inputs. - if(_Boost_CHANGE_LIBDIR_${c} AND NOT _Boost_LIBRARY_DIR_${c}_CHANGED) - unset(Boost_LIBRARY_DIR_${c} CACHE) - endif() - - # If Boost_LIBRARY_DIR_[RELEASE,DEBUG] is set, prefer its value. - if(Boost_LIBRARY_DIR_${c}) - set(_boost_LIBRARY_SEARCH_DIRS_${c} ${Boost_LIBRARY_DIR_${c}} NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH) - else() - set(_boost_LIBRARY_SEARCH_DIRS_${c} "") - if(BOOST_LIBRARYDIR) - list(APPEND _boost_LIBRARY_SEARCH_DIRS_${c} ${BOOST_LIBRARYDIR}) - elseif(_ENV_BOOST_LIBRARYDIR) - list(APPEND _boost_LIBRARY_SEARCH_DIRS_${c} ${_ENV_BOOST_LIBRARYDIR}) - endif() - - if(BOOST_ROOT) - list(APPEND _boost_LIBRARY_SEARCH_DIRS_${c} ${BOOST_ROOT}/lib ${BOOST_ROOT}/stage/lib) - _Boost_UPDATE_WINDOWS_LIBRARY_SEARCH_DIRS_WITH_PREBUILT_PATHS(_boost_LIBRARY_SEARCH_DIRS_${c} "${BOOST_ROOT}") - elseif(_ENV_BOOST_ROOT) - list(APPEND _boost_LIBRARY_SEARCH_DIRS_${c} ${_ENV_BOOST_ROOT}/lib ${_ENV_BOOST_ROOT}/stage/lib) - _Boost_UPDATE_WINDOWS_LIBRARY_SEARCH_DIRS_WITH_PREBUILT_PATHS(_boost_LIBRARY_SEARCH_DIRS_${c} "${_ENV_BOOST_ROOT}") - endif() - - list(APPEND _boost_LIBRARY_SEARCH_DIRS_${c} - ${Boost_INCLUDE_DIR}/lib - ${Boost_INCLUDE_DIR}/../lib - ${Boost_INCLUDE_DIR}/stage/lib - ) - _Boost_UPDATE_WINDOWS_LIBRARY_SEARCH_DIRS_WITH_PREBUILT_PATHS(_boost_LIBRARY_SEARCH_DIRS_${c} "${Boost_INCLUDE_DIR}/..") - _Boost_UPDATE_WINDOWS_LIBRARY_SEARCH_DIRS_WITH_PREBUILT_PATHS(_boost_LIBRARY_SEARCH_DIRS_${c} "${Boost_INCLUDE_DIR}") - if( Boost_NO_SYSTEM_PATHS ) - list(APPEND _boost_LIBRARY_SEARCH_DIRS_${c} NO_CMAKE_SYSTEM_PATH NO_SYSTEM_ENVIRONMENT_PATH) - else() - foreach(ver ${_boost_TEST_VERSIONS}) - string(REPLACE "." 
"_" ver "${ver}") - _Boost_UPDATE_WINDOWS_LIBRARY_SEARCH_DIRS_WITH_PREBUILT_PATHS(_boost_LIBRARY_SEARCH_DIRS_${c} "C:/local/boost_${ver}") - endforeach() - _Boost_UPDATE_WINDOWS_LIBRARY_SEARCH_DIRS_WITH_PREBUILT_PATHS(_boost_LIBRARY_SEARCH_DIRS_${c} "C:/boost") - list(APPEND _boost_LIBRARY_SEARCH_DIRS_${c} PATHS - C:/boost/lib - C:/boost - /sw/local/lib - ) - endif() - endif() -endforeach() - -_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "_boost_LIBRARY_SEARCH_DIRS_RELEASE") -_Boost_DEBUG_PRINT_VAR("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" "_boost_LIBRARY_SEARCH_DIRS_DEBUG") - -# Support preference of static libs by adjusting CMAKE_FIND_LIBRARY_SUFFIXES -if( Boost_USE_STATIC_LIBS ) - set( _boost_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES}) - if(WIN32) - list(INSERT CMAKE_FIND_LIBRARY_SUFFIXES 0 .lib .a) - else() - set(CMAKE_FIND_LIBRARY_SUFFIXES .a) - endif() -endif() - -# We want to use the tag inline below without risking double dashes -if(_boost_RELEASE_ABI_TAG) - if(${_boost_RELEASE_ABI_TAG} STREQUAL "-") - set(_boost_RELEASE_ABI_TAG "") - endif() -endif() -if(_boost_DEBUG_ABI_TAG) - if(${_boost_DEBUG_ABI_TAG} STREQUAL "-") - set(_boost_DEBUG_ABI_TAG "") - endif() -endif() - -# The previous behavior of FindBoost when Boost_USE_STATIC_LIBS was enabled -# on WIN32 was to: -# 1. Search for static libs compiled against a SHARED C++ standard runtime library (use if found) -# 2. Search for static libs compiled against a STATIC C++ standard runtime library (use if found) -# We maintain this behavior since changing it could break people's builds. -# To disable the ambiguous behavior, the user need only -# set Boost_USE_STATIC_RUNTIME either ON or OFF. -set(_boost_STATIC_RUNTIME_WORKAROUND false) -if(WIN32 AND Boost_USE_STATIC_LIBS) - if(NOT DEFINED Boost_USE_STATIC_RUNTIME) - set(_boost_STATIC_RUNTIME_WORKAROUND TRUE) - endif() -endif() - -# On versions < 1.35, remove the System library from the considered list -# since it wasn't added until 1.35. -if(Boost_VERSION_STRING AND Boost_FIND_COMPONENTS) - if(Boost_VERSION_STRING VERSION_LESS 1.35.0) - list(REMOVE_ITEM Boost_FIND_COMPONENTS system) - endif() -endif() - -# Additional components may be required via component dependencies. -# Add any missing components to the list. -_Boost_MISSING_DEPENDENCIES(Boost_FIND_COMPONENTS _Boost_EXTRA_FIND_COMPONENTS) - -# If thread is required, get the thread libs as a dependency -if("thread" IN_LIST Boost_FIND_COMPONENTS) - if(Boost_FIND_QUIETLY) - set(_Boost_find_quiet QUIET) - else() - set(_Boost_find_quiet "") - endif() - find_package(Threads ${_Boost_find_quiet}) - unset(_Boost_find_quiet) -endif() - -# If the user changed any of our control inputs flush previous results. -if(_Boost_CHANGE_LIBDIR_DEBUG OR _Boost_CHANGE_LIBDIR_RELEASE OR _Boost_CHANGE_LIBNAME) - foreach(COMPONENT ${_Boost_COMPONENTS_SEARCHED}) - string(TOUPPER ${COMPONENT} UPPERCOMPONENT) - foreach(c DEBUG RELEASE) - set(_var Boost_${UPPERCOMPONENT}_LIBRARY_${c}) - unset(${_var} CACHE) - set(${_var} "${_var}-NOTFOUND") - endforeach() - endforeach() - set(_Boost_COMPONENTS_SEARCHED "") -endif() - -foreach(COMPONENT ${Boost_FIND_COMPONENTS}) - string(TOUPPER ${COMPONENT} UPPERCOMPONENT) - - set( _boost_docstring_release "Boost ${COMPONENT} library (release)") - set( _boost_docstring_debug "Boost ${COMPONENT} library (debug)") - - # Compute component-specific hints. 
- set(_Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT "") - if(${COMPONENT} STREQUAL "mpi" OR ${COMPONENT} STREQUAL "mpi_python" OR - ${COMPONENT} STREQUAL "graph_parallel") - foreach(lib ${MPI_CXX_LIBRARIES} ${MPI_C_LIBRARIES}) - if(IS_ABSOLUTE "${lib}") - get_filename_component(libdir "${lib}" PATH) - string(REPLACE "\\" "/" libdir "${libdir}") - list(APPEND _Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT ${libdir}) - endif() - endforeach() - endif() - - # Handle Python version suffixes - unset(COMPONENT_PYTHON_VERSION_MAJOR) - unset(COMPONENT_PYTHON_VERSION_MINOR) - if(${COMPONENT} MATCHES "^(python|mpi_python|numpy)([0-9])\$") - set(COMPONENT_UNVERSIONED "${CMAKE_MATCH_1}") - set(COMPONENT_PYTHON_VERSION_MAJOR "${CMAKE_MATCH_2}") - elseif(${COMPONENT} MATCHES "^(python|mpi_python|numpy)([0-9])\\.?([0-9])\$") - set(COMPONENT_UNVERSIONED "${CMAKE_MATCH_1}") - set(COMPONENT_PYTHON_VERSION_MAJOR "${CMAKE_MATCH_2}") - set(COMPONENT_PYTHON_VERSION_MINOR "${CMAKE_MATCH_3}") - endif() - - unset(_Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT_NAME) - if (COMPONENT_PYTHON_VERSION_MINOR) - # Boost >= 1.67 - list(APPEND _Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT_NAME "${COMPONENT_UNVERSIONED}${COMPONENT_PYTHON_VERSION_MAJOR}${COMPONENT_PYTHON_VERSION_MINOR}") - # Debian/Ubuntu (Some versions omit the 2 and/or 3 from the suffix) - list(APPEND _Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT_NAME "${COMPONENT_UNVERSIONED}${COMPONENT_PYTHON_VERSION_MAJOR}-py${COMPONENT_PYTHON_VERSION_MAJOR}${COMPONENT_PYTHON_VERSION_MINOR}") - list(APPEND _Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT_NAME "${COMPONENT_UNVERSIONED}-py${COMPONENT_PYTHON_VERSION_MAJOR}${COMPONENT_PYTHON_VERSION_MINOR}") - # Gentoo - list(APPEND _Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT_NAME "${COMPONENT_UNVERSIONED}-${COMPONENT_PYTHON_VERSION_MAJOR}.${COMPONENT_PYTHON_VERSION_MINOR}") - # RPMs - list(APPEND _Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT_NAME "${COMPONENT_UNVERSIONED}-${COMPONENT_PYTHON_VERSION_MAJOR}${COMPONENT_PYTHON_VERSION_MINOR}") - endif() - if (COMPONENT_PYTHON_VERSION_MAJOR AND NOT COMPONENT_PYTHON_VERSION_MINOR) - # Boost < 1.67 - list(APPEND _Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT_NAME "${COMPONENT_UNVERSIONED}${COMPONENT_PYTHON_VERSION_MAJOR}") - endif() - - # Consolidate and report component-specific hints. - if(_Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT_NAME) - list(REMOVE_DUPLICATES _Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT_NAME) - _Boost_DEBUG_PRINT("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" - "Component-specific library search names for ${COMPONENT_NAME}: ${_Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT_NAME}") - endif() - if(_Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT) - list(REMOVE_DUPLICATES _Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT) - _Boost_DEBUG_PRINT("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" - "Component-specific library search paths for ${COMPONENT}: ${_Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT}") - endif() - - # - # Find headers - # - _Boost_COMPONENT_HEADERS("${COMPONENT}" Boost_${UPPERCOMPONENT}_HEADER_NAME) - # Look for a standard boost header file. 
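For example, with a versioned Python component the hint list above expands to cover the common distribution naming schemes (values assumed for illustration):

# COMPONENT "python36" (major 3, minor 6) produces the candidate stems
#   python36, python3-py36, python-py36, python-3.6, python-36
# which are later combined with the usual prefix/suffix machinery, e.g.
#   libboost_python36.so, libboost_python3-py36.so, ...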
- if(Boost_${UPPERCOMPONENT}_HEADER_NAME) - if(EXISTS "${Boost_INCLUDE_DIR}/${Boost_${UPPERCOMPONENT}_HEADER_NAME}") - set(Boost_${UPPERCOMPONENT}_HEADER ON) - else() - set(Boost_${UPPERCOMPONENT}_HEADER OFF) - endif() - else() - set(Boost_${UPPERCOMPONENT}_HEADER ON) - message(WARNING "No header defined for ${COMPONENT}; skipping header check") - endif() - - # - # Find RELEASE libraries - # - unset(_boost_RELEASE_NAMES) - foreach(component IN LISTS _Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT_NAME COMPONENT) - foreach(compiler IN LISTS _boost_COMPILER) - list(APPEND _boost_RELEASE_NAMES - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${compiler}${_boost_MULTITHREADED}${_boost_RELEASE_ABI_TAG}${_boost_ARCHITECTURE_TAG}-${Boost_LIB_VERSION} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${compiler}${_boost_MULTITHREADED}${_boost_RELEASE_ABI_TAG}${_boost_ARCHITECTURE_TAG} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${compiler}${_boost_MULTITHREADED}${_boost_RELEASE_ABI_TAG} ) - endforeach() - list(APPEND _boost_RELEASE_NAMES - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${_boost_MULTITHREADED}${_boost_RELEASE_ABI_TAG}${_boost_ARCHITECTURE_TAG}-${Boost_LIB_VERSION} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${_boost_MULTITHREADED}${_boost_RELEASE_ABI_TAG}${_boost_ARCHITECTURE_TAG} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${_boost_MULTITHREADED}${_boost_RELEASE_ABI_TAG} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${_boost_MULTITHREADED} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component} ) - if(_boost_STATIC_RUNTIME_WORKAROUND) - set(_boost_RELEASE_STATIC_ABI_TAG "-s${_boost_RELEASE_ABI_TAG}") - foreach(compiler IN LISTS _boost_COMPILER) - list(APPEND _boost_RELEASE_NAMES - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${compiler}${_boost_MULTITHREADED}${_boost_RELEASE_STATIC_ABI_TAG}${_boost_ARCHITECTURE_TAG}-${Boost_LIB_VERSION} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${compiler}${_boost_MULTITHREADED}${_boost_RELEASE_STATIC_ABI_TAG}${_boost_ARCHITECTURE_TAG} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${compiler}${_boost_MULTITHREADED}${_boost_RELEASE_STATIC_ABI_TAG} ) - endforeach() - list(APPEND _boost_RELEASE_NAMES - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${_boost_MULTITHREADED}${_boost_RELEASE_STATIC_ABI_TAG}${_boost_ARCHITECTURE_TAG}-${Boost_LIB_VERSION} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${_boost_MULTITHREADED}${_boost_RELEASE_STATIC_ABI_TAG}${_boost_ARCHITECTURE_TAG} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${_boost_MULTITHREADED}${_boost_RELEASE_STATIC_ABI_TAG} ) - endif() - endforeach() - if(Boost_THREADAPI AND ${COMPONENT} STREQUAL "thread") - _Boost_PREPEND_LIST_WITH_THREADAPI(_boost_RELEASE_NAMES ${_boost_RELEASE_NAMES}) - endif() - _Boost_DEBUG_PRINT("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" - "Searching for ${UPPERCOMPONENT}_LIBRARY_RELEASE: ${_boost_RELEASE_NAMES}") - - # if Boost_LIBRARY_DIR_RELEASE is not defined, - # but Boost_LIBRARY_DIR_DEBUG is, look there first for RELEASE libs - if(NOT Boost_LIBRARY_DIR_RELEASE AND Boost_LIBRARY_DIR_DEBUG) - list(INSERT _boost_LIBRARY_SEARCH_DIRS_RELEASE 0 ${Boost_LIBRARY_DIR_DEBUG}) - endif() - - # Avoid passing backslashes to _Boost_FIND_LIBRARY due to macro re-parsing. 
- string(REPLACE "\\" "/" _boost_LIBRARY_SEARCH_DIRS_tmp "${_boost_LIBRARY_SEARCH_DIRS_RELEASE}") - - if(Boost_USE_RELEASE_LIBS) - _Boost_FIND_LIBRARY(Boost_${UPPERCOMPONENT}_LIBRARY_RELEASE RELEASE - NAMES ${_boost_RELEASE_NAMES} - HINTS ${_boost_LIBRARY_SEARCH_DIRS_tmp} - NAMES_PER_DIR - DOC "${_boost_docstring_release}" - ) - endif() - - # - # Find DEBUG libraries - # - unset(_boost_DEBUG_NAMES) - foreach(component IN LISTS _Boost_FIND_LIBRARY_HINTS_FOR_COMPONENT_NAME COMPONENT) - foreach(compiler IN LISTS _boost_COMPILER) - list(APPEND _boost_DEBUG_NAMES - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${compiler}${_boost_MULTITHREADED}${_boost_DEBUG_ABI_TAG}${_boost_ARCHITECTURE_TAG}-${Boost_LIB_VERSION} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${compiler}${_boost_MULTITHREADED}${_boost_DEBUG_ABI_TAG}${_boost_ARCHITECTURE_TAG} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${compiler}${_boost_MULTITHREADED}${_boost_DEBUG_ABI_TAG} ) - endforeach() - list(APPEND _boost_DEBUG_NAMES - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${_boost_MULTITHREADED}${_boost_DEBUG_ABI_TAG}${_boost_ARCHITECTURE_TAG}-${Boost_LIB_VERSION} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${_boost_MULTITHREADED}${_boost_DEBUG_ABI_TAG}${_boost_ARCHITECTURE_TAG} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${_boost_MULTITHREADED}${_boost_DEBUG_ABI_TAG} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${_boost_MULTITHREADED} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component} ) - if(_boost_STATIC_RUNTIME_WORKAROUND) - set(_boost_DEBUG_STATIC_ABI_TAG "-s${_boost_DEBUG_ABI_TAG}") - foreach(compiler IN LISTS _boost_COMPILER) - list(APPEND _boost_DEBUG_NAMES - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${compiler}${_boost_MULTITHREADED}${_boost_DEBUG_STATIC_ABI_TAG}${_boost_ARCHITECTURE_TAG}-${Boost_LIB_VERSION} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${compiler}${_boost_MULTITHREADED}${_boost_DEBUG_STATIC_ABI_TAG}${_boost_ARCHITECTURE_TAG} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${compiler}${_boost_MULTITHREADED}${_boost_DEBUG_STATIC_ABI_TAG} ) - endforeach() - list(APPEND _boost_DEBUG_NAMES - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${_boost_MULTITHREADED}${_boost_DEBUG_STATIC_ABI_TAG}${_boost_ARCHITECTURE_TAG}-${Boost_LIB_VERSION} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${_boost_MULTITHREADED}${_boost_DEBUG_STATIC_ABI_TAG}${_boost_ARCHITECTURE_TAG} - ${Boost_LIB_PREFIX}${Boost_NAMESPACE}_${component}${_boost_MULTITHREADED}${_boost_DEBUG_STATIC_ABI_TAG} ) - endif() - endforeach() - if(Boost_THREADAPI AND ${COMPONENT} STREQUAL "thread") - _Boost_PREPEND_LIST_WITH_THREADAPI(_boost_DEBUG_NAMES ${_boost_DEBUG_NAMES}) - endif() - _Boost_DEBUG_PRINT("${CMAKE_CURRENT_LIST_FILE}" "${CMAKE_CURRENT_LIST_LINE}" - "Searching for ${UPPERCOMPONENT}_LIBRARY_DEBUG: ${_boost_DEBUG_NAMES}") - - # if Boost_LIBRARY_DIR_DEBUG is not defined, - # but Boost_LIBRARY_DIR_RELEASE is, look there first for DEBUG libs - if(NOT Boost_LIBRARY_DIR_DEBUG AND Boost_LIBRARY_DIR_RELEASE) - list(INSERT _boost_LIBRARY_SEARCH_DIRS_DEBUG 0 ${Boost_LIBRARY_DIR_RELEASE}) - endif() - - # Avoid passing backslashes to _Boost_FIND_LIBRARY due to macro re-parsing. 
- string(REPLACE "\\" "/" _boost_LIBRARY_SEARCH_DIRS_tmp "${_boost_LIBRARY_SEARCH_DIRS_DEBUG}") - - if(Boost_USE_DEBUG_LIBS) - _Boost_FIND_LIBRARY(Boost_${UPPERCOMPONENT}_LIBRARY_DEBUG DEBUG - NAMES ${_boost_DEBUG_NAMES} - HINTS ${_boost_LIBRARY_SEARCH_DIRS_tmp} - NAMES_PER_DIR - DOC "${_boost_docstring_debug}" - ) - endif () - - if(Boost_REALPATH) - _Boost_SWAP_WITH_REALPATH(Boost_${UPPERCOMPONENT}_LIBRARY_RELEASE "${_boost_docstring_release}") - _Boost_SWAP_WITH_REALPATH(Boost_${UPPERCOMPONENT}_LIBRARY_DEBUG "${_boost_docstring_debug}" ) - endif() - - _Boost_ADJUST_LIB_VARS(${UPPERCOMPONENT}) - - # Check if component requires some compiler features - _Boost_COMPILER_FEATURES(${COMPONENT} _Boost_${UPPERCOMPONENT}_COMPILER_FEATURES) - -endforeach() - -# Restore the original find library ordering -if( Boost_USE_STATIC_LIBS ) - set(CMAKE_FIND_LIBRARY_SUFFIXES ${_boost_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES}) -endif() - -# ------------------------------------------------------------------------ -# End finding boost libraries -# ------------------------------------------------------------------------ - -set(Boost_INCLUDE_DIRS ${Boost_INCLUDE_DIR}) -set(Boost_LIBRARY_DIRS) -if(Boost_LIBRARY_DIR_RELEASE) - list(APPEND Boost_LIBRARY_DIRS ${Boost_LIBRARY_DIR_RELEASE}) -endif() -if(Boost_LIBRARY_DIR_DEBUG) - list(APPEND Boost_LIBRARY_DIRS ${Boost_LIBRARY_DIR_DEBUG}) -endif() -if(Boost_LIBRARY_DIRS) - list(REMOVE_DUPLICATES Boost_LIBRARY_DIRS) -endif() - -# ------------------------------------------------------------------------ -# Call FPHSA helper, see https://cmake.org/cmake/help/latest/module/FindPackageHandleStandardArgs.html -# ------------------------------------------------------------------------ - -# Define aliases as needed by the component handler in the FPHSA helper below -foreach(_comp IN LISTS Boost_FIND_COMPONENTS) - string(TOUPPER ${_comp} _uppercomp) - if(DEFINED Boost_${_uppercomp}_FOUND) - set(Boost_${_comp}_FOUND ${Boost_${_uppercomp}_FOUND}) - endif() -endforeach() - -find_package_handle_standard_args(Boost - REQUIRED_VARS Boost_INCLUDE_DIR - VERSION_VAR Boost_VERSION_STRING - HANDLE_COMPONENTS) - -if(Boost_FOUND) - if( NOT Boost_LIBRARY_DIRS ) - # Compatibility Code for backwards compatibility with CMake - # 2.4's FindBoost module. - - # Look for the boost library path. - # Note that the user may not have installed any libraries - # so it is quite possible the Boost_LIBRARY_DIRS may not exist. - set(_boost_LIB_DIR ${Boost_INCLUDE_DIR}) - - if("${_boost_LIB_DIR}" MATCHES "boost-[0-9]+") - get_filename_component(_boost_LIB_DIR ${_boost_LIB_DIR} PATH) - endif() - - if("${_boost_LIB_DIR}" MATCHES "/include$") - # Strip off the trailing "/include" in the path. - get_filename_component(_boost_LIB_DIR ${_boost_LIB_DIR} PATH) - endif() - - if(EXISTS "${_boost_LIB_DIR}/lib") - string(APPEND _boost_LIB_DIR /lib) - elseif(EXISTS "${_boost_LIB_DIR}/stage/lib") - string(APPEND _boost_LIB_DIR "/stage/lib") - else() - set(_boost_LIB_DIR "") - endif() - - if(_boost_LIB_DIR AND EXISTS "${_boost_LIB_DIR}") - set(Boost_LIBRARY_DIRS ${_boost_LIB_DIR}) - endif() - - endif() -else() - # Boost headers were not found so no components were found. 
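Whatever the outcome above, the module's consumer-facing contract is the usual find_package one; a hedged sketch (the component list and the target name rippled are assumptions for illustration):

find_package(Boost 1.70 REQUIRED COMPONENTS filesystem thread)
# On success Boost_FOUND, Boost_filesystem_FOUND and Boost_thread_FOUND are TRUE,
# and the imported targets defined in the next hunk can be linked directly:
target_link_libraries(rippled PRIVATE Boost::filesystem Boost::thread)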
- foreach(COMPONENT ${Boost_FIND_COMPONENTS}) - string(TOUPPER ${COMPONENT} UPPERCOMPONENT) - set(Boost_${UPPERCOMPONENT}_FOUND 0) - endforeach() -endif() - -# ------------------------------------------------------------------------ -# Add imported targets -# ------------------------------------------------------------------------ - -if(Boost_FOUND) - # The builtin CMake package in Boost 1.70+ introduces a new name - # for the header-only lib, let's provide the same UI in module mode - if(NOT TARGET Boost::headers) - add_library(Boost::headers INTERFACE IMPORTED) - if(Boost_INCLUDE_DIRS) - set_target_properties(Boost::headers PROPERTIES - INTERFACE_INCLUDE_DIRECTORIES "${Boost_INCLUDE_DIRS}") - endif() - endif() - - # Define the old target name for header-only libraries for backwards - # compat. - if(NOT TARGET Boost::boost) - add_library(Boost::boost INTERFACE IMPORTED) - set_target_properties(Boost::boost - PROPERTIES INTERFACE_LINK_LIBRARIES Boost::headers) - endif() - - foreach(COMPONENT ${Boost_FIND_COMPONENTS}) - if(_Boost_IMPORTED_TARGETS AND NOT TARGET Boost::${COMPONENT}) - string(TOUPPER ${COMPONENT} UPPERCOMPONENT) - if(Boost_${UPPERCOMPONENT}_FOUND) - if(Boost_USE_STATIC_LIBS) - add_library(Boost::${COMPONENT} STATIC IMPORTED) - else() - # Even if Boost_USE_STATIC_LIBS is OFF, we might have static - # libraries as a result. - add_library(Boost::${COMPONENT} UNKNOWN IMPORTED) - endif() - if(Boost_INCLUDE_DIRS) - set_target_properties(Boost::${COMPONENT} PROPERTIES - INTERFACE_INCLUDE_DIRECTORIES "${Boost_INCLUDE_DIRS}") - endif() - if(EXISTS "${Boost_${UPPERCOMPONENT}_LIBRARY}") - set_target_properties(Boost::${COMPONENT} PROPERTIES - IMPORTED_LINK_INTERFACE_LANGUAGES "CXX" - IMPORTED_LOCATION "${Boost_${UPPERCOMPONENT}_LIBRARY}") - endif() - if(EXISTS "${Boost_${UPPERCOMPONENT}_LIBRARY_RELEASE}") - set_property(TARGET Boost::${COMPONENT} APPEND PROPERTY - IMPORTED_CONFIGURATIONS RELEASE) - set_target_properties(Boost::${COMPONENT} PROPERTIES - IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX" - IMPORTED_LOCATION_RELEASE "${Boost_${UPPERCOMPONENT}_LIBRARY_RELEASE}") - endif() - if(EXISTS "${Boost_${UPPERCOMPONENT}_LIBRARY_DEBUG}") - set_property(TARGET Boost::${COMPONENT} APPEND PROPERTY - IMPORTED_CONFIGURATIONS DEBUG) - set_target_properties(Boost::${COMPONENT} PROPERTIES - IMPORTED_LINK_INTERFACE_LANGUAGES_DEBUG "CXX" - IMPORTED_LOCATION_DEBUG "${Boost_${UPPERCOMPONENT}_LIBRARY_DEBUG}") - endif() - if(_Boost_${UPPERCOMPONENT}_DEPENDENCIES) - unset(_Boost_${UPPERCOMPONENT}_TARGET_DEPENDENCIES) - foreach(dep ${_Boost_${UPPERCOMPONENT}_DEPENDENCIES}) - list(APPEND _Boost_${UPPERCOMPONENT}_TARGET_DEPENDENCIES Boost::${dep}) - endforeach() - if(COMPONENT STREQUAL "thread") - list(APPEND _Boost_${UPPERCOMPONENT}_TARGET_DEPENDENCIES Threads::Threads) - endif() - set_target_properties(Boost::${COMPONENT} PROPERTIES - INTERFACE_LINK_LIBRARIES "${_Boost_${UPPERCOMPONENT}_TARGET_DEPENDENCIES}") - endif() - if(_Boost_${UPPERCOMPONENT}_COMPILER_FEATURES) - set_target_properties(Boost::${COMPONENT} PROPERTIES - INTERFACE_COMPILE_FEATURES "${_Boost_${UPPERCOMPONENT}_COMPILER_FEATURES}") - endif() - endif() - endif() - endforeach() -endif() - -# ------------------------------------------------------------------------ -# Finalize -# ------------------------------------------------------------------------ - -# Report Boost_LIBRARIES -set(Boost_LIBRARIES "") -foreach(_comp IN LISTS Boost_FIND_COMPONENTS) - string(TOUPPER ${_comp} _uppercomp) - if(Boost_${_uppercomp}_FOUND) - list(APPEND 
Boost_LIBRARIES ${Boost_${_uppercomp}_LIBRARY}) - if(_comp STREQUAL "thread") - list(APPEND Boost_LIBRARIES ${CMAKE_THREAD_LIBS_INIT}) - endif() - endif() -endforeach() - -# Configure display of cache entries in GUI. -foreach(v BOOSTROOT BOOST_ROOT ${_Boost_VARS_INC} ${_Boost_VARS_LIB}) - get_property(_type CACHE ${v} PROPERTY TYPE) - if(_type) - set_property(CACHE ${v} PROPERTY ADVANCED 1) - if("x${_type}" STREQUAL "xUNINITIALIZED") - if("x${v}" STREQUAL "xBoost_ADDITIONAL_VERSIONS") - set_property(CACHE ${v} PROPERTY TYPE STRING) - else() - set_property(CACHE ${v} PROPERTY TYPE PATH) - endif() - endif() - endif() -endforeach() - -# Record last used values of input variables so we can -# detect on the next run if the user changed them. -foreach(v - ${_Boost_VARS_INC} ${_Boost_VARS_LIB} - ${_Boost_VARS_DIR} ${_Boost_VARS_NAME} - ) - if(DEFINED ${v}) - set(_${v}_LAST "${${v}}" CACHE INTERNAL "Last used ${v} value.") - else() - unset(_${v}_LAST CACHE) - endif() -endforeach() - -# Maintain a persistent list of components requested anywhere since -# the last flush. -set(_Boost_COMPONENTS_SEARCHED "${_Boost_COMPONENTS_SEARCHED}") -list(APPEND _Boost_COMPONENTS_SEARCHED ${Boost_FIND_COMPONENTS}) -list(REMOVE_DUPLICATES _Boost_COMPONENTS_SEARCHED) -list(SORT _Boost_COMPONENTS_SEARCHED) -set(_Boost_COMPONENTS_SEARCHED "${_Boost_COMPONENTS_SEARCHED}" - CACHE INTERNAL "Components requested for this build tree.") - -# Restore project's policies -cmake_policy(POP) diff --git a/Builds/CMake/deps/Findjemalloc.cmake b/Builds/CMake/deps/Findjemalloc.cmake deleted file mode 100644 index 820ceeed4a1..00000000000 --- a/Builds/CMake/deps/Findjemalloc.cmake +++ /dev/null @@ -1,47 +0,0 @@ -# - Try to find jemalloc -# Once done this will define -# JEMALLOC_FOUND - System has jemalloc -# JEMALLOC_INCLUDE_DIRS - The jemalloc include directories -# JEMALLOC_LIBRARIES - The libraries needed to use jemalloc - -if(NOT USE_BUNDLED_JEMALLOC) - find_package(PkgConfig) - if (PKG_CONFIG_FOUND) - pkg_check_modules(PC_JEMALLOC QUIET jemalloc) - endif() -else() - set(PC_JEMALLOC_INCLUDEDIR) - set(PC_JEMALLOC_INCLUDE_DIRS) - set(PC_JEMALLOC_LIBDIR) - set(PC_JEMALLOC_LIBRARY_DIRS) - set(LIMIT_SEARCH NO_DEFAULT_PATH) -endif() - -set(JEMALLOC_DEFINITIONS ${PC_JEMALLOC_CFLAGS_OTHER}) - -find_path(JEMALLOC_INCLUDE_DIR jemalloc/jemalloc.h - PATHS ${PC_JEMALLOC_INCLUDEDIR} ${PC_JEMALLOC_INCLUDE_DIRS} - ${LIMIT_SEARCH}) - -# If we're asked to use static linkage, add libjemalloc.a as a preferred library name. 
-if(JEMALLOC_USE_STATIC) - list(APPEND JEMALLOC_NAMES - "${CMAKE_STATIC_LIBRARY_PREFIX}jemalloc${CMAKE_STATIC_LIBRARY_SUFFIX}") -endif() - -list(APPEND JEMALLOC_NAMES jemalloc) - -find_library(JEMALLOC_LIBRARY NAMES ${JEMALLOC_NAMES} - HINTS ${PC_JEMALLOC_LIBDIR} ${PC_JEMALLOC_LIBRARY_DIRS} - ${LIMIT_SEARCH}) - -set(JEMALLOC_LIBRARIES ${JEMALLOC_LIBRARY}) -set(JEMALLOC_INCLUDE_DIRS ${JEMALLOC_INCLUDE_DIR}) - -include(FindPackageHandleStandardArgs) -# handle the QUIETLY and REQUIRED arguments and set JEMALLOC_FOUND to TRUE -# if all listed variables are TRUE -find_package_handle_standard_args(JeMalloc DEFAULT_MSG - JEMALLOC_LIBRARY JEMALLOC_INCLUDE_DIR) - -mark_as_advanced(JEMALLOC_INCLUDE_DIR JEMALLOC_LIBRARY) diff --git a/Builds/CMake/deps/Findlibarchive_pc.cmake b/Builds/CMake/deps/Findlibarchive_pc.cmake deleted file mode 100644 index 8f248b28704..00000000000 --- a/Builds/CMake/deps/Findlibarchive_pc.cmake +++ /dev/null @@ -1,22 +0,0 @@ -find_package (PkgConfig REQUIRED) -pkg_search_module (libarchive_PC QUIET libarchive>=3.4.3) - -if(static) - set(LIBARCHIVE_LIB libarchive.a) -else() - set(LIBARCHIVE_LIB archive) -endif() - -find_library (archive - NAMES ${LIBARCHIVE_LIB} - HINTS - ${libarchive_PC_LIBDIR} - ${libarchive_PC_LIBRARY_DIRS} - NO_DEFAULT_PATH) - -find_path (LIBARCHIVE_INCLUDE_DIR - NAMES archive.h - HINTS - ${libarchive_PC_INCLUDEDIR} - ${libarchive_PC_INCLUDEDIRS} - NO_DEFAULT_PATH) diff --git a/Builds/CMake/deps/Findlz4.cmake b/Builds/CMake/deps/Findlz4.cmake deleted file mode 100644 index 835f5989dfa..00000000000 --- a/Builds/CMake/deps/Findlz4.cmake +++ /dev/null @@ -1,24 +0,0 @@ -find_package (PkgConfig) -if (PKG_CONFIG_FOUND) - pkg_search_module (lz4_PC QUIET liblz4>=1.9) -endif () - -if(static) - set(LZ4_LIB liblz4.a) -else() - set(LZ4_LIB lz4.so) -endif() - -find_library (lz4 - NAMES ${LZ4_LIB} - HINTS - ${lz4_PC_LIBDIR} - ${lz4_PC_LIBRARY_DIRS} - NO_DEFAULT_PATH) - -find_path (LZ4_INCLUDE_DIR - NAMES lz4.h - HINTS - ${lz4_PC_INCLUDEDIR} - ${lz4_PC_INCLUDEDIRS} - NO_DEFAULT_PATH) diff --git a/Builds/CMake/deps/Findsecp256k1.cmake b/Builds/CMake/deps/Findsecp256k1.cmake deleted file mode 100644 index 7be3d0272c8..00000000000 --- a/Builds/CMake/deps/Findsecp256k1.cmake +++ /dev/null @@ -1,24 +0,0 @@ -find_package (PkgConfig) -if (PKG_CONFIG_FOUND) - pkg_search_module (secp256k1_PC QUIET libsecp256k1) -endif () - -if(static) - set(SECP256K1_LIB libsecp256k1.a) -else() - set(SECP256K1_LIB secp256k1) -endif() - -find_library(secp256k1 - NAMES ${SECP256K1_LIB} - HINTS - ${secp256k1_PC_LIBDIR} - ${secp256k1_PC_LIBRARY_PATHS} - NO_DEFAULT_PATH) - -find_path (SECP256K1_INCLUDE_DIR - NAMES secp256k1.h - HINTS - ${secp256k1_PC_INCLUDEDIR} - ${secp256k1_PC_INCLUDEDIRS} - NO_DEFAULT_PATH) diff --git a/Builds/CMake/deps/Findsnappy.cmake b/Builds/CMake/deps/Findsnappy.cmake deleted file mode 100644 index ddf3cb280e0..00000000000 --- a/Builds/CMake/deps/Findsnappy.cmake +++ /dev/null @@ -1,24 +0,0 @@ -find_package (PkgConfig) -if (PKG_CONFIG_FOUND) - pkg_search_module (snappy_PC QUIET snappy>=1.1.7) -endif () - -if(static) - set(SNAPPY_LIB libsnappy.a) -else() - set(SNAPPY_LIB libsnappy.so) -endif() - -find_library (snappy - NAMES ${SNAPPY_LIB} - HINTS - ${snappy_PC_LIBDIR} - ${snappy_PC_LIBRARY_DIRS} - NO_DEFAULT_PATH) - -find_path (SNAPPY_INCLUDE_DIR - NAMES snappy.h - HINTS - ${snappy_PC_INCLUDEDIR} - ${snappy_PC_INCLUDEDIRS} - NO_DEFAULT_PATH) diff --git a/Builds/CMake/deps/Findsoci.cmake b/Builds/CMake/deps/Findsoci.cmake deleted file mode 100644 index 67b89276f6e..00000000000 
--- a/Builds/CMake/deps/Findsoci.cmake +++ /dev/null @@ -1,17 +0,0 @@ -find_package (PkgConfig) -if (PKG_CONFIG_FOUND) - # TBD - currently no soci pkgconfig - #pkg_search_module (soci_PC QUIET libsoci_core>=3.2) -endif () - -if(static) - set(SOCI_LIB libsoci.a) -else() - set(SOCI_LIB libsoci_core.so) -endif() - -find_library (soci - NAMES ${SOCI_LIB}) - -find_path (SOCI_INCLUDE_DIR - NAMES soci/soci.h) diff --git a/Builds/CMake/deps/Findsqlite.cmake b/Builds/CMake/deps/Findsqlite.cmake deleted file mode 100644 index ef5c6befc1e..00000000000 --- a/Builds/CMake/deps/Findsqlite.cmake +++ /dev/null @@ -1,24 +0,0 @@ -find_package (PkgConfig) -if (PKG_CONFIG_FOUND) - pkg_search_module (sqlite_PC QUIET sqlite3>=3.26.0) -endif () - -if(static) - set(SQLITE_LIB libsqlite3.a) -else() - set(SQLITE_LIB sqlite3.so) -endif() - -find_library (sqlite3 - NAMES ${SQLITE_LIB} - HINTS - ${sqlite_PC_LIBDIR} - ${sqlite_PC_LIBRARY_DIRS} - NO_DEFAULT_PATH) - -find_path (SQLITE_INCLUDE_DIR - NAMES sqlite3.h - HINTS - ${sqlite_PC_INCLUDEDIR} - ${sqlite_PC_INCLUDEDIRS} - NO_DEFAULT_PATH) diff --git a/Builds/CMake/deps/Libarchive.cmake b/Builds/CMake/deps/Libarchive.cmake deleted file mode 100644 index 57b8d2e395b..00000000000 --- a/Builds/CMake/deps/Libarchive.cmake +++ /dev/null @@ -1,163 +0,0 @@ -#[===================================================================[ - NIH dep: libarchive -#]===================================================================] - -option (local_libarchive "use local build of libarchive." OFF) -add_library (archive_lib UNKNOWN IMPORTED GLOBAL) - -if (NOT local_libarchive) - if (NOT WIN32) - find_package(libarchive_pc REQUIRED) - endif () - if (archive) - message (STATUS "Found libarchive using pkg-config. Using ${archive}.") - set_target_properties (archive_lib PROPERTIES - IMPORTED_LOCATION_DEBUG - ${archive} - IMPORTED_LOCATION_RELEASE - ${archive} - INTERFACE_INCLUDE_DIRECTORIES - ${LIBARCHIVE_INCLUDE_DIR}) - # pkg-config can return extra info for static lib linking - # this is probably needed/useful generally, but apply - # to APPLE for now (mostly for homebrew) - if (APPLE AND static AND libarchive_PC_STATIC_LIBRARIES) - message(STATUS "NOTE: libarchive static libs: ${libarchive_PC_STATIC_LIBRARIES}") - # also, APPLE seems to need iconv...maybe linux does too (TBD) - target_link_libraries (archive_lib - INTERFACE iconv ${libarchive_PC_STATIC_LIBRARIES}) - endif () - else () - ## now try searching using the minimal find module that cmake provides - find_package(LibArchive 3.4.3 QUIET) - if (LibArchive_FOUND) - if (static) - # find module doesn't find static libs currently, so we re-search - get_filename_component(_loc ${LibArchive_LIBRARY} DIRECTORY) - find_library(_la_static - NAMES libarchive.a archive_static.lib archive.lib - PATHS ${_loc}) - if (_la_static) - set (_la_lib ${_la_static}) - else () - message (WARNING "unable to find libarchive static lib - switching to local build") - set (local_libarchive ON CACHE BOOL "" FORCE) - endif () - else () - set (_la_lib ${LibArchive_LIBRARY}) - endif () - if (NOT local_libarchive) - message (STATUS "Found libarchive using module/config. 
Using ${_la_lib}.") - set_target_properties (archive_lib PROPERTIES - IMPORTED_LOCATION_DEBUG - ${_la_lib} - IMPORTED_LOCATION_RELEASE - ${_la_lib} - INTERFACE_INCLUDE_DIRECTORIES - ${LibArchive_INCLUDE_DIRS}) - endif () - else () - set (local_libarchive ON CACHE BOOL "" FORCE) - endif () - endif () -endif() - -if (local_libarchive) - set (lib_post "") - if (MSVC) - set (lib_post "_static") - endif () - ExternalProject_Add (libarchive - PREFIX ${nih_cache_path} - GIT_REPOSITORY https://github.com/libarchive/libarchive.git - GIT_TAG v3.4.3 - CMAKE_ARGS - # passing the compiler seems to be needed for windows CI, sadly - -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} - -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - $<$:-DCMAKE_VERBOSE_MAKEFILE=ON> - -DCMAKE_DEBUG_POSTFIX=_d - $<$>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}> - -DENABLE_LZ4=ON - -ULZ4_* - -DLZ4_INCLUDE_DIR=$,::> - # because we are building a static lib, this lz4 library doesn't - # actually matter since you can't generally link static libs to other static - # libs. The include files are needed, but the library itself is not (until - # we link our application, at which point we use the lz4 we built above). - # nonetheless, we need to provide a library to libarchive else it will - # NOT include lz4 support when configuring - -DLZ4_LIBRARY=$,$,$> - -DENABLE_WERROR=OFF - -DENABLE_TAR=OFF - -DENABLE_TAR_SHARED=OFF - -DENABLE_INSTALL=ON - -DENABLE_NETTLE=OFF - -DENABLE_OPENSSL=OFF - -DENABLE_LZO=OFF - -DENABLE_LZMA=OFF - -DENABLE_ZLIB=OFF - -DENABLE_BZip2=OFF - -DENABLE_LIBXML2=OFF - -DENABLE_EXPAT=OFF - -DENABLE_PCREPOSIX=OFF - -DENABLE_LibGCC=OFF - -DENABLE_CNG=OFF - -DENABLE_CPIO=OFF - -DENABLE_CPIO_SHARED=OFF - -DENABLE_CAT=OFF - -DENABLE_CAT_SHARED=OFF - -DENABLE_XATTR=OFF - -DENABLE_ACL=OFF - -DENABLE_ICONV=OFF - -DENABLE_TEST=OFF - -DENABLE_COVERAGE=OFF - $<$: - "-DCMAKE_C_FLAGS=-GR -Gd -fp:precise -FS -MP" - "-DCMAKE_C_FLAGS_DEBUG=-MTd" - "-DCMAKE_C_FLAGS_RELEASE=-MT" - > - LIST_SEPARATOR :: - LOG_BUILD ON - LOG_CONFIGURE ON - BUILD_COMMAND - ${CMAKE_COMMAND} - --build . 
- --config $ - --target archive_static - --parallel ${ep_procs} - $<$: - COMMAND - ${CMAKE_COMMAND} -E copy - /libarchive/$/${ep_lib_prefix}archive${lib_post}$<$:_d>${ep_lib_suffix} - /libarchive - > - TEST_COMMAND "" - INSTALL_COMMAND "" - DEPENDS lz4_lib - BUILD_BYPRODUCTS - /libarchive/${ep_lib_prefix}archive${lib_post}${ep_lib_suffix} - /libarchive/${ep_lib_prefix}archive${lib_post}_d${ep_lib_suffix} - ) - ExternalProject_Get_Property (libarchive BINARY_DIR) - ExternalProject_Get_Property (libarchive SOURCE_DIR) - if (CMAKE_VERBOSE_MAKEFILE) - print_ep_logs (libarchive) - endif () - file (MAKE_DIRECTORY ${SOURCE_DIR}/libarchive) - set_target_properties (archive_lib PROPERTIES - IMPORTED_LOCATION_DEBUG - ${BINARY_DIR}/libarchive/${ep_lib_prefix}archive${lib_post}_d${ep_lib_suffix} - IMPORTED_LOCATION_RELEASE - ${BINARY_DIR}/libarchive/${ep_lib_prefix}archive${lib_post}${ep_lib_suffix} - INTERFACE_INCLUDE_DIRECTORIES - ${SOURCE_DIR}/libarchive - INTERFACE_COMPILE_DEFINITIONS - LIBARCHIVE_STATIC) -endif() - -add_dependencies (archive_lib libarchive) -target_link_libraries (archive_lib INTERFACE lz4_lib) -target_link_libraries (ripple_libs INTERFACE archive_lib) -exclude_if_included (libarchive) -exclude_if_included (archive_lib) diff --git a/Builds/CMake/deps/Lz4.cmake b/Builds/CMake/deps/Lz4.cmake deleted file mode 100644 index 15d890692c5..00000000000 --- a/Builds/CMake/deps/Lz4.cmake +++ /dev/null @@ -1,79 +0,0 @@ -#[===================================================================[ - NIH dep: lz4 -#]===================================================================] - -add_library (lz4_lib STATIC IMPORTED GLOBAL) - -if (NOT WIN32) - find_package(lz4) -endif() - -if(lz4) - set_target_properties (lz4_lib PROPERTIES - IMPORTED_LOCATION_DEBUG - ${lz4} - IMPORTED_LOCATION_RELEASE - ${lz4} - INTERFACE_INCLUDE_DIRECTORIES - ${LZ4_INCLUDE_DIR}) - -else() - ExternalProject_Add (lz4 - PREFIX ${nih_cache_path} - GIT_REPOSITORY https://github.com/lz4/lz4.git - GIT_TAG v1.9.2 - SOURCE_SUBDIR contrib/cmake_unofficial - CMAKE_ARGS - -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} - -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - $<$:-DCMAKE_VERBOSE_MAKEFILE=ON> - -DCMAKE_DEBUG_POSTFIX=_d - $<$>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}> - -DBUILD_STATIC_LIBS=ON - -DBUILD_SHARED_LIBS=OFF - $<$: - "-DCMAKE_C_FLAGS=-GR -Gd -fp:precise -FS -MP" - "-DCMAKE_C_FLAGS_DEBUG=-MTd" - "-DCMAKE_C_FLAGS_RELEASE=-MT" - > - LOG_BUILD ON - LOG_CONFIGURE ON - BUILD_COMMAND - ${CMAKE_COMMAND} - --build . 
- --config $ - --target lz4_static - --parallel ${ep_procs} - $<$: - COMMAND - ${CMAKE_COMMAND} -E copy - /$/${ep_lib_prefix}lz4$<$:_d>${ep_lib_suffix} - - > - TEST_COMMAND "" - INSTALL_COMMAND "" - BUILD_BYPRODUCTS - /${ep_lib_prefix}lz4${ep_lib_suffix} - /${ep_lib_prefix}lz4_d${ep_lib_suffix} - ) - ExternalProject_Get_Property (lz4 BINARY_DIR) - ExternalProject_Get_Property (lz4 SOURCE_DIR) - - file (MAKE_DIRECTORY ${SOURCE_DIR}/lz4) - set_target_properties (lz4_lib PROPERTIES - IMPORTED_LOCATION_DEBUG - ${BINARY_DIR}/${ep_lib_prefix}lz4_d${ep_lib_suffix} - IMPORTED_LOCATION_RELEASE - ${BINARY_DIR}/${ep_lib_prefix}lz4${ep_lib_suffix} - INTERFACE_INCLUDE_DIRECTORIES - ${SOURCE_DIR}/lib) - - if (CMAKE_VERBOSE_MAKEFILE) - print_ep_logs (lz4) - endif () - add_dependencies (lz4_lib lz4) - target_link_libraries (ripple_libs INTERFACE lz4_lib) - exclude_if_included (lz4) -endif() - -exclude_if_included (lz4_lib) diff --git a/Builds/CMake/deps/Nudb.cmake b/Builds/CMake/deps/Nudb.cmake deleted file mode 100644 index 9698d3f061c..00000000000 --- a/Builds/CMake/deps/Nudb.cmake +++ /dev/null @@ -1,31 +0,0 @@ -#[===================================================================[ - NIH dep: nudb - - NuDB is header-only, thus is an INTERFACE lib in CMake. - TODO: move the library definition into NuDB repo and add - proper targets and export/install -#]===================================================================] - -if (is_root_project) # NuDB not needed in the case of xrpl_core inclusion build - add_library (nudb INTERFACE) - FetchContent_Declare( - nudb_src - GIT_REPOSITORY https://github.com/CPPAlliance/NuDB.git - GIT_TAG 2.0.5 - ) - FetchContent_GetProperties(nudb_src) - if(NOT nudb_src_POPULATED) - message (STATUS "Pausing to download NuDB...") - FetchContent_Populate(nudb_src) - endif() - - file(TO_CMAKE_PATH "${nudb_src_SOURCE_DIR}" nudb_src_SOURCE_DIR) - # specify as system includes so as to avoid warnings - target_include_directories (nudb SYSTEM INTERFACE ${nudb_src_SOURCE_DIR}/include) - target_link_libraries (nudb - INTERFACE - Boost::thread - Boost::system) - add_library (NIH::nudb ALIAS nudb) - target_link_libraries (ripple_libs INTERFACE NIH::nudb) -endif () diff --git a/Builds/CMake/deps/OpenSSL.cmake b/Builds/CMake/deps/OpenSSL.cmake deleted file mode 100644 index ad5117aacb0..00000000000 --- a/Builds/CMake/deps/OpenSSL.cmake +++ /dev/null @@ -1,48 +0,0 @@ -#[===================================================================[ - NIH dep: openssl -#]===================================================================] - -#[===============================================[ - OPENSSL_ROOT_DIR is the only variable that - FindOpenSSL honors for locating, so convert any - OPENSSL_ROOT vars to this -#]===============================================] -if (NOT DEFINED OPENSSL_ROOT_DIR) - if (DEFINED ENV{OPENSSL_ROOT}) - set (OPENSSL_ROOT_DIR $ENV{OPENSSL_ROOT}) - elseif (HOMEBREW) - execute_process (COMMAND ${HOMEBREW} --prefix openssl - OUTPUT_VARIABLE OPENSSL_ROOT_DIR - OUTPUT_STRIP_TRAILING_WHITESPACE) - endif () - file (TO_CMAKE_PATH "${OPENSSL_ROOT_DIR}" OPENSSL_ROOT_DIR) -endif () - -if (static) - set (OPENSSL_USE_STATIC_LIBS ON) -endif () -set (OPENSSL_MSVC_STATIC_RT ON) -find_package (OpenSSL 1.1.1 REQUIRED) -target_link_libraries (ripple_libs - INTERFACE - OpenSSL::SSL - OpenSSL::Crypto) -# disable SSLv2...this can also be done when building/configuring OpenSSL -set_target_properties(OpenSSL::SSL PROPERTIES - INTERFACE_COMPILE_DEFINITIONS OPENSSL_NO_SSL2) 
-#[=========================================================[ - https://gitlab.kitware.com/cmake/cmake/issues/16885 - depending on how openssl is built, it might depend - on zlib. In fact, the openssl find package should - figure this out for us, but it does not currently... - so let's add zlib ourselves to the lib list - TODO: investigate linking to static zlib for static - build option -#]=========================================================] -find_package (ZLIB) -set (has_zlib FALSE) -if (TARGET ZLIB::ZLIB) - set_target_properties(OpenSSL::Crypto PROPERTIES - INTERFACE_LINK_LIBRARIES ZLIB::ZLIB) - set (has_zlib TRUE) -endif () diff --git a/Builds/CMake/deps/Postgres.cmake b/Builds/CMake/deps/Postgres.cmake deleted file mode 100644 index bb94832a48b..00000000000 --- a/Builds/CMake/deps/Postgres.cmake +++ /dev/null @@ -1,70 +0,0 @@ -if(reporting) - find_package(PostgreSQL) - if(NOT PostgreSQL_FOUND) - message("find_package did not find postgres") - find_library(postgres NAMES pq libpq libpq-dev pq-dev postgresql-devel) - find_path(libpq-fe NAMES libpq-fe.h PATH_SUFFIXES postgresql pgsql include) - - if(NOT libpq-fe_FOUND OR NOT postgres_FOUND) - message("No system installed Postgres found. Will build") - add_library(postgres SHARED IMPORTED GLOBAL) - add_library(pgport SHARED IMPORTED GLOBAL) - add_library(pgcommon SHARED IMPORTED GLOBAL) - ExternalProject_Add(postgres_src - PREFIX ${nih_cache_path} - GIT_REPOSITORY https://github.com/postgres/postgres.git - GIT_TAG REL_14_5 - CONFIGURE_COMMAND ./configure --without-readline > /dev/null - BUILD_COMMAND ${CMAKE_COMMAND} -E env --unset=MAKELEVEL make - UPDATE_COMMAND "" - BUILD_IN_SOURCE 1 - INSTALL_COMMAND "" - BUILD_BYPRODUCTS - /src/interfaces/libpq/${ep_lib_prefix}pq.a - /src/common/${ep_lib_prefix}pgcommon.a - /src/port/${ep_lib_prefix}pgport.a - LOG_BUILD TRUE - ) - ExternalProject_Get_Property (postgres_src SOURCE_DIR) - ExternalProject_Get_Property (postgres_src BINARY_DIR) - - set (postgres_src_SOURCE_DIR "${SOURCE_DIR}") - file (MAKE_DIRECTORY ${postgres_src_SOURCE_DIR}) - list(APPEND INCLUDE_DIRS - ${SOURCE_DIR}/src/include - ${SOURCE_DIR}/src/interfaces/libpq - ) - set_target_properties(postgres PROPERTIES - IMPORTED_LOCATION - ${BINARY_DIR}/src/interfaces/libpq/${ep_lib_prefix}pq.a - INTERFACE_INCLUDE_DIRECTORIES - "${INCLUDE_DIRS}" - ) - set_target_properties(pgcommon PROPERTIES - IMPORTED_LOCATION - ${BINARY_DIR}/src/common/${ep_lib_prefix}pgcommon.a - INTERFACE_INCLUDE_DIRECTORIES - "${INCLUDE_DIRS}" - ) - set_target_properties(pgport PROPERTIES - IMPORTED_LOCATION - ${BINARY_DIR}/src/port/${ep_lib_prefix}pgport.a - INTERFACE_INCLUDE_DIRECTORIES - "${INCLUDE_DIRS}" - ) - add_dependencies(postgres postgres_src) - add_dependencies(pgcommon postgres_src) - add_dependencies(pgport postgres_src) - file(TO_CMAKE_PATH "${postgres_src_SOURCE_DIR}" postgres_src_SOURCE_DIR) - target_link_libraries(ripple_libs INTERFACE postgres pgcommon pgport) - else() - message("Found system installed Postgres via find_libary") - target_include_directories(ripple_libs INTERFACE ${libpq-fe}) - target_link_libraries(ripple_libs INTERFACE ${postgres}) - endif() - else() - message("Found system installed Postgres via find_package") - target_include_directories(ripple_libs INTERFACE ${PostgreSQL_INCLUDE_DIRS}) - target_link_libraries(ripple_libs INTERFACE ${PostgreSQL_LIBRARIES}) - endif() -endif() diff --git a/Builds/CMake/deps/Protobuf.cmake b/Builds/CMake/deps/Protobuf.cmake index 35d5b9f0ff7..0706ae32243 100644 --- 
a/Builds/CMake/deps/Protobuf.cmake +++ b/Builds/CMake/deps/Protobuf.cmake @@ -1,155 +1,22 @@ -#[===================================================================[ - import protobuf (lib and compiler) and create a lib - from our proto message definitions. If the system protobuf - is not found, fallback on EP to download and build a version - from official source. -#]===================================================================] +find_package(Protobuf 3.8) -if (static) - set (Protobuf_USE_STATIC_LIBS ON) -endif () -find_package (Protobuf 3.8) -if (is_multiconfig) - set(protobuf_protoc_lib ${Protobuf_PROTOC_LIBRARIES}) -else () - string(TOUPPER ${CMAKE_BUILD_TYPE} upper_cmake_build_type) - set(protobuf_protoc_lib ${Protobuf_PROTOC_LIBRARY_${upper_cmake_build_type}}) -endif () -if (local_protobuf OR NOT (Protobuf_FOUND AND Protobuf_PROTOC_EXECUTABLE AND protobuf_protoc_lib)) - include (GNUInstallDirs) - message (STATUS "using local protobuf build.") - set(protobuf_reqs Protobuf_PROTOC_EXECUTABLE protobuf_protoc_lib) - foreach(lib ${protobuf_reqs}) - if(NOT ${lib}) - message(STATUS "Couldn't find ${lib}") - endif() - endforeach() - if (WIN32) - # protobuf prepends lib even on windows - set (pbuf_lib_pre "lib") - else () - set (pbuf_lib_pre ${ep_lib_prefix}) - endif () - # for the external project build of protobuf, we currently ignore the - # static option and always build static libs here. This is consistent - # with our other EP builds. Dynamic libs in an EP would add complexity - # because we'd need to get them into the runtime path, and probably - # install them. - ExternalProject_Add (protobuf_src - PREFIX ${nih_cache_path} - GIT_REPOSITORY https://github.com/protocolbuffers/protobuf.git - GIT_TAG v3.8.0 - SOURCE_SUBDIR cmake - CMAKE_ARGS - -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} - -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - -DCMAKE_INSTALL_PREFIX=/_installed_ - -Dprotobuf_BUILD_TESTS=OFF - -Dprotobuf_BUILD_EXAMPLES=OFF - -Dprotobuf_BUILD_PROTOC_BINARIES=ON - -Dprotobuf_MSVC_STATIC_RUNTIME=ON - -DBUILD_SHARED_LIBS=OFF - -Dprotobuf_BUILD_SHARED_LIBS=OFF - -DCMAKE_DEBUG_POSTFIX=_d - -Dprotobuf_DEBUG_POSTFIX=_d - -Dprotobuf_WITH_ZLIB=$,ON,OFF> - $<$:-DCMAKE_VERBOSE_MAKEFILE=ON> - $<$:-DCMAKE_UNITY_BUILD=ON}> - $<$>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}> - $<$: - "-DCMAKE_CXX_FLAGS=-GR -Gd -fp:precise -FS -EHa -MP" - > - LOG_BUILD ON - LOG_CONFIGURE ON - BUILD_COMMAND - ${CMAKE_COMMAND} - --build . - --config $ - --parallel ${ep_procs} - TEST_COMMAND "" - INSTALL_COMMAND - ${CMAKE_COMMAND} -E env --unset=DESTDIR ${CMAKE_COMMAND} --build . 
--config $ --target install - BUILD_BYPRODUCTS - /_installed_/${CMAKE_INSTALL_LIBDIR}/${pbuf_lib_pre}protobuf${ep_lib_suffix} - /_installed_/${CMAKE_INSTALL_LIBDIR}/${pbuf_lib_pre}protobuf_d${ep_lib_suffix} - /_installed_/${CMAKE_INSTALL_LIBDIR}/${pbuf_lib_pre}protoc${ep_lib_suffix} - /_installed_/${CMAKE_INSTALL_LIBDIR}/${pbuf_lib_pre}protoc_d${ep_lib_suffix} - /_installed_/bin/protoc${CMAKE_EXECUTABLE_SUFFIX} - ) - ExternalProject_Get_Property (protobuf_src BINARY_DIR) - ExternalProject_Get_Property (protobuf_src SOURCE_DIR) - if (CMAKE_VERBOSE_MAKEFILE) - print_ep_logs (protobuf_src) - endif () - exclude_if_included (protobuf_src) +file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/proto_gen) +set(ccbd ${CMAKE_CURRENT_BINARY_DIR}) +set(CMAKE_CURRENT_BINARY_DIR ${CMAKE_BINARY_DIR}/proto_gen) +protobuf_generate_cpp(PROTO_SRCS PROTO_HDRS src/ripple/proto/ripple.proto) +set(CMAKE_CURRENT_BINARY_DIR ${ccbd}) - if (NOT TARGET protobuf::libprotobuf) - add_library (protobuf::libprotobuf STATIC IMPORTED GLOBAL) - endif () - file (MAKE_DIRECTORY ${BINARY_DIR}/_installed_/include) - set_target_properties (protobuf::libprotobuf PROPERTIES - IMPORTED_LOCATION_DEBUG - ${BINARY_DIR}/_installed_/${CMAKE_INSTALL_LIBDIR}/${pbuf_lib_pre}protobuf_d${ep_lib_suffix} - IMPORTED_LOCATION_RELEASE - ${BINARY_DIR}/_installed_/${CMAKE_INSTALL_LIBDIR}/${pbuf_lib_pre}protobuf${ep_lib_suffix} - INTERFACE_INCLUDE_DIRECTORIES - ${BINARY_DIR}/_installed_/include) - add_dependencies (protobuf::libprotobuf protobuf_src) - exclude_if_included (protobuf::libprotobuf) - - if (NOT TARGET protobuf::libprotoc) - add_library (protobuf::libprotoc STATIC IMPORTED GLOBAL) - endif () - set_target_properties (protobuf::libprotoc PROPERTIES - IMPORTED_LOCATION_DEBUG - ${BINARY_DIR}/_installed_/${CMAKE_INSTALL_LIBDIR}/${pbuf_lib_pre}protoc_d${ep_lib_suffix} - IMPORTED_LOCATION_RELEASE - ${BINARY_DIR}/_installed_/${CMAKE_INSTALL_LIBDIR}/${pbuf_lib_pre}protoc${ep_lib_suffix} - INTERFACE_INCLUDE_DIRECTORIES - ${BINARY_DIR}/_installed_/include) - add_dependencies (protobuf::libprotoc protobuf_src) - exclude_if_included (protobuf::libprotoc) - - if (NOT TARGET protobuf::protoc) - add_executable (protobuf::protoc IMPORTED) - exclude_if_included (protobuf::protoc) - endif () - set_target_properties (protobuf::protoc PROPERTIES - IMPORTED_LOCATION "${BINARY_DIR}/_installed_/bin/protoc${CMAKE_EXECUTABLE_SUFFIX}") - add_dependencies (protobuf::protoc protobuf_src) -else () - if (NOT TARGET protobuf::protoc) - if (EXISTS "${Protobuf_PROTOC_EXECUTABLE}") - add_executable (protobuf::protoc IMPORTED) - set_target_properties (protobuf::protoc PROPERTIES - IMPORTED_LOCATION "${Protobuf_PROTOC_EXECUTABLE}") - else () - message (FATAL_ERROR "Protobuf import failed") - endif () - endif () -endif () - -file (MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/proto_gen) -set (save_CBD ${CMAKE_CURRENT_BINARY_DIR}) -set (CMAKE_CURRENT_BINARY_DIR ${CMAKE_BINARY_DIR}/proto_gen) -protobuf_generate_cpp ( - PROTO_SRCS - PROTO_HDRS - src/ripple/proto/ripple.proto) -set (CMAKE_CURRENT_BINARY_DIR ${save_CBD}) - -add_library (pbufs STATIC ${PROTO_SRCS} ${PROTO_HDRS}) - -target_include_directories (pbufs PRIVATE src) -target_include_directories (pbufs - SYSTEM PUBLIC ${CMAKE_BINARY_DIR}/proto_gen) -target_link_libraries (pbufs protobuf::libprotobuf) -target_compile_options (pbufs +add_library(pbufs STATIC ${PROTO_SRCS} ${PROTO_HDRS}) +target_include_directories(pbufs SYSTEM PUBLIC + ${CMAKE_BINARY_DIR}/proto_gen + ${CMAKE_BINARY_DIR}/proto_gen/src/ripple/proto +) +target_link_libraries(pbufs 
protobuf::libprotobuf) +target_compile_options(pbufs PUBLIC - $<$: + $<$: --system-header-prefix="google/protobuf" -Wno-deprecated-dynamic-exception-spec - >) -add_library (Ripple::pbufs ALIAS pbufs) -target_link_libraries (ripple_libs INTERFACE Ripple::pbufs) -exclude_if_included (pbufs) + > +) +add_library(Ripple::pbufs ALIAS pbufs) diff --git a/Builds/CMake/deps/Rocksdb.cmake b/Builds/CMake/deps/Rocksdb.cmake deleted file mode 100644 index 2c832c593f5..00000000000 --- a/Builds/CMake/deps/Rocksdb.cmake +++ /dev/null @@ -1,177 +0,0 @@ -#[===================================================================[ - NIH dep: rocksdb -#]===================================================================] - -add_library (rocksdb_lib UNKNOWN IMPORTED GLOBAL) -set_target_properties (rocksdb_lib - PROPERTIES INTERFACE_COMPILE_DEFINITIONS RIPPLE_ROCKSDB_AVAILABLE=1) - -option (local_rocksdb "use local build of rocksdb." OFF) -if (NOT local_rocksdb) - find_package (RocksDB 6.27 QUIET CONFIG) - if (TARGET RocksDB::rocksdb) - message (STATUS "Found RocksDB using config.") - get_target_property (_rockslib_l RocksDB::rocksdb IMPORTED_LOCATION_DEBUG) - if (_rockslib_l) - set_target_properties (rocksdb_lib PROPERTIES IMPORTED_LOCATION_DEBUG ${_rockslib_l}) - endif () - get_target_property (_rockslib_l RocksDB::rocksdb IMPORTED_LOCATION_RELEASE) - if (_rockslib_l) - set_target_properties (rocksdb_lib PROPERTIES IMPORTED_LOCATION_RELEASE ${_rockslib_l}) - endif () - get_target_property (_rockslib_l RocksDB::rocksdb IMPORTED_LOCATION) - if (_rockslib_l) - set_target_properties (rocksdb_lib PROPERTIES IMPORTED_LOCATION ${_rockslib_l}) - endif () - get_target_property (_rockslib_i RocksDB::rocksdb INTERFACE_INCLUDE_DIRECTORIES) - if (_rockslib_i) - set_target_properties (rocksdb_lib PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${_rockslib_i}) - endif () - target_link_libraries (ripple_libs INTERFACE RocksDB::rocksdb) - else () - # using a find module with rocksdb is difficult because - # you have no idea how it was configured (transitive dependencies). - # the code below will generally find rocksdb using the module, but - # will then result in linker errors for static linkage since the - # transitive dependencies are unknown. force local build here for now, but leave the code as - # a placeholder for future investigation. 
- if (static) - set (local_rocksdb ON CACHE BOOL "" FORCE) - # TBD if there is some way to extract transitive deps..then: - #set (RocksDB_USE_STATIC ON) - else () - find_package (RocksDB 6.27 MODULE) - if (ROCKSDB_FOUND) - if (RocksDB_LIBRARY_DEBUG) - set_target_properties (rocksdb_lib PROPERTIES IMPORTED_LOCATION_DEBUG ${RocksDB_LIBRARY_DEBUG}) - endif () - set_target_properties (rocksdb_lib PROPERTIES IMPORTED_LOCATION_RELEASE ${RocksDB_LIBRARIES}) - set_target_properties (rocksdb_lib PROPERTIES IMPORTED_LOCATION ${RocksDB_LIBRARIES}) - set_target_properties (rocksdb_lib PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${RocksDB_INCLUDE_DIRS}) - else () - set (local_rocksdb ON CACHE BOOL "" FORCE) - endif () - endif () - endif () -endif () - -if (local_rocksdb) - message (STATUS "Using local build of RocksDB.") - ExternalProject_Add (rocksdb - PREFIX ${nih_cache_path} - GIT_REPOSITORY https://github.com/facebook/rocksdb.git - GIT_TAG v6.27.3 - PATCH_COMMAND - # only used by windows build - ${CMAKE_COMMAND} -E copy_if_different - ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/rocks_thirdparty.inc - /thirdparty.inc - COMMAND - # fixup their build version file to keep the values - # from changing always - ${CMAKE_COMMAND} -E copy_if_different - ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/rocksdb_build_version.cc.in - /util/build_version.cc.in - CMAKE_ARGS - -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} - -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - $<$:-DCMAKE_VERBOSE_MAKEFILE=ON> - $<$:-DCMAKE_UNITY_BUILD=ON}> - -DCMAKE_DEBUG_POSTFIX=_d - $<$>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}> - -DBUILD_SHARED_LIBS=OFF - -DCMAKE_POSITION_INDEPENDENT_CODE=ON - -DWITH_JEMALLOC=$,ON,OFF> - -DWITH_SNAPPY=ON - -DWITH_LZ4=ON - -DWITH_ZLIB=OFF - -DUSE_RTTI=ON - -DWITH_ZSTD=OFF - -DWITH_GFLAGS=OFF - -DWITH_BZ2=OFF - -ULZ4_* - -Ulz4_* - -Dlz4_INCLUDE_DIRS=$,::> - -Dlz4_LIBRARIES=$,$,$> - -Dlz4_FOUND=ON - -USNAPPY_* - -Usnappy_* - -USnappy_* - -Dsnappy_INCLUDE_DIRS=$,::> - -Dsnappy_LIBRARIES=$,$,$> - -Dsnappy_FOUND=ON - -DSnappy_INCLUDE_DIRS=$,::> - -DSnappy_LIBRARIES=$,$,$> - -DSnappy_FOUND=ON - -DWITH_MD_LIBRARY=OFF - -DWITH_RUNTIME_DEBUG=$,ON,OFF> - -DFAIL_ON_WARNINGS=OFF - -DWITH_ASAN=OFF - -DWITH_TSAN=OFF - -DWITH_UBSAN=OFF - -DWITH_NUMA=OFF - -DWITH_TBB=OFF - -DWITH_WINDOWS_UTF8_FILENAMES=OFF - -DWITH_XPRESS=OFF - -DPORTABLE=ON - -DFORCE_SSE42=OFF - -DDISABLE_STALL_NOTIF=OFF - -DOPTDBG=ON - -DROCKSDB_LITE=OFF - -DWITH_FALLOCATE=ON - -DWITH_LIBRADOS=OFF - -DWITH_JNI=OFF - -DROCKSDB_INSTALL_ON_WINDOWS=OFF - -DWITH_TESTS=OFF - -DWITH_TOOLS=OFF - $<$: - "-DCMAKE_CXX_FLAGS=-GR -Gd -fp:precise -FS -MP /DNDEBUG" - > - $<$>: - "-DCMAKE_CXX_FLAGS=-DNDEBUG" - > - LOG_BUILD ON - LOG_CONFIGURE ON - BUILD_COMMAND - ${CMAKE_COMMAND} - --build . 
- --config $ - --parallel ${ep_procs} - $<$: - COMMAND - ${CMAKE_COMMAND} -E copy - /$/${ep_lib_prefix}rocksdb$<$:_d>${ep_lib_suffix} - - > - LIST_SEPARATOR :: - TEST_COMMAND "" - INSTALL_COMMAND "" - DEPENDS snappy_lib lz4_lib - BUILD_BYPRODUCTS - /${ep_lib_prefix}rocksdb${ep_lib_suffix} - /${ep_lib_prefix}rocksdb_d${ep_lib_suffix} - ) - ExternalProject_Get_Property (rocksdb BINARY_DIR) - ExternalProject_Get_Property (rocksdb SOURCE_DIR) - if (CMAKE_VERBOSE_MAKEFILE) - print_ep_logs (rocksdb) - endif () - file (MAKE_DIRECTORY ${SOURCE_DIR}/include) - set_target_properties (rocksdb_lib PROPERTIES - IMPORTED_LOCATION_DEBUG - ${BINARY_DIR}/${ep_lib_prefix}rocksdb_d${ep_lib_suffix} - IMPORTED_LOCATION_RELEASE - ${BINARY_DIR}/${ep_lib_prefix}rocksdb${ep_lib_suffix} - INTERFACE_INCLUDE_DIRECTORIES - ${SOURCE_DIR}/include) - add_dependencies (rocksdb_lib rocksdb) - exclude_if_included (rocksdb) -endif () - -target_link_libraries (rocksdb_lib - INTERFACE - snappy_lib - lz4_lib - $<$:rpcrt4>) -exclude_if_included (rocksdb_lib) -target_link_libraries (ripple_libs INTERFACE rocksdb_lib) diff --git a/Builds/CMake/deps/Secp256k1.cmake b/Builds/CMake/deps/Secp256k1.cmake deleted file mode 100644 index 3197315626b..00000000000 --- a/Builds/CMake/deps/Secp256k1.cmake +++ /dev/null @@ -1,58 +0,0 @@ -#[===================================================================[ - NIH dep: secp256k1 -#]===================================================================] - -add_library (secp256k1_lib STATIC IMPORTED GLOBAL) - -if (NOT WIN32) - find_package(secp256k1) -endif() - -if(secp256k1) - set_target_properties (secp256k1_lib PROPERTIES - IMPORTED_LOCATION_DEBUG - ${secp256k1} - IMPORTED_LOCATION_RELEASE - ${secp256k1} - INTERFACE_INCLUDE_DIRECTORIES - ${SECP256K1_INCLUDE_DIR}) - - add_library (secp256k1 ALIAS secp256k1_lib) - add_library (NIH::secp256k1 ALIAS secp256k1_lib) - -else() - set(INSTALL_SECP256K1 true) - - add_library (secp256k1 STATIC - src/secp256k1/src/secp256k1.c) - target_compile_definitions (secp256k1 - PRIVATE - USE_NUM_NONE - USE_FIELD_10X26 - USE_FIELD_INV_BUILTIN - USE_SCALAR_8X32 - USE_SCALAR_INV_BUILTIN) - target_include_directories (secp256k1 - PUBLIC - $ - $ - PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src/secp256k1) - target_compile_options (secp256k1 - PRIVATE - $<$:-wd4319> - $<$>: - -Wno-deprecated-declarations - -Wno-unused-function - > - $<$:-Wno-nonnull-compare>) - target_link_libraries (ripple_libs INTERFACE NIH::secp256k1) -#[===========================[ - headers installation -#]===========================] - install ( - FILES - src/secp256k1/include/secp256k1.h - DESTINATION include/secp256k1/include) - - add_library (NIH::secp256k1 ALIAS secp256k1) -endif() diff --git a/Builds/CMake/deps/Snappy.cmake b/Builds/CMake/deps/Snappy.cmake deleted file mode 100644 index 331ac2fbe95..00000000000 --- a/Builds/CMake/deps/Snappy.cmake +++ /dev/null @@ -1,77 +0,0 @@ -#[===================================================================[ - NIH dep: snappy -#]===================================================================] - -add_library (snappy_lib STATIC IMPORTED GLOBAL) - -if (NOT WIN32) - find_package(snappy) -endif() - -if(snappy) - set_target_properties (snappy_lib PROPERTIES - IMPORTED_LOCATION_DEBUG - ${snappy} - IMPORTED_LOCATION_RELEASE - ${snappy} - INTERFACE_INCLUDE_DIRECTORIES - ${SNAPPY_INCLUDE_DIR}) - -else() - ExternalProject_Add (snappy - PREFIX ${nih_cache_path} - GIT_REPOSITORY https://github.com/google/snappy.git - GIT_TAG 1.1.7 - CMAKE_ARGS - 
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} - -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - $<$:-DCMAKE_VERBOSE_MAKEFILE=ON> - -DCMAKE_DEBUG_POSTFIX=_d - $<$>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}> - -DBUILD_SHARED_LIBS=OFF - -DCMAKE_POSITION_INDEPENDENT_CODE=ON - -DSNAPPY_BUILD_TESTS=OFF - $<$: - "-DCMAKE_CXX_FLAGS=-GR -Gd -fp:precise -FS -EHa -MP" - "-DCMAKE_CXX_FLAGS_DEBUG=-MTd" - "-DCMAKE_CXX_FLAGS_RELEASE=-MT" - > - LOG_BUILD ON - LOG_CONFIGURE ON - BUILD_COMMAND - ${CMAKE_COMMAND} - --build . - --config $ - --parallel ${ep_procs} - $<$: - COMMAND - ${CMAKE_COMMAND} -E copy - /$/${ep_lib_prefix}snappy$<$:_d>${ep_lib_suffix} - - > - TEST_COMMAND "" - INSTALL_COMMAND - ${CMAKE_COMMAND} -E copy_if_different /config.h /snappy-stubs-public.h - BUILD_BYPRODUCTS - /${ep_lib_prefix}snappy${ep_lib_suffix} - /${ep_lib_prefix}snappy_d${ep_lib_suffix} - ) - ExternalProject_Get_Property (snappy BINARY_DIR) - ExternalProject_Get_Property (snappy SOURCE_DIR) - if (CMAKE_VERBOSE_MAKEFILE) - print_ep_logs (snappy) - endif () - file (MAKE_DIRECTORY ${SOURCE_DIR}/snappy) - set_target_properties (snappy_lib PROPERTIES - IMPORTED_LOCATION_DEBUG - ${BINARY_DIR}/${ep_lib_prefix}snappy_d${ep_lib_suffix} - IMPORTED_LOCATION_RELEASE - ${BINARY_DIR}/${ep_lib_prefix}snappy${ep_lib_suffix} - INTERFACE_INCLUDE_DIRECTORIES - ${SOURCE_DIR}) -endif() - -add_dependencies (snappy_lib snappy) -target_link_libraries (ripple_libs INTERFACE snappy_lib) -exclude_if_included (snappy) -exclude_if_included (snappy_lib) diff --git a/Builds/CMake/deps/Soci.cmake b/Builds/CMake/deps/Soci.cmake deleted file mode 100644 index d165d6e1f84..00000000000 --- a/Builds/CMake/deps/Soci.cmake +++ /dev/null @@ -1,165 +0,0 @@ -#[===================================================================[ - NIH dep: soci -#]===================================================================] - -foreach (_comp core empty sqlite3) - add_library ("soci_${_comp}" STATIC IMPORTED GLOBAL) -endforeach () - -if (NOT WIN32) - find_package(soci) -endif() - -if (soci) - foreach (_comp core empty sqlite3) - set_target_properties ("soci_${_comp}" PROPERTIES - IMPORTED_LOCATION_DEBUG - ${soci} - IMPORTED_LOCATION_RELEASE - ${soci} - INTERFACE_INCLUDE_DIRECTORIES - ${SOCI_INCLUDE_DIR}) - endforeach () - -else() - set (soci_lib_pre ${ep_lib_prefix}) - set (soci_lib_post "") - if (WIN32) - # for some reason soci on windows still prepends lib (non-standard) - set (soci_lib_pre lib) - # this version in the name might change if/when we change versions of soci - set (soci_lib_post "_4_0") - endif () - get_target_property (_boost_incs Boost::date_time INTERFACE_INCLUDE_DIRECTORIES) - get_target_property (_boost_dt Boost::date_time IMPORTED_LOCATION) - if (NOT _boost_dt) - get_target_property (_boost_dt Boost::date_time IMPORTED_LOCATION_RELEASE) - endif () - if (NOT _boost_dt) - get_target_property (_boost_dt Boost::date_time IMPORTED_LOCATION_DEBUG) - endif () - - ExternalProject_Add (soci - PREFIX ${nih_cache_path} - GIT_REPOSITORY https://github.com/SOCI/soci.git - GIT_TAG 04e1870294918d20761736743bb6136314c42dd5 - # We had an issue with soci integer range checking for boost::optional - # and needed to remove the exception that SOCI throws in this case. - # This is *probably* a bug in SOCI, but has never been investigated more - # nor reported to the maintainers. - # This cmake script comments out the lines in question. - # This patch process is likely fragile and should be reviewed carefully - # whenever we update the GIT_TAG above. 
- PATCH_COMMAND - ${CMAKE_COMMAND} -D RIPPLED_SOURCE=${CMAKE_CURRENT_SOURCE_DIR} - -P ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/soci_patch.cmake - CMAKE_ARGS - -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} - -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - $<$:-DCMAKE_VERBOSE_MAKEFILE=ON> - $<$:-DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE}> - $<$:-DVCPKG_TARGET_TRIPLET=${VCPKG_TARGET_TRIPLET}> - $<$:-DCMAKE_UNITY_BUILD=ON}> - -DCMAKE_PREFIX_PATH=${CMAKE_BINARY_DIR}/sqlite3 - -DCMAKE_MODULE_PATH=${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake - -DCMAKE_INCLUDE_PATH=$,::> - -DCMAKE_LIBRARY_PATH=${sqlite_BINARY_DIR} - -DCMAKE_DEBUG_POSTFIX=_d - $<$>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}> - -DSOCI_CXX_C11=ON - -DSOCI_STATIC=ON - -DSOCI_LIBDIR=lib - -DSOCI_SHARED=OFF - -DSOCI_TESTS=OFF - # hacks to workaround the fact that soci doesn't currently use - # boost imported targets in its cmake. If they switch to - # proper imported targets, this next line can be removed - # (as well as the get_property above that sets _boost_incs) - -DBoost_INCLUDE_DIRS=$ - -DBoost_INCLUDE_DIR=$ - -DBOOST_ROOT=${BOOST_ROOT} - -DWITH_BOOST=ON - -DBoost_FOUND=ON - -DBoost_NO_BOOST_CMAKE=ON - -DBoost_DATE_TIME_FOUND=ON - -DSOCI_HAVE_BOOST=ON - -DSOCI_HAVE_BOOST_DATE_TIME=ON - -DBoost_DATE_TIME_LIBRARY=${_boost_dt} - -DSOCI_DB2=OFF - -DSOCI_FIREBIRD=OFF - -DSOCI_MYSQL=OFF - -DSOCI_ODBC=OFF - -DSOCI_ORACLE=OFF - -DSOCI_POSTGRESQL=OFF - -DSOCI_SQLITE3=ON - -DSQLITE3_INCLUDE_DIR=$,::> - -DSQLITE3_LIBRARY=$,$,$> - $<$:-DCMAKE_FIND_FRAMEWORK=LAST> - $<$: - "-DCMAKE_CXX_FLAGS=-GR -Gd -fp:precise -FS -EHa -MP" - "-DCMAKE_CXX_FLAGS_DEBUG=-MTd" - "-DCMAKE_CXX_FLAGS_RELEASE=-MT" - > - $<$>: - "-DCMAKE_CXX_FLAGS=-Wno-deprecated-declarations" - > - # SEE: https://github.com/SOCI/soci/issues/640 - $<$,$>: - "-DCMAKE_CXX_FLAGS=-Wno-deprecated-declarations -Wno-error=format-overflow -Wno-format-overflow -Wno-error=format-truncation" - > - LIST_SEPARATOR :: - LOG_BUILD ON - LOG_CONFIGURE ON - BUILD_COMMAND - ${CMAKE_COMMAND} - --build . 
- --config $ - --parallel ${ep_procs} - $<$: - COMMAND - ${CMAKE_COMMAND} -E copy - /lib/$/${soci_lib_pre}soci_core${soci_lib_post}$<$:_d>${ep_lib_suffix} - /lib/$/${soci_lib_pre}soci_empty${soci_lib_post}$<$:_d>${ep_lib_suffix} - /lib/$/${soci_lib_pre}soci_sqlite3${soci_lib_post}$<$:_d>${ep_lib_suffix} - /lib - > - TEST_COMMAND "" - INSTALL_COMMAND "" - DEPENDS sqlite - BUILD_BYPRODUCTS - /lib/${soci_lib_pre}soci_core${soci_lib_post}${ep_lib_suffix} - /lib/${soci_lib_pre}soci_core${soci_lib_post}_d${ep_lib_suffix} - /lib/${soci_lib_pre}soci_empty${soci_lib_post}${ep_lib_suffix} - /lib/${soci_lib_pre}soci_empty${soci_lib_post}_d${ep_lib_suffix} - /lib/${soci_lib_pre}soci_sqlite3${soci_lib_post}${ep_lib_suffix} - /lib/${soci_lib_pre}soci_sqlite3${soci_lib_post}_d${ep_lib_suffix} - ) - ExternalProject_Get_Property (soci BINARY_DIR) - ExternalProject_Get_Property (soci SOURCE_DIR) - if (CMAKE_VERBOSE_MAKEFILE) - print_ep_logs (soci) - endif () - file (MAKE_DIRECTORY ${SOURCE_DIR}/include) - file (MAKE_DIRECTORY ${BINARY_DIR}/include) - foreach (_comp core empty sqlite3) - set_target_properties ("soci_${_comp}" PROPERTIES - IMPORTED_LOCATION_DEBUG - ${BINARY_DIR}/lib/${soci_lib_pre}soci_${_comp}${soci_lib_post}_d${ep_lib_suffix} - IMPORTED_LOCATION_RELEASE - ${BINARY_DIR}/lib/${soci_lib_pre}soci_${_comp}${soci_lib_post}${ep_lib_suffix} - INTERFACE_INCLUDE_DIRECTORIES - "${SOURCE_DIR}/include;${BINARY_DIR}/include") - add_dependencies ("soci_${_comp}" soci) # something has to depend on the ExternalProject to trigger it - target_link_libraries (ripple_libs INTERFACE "soci_${_comp}") - if (NOT _comp STREQUAL "core") - target_link_libraries ("soci_${_comp}" INTERFACE soci_core) - endif () - endforeach () -endif() - -foreach (_comp core empty sqlite3) - exclude_if_included ("soci_${_comp}") -endforeach () - - -exclude_if_included (soci) diff --git a/Builds/CMake/deps/Sqlite.cmake b/Builds/CMake/deps/Sqlite.cmake deleted file mode 100644 index 7b34c1121f2..00000000000 --- a/Builds/CMake/deps/Sqlite.cmake +++ /dev/null @@ -1,93 +0,0 @@ -#[===================================================================[ - NIH dep: sqlite -#]===================================================================] - -add_library (sqlite STATIC IMPORTED GLOBAL) - -if (NOT WIN32) - find_package(sqlite) -endif() - - -if(sqlite3) - set_target_properties (sqlite PROPERTIES - IMPORTED_LOCATION_DEBUG - ${sqlite3} - IMPORTED_LOCATION_RELEASE - ${sqlite3} - INTERFACE_INCLUDE_DIRECTORIES - ${SQLITE_INCLUDE_DIR}) - -else() - ExternalProject_Add (sqlite3 - PREFIX ${nih_cache_path} - # sqlite doesn't use git, but it provides versioned tarballs - URL https://www.sqlite.org/2018/sqlite-amalgamation-3260000.zip - http://www.sqlite.org/2018/sqlite-amalgamation-3260000.zip - https://www2.sqlite.org/2018/sqlite-amalgamation-3260000.zip - http://www2.sqlite.org/2018/sqlite-amalgamation-3260000.zip - # ^^^ version is apparent in the URL: 3260000 => 3.26.0 - URL_HASH SHA256=de5dcab133aa339a4cf9e97c40aa6062570086d6085d8f9ad7bc6ddf8a52096e - # Don't need to worry about MITM attacks too much because the download - # is checked against a strong hash - TLS_VERIFY false - # we wrote a very simple CMake file to build sqlite - # so that's what we copy here so that we can build with - # CMake. sqlite doesn't generally provided a build system - # for the single amalgamation source file. 
- PATCH_COMMAND - ${CMAKE_COMMAND} -E copy_if_different - ${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/CMake_sqlite3.txt - /CMakeLists.txt - CMAKE_ARGS - -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} - -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - $<$:-DCMAKE_VERBOSE_MAKEFILE=ON> - -DCMAKE_DEBUG_POSTFIX=_d - $<$>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}> - $<$: - "-DCMAKE_C_FLAGS=-GR -Gd -fp:precise -FS -MP" - "-DCMAKE_C_FLAGS_DEBUG=-MTd" - "-DCMAKE_C_FLAGS_RELEASE=-MT" - > - LOG_BUILD ON - LOG_CONFIGURE ON - BUILD_COMMAND - ${CMAKE_COMMAND} - --build . - --config $ - --parallel ${ep_procs} - $<$: - COMMAND - ${CMAKE_COMMAND} -E copy - /$/${ep_lib_prefix}sqlite3$<$:_d>${ep_lib_suffix} - - > - TEST_COMMAND "" - INSTALL_COMMAND "" - BUILD_BYPRODUCTS - /${ep_lib_prefix}sqlite3${ep_lib_suffix} - /${ep_lib_prefix}sqlite3_d${ep_lib_suffix} - ) - ExternalProject_Get_Property (sqlite3 BINARY_DIR) - ExternalProject_Get_Property (sqlite3 SOURCE_DIR) - if (CMAKE_VERBOSE_MAKEFILE) - print_ep_logs (sqlite3) - endif () - - set_target_properties (sqlite PROPERTIES - IMPORTED_LOCATION_DEBUG - ${BINARY_DIR}/${ep_lib_prefix}sqlite3_d${ep_lib_suffix} - IMPORTED_LOCATION_RELEASE - ${BINARY_DIR}/${ep_lib_prefix}sqlite3${ep_lib_suffix} - INTERFACE_INCLUDE_DIRECTORIES - ${SOURCE_DIR}) - - add_dependencies (sqlite sqlite3) - exclude_if_included (sqlite3) -endif() - -target_link_libraries (sqlite INTERFACE $<$>:dl>) -target_link_libraries (ripple_libs INTERFACE sqlite) -exclude_if_included (sqlite) -set(sqlite_BINARY_DIR ${BINARY_DIR}) diff --git a/Builds/CMake/deps/cassandra.cmake b/Builds/CMake/deps/cassandra.cmake deleted file mode 100644 index 4563a34137e..00000000000 --- a/Builds/CMake/deps/cassandra.cmake +++ /dev/null @@ -1,167 +0,0 @@ -if(reporting) - find_library(cassandra NAMES cassandra) - if(NOT cassandra) - - message("System installed Cassandra cpp driver not found. Will build") - - find_library(zlib NAMES zlib1g-dev zlib-devel zlib z) - if(NOT zlib) - message("zlib not found. will build") - add_library(zlib STATIC IMPORTED GLOBAL) - ExternalProject_Add(zlib_src - PREFIX ${nih_cache_path} - GIT_REPOSITORY https://github.com/madler/zlib.git - GIT_TAG v1.2.12 - INSTALL_COMMAND "" - BUILD_BYPRODUCTS /${ep_lib_prefix}z.a - LOG_BUILD TRUE - LOG_CONFIGURE TRUE - ) - - - ExternalProject_Get_Property (zlib_src SOURCE_DIR) - ExternalProject_Get_Property (zlib_src BINARY_DIR) - set (zlib_src_SOURCE_DIR "${SOURCE_DIR}") - file (MAKE_DIRECTORY ${zlib_src_SOURCE_DIR}/include) - - set_target_properties (zlib PROPERTIES - IMPORTED_LOCATION - ${BINARY_DIR}/${ep_lib_prefix}z.a - INTERFACE_INCLUDE_DIRECTORIES - ${SOURCE_DIR}/include) - add_dependencies(zlib zlib_src) - - file(TO_CMAKE_PATH "${zlib_src_SOURCE_DIR}" zlib_src_SOURCE_DIR) - endif() - - - - - find_library(krb5 NAMES krb5-dev libkrb5-dev) - - if(NOT krb5) - message("krb5 not found. 
will build") - add_library(krb5 STATIC IMPORTED GLOBAL) - ExternalProject_Add(krb5_src - PREFIX ${nih_cache_path} - GIT_REPOSITORY https://github.com/krb5/krb5.git - GIT_TAG krb5-1.20-final - UPDATE_COMMAND "" - CONFIGURE_COMMAND autoreconf src && CFLAGS=-fcommon ./src/configure --enable-static --disable-shared > /dev/null - BUILD_IN_SOURCE 1 - BUILD_COMMAND make - INSTALL_COMMAND "" - BUILD_BYPRODUCTS /lib/${ep_lib_prefix}krb5.a - LOG_BUILD TRUE - ) - - ExternalProject_Get_Property (krb5_src SOURCE_DIR) - ExternalProject_Get_Property (krb5_src BINARY_DIR) - set (krb5_src_SOURCE_DIR "${SOURCE_DIR}") - file (MAKE_DIRECTORY ${krb5_src_SOURCE_DIR}/include) - - set_target_properties (krb5 PROPERTIES - IMPORTED_LOCATION - ${BINARY_DIR}/lib/${ep_lib_prefix}krb5.a - INTERFACE_INCLUDE_DIRECTORIES - ${SOURCE_DIR}/include) - add_dependencies(krb5 krb5_src) - - file(TO_CMAKE_PATH "${krb5_src_SOURCE_DIR}" krb5_src_SOURCE_DIR) - endif() - - - find_library(libuv1 NAMES uv1 libuv1 liubuv1-dev libuv1:amd64) - - - if(NOT libuv1) - message("libuv1 not found, will build") - add_library(libuv1 STATIC IMPORTED GLOBAL) - ExternalProject_Add(libuv_src - PREFIX ${nih_cache_path} - GIT_REPOSITORY https://github.com/libuv/libuv.git - GIT_TAG v1.44.2 - INSTALL_COMMAND "" - BUILD_BYPRODUCTS /${ep_lib_prefix}uv_a.a - LOG_BUILD TRUE - LOG_CONFIGURE TRUE - ) - - ExternalProject_Get_Property (libuv_src SOURCE_DIR) - ExternalProject_Get_Property (libuv_src BINARY_DIR) - set (libuv_src_SOURCE_DIR "${SOURCE_DIR}") - file (MAKE_DIRECTORY ${libuv_src_SOURCE_DIR}/include) - - set_target_properties (libuv1 PROPERTIES - IMPORTED_LOCATION - ${BINARY_DIR}/${ep_lib_prefix}uv_a.a - INTERFACE_INCLUDE_DIRECTORIES - ${SOURCE_DIR}/include) - add_dependencies(libuv1 libuv_src) - - file(TO_CMAKE_PATH "${libuv_src_SOURCE_DIR}" libuv_src_SOURCE_DIR) - endif() - - add_library (cassandra STATIC IMPORTED GLOBAL) - ExternalProject_Add(cassandra_src - PREFIX ${nih_cache_path} - GIT_REPOSITORY https://github.com/datastax/cpp-driver.git - GIT_TAG 2.16.2 - CMAKE_ARGS - -DLIBUV_ROOT_DIR=${BINARY_DIR} - -DLIBUV_LIBARY=${BINARY_DIR}/libuv_a.a - -DLIBUV_INCLUDE_DIR=${SOURCE_DIR}/include - -DCASS_BUILD_STATIC=ON - -DCASS_BUILD_SHARED=OFF - -DOPENSSL_ROOT_DIR=/opt/local/openssl - INSTALL_COMMAND "" - BUILD_BYPRODUCTS /${ep_lib_prefix}cassandra_static.a - LOG_BUILD TRUE - LOG_CONFIGURE TRUE - ) - - ExternalProject_Get_Property (cassandra_src SOURCE_DIR) - ExternalProject_Get_Property (cassandra_src BINARY_DIR) - set (cassandra_src_SOURCE_DIR "${SOURCE_DIR}") - file (MAKE_DIRECTORY ${cassandra_src_SOURCE_DIR}/include) - - set_target_properties (cassandra PROPERTIES - IMPORTED_LOCATION - ${BINARY_DIR}/${ep_lib_prefix}cassandra_static.a - INTERFACE_INCLUDE_DIRECTORIES - ${SOURCE_DIR}/include) - add_dependencies(cassandra cassandra_src) - - if(NOT libuv1) - ExternalProject_Add_StepDependencies(cassandra_src build libuv1) - target_link_libraries(cassandra INTERFACE libuv1) - else() - target_link_libraries(cassandra INTERFACE ${libuv1}) - endif() - if(NOT krb5) - - ExternalProject_Add_StepDependencies(cassandra_src build krb5) - target_link_libraries(cassandra INTERFACE krb5) - else() - target_link_libraries(cassandra INTERFACE ${krb5}) - endif() - - if(NOT zlib) - ExternalProject_Add_StepDependencies(cassandra_src build zlib) - target_link_libraries(cassandra INTERFACE zlib) - else() - target_link_libraries(cassandra INTERFACE ${zlib}) - endif() - - file(TO_CMAKE_PATH "${cassandra_src_SOURCE_DIR}" cassandra_src_SOURCE_DIR) - target_link_libraries(ripple_libs 
INTERFACE cassandra) - else() - message("Found system installed cassandra cpp driver") - - find_path(cassandra_includes NAMES cassandra.h REQUIRED) - target_link_libraries (ripple_libs INTERFACE ${cassandra}) - target_include_directories(ripple_libs INTERFACE ${cassandra_includes}) - endif() - - exclude_if_included (cassandra) -endif() diff --git a/Builds/CMake/deps/date.cmake b/Builds/CMake/deps/date.cmake deleted file mode 100644 index b9155c26475..00000000000 --- a/Builds/CMake/deps/date.cmake +++ /dev/null @@ -1,18 +0,0 @@ -#[===================================================================[ - NIH dep: date - - the main library is header-only, thus is an INTERFACE lib in CMake. - - NOTE: this has been accepted into c++20 so can likely be replaced - when we update to that standard -#]===================================================================] - -find_package (date QUIET) -if (NOT TARGET date::date) - FetchContent_Declare( - hh_date_src - GIT_REPOSITORY https://github.com/HowardHinnant/date.git - GIT_TAG fc4cf092f9674f2670fb9177edcdee870399b829 - ) - FetchContent_MakeAvailable(hh_date_src) -endif () diff --git a/Builds/CMake/deps/gRPC.cmake b/Builds/CMake/deps/gRPC.cmake index 8dd09417563..44185b3a248 100644 --- a/Builds/CMake/deps/gRPC.cmake +++ b/Builds/CMake/deps/gRPC.cmake @@ -1,319 +1,15 @@ - -# currently linking to unsecure versions...if we switch, we'll -# need to add ssl as a link dependency to the grpc targets -option (use_secure_grpc "use TLS version of grpc libs." OFF) -if (use_secure_grpc) - set (grpc_suffix "") -else () - set (grpc_suffix "_unsecure") -endif () - -find_package (gRPC 1.23 CONFIG QUIET) -if (TARGET gRPC::gpr AND NOT local_grpc) - get_target_property (_grpc_l gRPC::gpr IMPORTED_LOCATION_DEBUG) - if (NOT _grpc_l) - get_target_property (_grpc_l gRPC::gpr IMPORTED_LOCATION_RELEASE) - endif () - if (NOT _grpc_l) - get_target_property (_grpc_l gRPC::gpr IMPORTED_LOCATION) - endif () - message (STATUS "Found cmake config for gRPC. Using ${_grpc_l}.") -else () - find_package (PkgConfig QUIET) - if (PKG_CONFIG_FOUND) - pkg_check_modules (grpc QUIET "grpc${grpc_suffix}>=1.25" "grpc++${grpc_suffix}" gpr) - endif () - - if (grpc_FOUND) - message (STATUS "Found gRPC using pkg-config. 
Using ${grpc_gpr_PREFIX}.") - endif () - - add_executable (gRPC::grpc_cpp_plugin IMPORTED) - exclude_if_included (gRPC::grpc_cpp_plugin) - - if (grpc_FOUND AND NOT local_grpc) - # use installed grpc (via pkg-config) - macro (add_imported_grpc libname_) - if (static) - set (_search "${CMAKE_STATIC_LIBRARY_PREFIX}${libname_}${CMAKE_STATIC_LIBRARY_SUFFIX}") - else () - set (_search "${CMAKE_SHARED_LIBRARY_PREFIX}${libname_}${CMAKE_SHARED_LIBRARY_SUFFIX}") - endif() - find_library(_found_${libname_} - NAMES ${_search} - HINTS ${grpc_LIBRARY_DIRS}) - if (_found_${libname_}) - message (STATUS "importing ${libname_} as ${_found_${libname_}}") - else () - message (FATAL_ERROR "using pkg-config for grpc, can't find ${_search}") - endif () - add_library ("gRPC::${libname_}" STATIC IMPORTED GLOBAL) - set_target_properties ("gRPC::${libname_}" PROPERTIES IMPORTED_LOCATION ${_found_${libname_}}) - if (grpc_INCLUDE_DIRS) - set_target_properties ("gRPC::${libname_}" PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${grpc_INCLUDE_DIRS}) - endif () - target_link_libraries (ripple_libs INTERFACE "gRPC::${libname_}") - exclude_if_included ("gRPC::${libname_}") - endmacro () - - set_target_properties (gRPC::grpc_cpp_plugin PROPERTIES - IMPORTED_LOCATION "${grpc_gpr_PREFIX}/bin/grpc_cpp_plugin${CMAKE_EXECUTABLE_SUFFIX}") - - pkg_check_modules (cares QUIET libcares) - if (cares_FOUND) - if (static) - set (_search "${CMAKE_STATIC_LIBRARY_PREFIX}cares${CMAKE_STATIC_LIBRARY_SUFFIX}") - set (_prefix cares_STATIC) - set (_static STATIC) - else () - set (_search "${CMAKE_SHARED_LIBRARY_PREFIX}cares${CMAKE_SHARED_LIBRARY_SUFFIX}") - set (_prefix cares) - set (_static) - endif() - find_library(_location NAMES ${_search} HINTS ${cares_LIBRARY_DIRS}) - if (NOT _location) - message (FATAL_ERROR "using pkg-config for grpc, can't find c-ares") - endif () - add_library (c-ares::cares ${_static} IMPORTED GLOBAL) - set_target_properties (c-ares::cares PROPERTIES - IMPORTED_LOCATION ${_location} - INTERFACE_INCLUDE_DIRECTORIES "${${_prefix}_INCLUDE_DIRS}" - INTERFACE_LINK_OPTIONS "${${_prefix}_LDFLAGS}" - ) - exclude_if_included (c-ares::cares) - else () - message (FATAL_ERROR "using pkg-config for grpc, can't find c-ares") - endif () - else () - #[===========================[ - c-ares (grpc requires) - #]===========================] - ExternalProject_Add (c-ares_src - PREFIX ${nih_cache_path} - GIT_REPOSITORY https://github.com/c-ares/c-ares.git - GIT_TAG cares-1_15_0 - CMAKE_ARGS - -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - $<$:-DCMAKE_VERBOSE_MAKEFILE=ON> - -DCMAKE_DEBUG_POSTFIX=_d - $<$>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}> - -DCMAKE_INSTALL_PREFIX=/_installed_ - -DCARES_SHARED=OFF - -DCARES_STATIC=ON - -DCARES_STATIC_PIC=ON - -DCARES_INSTALL=ON - -DCARES_MSVC_STATIC_RUNTIME=ON - $<$: - "-DCMAKE_C_FLAGS=-GR -Gd -fp:precise -FS -MP" - > - LOG_BUILD ON - LOG_CONFIGURE ON - BUILD_COMMAND - ${CMAKE_COMMAND} - --build . - --config $ - --parallel ${ep_procs} - TEST_COMMAND "" - INSTALL_COMMAND - ${CMAKE_COMMAND} -E env --unset=DESTDIR ${CMAKE_COMMAND} --build . 
--config $ --target install - BUILD_BYPRODUCTS - /_installed_/lib/${ep_lib_prefix}cares${ep_lib_suffix} - /_installed_/lib/${ep_lib_prefix}cares_d${ep_lib_suffix} - ) - exclude_if_included (c-ares_src) - ExternalProject_Get_Property (c-ares_src BINARY_DIR) - set (cares_binary_dir "${BINARY_DIR}") - - add_library (c-ares::cares STATIC IMPORTED GLOBAL) - file (MAKE_DIRECTORY ${BINARY_DIR}/_installed_/include) - set_target_properties (c-ares::cares PROPERTIES - IMPORTED_LOCATION_DEBUG - ${BINARY_DIR}/_installed_/lib/${ep_lib_prefix}cares_d${ep_lib_suffix} - IMPORTED_LOCATION_RELEASE - ${BINARY_DIR}/_installed_/lib/${ep_lib_prefix}cares${ep_lib_suffix} - INTERFACE_INCLUDE_DIRECTORIES - ${BINARY_DIR}/_installed_/include) - add_dependencies (c-ares::cares c-ares_src) - exclude_if_included (c-ares::cares) - - if (NOT has_zlib) - #[===========================[ - zlib (grpc requires) - #]===========================] - if (MSVC) - set (zlib_debug_postfix "d") # zlib cmake sets this internally for MSVC, so we really don't have a choice - set (zlib_base "zlibstatic") - else () - set (zlib_debug_postfix "_d") - set (zlib_base "z") - endif () - ExternalProject_Add (zlib_src - PREFIX ${nih_cache_path} - GIT_REPOSITORY https://github.com/madler/zlib.git - GIT_TAG v1.2.11 - CMAKE_ARGS - -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - $<$:-DCMAKE_VERBOSE_MAKEFILE=ON> - -DCMAKE_DEBUG_POSTFIX=${zlib_debug_postfix} - $<$>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}> - -DCMAKE_INSTALL_PREFIX=/_installed_ - -DBUILD_SHARED_LIBS=OFF - $<$: - "-DCMAKE_C_FLAGS=-GR -Gd -fp:precise -FS -MP" - "-DCMAKE_C_FLAGS_DEBUG=-MTd" - "-DCMAKE_C_FLAGS_RELEASE=-MT" - > - LOG_BUILD ON - LOG_CONFIGURE ON - BUILD_COMMAND - ${CMAKE_COMMAND} - --build . - --config $ - --parallel ${ep_procs} - TEST_COMMAND "" - INSTALL_COMMAND - ${CMAKE_COMMAND} -E env --unset=DESTDIR ${CMAKE_COMMAND} --build . 
--config $ --target install - BUILD_BYPRODUCTS - /_installed_/lib/${ep_lib_prefix}${zlib_base}${ep_lib_suffix} - /_installed_/lib/${ep_lib_prefix}${zlib_base}${zlib_debug_postfix}${ep_lib_suffix} - ) - exclude_if_included (zlib_src) - ExternalProject_Get_Property (zlib_src BINARY_DIR) - set (zlib_binary_dir "${BINARY_DIR}") - - add_library (ZLIB::ZLIB STATIC IMPORTED GLOBAL) - file (MAKE_DIRECTORY ${BINARY_DIR}/_installed_/include) - set_target_properties (ZLIB::ZLIB PROPERTIES - IMPORTED_LOCATION_DEBUG - ${BINARY_DIR}/_installed_/lib/${ep_lib_prefix}${zlib_base}${zlib_debug_postfix}${ep_lib_suffix} - IMPORTED_LOCATION_RELEASE - ${BINARY_DIR}/_installed_/lib/${ep_lib_prefix}${zlib_base}${ep_lib_suffix} - INTERFACE_INCLUDE_DIRECTORIES - ${BINARY_DIR}/_installed_/include) - add_dependencies (ZLIB::ZLIB zlib_src) - exclude_if_included (ZLIB::ZLIB) - endif () - - #[===========================[ - grpc - #]===========================] - ExternalProject_Add (grpc_src - PREFIX ${nih_cache_path} - GIT_REPOSITORY https://github.com/grpc/grpc.git - GIT_TAG v1.25.0 - CMAKE_ARGS - -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} - -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} - $<$:-DCMAKE_VERBOSE_MAKEFILE=ON> - $<$:-DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE}> - $<$:-DVCPKG_TARGET_TRIPLET=${VCPKG_TARGET_TRIPLET}> - $<$:-DCMAKE_UNITY_BUILD=ON}> - -DCMAKE_DEBUG_POSTFIX=_d - $<$>:-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}> - -DgRPC_BUILD_TESTS=OFF - -DgRPC_BENCHMARK_PROVIDER="" - -DgRPC_BUILD_CSHARP_EXT=OFF - -DgRPC_MSVC_STATIC_RUNTIME=ON - -DgRPC_INSTALL=OFF - -DgRPC_CARES_PROVIDER=package - -Dc-ares_DIR=${cares_binary_dir}/_installed_/lib/cmake/c-ares - -DgRPC_SSL_PROVIDER=package - -DOPENSSL_ROOT_DIR=${OPENSSL_ROOT_DIR} - -DgRPC_PROTOBUF_PROVIDER=package - -DProtobuf_USE_STATIC_LIBS=$,$>>,OFF,ON> - -DProtobuf_INCLUDE_DIR=$,:_:> - -DProtobuf_LIBRARY=$,$,$> - -DProtobuf_PROTOC_LIBRARY=$,$,$> - -DProtobuf_PROTOC_EXECUTABLE=$ - -DgRPC_ZLIB_PROVIDER=package - $<$>:-DZLIB_ROOT=${zlib_binary_dir}/_installed_> - $<$: - "-DCMAKE_CXX_FLAGS=-GR -Gd -fp:precise -FS -EHa -MP" - "-DCMAKE_C_FLAGS=-GR -Gd -fp:precise -FS -MP" - > - LOG_BUILD ON - LOG_CONFIGURE ON - BUILD_COMMAND - ${CMAKE_COMMAND} - --build . 
- --config $ - --parallel ${ep_procs} - $<$: - COMMAND - ${CMAKE_COMMAND} -E copy - /$/${ep_lib_prefix}grpc${grpc_suffix}$<$:_d>${ep_lib_suffix} - /$/${ep_lib_prefix}grpc++${grpc_suffix}$<$:_d>${ep_lib_suffix} - /$/${ep_lib_prefix}address_sorting$<$:_d>${ep_lib_suffix} - /$/${ep_lib_prefix}gpr$<$:_d>${ep_lib_suffix} - /$/grpc_cpp_plugin${CMAKE_EXECUTABLE_SUFFIX} - - > - LIST_SEPARATOR :_: - TEST_COMMAND "" - INSTALL_COMMAND "" - DEPENDS c-ares_src - BUILD_BYPRODUCTS - /${ep_lib_prefix}grpc${grpc_suffix}${ep_lib_suffix} - /${ep_lib_prefix}grpc${grpc_suffix}_d${ep_lib_suffix} - /${ep_lib_prefix}grpc++${grpc_suffix}${ep_lib_suffix} - /${ep_lib_prefix}grpc++${grpc_suffix}_d${ep_lib_suffix} - /${ep_lib_prefix}address_sorting${ep_lib_suffix} - /${ep_lib_prefix}address_sorting_d${ep_lib_suffix} - /${ep_lib_prefix}gpr${ep_lib_suffix} - /${ep_lib_prefix}gpr_d${ep_lib_suffix} - /grpc_cpp_plugin${CMAKE_EXECUTABLE_SUFFIX} - ) - if (TARGET protobuf_src) - ExternalProject_Add_StepDependencies(grpc_src build protobuf_src) - endif () - exclude_if_included (grpc_src) - ExternalProject_Get_Property (grpc_src BINARY_DIR) - ExternalProject_Get_Property (grpc_src SOURCE_DIR) - set (grpc_binary_dir "${BINARY_DIR}") - set (grpc_source_dir "${SOURCE_DIR}") - if (CMAKE_VERBOSE_MAKEFILE) - print_ep_logs (grpc_src) - endif () - file (MAKE_DIRECTORY ${SOURCE_DIR}/include) - - macro (add_imported_grpc libname_) - add_library ("gRPC::${libname_}" STATIC IMPORTED GLOBAL) - set_target_properties ("gRPC::${libname_}" PROPERTIES - IMPORTED_LOCATION_DEBUG - ${grpc_binary_dir}/${ep_lib_prefix}${libname_}_d${ep_lib_suffix} - IMPORTED_LOCATION_RELEASE - ${grpc_binary_dir}/${ep_lib_prefix}${libname_}${ep_lib_suffix} - INTERFACE_INCLUDE_DIRECTORIES - ${grpc_source_dir}/include) - add_dependencies ("gRPC::${libname_}" grpc_src) - target_link_libraries (ripple_libs INTERFACE "gRPC::${libname_}") - exclude_if_included ("gRPC::${libname_}") - endmacro () - - set_target_properties (gRPC::grpc_cpp_plugin PROPERTIES - IMPORTED_LOCATION "${grpc_binary_dir}/grpc_cpp_plugin${CMAKE_EXECUTABLE_SUFFIX}") - add_dependencies (gRPC::grpc_cpp_plugin grpc_src) - endif () - - add_imported_grpc (gpr) - add_imported_grpc ("grpc${grpc_suffix}") - add_imported_grpc ("grpc++${grpc_suffix}") - add_imported_grpc (address_sorting) - - target_link_libraries ("gRPC::grpc${grpc_suffix}" INTERFACE c-ares::cares gRPC::gpr gRPC::address_sorting ZLIB::ZLIB) - target_link_libraries ("gRPC::grpc++${grpc_suffix}" INTERFACE "gRPC::grpc${grpc_suffix}" gRPC::gpr) -endif () +find_package(gRPC 1.23) #[=================================[ generate protobuf sources for grpc defs and bundle into a static lib #]=================================] -set (GRPC_GEN_DIR "${CMAKE_BINARY_DIR}/proto_gen_grpc") -file (MAKE_DIRECTORY ${GRPC_GEN_DIR}) -set (GRPC_PROTO_SRCS) -set (GRPC_PROTO_HDRS) -set (GRPC_PROTO_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/src/ripple/proto/org") +set(GRPC_GEN_DIR "${CMAKE_BINARY_DIR}/proto_gen_grpc") +file(MAKE_DIRECTORY ${GRPC_GEN_DIR}) +set(GRPC_PROTO_SRCS) +set(GRPC_PROTO_HDRS) +set(GRPC_PROTO_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/src/ripple/proto/org") file(GLOB_RECURSE GRPC_DEFINITION_FILES LIST_DIRECTORIES false "${GRPC_PROTO_ROOT}/*.proto") foreach(file ${GRPC_DEFINITION_FILES}) get_filename_component(_abs_file ${file} ABSOLUTE) @@ -324,10 +20,10 @@ foreach(file ${GRPC_DEFINITION_FILES}) get_filename_component(_rel_root_dir ${_rel_root_file} DIRECTORY) file(RELATIVE_PATH _rel_dir ${CMAKE_CURRENT_SOURCE_DIR} ${_abs_dir}) - set (src_1 
"${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.grpc.pb.cc") - set (src_2 "${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.pb.cc") - set (hdr_1 "${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.grpc.pb.h") - set (hdr_2 "${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.pb.h") + set(src_1 "${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.grpc.pb.cc") + set(src_2 "${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.pb.cc") + set(hdr_1 "${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.grpc.pb.h") + set(hdr_2 "${GRPC_GEN_DIR}/${_rel_root_dir}/${_basename}.pb.h") add_custom_command( OUTPUT ${src_1} ${src_2} ${hdr_1} ${hdr_2} COMMAND protobuf::protoc @@ -345,20 +41,22 @@ foreach(file ${GRPC_DEFINITION_FILES}) list(APPEND GRPC_PROTO_HDRS ${hdr_1} ${hdr_2}) endforeach() -add_library (grpc_pbufs STATIC ${GRPC_PROTO_SRCS} ${GRPC_PROTO_HDRS}) -#target_include_directories (grpc_pbufs PRIVATE src) -target_include_directories (grpc_pbufs SYSTEM PUBLIC ${GRPC_GEN_DIR}) -target_link_libraries (grpc_pbufs protobuf::libprotobuf "gRPC::grpc++${grpc_suffix}") -target_compile_options (grpc_pbufs +add_library(grpc_pbufs STATIC ${GRPC_PROTO_SRCS} ${GRPC_PROTO_HDRS}) +#target_include_directories(grpc_pbufs PRIVATE src) +target_include_directories(grpc_pbufs SYSTEM PUBLIC ${GRPC_GEN_DIR}) +target_link_libraries(grpc_pbufs + "gRPC::grpc++" + # libgrpc is missing references. + absl::random_random +) +target_compile_options(grpc_pbufs PRIVATE $<$:-wd4065> $<$>:-Wno-deprecated-declarations> PUBLIC $<$:-wd4996> - $<$: + $<$: --system-header-prefix="google/protobuf" -Wno-deprecated-dynamic-exception-spec >) -add_library (Ripple::grpc_pbufs ALIAS grpc_pbufs) -target_link_libraries (ripple_libs INTERFACE Ripple::grpc_pbufs) -exclude_if_included (grpc_pbufs) +add_library(Ripple::grpc_pbufs ALIAS grpc_pbufs) diff --git a/Builds/CMake/rocks_thirdparty.inc b/Builds/CMake/rocks_thirdparty.inc deleted file mode 100644 index cba23df71c8..00000000000 --- a/Builds/CMake/rocks_thirdparty.inc +++ /dev/null @@ -1,15 +0,0 @@ -set (THIRDPARTY_LIBS "") - -if(WITH_SNAPPY) - add_definitions(-DSNAPPY) - include_directories(${snappy_INCLUDE_DIRS}) - set (THIRDPARTY_LIBS ${THIRDPARTY_LIBS} ${snappy_LIBRARIES}) -endif() - -if(WITH_LZ4) - add_definitions(-DLZ4) - include_directories(${lz4_INCLUDE_DIRS}) - set (THIRDPARTY_LIBS ${THIRDPARTY_LIBS} ${lz4_LIBRARIES}) -endif() - - diff --git a/Builds/CMake/rocksdb_build_version.cc.in b/Builds/CMake/rocksdb_build_version.cc.in deleted file mode 100644 index 9ef424669b4..00000000000 --- a/Builds/CMake/rocksdb_build_version.cc.in +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. - -#include - -#include "rocksdb/version.h" -#include "util/string_util.h" - -// The build script may replace these values with real values based -// on whether or not GIT is available and the platform settings -static const std::string rocksdb_build_git_sha = "rocksdb_build_git_sha:@GIT_SHA@"; -static const std::string rocksdb_build_git_tag = "rocksdb_build_git_tag:@GIT_TAG@"; -#define HAS_GIT_CHANGES @GIT_MOD@ -#if HAS_GIT_CHANGES == 0 -// If HAS_GIT_CHANGES is 0, the GIT date is used. -// Use the time the branch/tag was last modified -static const std::string rocksdb_build_date = "rocksdb_build_date:@GIT_DATE@"; -#else -// If HAS_GIT_CHANGES is > 0, the branch/tag has modifications. -// Use the time the build was created. 
-static const std::string rocksdb_build_date = "rocksdb_build_date:@BUILD_DATE@"; -#endif - -namespace ROCKSDB_NAMESPACE { -static void AddProperty(std::unordered_map *props, const std::string& name) { - size_t colon = name.find(":"); - if (colon != std::string::npos && colon > 0 && colon < name.length() - 1) { - // If we found a "@:", then this property was a build-time substitution that failed. Skip it - size_t at = name.find("@", colon); - if (at != colon + 1) { - // Everything before the colon is the name, after is the value - (*props)[name.substr(0, colon)] = name.substr(colon + 1); - } - } -} - -static std::unordered_map* LoadPropertiesSet() { - auto * properties = new std::unordered_map(); - AddProperty(properties, rocksdb_build_git_sha); - AddProperty(properties, rocksdb_build_git_tag); - AddProperty(properties, rocksdb_build_date); - return properties; -} - -const std::unordered_map& GetRocksBuildProperties() { - static std::unique_ptr> props(LoadPropertiesSet()); - return *props; -} - -std::string GetRocksVersionAsString(bool with_patch) { - std::string version = ToString(ROCKSDB_MAJOR) + "." + ToString(ROCKSDB_MINOR); - if (with_patch) { - return version + "." + ToString(ROCKSDB_PATCH); - } else { - return version; - } -} - -std::string GetRocksBuildInfoAsString(const std::string& program, bool verbose) { - std::string info = program + " (RocksDB) " + GetRocksVersionAsString(true); - if (verbose) { - for (const auto& it : GetRocksBuildProperties()) { - info.append("\n "); - info.append(it.first); - info.append(": "); - info.append(it.second); - } - } - return info; -} -} // namespace ROCKSDB_NAMESPACE - diff --git a/Builds/CMake/soci_patch.cmake b/Builds/CMake/soci_patch.cmake deleted file mode 100644 index 0c2a75c0dff..00000000000 --- a/Builds/CMake/soci_patch.cmake +++ /dev/null @@ -1,49 +0,0 @@ -# This patches unsigned-types.h in the soci official sources -# so as to remove type range check exceptions that cause -# us trouble when using boost::optional to select int values - -# Soci's CMake setup leaves flags in place that will cause warnings to -# be treated as errors, but some compiler versions throw "new" warnings -# that then cause the build to fail. Simplify that until soci fixes -# those warnings. -if (RIPPLED_SOURCE) - execute_process( COMMAND ${CMAKE_COMMAND} -E copy_if_different - ${RIPPLED_SOURCE}/Builds/CMake/SociConfig.cmake.patched - cmake/SociConfig.cmake ) -endif () - -# Some versions of CMake erroneously patch external projects on every build. -# If the patch makes no changes, skip it. This workaround can be -# removed once we stop supporting vulnerable versions of CMake. -# https://gitlab.kitware.com/cmake/cmake/-/issues/21086 -file (STRINGS include/soci/unsigned-types.h sourcecode) -# Delete the .patched file if it exists, so it doesn't end up duplicated. -# Trying to remove a file that does not exist is not a problem. -file (REMOVE include/soci/unsigned-types.h.patched) -foreach (line_ ${sourcecode}) - if (line_ MATCHES "^[ \\t]+throw[ ]+soci_error[ ]*\\([ ]*\"Value outside of allowed.+$") - set (line_ "//${CMAKE_MATCH_0}") - endif () - file (APPEND include/soci/unsigned-types.h.patched "${line_}\n") -endforeach () -execute_process( COMMAND ${CMAKE_COMMAND} -E compare_files - include/soci/unsigned-types.h include/soci/unsigned-types.h.patched - RESULT_VARIABLE compare_result -) -if( compare_result EQUAL 0) - message(DEBUG "The soci source and patch files are identical. 
Make no changes.") - file (REMOVE include/soci/unsigned-types.h.patched) - return() -endif() -file (RENAME include/soci/unsigned-types.h include/soci/unsigned-types.h.orig) -file (RENAME include/soci/unsigned-types.h.patched include/soci/unsigned-types.h) -# also fix Boost.cmake so that it just returns when we override the Boost_FOUND var -file (APPEND cmake/dependencies/Boost.cmake.patched "if (Boost_FOUND)\n") -file (APPEND cmake/dependencies/Boost.cmake.patched " return ()\n") -file (APPEND cmake/dependencies/Boost.cmake.patched "endif ()\n") -file (STRINGS cmake/dependencies/Boost.cmake sourcecode) -foreach (line_ ${sourcecode}) - file (APPEND cmake/dependencies/Boost.cmake.patched "${line_}\n") -endforeach () -file (RENAME cmake/dependencies/Boost.cmake.patched cmake/dependencies/Boost.cmake) - diff --git a/CMakeLists.txt b/CMakeLists.txt index d3b494c103a..a3696e89d04 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,10 +1,17 @@ -cmake_minimum_required (VERSION 3.16) +cmake_minimum_required(VERSION 3.16) -if (POLICY CMP0074) +if(POLICY CMP0074) cmake_policy(SET CMP0074 NEW) -endif () +endif() +if(POLICY CMP0077) + cmake_policy(SET CMP0077 NEW) +endif() -project (rippled) +# Fix "unrecognized escape" issues when passing CMAKE_MODULE_PATH on Windows. +file(TO_CMAKE_PATH "${CMAKE_MODULE_PATH}" CMAKE_MODULE_PATH) +list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake") + +project(rippled) set(CMAKE_CXX_EXTENSIONS OFF) set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED ON) @@ -21,15 +28,12 @@ if(Git_FOUND) endif() endif() #git -if (thread_safety_analysis) +if(thread_safety_analysis) add_compile_options(-Wthread-safety -D_LIBCPP_ENABLE_THREAD_SAFETY_ANNOTATIONS -DRIPPLE_ENABLE_THREAD_SAFETY_ANNOTATIONS) add_compile_options("-stdlib=libc++") add_link_options("-stdlib=libc++") endif() -list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake") -list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Builds/CMake/deps") - include (CheckCXXCompilerFlag) include (FetchContent) include (ExternalProject) @@ -42,7 +46,6 @@ endif () include(RippledSanity) include(RippledVersion) include(RippledSettings) -include(RippledNIH) include(RippledRelease) # this check has to remain in the top-level cmake # because of the early return statement @@ -58,21 +61,57 @@ include(RippledInterface) ### include(deps/Boost) -include(deps/OpenSSL) -include(deps/Secp256k1) -include(deps/Ed25519-donna) -include(deps/Lz4) -include(deps/Libarchive) -include(deps/Sqlite) -include(deps/Soci) -include(deps/Snappy) -include(deps/Rocksdb) -include(deps/Nudb) -include(deps/date) +find_package(OpenSSL 1.1.1 REQUIRED) +set_target_properties(OpenSSL::SSL PROPERTIES + INTERFACE_COMPILE_DEFINITIONS OPENSSL_NO_SSL2 +) +add_subdirectory(src/secp256k1) +add_subdirectory(src/ed25519-donna) +find_package(lz4 REQUIRED) +# Target names with :: are not allowed in a generator expression. +# We need to pull the include directories and imported location properties +# from separate targets. 
+find_package(LibArchive REQUIRED) +find_package(SOCI REQUIRED) +find_package(SQLite3 REQUIRED) +find_package(Snappy REQUIRED) + +option(rocksdb "Enable RocksDB" ON) +if(rocksdb) + find_package(RocksDB REQUIRED) + set_target_properties(RocksDB::rocksdb PROPERTIES + INTERFACE_COMPILE_DEFINITIONS RIPPLE_ROCKSDB_AVAILABLE=1 + ) + target_link_libraries(ripple_libs INTERFACE RocksDB::rocksdb) +endif() + +find_package(nudb REQUIRED) +find_package(date REQUIRED) include(deps/Protobuf) include(deps/gRPC) -include(deps/cassandra) -include(deps/Postgres) + +target_link_libraries(ripple_libs INTERFACE + ed25519::ed25519 + LibArchive::LibArchive + lz4::lz4 + nudb::core + OpenSSL::Crypto + OpenSSL::SSL + Ripple::grpc_pbufs + Ripple::pbufs + secp256k1::secp256k1 + soci::soci + SQLite::SQLite3 +) + +if(reporting) + find_package(cassandra-cpp-driver REQUIRED) + find_package(PostgreSQL REQUIRED) + target_link_libraries(ripple_libs INTERFACE + cassandra-cpp-driver::cassandra-cpp-driver + PostgreSQL::PostgreSQL + ) +endif() ### diff --git a/conanfile.py b/conanfile.py new file mode 100644 index 00000000000..61e131f2f3d --- /dev/null +++ b/conanfile.py @@ -0,0 +1,149 @@ +from conans import ConanFile +from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout +import re + +class Xrpl(ConanFile): + name = 'xrpl' + + license = 'ISC' + author = 'John Freeman ' + url = 'https://github.com/xrplf/rippled' + description = 'The XRP Ledger' + settings = 'os', 'compiler', 'build_type', 'arch' + options = { + 'assertions': [True, False], + 'coverage': [True, False], + 'fPIC': [True, False], + 'jemalloc': [True, False], + 'reporting': [True, False], + 'rocksdb': [True, False], + 'shared': [True, False], + 'static': [True, False], + 'tests': [True, False], + 'unity': [True, False], + } + + requires = [ + 'boost/1.77.0', + 'date/3.0.1', + 'libarchive/3.6.0', + 'lz4/1.9.3', + 'grpc/1.44.0', + 'nudb/2.0.8', + 'openssl/1.1.1m', + 'protobuf/3.21.4', + 'snappy/1.1.9', + 'soci/4.0.3', + 'sqlite3/3.38.0', + 'zlib/1.2.12', + ] + + default_options = { + 'assertions': False, + 'coverage': False, + 'fPIC': True, + 'jemalloc': False, + 'reporting': False, + 'rocksdb': True, + 'shared': False, + 'static': True, + 'tests': True, + 'unity': False, + + 'cassandra-cpp-driver:shared': False, + 'date:header_only': True, + 'grpc:shared': False, + 'grpc:secure': True, + 'libarchive:shared': False, + 'libarchive:with_acl': False, + 'libarchive:with_bzip2': False, + 'libarchive:with_cng': False, + 'libarchive:with_expat': False, + 'libarchive:with_iconv': False, + 'libarchive:with_libxml2': False, + 'libarchive:with_lz4': True, + 'libarchive:with_lzma': False, + 'libarchive:with_lzo': False, + 'libarchive:with_nettle': False, + 'libarchive:with_openssl': False, + 'libarchive:with_pcreposix': False, + 'libarchive:with_xattr': False, + 'libarchive:with_zlib': False, + 'libpq:shared': False, + 'lz4:shared': False, + 'openssl:shared': False, + 'protobuf:shared': False, + 'protobuf:with_zlib': True, + 'rocksdb:enable_sse': False, + 'rocksdb:lite': False, + 'rocksdb:shared': False, + 'rocksdb:use_rtti': True, + 'rocksdb:with_jemalloc': False, + 'rocksdb:with_lz4': True, + 'rocksdb:with_snappy': True, + 'snappy:shared': False, + 'soci:shared': False, + 'soci:with_sqlite3': True, + 'soci:with_boost': True, + } + + def set_version(self): + path = f'{self.recipe_folder}/src/ripple/protocol/impl/BuildInfo.cpp' + regex = r'versionString\s?=\s?\"(.*)\"' + with open(path, 'r') as file: + matches = (re.search(regex, line) for line in file) + match = 
next(m for m in matches if m) + self.version = match.group(1) + + def configure(self): + if self.settings.compiler == 'apple-clang': + self.options['boost'].visibility = 'global' + + def requirements(self): + if self.options.jemalloc: + self.requires('jemalloc/5.2.1') + if self.options.reporting: + self.requires('cassandra-cpp-driver/2.15.3') + self.requires('libpq/13.6') + if self.options.rocksdb: + self.requires('rocksdb/6.27.3') + + exports_sources = 'CMakeLists.txt', 'Builds/CMake/*', 'src/*', 'cfg/*' + + def layout(self): + cmake_layout(self) + # Fix this setting to follow the default introduced in Conan 1.48 + # to align with our build instructions. + self.folders.generators = 'build/generators' + + generators = 'CMakeDeps' + def generate(self): + tc = CMakeToolchain(self) + tc.variables['tests'] = self.options.tests + tc.variables['assert'] = self.options.assertions + tc.variables['coverage'] = self.options.coverage + tc.variables['jemalloc'] = self.options.jemalloc + tc.variables['reporting'] = self.options.reporting + tc.variables['rocksdb'] = self.options.rocksdb + tc.variables['BUILD_SHARED_LIBS'] = self.options.shared + tc.variables['static'] = self.options.static + tc.variables['unity'] = self.options.unity + tc.generate() + + def build(self): + cmake = CMake(self) + cmake.verbose = True + cmake.configure() + cmake.build() + + def package(self): + cmake = CMake(self) + cmake.verbose = True + cmake.install() + + def package_info(self): + self.cpp_info.libs = [ + 'libxrpl_core.a', + 'libed25519-donna.a', + 'libsecp256k1.a', + ] diff --git a/external/rocksdb/conanfile.py b/external/rocksdb/conanfile.py new file mode 100644 index 00000000000..a219f4f5547 --- /dev/null +++ b/external/rocksdb/conanfile.py @@ -0,0 +1,193 @@ +import os +import shutil +from conans import ConanFile, CMake +from conan.tools import microsoft as ms + +class RocksDB(ConanFile): + name = 'rocksdb' + version = '6.27.3' + + license = ('GPL-2.0-only', 'Apache-2.0') + url = 'https://github.com/conan-io/conan-center-index' + description = 'A library that provides an embeddable, persistent key-value store for fast storage' + topics = ('rocksdb', 'database', 'leveldb', 'facebook', 'key-value') + + settings = 'os', 'compiler', 'build_type', 'arch' + options = { + 'enable_sse': [False, 'sse42', 'avx2'], + 'fPIC': [True, False], + 'lite': [True, False], + 'shared': [True, False], + 'use_rtti': [True, False], + 'with_gflags': [True, False], + 'with_jemalloc': [True, False], + 'with_lz4': [True, False], + 'with_snappy': [True, False], + 'with_tbb': [True, False], + 'with_zlib': [True, False], + 'with_zstd': [True, False], + } + default_options = { + 'enable_sse': False, + 'fPIC': True, + 'lite': False, + 'shared': False, + 'use_rtti': False, + 'with_gflags': False, + 'with_jemalloc': False, + 'with_lz4': False, + 'with_snappy': False, + 'with_tbb': False, + 'with_zlib': False, + 'with_zstd': False, + } + + def requirements(self): + if self.options.with_gflags: + self.requires('gflags/2.2.2') + if self.options.with_jemalloc: + self.requires('jemalloc/5.2.1') + if self.options.with_lz4: + self.requires('lz4/1.9.3') + if self.options.with_snappy: + self.requires('snappy/1.1.9') + if self.options.with_tbb: + self.requires('onetbb/2020.3') + if self.options.with_zlib: + self.requires('zlib/1.2.11') + if self.options.with_zstd: + self.requires('zstd/1.5.2') + + def config_options(self): + if self.settings.os == 'Windows': + del self.options.fPIC + + def configure(self): + if self.options.shared: + del self.options.fPIC + + 
generators = 'cmake', 'cmake_find_package' + + scm = { + 'type': 'git', + 'url': 'https://github.com/facebook/rocksdb.git', + 'revision': 'v6.27.3', + } + + exports_sources = 'thirdparty.inc' + # For out-of-source build. + no_copy_source = True + + _cmake = None + + def _configure_cmake(self): + if self._cmake: + return + + self._cmake = CMake(self) + + self._cmake.definitions['CMAKE_POSITION_INDEPENDENT_CODE'] = True + + self._cmake.definitions['DISABLE_STALL_NOTIF'] = False + self._cmake.definitions['FAIL_ON_WARNINGS'] = False + self._cmake.definitions['OPTDBG'] = True + self._cmake.definitions['WITH_TESTS'] = False + self._cmake.definitions['WITH_TOOLS'] = False + + self._cmake.definitions['WITH_GFLAGS'] = self.options.with_gflags + self._cmake.definitions['WITH_JEMALLOC'] = self.options.with_jemalloc + self._cmake.definitions['WITH_LZ4'] = self.options.with_lz4 + self._cmake.definitions['WITH_SNAPPY'] = self.options.with_snappy + self._cmake.definitions['WITH_TBB'] = self.options.with_tbb + self._cmake.definitions['WITH_ZLIB'] = self.options.with_zlib + self._cmake.definitions['WITH_ZSTD'] = self.options.with_zstd + + self._cmake.definitions['USE_RTTI'] = self.options.use_rtti + self._cmake.definitions['ROCKSDB_LITE'] = self.options.lite + self._cmake.definitions['ROCKSDB_INSTALL_ON_WINDOWS'] = ( + self.settings.os == 'Windows' + ) + + if not self.options.enable_sse: + self._cmake.definitions['PORTABLE'] = True + self._cmake.definitions['FORCE_SSE42'] = False + elif self.options.enable_sse == 'sse42': + self._cmake.definitions['PORTABLE'] = True + self._cmake.definitions['FORCE_SSE42'] = True + elif self.options.enable_sse == 'avx2': + self._cmake.definitions['PORTABLE'] = False + self._cmake.definitions['FORCE_SSE42'] = False + + self._cmake.definitions['WITH_ASAN'] = False + self._cmake.definitions['WITH_BZ2'] = False + self._cmake.definitions['WITH_JNI'] = False + self._cmake.definitions['WITH_LIBRADOS'] = False + if ms.is_msvc(self): + self._cmake.definitions['WITH_MD_LIBRARY'] = ( + ms.msvc_runtime_flag(self).startswith('MD') + ) + self._cmake.definitions['WITH_RUNTIME_DEBUG'] = ( + ms.msvc_runtime_flag(self).endswith('d') + ) + self._cmake.definitions['WITH_NUMA'] = False + self._cmake.definitions['WITH_TSAN'] = False + self._cmake.definitions['WITH_UBSAN'] = False + self._cmake.definitions['WITH_WINDOWS_UTF8_FILENAMES'] = False + self._cmake.definitions['WITH_XPRESS'] = False + self._cmake.definitions['WITH_FALLOCATE'] = True + + + def build(self): + if ms.is_msvc(self): + file = os.path.join( + self.recipe_folder, '..', 'export_source', 'thirdparty.inc' + ) + shutil.copy(file, self.build_folder) + self._configure_cmake() + self._cmake.configure() + self._cmake.build() + + def package(self): + self._configure_cmake() + self._cmake.install() + + def package_info(self): + self.cpp_info.filenames['cmake_find_package'] = 'RocksDB' + self.cpp_info.filenames['cmake_find_package_multi'] = 'RocksDB' + self.cpp_info.set_property('cmake_file_name', 'RocksDB') + + self.cpp_info.names['cmake_find_package'] = 'RocksDB' + self.cpp_info.names['cmake_find_package_multi'] = 'RocksDB' + + self.cpp_info.components['librocksdb'].names['cmake_find_package'] = 'rocksdb' + self.cpp_info.components['librocksdb'].names['cmake_find_package_multi'] = 'rocksdb' + self.cpp_info.components['librocksdb'].set_property( + 'cmake_target_name', 'RocksDB::rocksdb' + ) + + self.cpp_info.components['librocksdb'].libs = ['rocksdb'] + + if self.settings.os == "Windows": + 
self.cpp_info.components["librocksdb"].system_libs = ["shlwapi", "rpcrt4"] + if self.options.shared: + self.cpp_info.components["librocksdb"].defines = ["ROCKSDB_DLL"] + elif self.settings.os in ["Linux", "FreeBSD"]: + self.cpp_info.components["librocksdb"].system_libs = ["pthread", "m"] + + if self.options.lite: + self.cpp_info.components["librocksdb"].defines.append("ROCKSDB_LITE") + + if self.options.with_gflags: + self.cpp_info.components["librocksdb"].requires.append("gflags::gflags") + if self.options.with_jemalloc: + self.cpp_info.components["librocksdb"].requires.append("jemalloc::jemalloc") + if self.options.with_lz4: + self.cpp_info.components["librocksdb"].requires.append("lz4::lz4") + if self.options.with_snappy: + self.cpp_info.components["librocksdb"].requires.append("snappy::snappy") + if self.options.with_tbb: + self.cpp_info.components["librocksdb"].requires.append("onetbb::onetbb") + if self.options.with_zlib: + self.cpp_info.components["librocksdb"].requires.append("zlib::zlib") + if self.options.with_zstd: + self.cpp_info.components["librocksdb"].requires.append("zstd::zstd") diff --git a/external/rocksdb/thirdparty.inc b/external/rocksdb/thirdparty.inc new file mode 100644 index 00000000000..fce11784b92 --- /dev/null +++ b/external/rocksdb/thirdparty.inc @@ -0,0 +1,62 @@ +if(WITH_GFLAGS) + # Config with namespace available since gflags 2.2.2 + find_package(gflags REQUIRED) + set(GFLAGS_LIB gflags::gflags) + list(APPEND THIRDPARTY_LIBS ${GFLAGS_LIB}) + add_definitions(-DGFLAGS=1) +endif() + +if(WITH_SNAPPY) + find_package(Snappy REQUIRED) + add_definitions(-DSNAPPY) + list(APPEND THIRDPARTY_LIBS Snappy::snappy) +endif() + +if(WITH_LZ4) + find_package(lz4 REQUIRED) + add_definitions(-DLZ4) + list(APPEND THIRDPARTY_LIBS lz4::lz4) +endif() + +if(WITH_ZLIB) + find_package(ZLIB REQUIRED) + add_definitions(-DZLIB) + list(APPEND THIRDPARTY_LIBS ZLIB::ZLIB) +endif() + +option(WITH_BZ2 "build with bzip2" OFF) +if(WITH_BZ2) + find_package(BZip2 REQUIRED) + add_definitions(-DBZIP2) + list(APPEND THIRDPARTY_LIBS BZip2::BZip2) +endif() + +if(WITH_ZSTD) + find_package(zstd REQUIRED) + add_definitions(-DZSTD) + list(APPEND THIRDPARTY_LIBS zstd::zstd) +endif() + +# ================================================== XPRESS ================================================== +# This makes use of built-in Windows API, no additional includes, links to a system lib + +if(WITH_XPRESS) + message(STATUS "XPRESS is enabled") + add_definitions(-DXPRESS) + # We are using the implementation provided by the system + list(APPEND SYSTEM_LIBS Cabinet.lib) +else() + message(STATUS "XPRESS is disabled") +endif() + +# ================================================== JEMALLOC ================================================== +if(WITH_JEMALLOC) + message(STATUS "JEMALLOC library is enabled") + add_definitions(-DROCKSDB_JEMALLOC -DJEMALLOC_EXPORT= -DJEMALLOC_NO_RENAME) + list(APPEND THIRDPARTY_LIBS jemalloc::jemalloc) + set(ARTIFACT_SUFFIX "_je") + +else () + set(ARTIFACT_SUFFIX "") + message(STATUS "JEMALLOC library is disabled") +endif () diff --git a/src/ed25519-donna/CMakeLists.txt b/src/ed25519-donna/CMakeLists.txt new file mode 100644 index 00000000000..418dc38326b --- /dev/null +++ b/src/ed25519-donna/CMakeLists.txt @@ -0,0 +1,48 @@ +cmake_minimum_required(VERSION 3.11) + +project(ed25519 + LANGUAGES C +) + +if(PROJECT_NAME STREQUAL CMAKE_PROJECT_NAME) + set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/output/$/lib") +endif() + +if(NOT TARGET OpenSSL::SSL) + find_package(OpenSSL) +endif() 
+ +add_library(ed25519 STATIC + ed25519.c +) +add_library(ed25519::ed25519 ALIAS ed25519) +target_link_libraries(ed25519 PUBLIC OpenSSL::SSL) + +include(GNUInstallDirs) + +#[=========================================================[ + NOTE for macos: + https://github.com/floodyberry/ed25519-donna/issues/29 + our source for ed25519-donna-portable.h has been + patched to workaround this. +#]=========================================================] +target_include_directories(ed25519 PUBLIC + $ + $ +) + +install( + TARGETS ed25519 + EXPORT ${PROJECT_NAME}-exports + ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}" +) +install( + EXPORT ${PROJECT_NAME}-exports + DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}" + FILE ${PROJECT_NAME}-targets.cmake + NAMESPACE ${PROJECT_NAME}:: +) +install( + FILES ed25519.h + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}" +) diff --git a/src/ripple/protocol/impl/PublicKey.cpp b/src/ripple/protocol/impl/PublicKey.cpp index ac86634f1ef..8ab1bd46cdf 100644 --- a/src/ripple/protocol/impl/PublicKey.cpp +++ b/src/ripple/protocol/impl/PublicKey.cpp @@ -23,7 +23,8 @@ #include #include #include -#include +#include +#include namespace ripple { diff --git a/src/ripple/protocol/impl/SecretKey.cpp b/src/ripple/protocol/impl/SecretKey.cpp index 13aafdb1f10..63661888f48 100644 --- a/src/ripple/protocol/impl/SecretKey.cpp +++ b/src/ripple/protocol/impl/SecretKey.cpp @@ -26,7 +26,7 @@ #include #include #include -#include +#include namespace ripple { diff --git a/src/ripple/rpc/handlers/WalletPropose.cpp b/src/ripple/rpc/handlers/WalletPropose.cpp index 012ed5c1689..624c5c83c2b 100644 --- a/src/ripple/rpc/handlers/WalletPropose.cpp +++ b/src/ripple/rpc/handlers/WalletPropose.cpp @@ -29,7 +29,7 @@ #include #include #include -#include +#include #include namespace ripple { diff --git a/src/secp256k1/CMakeLists.txt b/src/secp256k1/CMakeLists.txt new file mode 100644 index 00000000000..6b41b66c8ab --- /dev/null +++ b/src/secp256k1/CMakeLists.txt @@ -0,0 +1,52 @@ +cmake_minimum_required(VERSION 3.11) + +project(secp256k1 + LANGUAGES C +) + +if(PROJECT_NAME STREQUAL CMAKE_PROJECT_NAME) + set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${PROJECT_BINARY_DIR}/output/$/lib") +endif() + +add_library(secp256k1 STATIC + src/secp256k1.c +) +add_library(secp256k1::secp256k1 ALIAS secp256k1) + +include(GNUInstallDirs) + +target_compile_definitions(secp256k1 PRIVATE + USE_NUM_NONE + USE_FIELD_10X26 + USE_FIELD_INV_BUILTIN + USE_SCALAR_8X32 + USE_SCALAR_INV_BUILTIN +) +target_include_directories(secp256k1 + PUBLIC $ + PRIVATE $ +) +target_compile_options(secp256k1 PRIVATE + $<$:-wd4319> + $<$>: + -Wno-deprecated-declarations + -Wno-unused-function + > + $<$:-Wno-nonnull-compare> +) + +install( + TARGETS secp256k1 + EXPORT ${PROJECT_NAME}-exports + ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}" +) +install( + EXPORT ${PROJECT_NAME}-exports + DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}" + FILE ${PROJECT_NAME}-targets.cmake + NAMESPACE ${PROJECT_NAME}:: +) +install( + FILES include/secp256k1.h + DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}" +) diff --git a/src/test/core/SociDB_test.cpp b/src/test/core/SociDB_test.cpp index 875af9aa053..c0365ad9ae7 100644 --- a/src/test/core/SociDB_test.cpp +++ b/src/test/core/SociDB_test.cpp @@ -226,13 +226,15 @@ class SociDB_test final : public TestSuite // SOCI requires boost::optional (not std::optional) as // parameters. 
boost::optional ig; - boost::optional uig; + // Known bug: https://github.com/SOCI/soci/issues/926 + // boost::optional uig; + uint32_t uig = 0; boost::optional big; boost::optional ubig; s << "SELECT I, UI, BI, UBI from STT;", soci::into(ig), soci::into(uig), soci::into(big), soci::into(ubig); BEAST_EXPECT( - *ig == id[0] && *uig == uid[0] && *big == bid[0] && + *ig == id[0] && uig == uid[0] && *big == bid[0] && *ubig == ubid[0]); } catch (std::exception&) @@ -357,18 +359,13 @@ class SociDB_test final : public TestSuite bfs::remove(dbPath); } void - testSQLite() + run() override { testSQLiteFileNames(); testSQLiteSession(); testSQLiteSelect(); testSQLiteDeleteWithSubselect(); } - void - run() override - { - testSQLite(); - } }; BEAST_DEFINE_TESTSUITE(SociDB, core, ripple); From d8a84e9530d0cb358e97dc3c25ff3cdd7a5447b2 Mon Sep 17 00:00:00 2001 From: RichardAH Date: Tue, 20 Dec 2022 02:35:35 +0100 Subject: [PATCH 074/508] `featureDisallowIncoming`: Opt-out of incoming Checks, PayChans, NFTokenOffers and Trustlines (#4336) featureDisallowIncoming is a new amendment that would allow users to opt-out of incoming Checks, Payment Channels, NFTokenOffers, and trust lines. This commit includes tests. Adds four new AccountSet Flags: 1. asfDisallowIncomingNFTOffer 2. asfDisallowIncomingCheck 3. asfDisallowIncomingPayChan 4. asfDisallowIncomingTrustline --- src/ripple/app/tx/impl/CreateCheck.cpp | 10 +- src/ripple/app/tx/impl/NFTokenCreateOffer.cpp | 41 +++- src/ripple/app/tx/impl/PayChan.cpp | 13 +- src/ripple/app/tx/impl/SetAccount.cpp | 24 ++ src/ripple/app/tx/impl/SetTrust.cpp | 14 ++ src/ripple/protocol/Feature.h | 3 +- src/ripple/protocol/LedgerFormats.h | 11 + src/ripple/protocol/TxFlags.h | 7 + src/ripple/protocol/impl/Feature.cpp | 1 + src/test/app/Check_test.cpp | 98 ++++++++ src/test/app/NFToken_test.cpp | 133 ++++++++++ src/test/app/PayChan_test.cpp | 230 ++++++++++++------ src/test/app/SetTrust_test.cpp | 143 +++++++++-- src/test/rpc/AccountSet_test.cpp | 15 +- 14 files changed, 644 insertions(+), 99 deletions(-) diff --git a/src/ripple/app/tx/impl/CreateCheck.cpp b/src/ripple/app/tx/impl/CreateCheck.cpp index a59a7c12eba..f5c2cbfbfd9 100644 --- a/src/ripple/app/tx/impl/CreateCheck.cpp +++ b/src/ripple/app/tx/impl/CreateCheck.cpp @@ -90,8 +90,14 @@ CreateCheck::preclaim(PreclaimContext const& ctx) return tecNO_DST; } - if ((sleDst->getFlags() & lsfRequireDestTag) && - !ctx.tx.isFieldPresent(sfDestinationTag)) + auto const flags = sleDst->getFlags(); + + // Check if the destination has disallowed incoming checks + if (ctx.view.rules().enabled(featureDisallowIncoming) && + (flags & lsfDisallowIncomingCheck)) + return tecNO_PERMISSION; + + if ((flags & lsfRequireDestTag) && !ctx.tx.isFieldPresent(sfDestinationTag)) { // The tag is basically account-specific information we don't // understand, but we can require someone to fill it in. diff --git a/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp b/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp index 80e4c3964a7..695efdd0aa4 100644 --- a/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp +++ b/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp @@ -165,11 +165,42 @@ NFTokenCreateOffer::preclaim(PreclaimContext const& ctx) return tecUNFUNDED_OFFER; } - // If a destination is specified, the destination must already be in - // the ledger. 
- if (auto const destination = ctx.tx[~sfDestination]; - destination && !ctx.view.exists(keylet::account(*destination))) - return tecNO_DST; + if (auto const destination = ctx.tx[~sfDestination]) + { + // If a destination is specified, the destination must already be in + // the ledger. + auto const sleDst = ctx.view.read(keylet::account(*destination)); + + if (!sleDst) + return tecNO_DST; + + // check if the destination has disallowed incoming offers + if (ctx.view.rules().enabled(featureDisallowIncoming)) + { + // flag cannot be set unless amendment is enabled but + // out of an abundance of caution check anyway + + if (sleDst->getFlags() & lsfDisallowIncomingNFTOffer) + return tecNO_PERMISSION; + } + } + + if (auto const owner = ctx.tx[~sfOwner]) + { + // Check if the owner (buy offer) has disallowed incoming offers + if (ctx.view.rules().enabled(featureDisallowIncoming)) + { + auto const sleOwner = ctx.view.read(keylet::account(*owner)); + + // defensively check + // it should not be possible to specify owner that doesn't exist + if (!sleOwner) + return tecNO_TARGET; + + if (sleOwner->getFlags() & lsfDisallowIncomingNFTOffer) + return tecNO_PERMISSION; + } + } return tesSUCCESS; } diff --git a/src/ripple/app/tx/impl/PayChan.cpp b/src/ripple/app/tx/impl/PayChan.cpp index aab3dcc5a6b..1667bddcdb1 100644 --- a/src/ripple/app/tx/impl/PayChan.cpp +++ b/src/ripple/app/tx/impl/PayChan.cpp @@ -217,14 +217,21 @@ PayChanCreate::preclaim(PreclaimContext const& ctx) auto const sled = ctx.view.read(keylet::account(dst)); if (!sled) return tecNO_DST; - if (((*sled)[sfFlags] & lsfRequireDestTag) && - !ctx.tx[~sfDestinationTag]) + + auto const flags = sled->getFlags(); + + // Check if they have disallowed incoming payment channels + if (ctx.view.rules().enabled(featureDisallowIncoming) && + (flags & lsfDisallowIncomingPayChan)) + return tecNO_PERMISSION; + + if ((flags & lsfRequireDestTag) && !ctx.tx[~sfDestinationTag]) return tecDST_TAG_NEEDED; // Obeying the lsfDisallowXRP flag was a bug. Piggyback on // featureDepositAuth to remove the bug. 
if (!ctx.view.rules().enabled(featureDepositAuth) && - ((*sled)[sfFlags] & lsfDisallowXRP)) + (flags & lsfDisallowXRP)) return tecNO_TARGET; } diff --git a/src/ripple/app/tx/impl/SetAccount.cpp b/src/ripple/app/tx/impl/SetAccount.cpp index 85fe290ca55..5c7d4369a76 100644 --- a/src/ripple/app/tx/impl/SetAccount.cpp +++ b/src/ripple/app/tx/impl/SetAccount.cpp @@ -538,6 +538,30 @@ SetAccount::doApply() sle->makeFieldAbsent(sfNFTokenMinter); } + // Set or clear flags for disallowing various incoming instruments + if (ctx_.view().rules().enabled(featureDisallowIncoming)) + { + if (uSetFlag == asfDisallowIncomingNFTOffer) + uFlagsOut |= lsfDisallowIncomingNFTOffer; + else if (uClearFlag == asfDisallowIncomingNFTOffer) + uFlagsOut &= ~lsfDisallowIncomingNFTOffer; + + if (uSetFlag == asfDisallowIncomingCheck) + uFlagsOut |= lsfDisallowIncomingCheck; + else if (uClearFlag == asfDisallowIncomingCheck) + uFlagsOut &= ~lsfDisallowIncomingCheck; + + if (uSetFlag == asfDisallowIncomingPayChan) + uFlagsOut |= lsfDisallowIncomingPayChan; + else if (uClearFlag == asfDisallowIncomingPayChan) + uFlagsOut &= ~lsfDisallowIncomingPayChan; + + if (uSetFlag == asfDisallowIncomingTrustline) + uFlagsOut |= lsfDisallowIncomingTrustline; + else if (uClearFlag == asfDisallowIncomingTrustline) + uFlagsOut &= ~lsfDisallowIncomingTrustline; + } + if (uFlagsIn != uFlagsOut) sle->setFieldU32(sfFlags, uFlagsOut); diff --git a/src/ripple/app/tx/impl/SetTrust.cpp b/src/ripple/app/tx/impl/SetTrust.cpp index 23af19c7b15..acbbedabf10 100644 --- a/src/ripple/app/tx/impl/SetTrust.cpp +++ b/src/ripple/app/tx/impl/SetTrust.cpp @@ -128,6 +128,20 @@ SetTrust::preclaim(PreclaimContext const& ctx) } } + // If the destination has opted to disallow incoming trustlines + // then honour that flag + if (ctx.view.rules().enabled(featureDisallowIncoming)) + { + auto const sleDst = ctx.view.read(keylet::account(uDstAccountID)); + + if (!sleDst) + return tecNO_DST; + + auto const dstFlags = sleDst->getFlags(); + if (dstFlags & lsfDisallowIncomingTrustline) + return tecNO_PERMISSION; + } + return tesSUCCESS; } diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index fac54c2fa71..d4e65a31af8 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -74,7 +74,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. A LogicError on startup will verify this. -static constexpr std::size_t numFeatures = 53; +static constexpr std::size_t numFeatures = 54; /** Amendments that this server supports and the default voting behavior. 
Whether they are enabled depends on the Rules defined in the validated @@ -340,6 +340,7 @@ extern uint256 const featureNonFungibleTokensV1_1; extern uint256 const fixTrustLinesToSelf; extern uint256 const fixRemoveNFTokenAutoTrustLine; extern uint256 const featureImmediateOfferKilled; +extern uint256 const featureDisallowIncoming; } // namespace ripple diff --git a/src/ripple/protocol/LedgerFormats.h b/src/ripple/protocol/LedgerFormats.h index 2dd04b1264b..45258a3d02b 100644 --- a/src/ripple/protocol/LedgerFormats.h +++ b/src/ripple/protocol/LedgerFormats.h @@ -232,6 +232,17 @@ enum LedgerSpecificFlags { lsfDefaultRipple = 0x00800000, // True, trust lines allow rippling by default lsfDepositAuth = 0x01000000, // True, all deposits require authorization +/* // reserved for Hooks amendment + lsfTshCollect = 0x02000000, // True, allow TSH collect-calls to acc hooks +*/ + lsfDisallowIncomingNFTOffer = + 0x04000000, // True, reject new incoming NFT offers + lsfDisallowIncomingCheck = + 0x08000000, // True, reject new checks + lsfDisallowIncomingPayChan = + 0x10000000, // True, reject new paychans + lsfDisallowIncomingTrustline = + 0x20000000, // True, reject new trustlines (only if no issued assets) // ltOFFER lsfPassive = 0x00010000, diff --git a/src/ripple/protocol/TxFlags.h b/src/ripple/protocol/TxFlags.h index 0ad088c41d6..c4218219868 100644 --- a/src/ripple/protocol/TxFlags.h +++ b/src/ripple/protocol/TxFlags.h @@ -79,6 +79,13 @@ constexpr std::uint32_t asfGlobalFreeze = 7; constexpr std::uint32_t asfDefaultRipple = 8; constexpr std::uint32_t asfDepositAuth = 9; constexpr std::uint32_t asfAuthorizedNFTokenMinter = 10; +/* // reserved for Hooks amendment +constexpr std::uint32_t asfTshCollect = 11; +*/ +constexpr std::uint32_t asfDisallowIncomingNFTOffer = 12; +constexpr std::uint32_t asfDisallowIncomingCheck = 13; +constexpr std::uint32_t asfDisallowIncomingPayChan = 14; +constexpr std::uint32_t asfDisallowIncomingTrustline = 15; // OfferCreate flags: constexpr std::uint32_t tfPassive = 0x00010000; diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index fa0d167ef09..5903603f975 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -450,6 +450,7 @@ REGISTER_FEATURE(NonFungibleTokensV1_1, Supported::yes, DefaultVote::no) REGISTER_FIX (fixTrustLinesToSelf, Supported::yes, DefaultVote::no); REGISTER_FIX (fixRemoveNFTokenAutoTrustLine, Supported::yes, DefaultVote::yes); REGISTER_FEATURE(ImmediateOfferKilled, Supported::yes, DefaultVote::no); +REGISTER_FEATURE(DisallowIncoming, Supported::yes, DefaultVote::no); // The following amendments have been active for at least two years. Their // pre-amendment code has been removed and the identifiers are deprecated. 
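
The test diffs that follow exercise the new DisallowIncoming flags end to end. As a compact illustration of the intended semantics (a sketch only, not part of this patch), the fragment below uses the same jtx helpers that appear in the tests; it assumes the usual beast unit-test suite context, and the account names and amounts are arbitrary. The trust-line flag is shown, but the Check, PayChan and NFTokenOffer flags follow the same pattern: the receiving account sets the corresponding asfDisallowIncoming* flag and the incoming-creating transaction fails with tecNO_PERMISSION until the flag is cleared.

    // Sketch only -- not part of this patch. Assumes the jtx test harness
    // (test::jtx::Env) inside a beast::unit_test::suite, as in the tests below.
    void
    sketchDisallowIncomingTrustline()
    {
        using namespace test::jtx;
        // supported_amendments() already includes DisallowIncoming, since the
        // amendment is registered with Supported::yes above.
        Env env{*this, supported_amendments()};
        Account const gw{"gw"};
        Account const alice{"alice"};
        env.fund(XRP(10000), gw, alice);

        // gw opts out of incoming trust lines.
        env(fset(gw, asfDisallowIncomingTrustline));
        env.close();

        // alice can no longer open a trust line to gw ...
        env(trust(alice, gw["USD"](100)), ter(tecNO_PERMISSION));
        env.close();

        // ... until gw clears the flag again.
        env(fclear(gw, asfDisallowIncomingTrustline));
        env.close();
        env(trust(alice, gw["USD"](100)));
        env.close();
    }
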
diff --git a/src/test/app/Check_test.cpp b/src/test/app/Check_test.cpp index 31a2e572e70..8f0c0ec46b8 100644 --- a/src/test/app/Check_test.cpp +++ b/src/test/app/Check_test.cpp @@ -85,6 +85,8 @@ class dest_tag class Check_test : public beast::unit_test::suite { + FeatureBitset const disallowIncoming{featureDisallowIncoming}; + static uint256 getCheckIndex(AccountID const& account, std::uint32_t uSequence) { @@ -293,6 +295,100 @@ class Check_test : public beast::unit_test::suite BEAST_EXPECT(checksOnAccount(env, bob).size() == bobCount + 7); } + void + testCreateDisallowIncoming(FeatureBitset features) + { + testcase("Create valid with disallow incoming"); + + using namespace test::jtx; + + // test flag doesn't set unless amendment enabled + { + Env env{*this, features - disallowIncoming}; + Account const alice{"alice"}; + env.fund(XRP(10000), alice); + env(fset(alice, asfDisallowIncomingCheck)); + env.close(); + auto const sle = env.le(alice); + uint32_t flags = sle->getFlags(); + BEAST_EXPECT(!(flags & lsfDisallowIncomingCheck)); + } + + Account const gw{"gateway"}; + Account const alice{"alice"}; + Account const bob{"bob"}; + IOU const USD{gw["USD"]}; + + Env env{*this, features | disallowIncoming}; + + STAmount const startBalance{XRP(1000).value()}; + env.fund(startBalance, gw, alice, bob); + + /* + * Attempt to create two checks from `from` to `to` and + * require they both result in error/success code `expected` + */ + auto writeTwoChecksDI = [&env, &USD, this]( + Account const& from, + Account const& to, + TER expected) { + std::uint32_t const fromOwnerCount{ownerCount(env, from)}; + std::uint32_t const toOwnerCount{ownerCount(env, to)}; + + std::size_t const fromCkCount{checksOnAccount(env, from).size()}; + std::size_t const toCkCount{checksOnAccount(env, to).size()}; + + env(check::create(from, to, XRP(2000)), ter(expected)); + env.close(); + + env(check::create(from, to, USD(50)), ter(expected)); + env.close(); + + if (expected == tesSUCCESS) + { + BEAST_EXPECT( + checksOnAccount(env, from).size() == fromCkCount + 2); + BEAST_EXPECT(checksOnAccount(env, to).size() == toCkCount + 2); + + env.require(owners(from, fromOwnerCount + 2)); + env.require( + owners(to, to == from ? fromOwnerCount + 2 : toOwnerCount)); + return; + } + + BEAST_EXPECT(checksOnAccount(env, from).size() == fromCkCount); + BEAST_EXPECT(checksOnAccount(env, to).size() == toCkCount); + + env.require(owners(from, fromOwnerCount)); + env.require(owners(to, to == from ? 
fromOwnerCount : toOwnerCount)); + }; + + // enable the DisallowIncoming flag on both bob and alice + env(fset(bob, asfDisallowIncomingCheck)); + env(fset(alice, asfDisallowIncomingCheck)); + env.close(); + + // both alice and bob can't receive checks + writeTwoChecksDI(alice, bob, tecNO_PERMISSION); + writeTwoChecksDI(gw, alice, tecNO_PERMISSION); + + // remove the flag from alice but not from bob + env(fclear(alice, asfDisallowIncomingCheck)); + env.close(); + + // now bob can send alice a cheque but not visa-versa + writeTwoChecksDI(bob, alice, tesSUCCESS); + writeTwoChecksDI(alice, bob, tecNO_PERMISSION); + + // remove bob's flag too + env(fclear(bob, asfDisallowIncomingCheck)); + env.close(); + + // now they can send checks freely + writeTwoChecksDI(bob, alice, tesSUCCESS); + writeTwoChecksDI(alice, bob, tesSUCCESS); + } + void testCreateInvalid(FeatureBitset features) { @@ -2602,6 +2698,7 @@ class Check_test : public beast::unit_test::suite { testEnabled(features); testCreateValid(features); + testCreateDisallowIncoming(features); testCreateInvalid(features); testCashXRP(features); testCashIOU(features); @@ -2621,6 +2718,7 @@ class Check_test : public beast::unit_test::suite using namespace test::jtx; auto const sa = supported_amendments(); testWithFeats(sa - featureCheckCashMakesTrustLine); + testWithFeats(sa - disallowIncoming); testWithFeats(sa); testTrustLineCreation(sa); // Test with featureCheckCashMakesTrustLine diff --git a/src/test/app/NFToken_test.cpp b/src/test/app/NFToken_test.cpp index 2fb27f8a352..42a6eb4d3ce 100644 --- a/src/test/app/NFToken_test.cpp +++ b/src/test/app/NFToken_test.cpp @@ -29,6 +29,8 @@ namespace ripple { class NFToken_test : public beast::unit_test::suite { + FeatureBitset const disallowIncoming{featureDisallowIncoming}; + // Helper function that returns the owner count of an account root. 
static std::uint32_t ownerCount(test::jtx::Env const& env, test::jtx::Account const& acct) @@ -2975,6 +2977,135 @@ class NFToken_test : public beast::unit_test::suite } } + void + testCreateOfferDestinationDisallowIncoming(FeatureBitset features) + { + testcase("Create offer destination disallow incoming"); + + using namespace test::jtx; + + // test flag doesn't set unless amendment enabled + { + Env env{*this, features - disallowIncoming}; + Account const alice{"alice"}; + env.fund(XRP(10000), alice); + env(fset(alice, asfDisallowIncomingNFTOffer)); + env.close(); + auto const sle = env.le(alice); + uint32_t flags = sle->getFlags(); + BEAST_EXPECT(!(flags & lsfDisallowIncomingNFTOffer)); + } + + Env env{*this, features | disallowIncoming}; + + Account const issuer{"issuer"}; + Account const minter{"minter"}; + Account const buyer{"buyer"}; + Account const alice{"alice"}; + + env.fund(XRP(1000), issuer, minter, buyer, alice); + + env(token::setMinter(issuer, minter)); + env.close(); + + uint256 const nftokenID = + token::getNextID(env, issuer, 0, tfTransferable); + env(token::mint(minter, 0), + token::issuer(issuer), + txflags(tfTransferable)); + env.close(); + + // enable flag + env(fset(buyer, asfDisallowIncomingNFTOffer)); + env.close(); + + // a sell offer from the minter to the buyer should be rejected + { + env(token::createOffer(minter, nftokenID, drops(1)), + token::destination(buyer), + txflags(tfSellNFToken), + ter(tecNO_PERMISSION)); + env.close(); + BEAST_EXPECT(ownerCount(env, issuer) == 0); + BEAST_EXPECT(ownerCount(env, minter) == 1); + BEAST_EXPECT(ownerCount(env, buyer) == 0); + } + + // disable the flag + env(fclear(buyer, asfDisallowIncomingNFTOffer)); + env.close(); + + // create offer (allowed now) then cancel + { + uint256 const offerIndex = + keylet::nftoffer(minter, env.seq(minter)).key; + + env(token::createOffer(minter, nftokenID, drops(1)), + token::destination(buyer), + txflags(tfSellNFToken)); + env.close(); + + env(token::cancelOffer(minter, {offerIndex})); + env.close(); + } + + // create offer, enable flag, then cancel + { + uint256 const offerIndex = + keylet::nftoffer(minter, env.seq(minter)).key; + + env(token::createOffer(minter, nftokenID, drops(1)), + token::destination(buyer), + txflags(tfSellNFToken)); + env.close(); + + env(fset(buyer, asfDisallowIncomingNFTOffer)); + env.close(); + + env(token::cancelOffer(minter, {offerIndex})); + env.close(); + + env(fclear(buyer, asfDisallowIncomingNFTOffer)); + env.close(); + } + + // create offer then transfer + { + uint256 const offerIndex = + keylet::nftoffer(minter, env.seq(minter)).key; + + env(token::createOffer(minter, nftokenID, drops(1)), + token::destination(buyer), + txflags(tfSellNFToken)); + env.close(); + + env(token::acceptSellOffer(buyer, offerIndex)); + env.close(); + } + + // buyer now owns the token + + // enable flag again + env(fset(buyer, asfDisallowIncomingNFTOffer)); + env.close(); + + // a random offer to buy the token + { + env(token::createOffer(alice, nftokenID, drops(1)), + token::owner(buyer), + ter(tecNO_PERMISSION)); + env.close(); + } + + // minter offer to buy the token + { + env(token::createOffer(minter, nftokenID, drops(1)), + token::owner(buyer), + ter(tecNO_PERMISSION)); + env.close(); + } + } + void testCreateOfferExpiration(FeatureBitset features) { @@ -4929,6 +5060,7 @@ class NFToken_test : public beast::unit_test::suite testMintTaxon(features); testMintURI(features); testCreateOfferDestination(features); + testCreateOfferDestinationDisallowIncoming(features); 
testCreateOfferExpiration(features); testCancelOffers(features); testCancelTooManyOffers(features); @@ -4949,6 +5081,7 @@ class NFToken_test : public beast::unit_test::suite FeatureBitset const fixNFTDir{fixNFTokenDirV1}; testWithFeats(all - fixNFTDir); + testWithFeats(all - disallowIncoming); testWithFeats(all); } }; diff --git a/src/test/app/PayChan_test.cpp b/src/test/app/PayChan_test.cpp index cf600a9fc87..2a8ea360e6c 100644 --- a/src/test/app/PayChan_test.cpp +++ b/src/test/app/PayChan_test.cpp @@ -32,6 +32,8 @@ namespace ripple { namespace test { struct PayChan_test : public beast::unit_test::suite { + FeatureBitset const disallowIncoming{featureDisallowIncoming}; + static uint256 channel( jtx::Account const& account, @@ -175,12 +177,12 @@ struct PayChan_test : public beast::unit_test::suite } void - testSimple() + testSimple(FeatureBitset features) { testcase("simple"); using namespace jtx; using namespace std::literals::chrono_literals; - Env env(*this); + Env env{*this, features}; auto const alice = Account("alice"); auto const bob = Account("bob"); auto USDA = alice["USD"]; @@ -350,7 +352,91 @@ struct PayChan_test : public beast::unit_test::suite } void - testCancelAfter() + testDisallowIncoming(FeatureBitset features) + { + testcase("Disallow Incoming Flag"); + using namespace jtx; + + // test flag doesn't set unless amendment enabled + { + Env env{*this, features - disallowIncoming}; + Account const alice{"alice"}; + env.fund(XRP(10000), alice); + env(fset(alice, asfDisallowIncomingPayChan)); + env.close(); + auto const sle = env.le(alice); + uint32_t flags = sle->getFlags(); + BEAST_EXPECT(!(flags & lsfDisallowIncomingPayChan)); + } + + using namespace std::literals::chrono_literals; + Env env{*this, features | disallowIncoming}; + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const cho = Account("cho"); + env.fund(XRP(10000), alice, bob, cho); + auto const pk = alice.pk(); + auto const settleDelay = 100s; + + // set flag on bob only + env(fset(bob, asfDisallowIncomingPayChan)); + env.close(); + + // channel creation from alice to bob is disallowed + { + auto const chan = channel(alice, bob, env.seq(alice)); + env(create(alice, bob, XRP(1000), settleDelay, pk), + ter(tecNO_PERMISSION)); + BEAST_EXPECT(!channelExists(*env.current(), chan)); + } + + // set flag on alice also + env(fset(alice, asfDisallowIncomingPayChan)); + env.close(); + + // channel creation from bob to alice is now disallowed + { + auto const chan = channel(bob, alice, env.seq(bob)); + env(create(bob, alice, XRP(1000), settleDelay, pk), + ter(tecNO_PERMISSION)); + BEAST_EXPECT(!channelExists(*env.current(), chan)); + } + + // remove flag from bob + env(fclear(bob, asfDisallowIncomingPayChan)); + env.close(); + + // now the channel between alice and bob can exist + { + auto const chan = channel(alice, bob, env.seq(alice)); + env(create(alice, bob, XRP(1000), settleDelay, pk), + ter(tesSUCCESS)); + BEAST_EXPECT(channelExists(*env.current(), chan)); + } + + // a channel from cho to alice isn't allowed + { + auto const chan = channel(cho, alice, env.seq(cho)); + env(create(cho, alice, XRP(1000), settleDelay, pk), + ter(tecNO_PERMISSION)); + BEAST_EXPECT(!channelExists(*env.current(), chan)); + } + + // remove flag from alice + env(fclear(alice, asfDisallowIncomingPayChan)); + env.close(); + + // now a channel from cho to alice is allowed + { + auto const chan = channel(cho, alice, env.seq(cho)); + env(create(cho, alice, XRP(1000), settleDelay, pk), + ter(tesSUCCESS)); + 
BEAST_EXPECT(channelExists(*env.current(), chan)); + } + } + + void + testCancelAfter(FeatureBitset features) { testcase("cancel after"); using namespace jtx; @@ -360,7 +446,7 @@ struct PayChan_test : public beast::unit_test::suite auto const carol = Account("carol"); { // If dst claims after cancel after, channel closes - Env env(*this); + Env env{*this, features}; env.fund(XRP(10000), alice, bob); auto const pk = alice.pk(); auto const settleDelay = 100s; @@ -392,7 +478,7 @@ struct PayChan_test : public beast::unit_test::suite } { // Third party can close after cancel after - Env env(*this); + Env env{*this, features}; env.fund(XRP(10000), alice, bob, carol); auto const pk = alice.pk(); auto const settleDelay = 100s; @@ -415,12 +501,12 @@ struct PayChan_test : public beast::unit_test::suite } void - testExpiration() + testExpiration(FeatureBitset features) { testcase("expiration"); using namespace jtx; using namespace std::literals::chrono_literals; - Env env(*this); + Env env{*this, features}; auto const alice = Account("alice"); auto const bob = Account("bob"); auto const carol = Account("carol"); @@ -481,12 +567,12 @@ struct PayChan_test : public beast::unit_test::suite } void - testSettleDelay() + testSettleDelay(FeatureBitset features) { testcase("settle delay"); using namespace jtx; using namespace std::literals::chrono_literals; - Env env(*this); + Env env{*this, features}; auto const alice = Account("alice"); auto const bob = Account("bob"); env.fund(XRP(10000), alice, bob); @@ -541,12 +627,12 @@ struct PayChan_test : public beast::unit_test::suite } void - testCloseDry() + testCloseDry(FeatureBitset features) { testcase("close dry"); using namespace jtx; using namespace std::literals::chrono_literals; - Env env(*this); + Env env{*this, features}; auto const alice = Account("alice"); auto const bob = Account("bob"); env.fund(XRP(10000), alice, bob); @@ -575,13 +661,13 @@ struct PayChan_test : public beast::unit_test::suite } void - testDefaultAmount() + testDefaultAmount(FeatureBitset features) { // auth amount defaults to balance if not present testcase("default amount"); using namespace jtx; using namespace std::literals::chrono_literals; - Env env(*this); + Env env{*this, features}; auto const alice = Account("alice"); auto const bob = Account("bob"); env.fund(XRP(10000), alice, bob); @@ -630,7 +716,7 @@ struct PayChan_test : public beast::unit_test::suite } void - testDisallowXRP() + testDisallowXRP(FeatureBitset features) { // auth amount defaults to balance if not present testcase("Disallow XRP"); @@ -641,7 +727,7 @@ struct PayChan_test : public beast::unit_test::suite auto const bob = Account("bob"); { // Create a channel where dst disallows XRP - Env env(*this, supported_amendments() - featureDepositAuth); + Env env(*this, features - featureDepositAuth); env.fund(XRP(10000), alice, bob); env(fset(bob, asfDisallowXRP)); auto const chan = channel(alice, bob, env.seq(alice)); @@ -652,7 +738,7 @@ struct PayChan_test : public beast::unit_test::suite { // Create a channel where dst disallows XRP. Ignore that flag, // since it's just advisory. 
- Env env(*this); + Env env{*this, features}; env.fund(XRP(10000), alice, bob); env(fset(bob, asfDisallowXRP)); auto const chan = channel(alice, bob, env.seq(alice)); @@ -663,7 +749,7 @@ struct PayChan_test : public beast::unit_test::suite { // Claim to a channel where dst disallows XRP // (channel is created before disallow xrp is set) - Env env(*this, supported_amendments() - featureDepositAuth); + Env env(*this, features - featureDepositAuth); env.fund(XRP(10000), alice, bob); auto const chan = channel(alice, bob, env.seq(alice)); env(create(alice, bob, XRP(1000), 3600s, alice.pk())); @@ -677,7 +763,7 @@ struct PayChan_test : public beast::unit_test::suite // Claim to a channel where dst disallows XRP (channel is // created before disallow xrp is set). Ignore that flag // since it is just advisory. - Env env(*this); + Env env{*this, features}; env.fund(XRP(10000), alice, bob); auto const chan = channel(alice, bob, env.seq(alice)); env(create(alice, bob, XRP(1000), 3600s, alice.pk())); @@ -690,14 +776,14 @@ struct PayChan_test : public beast::unit_test::suite } void - testDstTag() + testDstTag(FeatureBitset features) { // auth amount defaults to balance if not present testcase("Dst Tag"); using namespace jtx; using namespace std::literals::chrono_literals; // Create a channel where dst disallows XRP - Env env(*this); + Env env{*this, features}; auto const alice = Account("alice"); auto const bob = Account("bob"); env.fund(XRP(10000), alice, bob); @@ -720,7 +806,7 @@ struct PayChan_test : public beast::unit_test::suite } void - testDepositAuth() + testDepositAuth(FeatureBitset features) { testcase("Deposit Authorization"); using namespace jtx; @@ -731,7 +817,7 @@ struct PayChan_test : public beast::unit_test::suite auto const carol = Account("carol"); auto USDA = alice["USD"]; { - Env env(*this); + Env env{*this, features}; env.fund(XRP(10000), alice, bob, carol); env(fset(bob, asfDepositAuth)); @@ -844,13 +930,13 @@ struct PayChan_test : public beast::unit_test::suite } void - testMultiple() + testMultiple(FeatureBitset features) { // auth amount defaults to balance if not present testcase("Multiple channels to the same account"); using namespace jtx; using namespace std::literals::chrono_literals; - Env env(*this); + Env env{*this, features}; auto const alice = Account("alice"); auto const bob = Account("bob"); env.fund(XRP(10000), alice, bob); @@ -867,13 +953,13 @@ struct PayChan_test : public beast::unit_test::suite } void - testAccountChannelsRPC() + testAccountChannelsRPC(FeatureBitset features) { testcase("AccountChannels RPC"); using namespace jtx; using namespace std::literals::chrono_literals; - Env env(*this); + Env env{*this, features}; auto const alice = Account("alice"); auto const bob = Account("bob"); auto const charlie = Account("charlie", KeyType::ed25519); @@ -922,7 +1008,7 @@ struct PayChan_test : public beast::unit_test::suite } void - testAccountChannelsRPCMarkers() + testAccountChannelsRPCMarkers(FeatureBitset features) { testcase("Account channels RPC markers"); @@ -941,7 +1027,7 @@ struct PayChan_test : public beast::unit_test::suite return r; }(); - Env env(*this); + Env env{*this, features}; env.fund(XRP(10000), alice); for (auto const& a : bobs) { @@ -1038,7 +1124,7 @@ struct PayChan_test : public beast::unit_test::suite } void - testAccountChannelsRPCSenderOnly() + testAccountChannelsRPCSenderOnly(FeatureBitset features) { // Check that the account_channels command only returns channels owned // by the account @@ -1049,7 +1135,7 @@ struct PayChan_test : public 
beast::unit_test::suite auto const alice = Account("alice"); auto const bob = Account("bob"); - Env env(*this); + Env env{*this, features}; env.fund(XRP(10000), alice, bob); // Create a channel from alice to bob and from bob to alice @@ -1075,12 +1161,12 @@ struct PayChan_test : public beast::unit_test::suite } void - testAuthVerifyRPC() + testAuthVerifyRPC(FeatureBitset features) { testcase("PayChan Auth/Verify RPC"); using namespace jtx; using namespace std::literals::chrono_literals; - Env env(*this); + Env env{*this, features}; auto const alice = Account("alice"); auto const bob = Account("bob"); auto const charlie = Account("charlie", KeyType::ed25519); @@ -1415,12 +1501,12 @@ struct PayChan_test : public beast::unit_test::suite } void - testOptionalFields() + testOptionalFields(FeatureBitset features) { testcase("Optional Fields"); using namespace jtx; using namespace std::literals::chrono_literals; - Env env(*this); + Env env{*this, features}; auto const alice = Account("alice"); auto const bob = Account("bob"); auto const carol = Account("carol"); @@ -1466,12 +1552,12 @@ struct PayChan_test : public beast::unit_test::suite } void - testMalformedPK() + testMalformedPK(FeatureBitset features) { testcase("malformed pk"); using namespace jtx; using namespace std::literals::chrono_literals; - Env env(*this); + Env env{*this, features}; auto const alice = Account("alice"); auto const bob = Account("bob"); auto USDA = alice["USD"]; @@ -1536,7 +1622,7 @@ struct PayChan_test : public beast::unit_test::suite } void - testMetaAndOwnership() + testMetaAndOwnership(FeatureBitset features) { testcase("Metadata & Ownership"); @@ -1565,8 +1651,7 @@ struct PayChan_test : public beast::unit_test::suite { // Test without adding the paychan to the recipient's owner // directory - Env env( - *this, supported_amendments() - fixPayChanRecipientOwnerDir); + Env env(*this, features - fixPayChanRecipientOwnerDir); env.fund(XRP(10000), alice, bob); env(create(alice, bob, XRP(1000), settleDelay, pk)); env.close(); @@ -1587,7 +1672,7 @@ struct PayChan_test : public beast::unit_test::suite { // Test with adding the paychan to the recipient's owner directory - Env env(*this); + Env env{*this, features}; env.fund(XRP(10000), alice, bob); env(create(alice, bob, XRP(1000), settleDelay, pk)); env.close(); @@ -1609,8 +1694,7 @@ struct PayChan_test : public beast::unit_test::suite { // Test removing paychans created before adding to the recipient's // owner directory - Env env( - *this, supported_amendments() - fixPayChanRecipientOwnerDir); + Env env(*this, features - fixPayChanRecipientOwnerDir); env.fund(XRP(10000), alice, bob); // create the channel before the amendment activates env(create(alice, bob, XRP(1000), settleDelay, pk)); @@ -1644,7 +1728,7 @@ struct PayChan_test : public beast::unit_test::suite } void - testAccountDelete() + testAccountDelete(FeatureBitset features) { testcase("Account Delete"); using namespace test::jtx; @@ -1678,8 +1762,8 @@ struct PayChan_test : public beast::unit_test::suite for (bool const withOwnerDirFix : {false, true}) { auto const amd = withOwnerDirFix - ? supported_amendments() - : supported_amendments() - fixPayChanRecipientOwnerDir; + ? 
features + : features - fixPayChanRecipientOwnerDir; Env env{*this, amd}; env.fund(XRP(10000), alice, bob, carol); env.close(); @@ -1771,8 +1855,7 @@ struct PayChan_test : public beast::unit_test::suite { // test resurrected account - Env env{ - *this, supported_amendments() - fixPayChanRecipientOwnerDir}; + Env env{*this, features - fixPayChanRecipientOwnerDir}; env.fund(XRP(10000), alice, bob, carol); env.close(); auto const feeDrops = env.current()->fees().base; @@ -1878,12 +1961,12 @@ struct PayChan_test : public beast::unit_test::suite } void - testUsingTickets() + testUsingTickets(FeatureBitset features) { testcase("using tickets"); using namespace jtx; using namespace std::literals::chrono_literals; - Env env(*this); + Env env{*this, features}; auto const alice = Account("alice"); auto const bob = Account("bob"); auto USDA = alice["USD"]; @@ -2039,28 +2122,39 @@ struct PayChan_test : public beast::unit_test::suite BEAST_EXPECT(env.seq(bob) == bobSeq); } + void + testWithFeats(FeatureBitset features) + { + testSimple(features); + testDisallowIncoming(features); + testCancelAfter(features); + testSettleDelay(features); + testExpiration(features); + testCloseDry(features); + testDefaultAmount(features); + testDisallowXRP(features); + testDstTag(features); + testDepositAuth(features); + testMultiple(features); + testAccountChannelsRPC(features); + testAccountChannelsRPCMarkers(features); + testAccountChannelsRPCSenderOnly(features); + testAuthVerifyRPC(features); + testOptionalFields(features); + testMalformedPK(features); + testMetaAndOwnership(features); + testAccountDelete(features); + testUsingTickets(features); + } + +public: void run() override { - testSimple(); - testCancelAfter(); - testSettleDelay(); - testExpiration(); - testCloseDry(); - testDefaultAmount(); - testDisallowXRP(); - testDstTag(); - testDepositAuth(); - testMultiple(); - testAccountChannelsRPC(); - testAccountChannelsRPCMarkers(); - testAccountChannelsRPCSenderOnly(); - testAuthVerifyRPC(); - testOptionalFields(); - testMalformedPK(); - testMetaAndOwnership(); - testAccountDelete(); - testUsingTickets(); + using namespace test::jtx; + FeatureBitset const all{supported_amendments()}; + testWithFeats(all - disallowIncoming); + testWithFeats(all); } }; diff --git a/src/test/app/SetTrust_test.cpp b/src/test/app/SetTrust_test.cpp index 45a9e5c767e..fce9c4295c2 100644 --- a/src/test/app/SetTrust_test.cpp +++ b/src/test/app/SetTrust_test.cpp @@ -26,9 +26,14 @@ namespace test { class SetTrust_test : public beast::unit_test::suite { + FeatureBitset const disallowIncoming{featureDisallowIncoming}; + public: void - testFreeTrustlines(bool thirdLineCreatesLE, bool createOnHighAcct) + testFreeTrustlines( + FeatureBitset features, + bool thirdLineCreatesLE, + bool createOnHighAcct) { if (thirdLineCreatesLE) testcase("Allow two free trustlines"); @@ -36,7 +41,7 @@ class SetTrust_test : public beast::unit_test::suite testcase("Dynamic reserve for trustline"); using namespace jtx; - Env env(*this); + Env env(*this, features); auto const gwA = Account{"gwA"}; auto const gwB = Account{"gwB"}; @@ -107,14 +112,14 @@ class SetTrust_test : public beast::unit_test::suite } void - testTicketSetTrust() + testTicketSetTrust(FeatureBitset features) { testcase("SetTrust using a ticket"); using namespace jtx; // Verify that TrustSet transactions can use tickets. 
- Env env{*this}; + Env env{*this, features}; auto const gw = Account{"gateway"}; auto const alice = Account{"alice"}; auto const USD = gw["USD"]; @@ -152,12 +157,12 @@ class SetTrust_test : public beast::unit_test::suite } void - testMalformedTransaction() + testMalformedTransaction(FeatureBitset features) { testcase("SetTrust checks for malformed transactions"); using namespace jtx; - Env env{*this}; + Env env{*this, features}; auto const gw = Account{"gateway"}; auto const alice = Account{"alice"}; @@ -199,14 +204,17 @@ class SetTrust_test : public beast::unit_test::suite } void - testModifyQualityOfTrustline(bool createQuality, bool createOnHighAcct) + testModifyQualityOfTrustline( + FeatureBitset features, + bool createQuality, + bool createOnHighAcct) { testcase << "SetTrust " << (createQuality ? "creates" : "removes") << " quality of trustline for " << (createOnHighAcct ? "high" : "low") << " account"; using namespace jtx; - Env env{*this}; + Env env{*this, features}; auto const alice = Account{"alice"}; auto const bob = Account{"bob"}; @@ -249,20 +257,119 @@ class SetTrust_test : public beast::unit_test::suite } void - run() override + testDisallowIncoming(FeatureBitset features) + { + testcase("Create trustline with disallow incoming"); + + using namespace test::jtx; + + // test flag doesn't set unless amendment enabled + { + Env env{*this, features - disallowIncoming}; + Account const alice{"alice"}; + env.fund(XRP(10000), alice); + env(fset(alice, asfDisallowIncomingTrustline)); + env.close(); + auto const sle = env.le(alice); + uint32_t flags = sle->getFlags(); + BEAST_EXPECT(!(flags & lsfDisallowIncomingTrustline)); + } + + Env env{*this, features | disallowIncoming}; + + auto const gw = Account{"gateway"}; + auto const alice = Account{"alice"}; + auto const bob = Account{"bob"}; + auto const USD = gw["USD"]; + + env.fund(XRP(10000), gw, alice, bob); + env.close(); + + // Set flag on gateway + env(fset(gw, asfDisallowIncomingTrustline)); + env.close(); + + // Create a trustline which will fail + env(trust(alice, USD(1000)), ter(tecNO_PERMISSION)); + env.close(); + + // Unset the flag + env(fclear(gw, asfDisallowIncomingTrustline)); + env.close(); + + // Create a trustline which will now succeed + env(trust(alice, USD(1000))); + env.close(); + + // Now the payment succeeds. 
+ env(pay(gw, alice, USD(200))); + env.close(); + + // Set flag on gateway again + env(fset(gw, asfDisallowIncomingTrustline)); + env.close(); + + // Destroy the balance by sending it back + env(pay(gw, alice, USD(200))); + env.close(); + + // The trustline still exists in default state + // So a further payment should work + env(pay(gw, alice, USD(200))); + env.close(); + + // Also set the flag on bob + env(fset(bob, asfDisallowIncomingTrustline)); + env.close(); + + // But now bob can't open a trustline because he didn't already have one + env(trust(bob, USD(1000)), ter(tecNO_PERMISSION)); + env.close(); + + // The gateway also can't open this trustline because bob has the flag + // set + env(trust(gw, bob["USD"](1000)), ter(tecNO_PERMISSION)); + env.close(); + + // Unset the flag only on the gateway + env(fclear(gw, asfDisallowIncomingTrustline)); + env.close(); + + // Now bob can open a trustline + env(trust(bob, USD(1000))); + env.close(); + + // And the gateway can send bob a balance + env(pay(gw, bob, USD(200))); + env.close(); + } + + void + testWithFeats(FeatureBitset features) { - testFreeTrustlines(true, false); - testFreeTrustlines(false, true); - testFreeTrustlines(false, true); + testFreeTrustlines(features, true, false); + testFreeTrustlines(features, false, true); + testFreeTrustlines(features, false, true); // true, true case doesn't matter since creating a trustline ledger // entry requires reserve from the creator // independent of hi/low account ids for endpoints - testTicketSetTrust(); - testMalformedTransaction(); - testModifyQualityOfTrustline(false, false); - testModifyQualityOfTrustline(false, true); - testModifyQualityOfTrustline(true, false); - testModifyQualityOfTrustline(true, true); + testTicketSetTrust(features); + testMalformedTransaction(features); + testModifyQualityOfTrustline(features, false, false); + testModifyQualityOfTrustline(features, false, true); + testModifyQualityOfTrustline(features, true, false); + testModifyQualityOfTrustline(features, true, true); + testDisallowIncoming(features); + } + +public: + void + run() override + { + using namespace test::jtx; + auto const sa = supported_amendments(); + testWithFeats(sa - disallowIncoming); + testWithFeats(sa); } }; BEAST_DEFINE_TESTSUITE(SetTrust, app, ripple); diff --git a/src/test/rpc/AccountSet_test.cpp b/src/test/rpc/AccountSet_test.cpp index 8e1ec790b12..b3ca4c9f017 100644 --- a/src/test/rpc/AccountSet_test.cpp +++ b/src/test/rpc/AccountSet_test.cpp @@ -75,6 +75,7 @@ class AccountSet_test : public beast::unit_test::suite // elsewhere. continue; } + if (flag == asfAuthorizedNFTokenMinter) { // The asfAuthorizedNFTokenMinter flag requires the @@ -82,8 +83,18 @@ class AccountSet_test : public beast::unit_test::suite // the transaction. It is tested elsewhere. 
continue; } - else if ( - std::find(goodFlags.begin(), goodFlags.end(), flag) != + + if (flag == asfDisallowIncomingCheck || + flag == asfDisallowIncomingPayChan || + flag == asfDisallowIncomingNFTOffer || + flag == asfDisallowIncomingTrustline) + { + // These flags are part of the DisallowIncoming amendment + // and are tested elsewhere + continue; + } + + if (std::find(goodFlags.begin(), goodFlags.end(), flag) != goodFlags.end()) { // Good flag From fda9e9a7eedf1ab8e9bd2a9aa1ad97d6c69eafd1 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Wed, 21 Dec 2022 12:41:16 -0600 Subject: [PATCH 075/508] Fix Doxygen workflow (#4372) --- .github/workflows/doxygen.yml | 11 ++- .github/workflows/nix.yml | 2 +- Builds/CMake/RippledDocs.cmake | 156 ++++++++++++++++----------------- CMakeLists.txt | 7 +- 4 files changed, 93 insertions(+), 83 deletions(-) diff --git a/.github/workflows/doxygen.yml b/.github/workflows/doxygen.yml index 9a56185c52c..db98018753d 100644 --- a/.github/workflows/doxygen.yml +++ b/.github/workflows/doxygen.yml @@ -1,4 +1,5 @@ name: Build and publish Doxygen documentation +# To test this workflow, push your changes to your fork's `develop` branch. on: push: branches: @@ -11,12 +12,18 @@ jobs: image: docker://rippleci/rippled-ci-builder:2944b78d22db steps: - name: checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 + - name: check environment + run: | + echo ${PATH} | tr ':' '\n' + cmake --version + doxygen --version + env - name: build run: | mkdir build cd build - cmake -DBoost_NO_BOOST_CMAKE=ON .. + cmake -Donly_docs=TRUE .. cmake --build . --target docs --parallel $(nproc) - name: publish uses: peaceiris/actions-gh-pages@v3 diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index a37ea9f132a..34030b3429b 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -74,7 +74,7 @@ jobs: run: | mkdir ${build_dir} cd ${build_dir} - conan install .. --build missing --settings build_type=${{ matrix.configuration }} --profile:build default --profile:host default + conan install .. --build missing --settings build_type=${{ matrix.configuration }} - name: configure run: | cd ${build_dir} diff --git a/Builds/CMake/RippledDocs.cmake b/Builds/CMake/RippledDocs.cmake index 883fd616256..e7c42942a77 100644 --- a/Builds/CMake/RippledDocs.cmake +++ b/Builds/CMake/RippledDocs.cmake @@ -2,85 +2,83 @@ docs target (optional) #]===================================================================] -# Early return if the `docs` directory is missing, -# e.g. when we are building a Conan package. -if(NOT EXISTS docs) +option(with_docs "Include the docs target?" FALSE) + +if(NOT (with_docs OR only_docs)) return() endif() -if (tests) - find_package (Doxygen) - if (NOT TARGET Doxygen::doxygen) - message (STATUS "doxygen executable not found -- skipping docs target") - return () - endif () - - set (doxygen_output_directory "${CMAKE_BINARY_DIR}/docs") - set (doxygen_include_path "${CMAKE_CURRENT_SOURCE_DIR}/src") - set (doxygen_index_file "${doxygen_output_directory}/html/index.html") - set (doxyfile "${CMAKE_CURRENT_SOURCE_DIR}/docs/Doxyfile") - - file (GLOB_RECURSE doxygen_input - docs/*.md - src/ripple/*.h - src/ripple/*.cpp - src/ripple/*.md - src/test/*.h - src/test/*.md - Builds/*/README.md) - list (APPEND doxygen_input - README.md - RELEASENOTES.md - src/README.md) - set (dependencies "${doxygen_input}" "${doxyfile}") - - function (verbose_find_path variable name) - # find_path sets a CACHE variable, so don't try using a "local" variable. 
- find_path (${variable} "${name}" ${ARGN}) - if (NOT ${variable}) - message (NOTICE "could not find ${name}") - else () - message (STATUS "found ${name}: ${${variable}}/${name}") - endif () - endfunction () - - verbose_find_path (doxygen_plantuml_jar_path plantuml.jar PATH_SUFFIXES share/plantuml) - verbose_find_path (doxygen_dot_path dot) - - # https://en.cppreference.com/w/Cppreference:Archives - # https://stackoverflow.com/questions/60822559/how-to-move-a-file-download-from-configure-step-to-build-step - set (download_script "${CMAKE_BINARY_DIR}/docs/download-cppreference.cmake") - file (WRITE - "${download_script}" - "file (DOWNLOAD \ - http://upload.cppreference.com/mwiki/images/b/b2/html_book_20190607.zip \ - ${CMAKE_BINARY_DIR}/docs/cppreference.zip \ - EXPECTED_HASH MD5=82b3a612d7d35a83e3cb1195a63689ab \ - )\n \ - execute_process ( \ - COMMAND \"${CMAKE_COMMAND}\" -E tar -xf cppreference.zip \ - )\n" - ) - set (tagfile "${CMAKE_BINARY_DIR}/docs/cppreference-doxygen-web.tag.xml") - add_custom_command ( - OUTPUT "${tagfile}" - COMMAND "${CMAKE_COMMAND}" -P "${download_script}" - WORKING_DIRECTORY "${CMAKE_BINARY_DIR}/docs" - ) - set (doxygen_tagfiles "${tagfile}=http://en.cppreference.com/w/") - - add_custom_command ( - OUTPUT "${doxygen_index_file}" - COMMAND "${CMAKE_COMMAND}" -E env - "DOXYGEN_OUTPUT_DIRECTORY=${doxygen_output_directory}" - "DOXYGEN_INCLUDE_PATH=${doxygen_include_path}" - "DOXYGEN_TAGFILES=${doxygen_tagfiles}" - "DOXYGEN_PLANTUML_JAR_PATH=${doxygen_plantuml_jar_path}" - "DOXYGEN_DOT_PATH=${doxygen_dot_path}" - "${DOXYGEN_EXECUTABLE}" "${doxyfile}" - WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" - DEPENDS "${dependencies}" "${tagfile}") - add_custom_target (docs - DEPENDS "${doxygen_index_file}" - SOURCES "${dependencies}") -endif () +find_package(Doxygen) +if(NOT TARGET Doxygen::doxygen) + message(STATUS "doxygen executable not found -- skipping docs target") + return() +endif() + +set(doxygen_output_directory "${CMAKE_BINARY_DIR}/docs") +set(doxygen_include_path "${CMAKE_CURRENT_SOURCE_DIR}/src") +set(doxygen_index_file "${doxygen_output_directory}/html/index.html") +set(doxyfile "${CMAKE_CURRENT_SOURCE_DIR}/docs/Doxyfile") + +file(GLOB_RECURSE doxygen_input + docs/*.md + src/ripple/*.h + src/ripple/*.cpp + src/ripple/*.md + src/test/*.h + src/test/*.md + Builds/*/README.md) +list(APPEND doxygen_input + README.md + RELEASENOTES.md + src/README.md) +set(dependencies "${doxygen_input}" "${doxyfile}") + +function(verbose_find_path variable name) + # find_path sets a CACHE variable, so don't try using a "local" variable. 
+ find_path(${variable} "${name}" ${ARGN}) + if(NOT ${variable}) + message(NOTICE "could not find ${name}") + else() + message(STATUS "found ${name}: ${${variable}}/${name}") + endif() +endfunction() + +verbose_find_path(doxygen_plantuml_jar_path plantuml.jar PATH_SUFFIXES share/plantuml) +verbose_find_path(doxygen_dot_path dot) + +# https://en.cppreference.com/w/Cppreference:Archives +# https://stackoverflow.com/questions/60822559/how-to-move-a-file-download-from-configure-step-to-build-step +set(download_script "${CMAKE_BINARY_DIR}/docs/download-cppreference.cmake") +file(WRITE + "${download_script}" + "file(DOWNLOAD \ + http://upload.cppreference.com/mwiki/images/b/b2/html_book_20190607.zip \ + ${CMAKE_BINARY_DIR}/docs/cppreference.zip \ + EXPECTED_HASH MD5=82b3a612d7d35a83e3cb1195a63689ab \ + )\n \ + execute_process( \ + COMMAND \"${CMAKE_COMMAND}\" -E tar -xf cppreference.zip \ + )\n" +) +set(tagfile "${CMAKE_BINARY_DIR}/docs/cppreference-doxygen-web.tag.xml") +add_custom_command( + OUTPUT "${tagfile}" + COMMAND "${CMAKE_COMMAND}" -P "${download_script}" + WORKING_DIRECTORY "${CMAKE_BINARY_DIR}/docs" +) +set(doxygen_tagfiles "${tagfile}=http://en.cppreference.com/w/") + +add_custom_command( + OUTPUT "${doxygen_index_file}" + COMMAND "${CMAKE_COMMAND}" -E env + "DOXYGEN_OUTPUT_DIRECTORY=${doxygen_output_directory}" + "DOXYGEN_INCLUDE_PATH=${doxygen_include_path}" + "DOXYGEN_TAGFILES=${doxygen_tagfiles}" + "DOXYGEN_PLANTUML_JAR_PATH=${doxygen_plantuml_jar_path}" + "DOXYGEN_DOT_PATH=${doxygen_dot_path}" + "${DOXYGEN_EXECUTABLE}" "${doxyfile}" + WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" + DEPENDS "${dependencies}" "${tagfile}") +add_custom_target(docs + DEPENDS "${doxygen_index_file}" + SOURCES "${dependencies}") diff --git a/CMakeLists.txt b/CMakeLists.txt index a3696e89d04..da2ea1a5307 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -58,6 +58,12 @@ endif () include(RippledCompiler) include(RippledInterface) +option(only_docs "Include only the docs target?" FALSE) +include(RippledDocs) +if(only_docs) + return() +endif() + ### include(deps/Boost) @@ -119,5 +125,4 @@ include(RippledCore) include(RippledInstall) include(RippledCov) include(RippledMultiConfig) -include(RippledDocs) include(RippledValidatorKeys) From 6f8750316c3adb67c3bfba1dc2bb4665bd42c37b Mon Sep 17 00:00:00 2001 From: Scott Schurr Date: Tue, 3 Jan 2023 07:24:45 -1000 Subject: [PATCH 076/508] RPC tooBusy response has 503 HTTP status if "ripplerpc": "3.0": (#4143) Fixes #4005 Makes it possible for internal RPC Error Codes to associate themselves with a non-OK (200) HTTP status code. There are quite a number of RPC responses in addition to tooBusy that now have non-OK HTTP status codes. The new return HTTP return codes are only enabled by including "ripplerpc": "3.0" or higher in the original request. Otherwise the historical value, 200, continues to be returned. This ensures that this is not a breaking change. --- src/ripple/net/impl/RPCCall.cpp | 11 +- src/ripple/protocol/ErrorCodes.h | 26 +++- src/ripple/protocol/impl/ErrorCodes.cpp | 180 +++++++++++------------ src/ripple/rpc/impl/ServerHandlerImp.cpp | 26 +++- src/ripple/server/impl/JSONRPCUtil.cpp | 17 ++- src/test/rpc/LedgerRPC_test.cpp | 2 +- 6 files changed, 154 insertions(+), 108 deletions(-) diff --git a/src/ripple/net/impl/RPCCall.cpp b/src/ripple/net/impl/RPCCall.cpp index eb4906f3af7..b475afe9dfb 100644 --- a/src/ripple/net/impl/RPCCall.cpp +++ b/src/ripple/net/impl/RPCCall.cpp @@ -1396,16 +1396,7 @@ struct RPCCallImp // callbackFuncP. 
// Receive reply - if (iStatus == 401) - Throw( - "incorrect rpcuser or rpcpassword (authorization failed)"); - else if ( - (iStatus >= 400) && (iStatus != 400) && (iStatus != 404) && - (iStatus != 500)) // ? - Throw( - std::string("server returned HTTP error ") + - std::to_string(iStatus)); - else if (strData.empty()) + if (strData.empty()) Throw("no response from server"); // Parse reply diff --git a/src/ripple/protocol/ErrorCodes.h b/src/ripple/protocol/ErrorCodes.h index 98a8cf43a39..ee33eee0604 100644 --- a/src/ripple/protocol/ErrorCodes.h +++ b/src/ripple/protocol/ErrorCodes.h @@ -163,12 +163,15 @@ enum warning_code_i { namespace RPC { -/** Maps an rpc error code to its token and default message. */ +/** Maps an rpc error code to its token, default message, and HTTP status. */ struct ErrorInfo { // Default ctor needed to produce an empty std::array during constexpr eval. constexpr ErrorInfo() - : code(rpcUNKNOWN), token("unknown"), message("An unknown error code.") + : code(rpcUNKNOWN) + , token("unknown") + , message("An unknown error code.") + , http_status(200) { } @@ -176,13 +179,26 @@ struct ErrorInfo error_code_i code_, char const* token_, char const* message_) - : code(code_), token(token_), message(message_) + : code(code_), token(token_), message(message_), http_status(200) + { + } + + constexpr ErrorInfo( + error_code_i code_, + char const* token_, + char const* message_, + int http_status_) + : code(code_) + , token(token_) + , message(message_) + , http_status(http_status_) { } error_code_i code; Json::StaticString token; Json::StaticString message; + int http_status; }; /** Returns an ErrorInfo that reflects the error code. */ @@ -332,6 +348,10 @@ not_validator_error() bool contains_error(Json::Value const& json); +/** Returns http status that corresponds to the error code. */ +int +error_code_http_status(error_code_i code); + } // namespace RPC /** Returns a single string with the contents of an RPC error. */ diff --git a/src/ripple/protocol/impl/ErrorCodes.cpp b/src/ripple/protocol/impl/ErrorCodes.cpp index e4a9acf4677..bb3b2d47a89 100644 --- a/src/ripple/protocol/impl/ErrorCodes.cpp +++ b/src/ripple/protocol/impl/ErrorCodes.cpp @@ -18,6 +18,7 @@ //============================================================================== #include +#include #include #include @@ -26,105 +27,96 @@ namespace RPC { namespace detail { -// clang-format off // Unordered array of ErrorInfos, so we don't have to maintain the list // ordering by hand. // // This array will be omitted from the object file; only the sorted version // will remain in the object file. But the string literals will remain. 
-constexpr static ErrorInfo unorderedErrorInfos[]{ - {rpcACT_MALFORMED, "actMalformed", "Account malformed."}, - {rpcACT_NOT_FOUND, "actNotFound", "Account not found."}, - {rpcALREADY_MULTISIG, "alreadyMultisig", "Already multisigned."}, - {rpcALREADY_SINGLE_SIG, "alreadySingleSig", "Already single-signed."}, - {rpcAMENDMENT_BLOCKED, "amendmentBlocked", "Amendment blocked, need upgrade."}, - {rpcEXPIRED_VALIDATOR_LIST, "unlBlocked", "Validator list expired."}, - {rpcATX_DEPRECATED, "deprecated", "Use the new API or specify a ledger range."}, - {rpcBAD_KEY_TYPE, "badKeyType", "Bad key type."}, - {rpcBAD_FEATURE, "badFeature", "Feature unknown or invalid."}, - {rpcBAD_ISSUER, "badIssuer", "Issuer account malformed."}, - {rpcBAD_MARKET, "badMarket", "No such market."}, - {rpcBAD_SECRET, "badSecret", "Secret does not match account."}, - {rpcBAD_SEED, "badSeed", "Disallowed seed."}, - {rpcBAD_SYNTAX, "badSyntax", "Syntax error."}, - {rpcCHANNEL_MALFORMED, "channelMalformed", "Payment channel is malformed."}, - {rpcCHANNEL_AMT_MALFORMED, "channelAmtMalformed", "Payment channel amount is malformed."}, - {rpcCOMMAND_MISSING, "commandMissing", "Missing command entry."}, - {rpcDB_DESERIALIZATION, "dbDeserialization", "Database deserialization error."}, - {rpcDST_ACT_MALFORMED, "dstActMalformed", "Destination account is malformed."}, - {rpcDST_ACT_MISSING, "dstActMissing", "Destination account not provided."}, - {rpcDST_ACT_NOT_FOUND, "dstActNotFound", "Destination account not found."}, - {rpcDST_AMT_MALFORMED, "dstAmtMalformed", "Destination amount/currency/issuer is malformed."}, - {rpcDST_AMT_MISSING, "dstAmtMissing", "Destination amount/currency/issuer is missing."}, - {rpcDST_ISR_MALFORMED, "dstIsrMalformed", "Destination issuer is malformed."}, - {rpcEXCESSIVE_LGR_RANGE, "excessiveLgrRange", "Ledger range exceeds 1000."}, - {rpcFORBIDDEN, "forbidden", "Bad credentials."}, - {rpcFAILED_TO_FORWARD, "failedToForward", "Failed to forward request to p2p node"}, - {rpcHIGH_FEE, "highFee", "Current transaction fee exceeds your limit."}, - {rpcINTERNAL, "internal", "Internal error."}, - {rpcINVALID_LGR_RANGE, "invalidLgrRange", "Ledger range is invalid."}, - {rpcINVALID_PARAMS, "invalidParams", "Invalid parameters."}, - {rpcJSON_RPC, "json_rpc", "JSON-RPC transport error."}, - {rpcLGR_IDXS_INVALID, "lgrIdxsInvalid", "Ledger indexes invalid."}, - {rpcLGR_IDX_MALFORMED, "lgrIdxMalformed", "Ledger index malformed."}, - {rpcLGR_NOT_FOUND, "lgrNotFound", "Ledger not found."}, - {rpcLGR_NOT_VALIDATED, "lgrNotValidated", "Ledger not validated."}, - {rpcMASTER_DISABLED, "masterDisabled", "Master key is disabled."}, - {rpcNOT_ENABLED, "notEnabled", "Not enabled in configuration."}, - {rpcNOT_IMPL, "notImpl", "Not implemented."}, - {rpcNOT_READY, "notReady", "Not ready to handle this request."}, - {rpcNOT_SUPPORTED, "notSupported", "Operation not supported."}, - {rpcNO_CLOSED, "noClosed", "Closed ledger is unavailable."}, - {rpcNO_CURRENT, "noCurrent", "Current ledger is unavailable."}, - {rpcNOT_SYNCED, "notSynced", "Not synced to the network."}, - {rpcNO_EVENTS, "noEvents", "Current transport does not support events."}, - {rpcNO_NETWORK, "noNetwork", "Not synced to the network."}, - {rpcNO_PERMISSION, "noPermission", "You don't have permission for this command."}, - {rpcNO_PF_REQUEST, "noPathRequest", "No pathfinding request in progress."}, - {rpcPUBLIC_MALFORMED, "publicMalformed", "Public key is malformed."}, - {rpcREPORTING_UNSUPPORTED, "reportingUnsupported", "Requested operation not supported by reporting 
mode server"}, - {rpcSIGNING_MALFORMED, "signingMalformed", "Signing of transaction is malformed."}, - {rpcSLOW_DOWN, "slowDown", "You are placing too much load on the server."}, - {rpcSRC_ACT_MALFORMED, "srcActMalformed", "Source account is malformed."}, - {rpcSRC_ACT_MISSING, "srcActMissing", "Source account not provided."}, - {rpcSRC_ACT_NOT_FOUND, "srcActNotFound", "Source account not found."}, - {rpcSRC_CUR_MALFORMED, "srcCurMalformed", "Source currency is malformed."}, - {rpcSRC_ISR_MALFORMED, "srcIsrMalformed", "Source issuer is malformed."}, - {rpcSTREAM_MALFORMED, "malformedStream", "Stream malformed."}, - {rpcTOO_BUSY, "tooBusy", "The server is too busy to help you now."}, - {rpcTXN_NOT_FOUND, "txnNotFound", "Transaction not found."}, - {rpcUNKNOWN_COMMAND, "unknownCmd", "Unknown method."}, - {rpcSENDMAX_MALFORMED, "sendMaxMalformed", "SendMax amount malformed."}, - {rpcOBJECT_NOT_FOUND, "objectNotFound", "The requested object was not found."}}; -// clang-format on - -// C++ does not allow you to return an array from a function. You must -// return an object which may in turn contain an array. The following -// struct is simply defined so the enclosed array can be returned from a -// constexpr function. // -// In C++17 this struct can be replaced by a std::array. But in C++14 -// the constexpr methods of a std::array are not sufficient to perform the -// necessary work at compile time. -template -struct ErrorInfoArray -{ - // Visual Studio doesn't treat a templated aggregate as an aggregate. - // So, for Visual Studio, we define a constexpr default constructor. - constexpr ErrorInfoArray() : infos{} - { - } +// There's a certain amount of tension in determining the correct HTTP +// status to associate with a given RPC error. Initially all RPC errors +// returned 200 (OK). And that's the default behavior if no HTTP status code +// is specified below. +// +// The codes currently selected target the load balancer fail-over use case. +// If a query fails on one node but is likely to have a positive outcome +// on a different node, then the failure should return a 4xx/5xx range +// status code. 
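+// For example (illustrative; the values are taken from the table below):
+// rpcNOT_SYNCED and rpcTOO_BUSY map to 503, rpcLGR_NOT_FOUND maps to 404,
+// rpcINVALID_PARAMS maps to 400, and codes that do not specify a status keep
+// the historical default of 200.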
- ErrorInfo infos[N]; -}; +// clang-format off +constexpr static ErrorInfo unorderedErrorInfos[]{ + {rpcACT_MALFORMED, "actMalformed", "Account malformed."}, + {rpcACT_NOT_FOUND, "actNotFound", "Account not found."}, + {rpcALREADY_MULTISIG, "alreadyMultisig", "Already multisigned."}, + {rpcALREADY_SINGLE_SIG, "alreadySingleSig", "Already single-signed."}, + {rpcAMENDMENT_BLOCKED, "amendmentBlocked", "Amendment blocked, need upgrade.", 503}, + {rpcEXPIRED_VALIDATOR_LIST, "unlBlocked", "Validator list expired.", 503}, + {rpcATX_DEPRECATED, "deprecated", "Use the new API or specify a ledger range.", 400}, + {rpcBAD_KEY_TYPE, "badKeyType", "Bad key type.", 400}, + {rpcBAD_FEATURE, "badFeature", "Feature unknown or invalid.", 500}, + {rpcBAD_ISSUER, "badIssuer", "Issuer account malformed.", 400}, + {rpcBAD_MARKET, "badMarket", "No such market.", 404}, + {rpcBAD_SECRET, "badSecret", "Secret does not match account.", 403}, + {rpcBAD_SEED, "badSeed", "Disallowed seed.", 403}, + {rpcBAD_SYNTAX, "badSyntax", "Syntax error.", 400}, + {rpcCHANNEL_MALFORMED, "channelMalformed", "Payment channel is malformed.", 400}, + {rpcCHANNEL_AMT_MALFORMED, "channelAmtMalformed", "Payment channel amount is malformed.", 400}, + {rpcCOMMAND_MISSING, "commandMissing", "Missing command entry.", 400}, + {rpcDB_DESERIALIZATION, "dbDeserialization", "Database deserialization error.", 502}, + {rpcDST_ACT_MALFORMED, "dstActMalformed", "Destination account is malformed.", 400}, + {rpcDST_ACT_MISSING, "dstActMissing", "Destination account not provided.", 400}, + {rpcDST_ACT_NOT_FOUND, "dstActNotFound", "Destination account not found.", 404}, + {rpcDST_AMT_MALFORMED, "dstAmtMalformed", "Destination amount/currency/issuer is malformed.", 400}, + {rpcDST_AMT_MISSING, "dstAmtMissing", "Destination amount/currency/issuer is missing.", 400}, + {rpcDST_ISR_MALFORMED, "dstIsrMalformed", "Destination issuer is malformed.", 400}, + {rpcEXCESSIVE_LGR_RANGE, "excessiveLgrRange", "Ledger range exceeds 1000.", 400}, + {rpcFORBIDDEN, "forbidden", "Bad credentials.", 403}, + {rpcFAILED_TO_FORWARD, "failedToForward", "Failed to forward request to p2p node", 503}, + {rpcHIGH_FEE, "highFee", "Current transaction fee exceeds your limit.", 402}, + {rpcINTERNAL, "internal", "Internal error.", 500}, + {rpcINVALID_LGR_RANGE, "invalidLgrRange", "Ledger range is invalid.", 400}, + {rpcINVALID_PARAMS, "invalidParams", "Invalid parameters.", 400}, + {rpcJSON_RPC, "json_rpc", "JSON-RPC transport error.", 500}, + {rpcLGR_IDXS_INVALID, "lgrIdxsInvalid", "Ledger indexes invalid.", 400}, + {rpcLGR_IDX_MALFORMED, "lgrIdxMalformed", "Ledger index malformed.", 400}, + {rpcLGR_NOT_FOUND, "lgrNotFound", "Ledger not found.", 404}, + {rpcLGR_NOT_VALIDATED, "lgrNotValidated", "Ledger not validated.", 202}, + {rpcMASTER_DISABLED, "masterDisabled", "Master key is disabled.", 403}, + {rpcNOT_ENABLED, "notEnabled", "Not enabled in configuration.", 501}, + {rpcNOT_IMPL, "notImpl", "Not implemented.", 501}, + {rpcNOT_READY, "notReady", "Not ready to handle this request.", 503}, + {rpcNOT_SUPPORTED, "notSupported", "Operation not supported.", 501}, + {rpcNO_CLOSED, "noClosed", "Closed ledger is unavailable.", 503}, + {rpcNO_CURRENT, "noCurrent", "Current ledger is unavailable.", 503}, + {rpcNOT_SYNCED, "notSynced", "Not synced to the network.", 503}, + {rpcNO_EVENTS, "noEvents", "Current transport does not support events.", 405}, + {rpcNO_NETWORK, "noNetwork", "Not synced to the network.", 503}, + {rpcNO_PERMISSION, "noPermission", "You don't have permission for this 
command.", 401}, + {rpcNO_PF_REQUEST, "noPathRequest", "No pathfinding request in progress.", 404}, + {rpcOBJECT_NOT_FOUND, "objectNotFound", "The requested object was not found.", 404}, + {rpcPUBLIC_MALFORMED, "publicMalformed", "Public key is malformed.", 400}, + {rpcREPORTING_UNSUPPORTED, "reportingUnsupported", "Requested operation not supported by reporting mode server", 405}, + {rpcSENDMAX_MALFORMED, "sendMaxMalformed", "SendMax amount malformed.", 400}, + {rpcSIGNING_MALFORMED, "signingMalformed", "Signing of transaction is malformed.", 400}, + {rpcSLOW_DOWN, "slowDown", "You are placing too much load on the server.", 429}, + {rpcSRC_ACT_MALFORMED, "srcActMalformed", "Source account is malformed.", 400}, + {rpcSRC_ACT_MISSING, "srcActMissing", "Source account not provided.", 400}, + {rpcSRC_ACT_NOT_FOUND, "srcActNotFound", "Source account not found.", 404}, + {rpcSRC_CUR_MALFORMED, "srcCurMalformed", "Source currency is malformed.", 400}, + {rpcSRC_ISR_MALFORMED, "srcIsrMalformed", "Source issuer is malformed.", 400}, + {rpcSTREAM_MALFORMED, "malformedStream", "Stream malformed.", 400}, + {rpcTOO_BUSY, "tooBusy", "The server is too busy to help you now.", 503}, + {rpcTXN_NOT_FOUND, "txnNotFound", "Transaction not found.", 404}, + {rpcUNKNOWN_COMMAND, "unknownCmd", "Unknown method.", 405}}; +// clang-format on // Sort and validate unorderedErrorInfos at compile time. Should be // converted to consteval when get to C++20. template constexpr auto -sortErrorInfos(ErrorInfo const (&unordered)[N]) -> ErrorInfoArray +sortErrorInfos(ErrorInfo const (&unordered)[N]) -> std::array { - ErrorInfoArray ret; + std::array ret = {}; for (ErrorInfo const& info : unordered) { @@ -135,12 +127,10 @@ sortErrorInfos(ErrorInfo const (&unordered)[N]) -> ErrorInfoArray static_assert(rpcSUCCESS == 0, "Unexpected error_code_i layout."); int const index{info.code - 1}; - if (ret.infos[index].code != rpcUNKNOWN) + if (ret[index].code != rpcUNKNOWN) throw(std::invalid_argument("Duplicate error_code_i in list")); - ret.infos[index].code = info.code; - ret.infos[index].token = info.token; - ret.infos[index].message = info.message; + ret[index] = info; } // Verify that all entries are filled in starting with 1 and proceeding @@ -150,7 +140,7 @@ sortErrorInfos(ErrorInfo const (&unordered)[N]) -> ErrorInfoArray // rpcUNKNOWN. But other than that all entries should match their index. int codeCount{0}; int expect{rpcBAD_SYNTAX - 1}; - for (ErrorInfo const& info : ret.infos) + for (ErrorInfo const& info : ret) { ++expect; if (info.code == rpcUNKNOWN) @@ -181,7 +171,7 @@ get_error_info(error_code_i code) { if (code <= rpcSUCCESS || code > rpcLAST) return detail::unknownError; - return detail::sortedErrorInfos.infos[code - 1]; + return detail::sortedErrorInfos[code - 1]; } Json::Value @@ -208,6 +198,12 @@ contains_error(Json::Value const& json) return false; } +int +error_code_http_status(error_code_i code) +{ + return get_error_info(code).http_status; +} + } // namespace RPC std::string diff --git a/src/ripple/rpc/impl/ServerHandlerImp.cpp b/src/ripple/rpc/impl/ServerHandlerImp.cpp index cb70fdcab4f..f269283b83a 100644 --- a/src/ripple/rpc/impl/ServerHandlerImp.cpp +++ b/src/ripple/rpc/impl/ServerHandlerImp.cpp @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -970,6 +971,29 @@ ServerHandlerImp::processRequest( } } } + + // If we're returning an error_code, use that to determine the HTTP status. 
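+    // Illustrative example (not an exhaustive description of the code below):
+    // a request that includes "ripplerpc": "3.0" and fails with rpcTOO_BUSY is
+    // answered with HTTP 503, while the same request without that field, or
+    // with a version lower than "3.0", keeps the historical HTTP 200.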
+ int const httpStatus = [&reply]() { + // This feature is enabled with ripplerpc version 3.0 and above. + // Before ripplerpc version 3.0 always return 200. + if (reply.isMember(jss::ripplerpc) && + reply[jss::ripplerpc].isString() && + reply[jss::ripplerpc].asString() >= "3.0") + { + // If there's an error_code, use that to determine the HTTP Status. + if (reply.isMember(jss::error) && + reply[jss::error].isMember(jss::error_code) && + reply[jss::error][jss::error_code].isInt()) + { + int const errCode = reply[jss::error][jss::error_code].asInt(); + return RPC::error_code_http_status( + static_cast(errCode)); + } + } + // Return OK. + return 200; + }(); + auto response = to_string(reply); rpc_time_.notify(std::chrono::duration_cast( @@ -988,7 +1012,7 @@ ServerHandlerImp::processRequest( stream << "Reply: " << response.substr(0, maxSize); } - HTTPReply(200, response, output, rpcJ); + HTTPReply(httpStatus, response, output, rpcJ); } //------------------------------------------------------------------------------ diff --git a/src/ripple/server/impl/JSONRPCUtil.cpp b/src/ripple/server/impl/JSONRPCUtil.cpp index f5bb815a959..12d12829ca9 100644 --- a/src/ripple/server/impl/JSONRPCUtil.cpp +++ b/src/ripple/server/impl/JSONRPCUtil.cpp @@ -61,7 +61,7 @@ HTTPReply( { JLOG(j.trace()) << "HTTP Reply " << nStatus << " " << content; - if (nStatus == 401) + if (content.empty() && nStatus == 401) { output("HTTP/1.0 401 Authorization Required\r\n"); output(getHTTPHeaderTimestamp()); @@ -100,18 +100,33 @@ HTTPReply( case 200: output("HTTP/1.1 200 OK\r\n"); break; + case 202: + output("HTTP/1.1 202 Accepted\r\n"); + break; case 400: output("HTTP/1.1 400 Bad Request\r\n"); break; + case 401: + output("HTTP/1.1 401 Authorization Required\r\n"); + break; case 403: output("HTTP/1.1 403 Forbidden\r\n"); break; case 404: output("HTTP/1.1 404 Not Found\r\n"); break; + case 405: + output("HTTP/1.1 405 Method Not Allowed\r\n"); + break; + case 429: + output("HTTP/1.1 429 Too Many Requests\r\n"); + break; case 500: output("HTTP/1.1 500 Internal Server Error\r\n"); break; + case 501: + output("HTTP/1.1 501 Not Implemented\r\n"); + break; case 503: output("HTTP/1.1 503 Server is overloaded\r\n"); break; diff --git a/src/test/rpc/LedgerRPC_test.cpp b/src/test/rpc/LedgerRPC_test.cpp index 2580c4bfef1..5494a81da63 100644 --- a/src/test/rpc/LedgerRPC_test.cpp +++ b/src/test/rpc/LedgerRPC_test.cpp @@ -1675,7 +1675,7 @@ class LedgerRPC_test : public beast::unit_test::suite void testLedgerAccountsOption() { - testcase("Ledger Request, Accounts Option"); + testcase("Ledger Request, Accounts Hashes"); using namespace test::jtx; Env env{*this}; From 61d8c7a85b752c926bea9b5645f34689399f4ae1 Mon Sep 17 00:00:00 2001 From: Scott Schurr Date: Wed, 4 Jan 2023 15:45:19 -0800 Subject: [PATCH 077/508] Add a unit test for invalid memos (#4287) --- Builds/CMake/RippledCore.cmake | 1 + src/test/protocol/Memo_test.cpp | 123 ++++++++++++++++++++++++++++++++ 2 files changed, 124 insertions(+) create mode 100644 src/test/protocol/Memo_test.cpp diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 710ebb00aab..4bad3a87b4a 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -909,6 +909,7 @@ if (tests) src/test/protocol/InnerObjectFormats_test.cpp src/test/protocol/Issue_test.cpp src/test/protocol/Hooks_test.cpp + src/test/protocol/Memo_test.cpp src/test/protocol/PublicKey_test.cpp src/test/protocol/Quality_test.cpp src/test/protocol/STAccount_test.cpp diff --git 
a/src/test/protocol/Memo_test.cpp b/src/test/protocol/Memo_test.cpp new file mode 100644 index 00000000000..b39482e42d0 --- /dev/null +++ b/src/test/protocol/Memo_test.cpp @@ -0,0 +1,123 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2022 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include + +namespace ripple { + +class Memo_test : public beast::unit_test::suite +{ +public: + void + testMemos() + { + testcase("Test memos"); + + using namespace test::jtx; + Account alice{"alice"}; + + Env env(*this); + env.fund(XRP(10000), alice); + env.close(); + + // Lambda that returns a valid JTx with a memo that we can hack up. + // This is the basis for building tests of invalid states. + auto makeJtxWithMemo = [&env, &alice]() { + JTx example = noop(alice); + memo const exampleMemo{"tic", "tac", "toe"}; + exampleMemo(env, example); + return example; + }; + + // A valid memo. + env(makeJtxWithMemo()); + env.close(); + + { + // Make sure that too big a memo is flagged as invalid. + JTx memoSize = makeJtxWithMemo(); + memoSize.jv[sfMemos.jsonName][0u][sfMemo.jsonName] + [sfMemoData.jsonName] = std::string(2020, '0'); + env(memoSize, ter(temINVALID)); + + // This memo is just barely small enough. + memoSize.jv[sfMemos.jsonName][0u][sfMemo.jsonName] + [sfMemoData.jsonName] = std::string(2018, '1'); + env(memoSize); + } + { + // Put a non-Memo in the Memos array. + JTx memoNonMemo = noop(alice); + auto& jv = memoNonMemo.jv; + auto& ma = jv[sfMemos.jsonName]; + auto& mi = ma[ma.size()]; + auto& m = mi[sfCreatedNode.jsonName]; // CreatedNode in Memos + m[sfMemoData.jsonName] = "3030303030"; + + env(memoNonMemo, ter(temINVALID)); + } + { + // Put an invalid field in a Memo object. + JTx memoExtra = makeJtxWithMemo(); + memoExtra + .jv[sfMemos.jsonName][0u][sfMemo.jsonName][sfFlags.jsonName] = + 13; + env(memoExtra, ter(temINVALID)); + } + { + // Put a character that is not allowed in a URL in a MemoType field. + JTx memoBadChar = makeJtxWithMemo(); + memoBadChar.jv[sfMemos.jsonName][0u][sfMemo.jsonName] + [sfMemoType.jsonName] = + strHex(std::string_view("ONE Date: Thu, 5 Jan 2023 18:27:53 -0600 Subject: [PATCH 078/508] Update build instructions (#4376) Document minimum compiler version; how to choose compiler; how to fix missing `std::result_of`; how to create Conan profile. 
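For reference, the profile setup documented here boils down to the following
sequence (an illustrative sketch; the compiler paths are placeholders and not
values prescribed by this patch):

```
conan profile new default --detect
conan profile update settings.compiler.cppstd=20 default
conan profile update 'conf.tools.build:compiler_executables={"c": "/usr/bin/gcc", "cpp": "/usr/bin/g++"}' default
```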
--- BUILD.md | 84 +++++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 77 insertions(+), 7 deletions(-) diff --git a/BUILD.md b/BUILD.md index c686c0a51b4..76ed7f4595b 100644 --- a/BUILD.md +++ b/BUILD.md @@ -23,9 +23,21 @@ git checkout develop ## Platforms -We do not recommend Windows for rippled production use at this time. Currently, -the Ubuntu platform has received the highest level of quality assurance, -testing, and support. Additionally, 32-bit Windows development is not supported. +rippled is written in the C++20 dialect and includes the `` header. +The [minimum compiler versions][2] that can compile this dialect are given +below: + +| Compiler | Minimum Version +|---|--- +| GCC | 10 +| Clang | 13 +| Apple Clang | 13.1.6 +| MSVC | 19.23 + +We do not recommend Windows for rippled production use at this time. +As of January 2023, the Ubuntu platform has received the highest level of +quality assurance, testing, and support. +Additionally, 32-bit Windows development is not supported. Visual Studio 2022 is not yet supported. This is because rippled is not compatible with [Boost][] versions 1.78 or 1.79, @@ -50,7 +62,7 @@ there is a crash course at the end of this document. You'll need to compile in the C++20 dialect: ``` -conan profile update settings.cppstd=20 default +conan profile update settings.compiler.cppstd=20 default ``` Linux developers will commonly have a default Conan [profile][] that compiles @@ -73,6 +85,25 @@ architecture: conan profile update settings.arch=x86_64 default ``` +If you have multiple compilers installed on your platform, +then you'll need to make sure that Conan and CMake select the one you want to +use. +This setting will set the correct variables (`CMAKE__COMPILER`) in the +generated CMake toolchain file: + +``` +conan profile update 'conf.tools.build:compiler_executables={"c": "", "cpp": ""}' default +``` + +It should choose the compiler for dependencies as well, +but not all of them have a Conan recipe that respects this setting (yet). +For the rest, you can set these environment variables: + +``` +conan profile update env.CC= default +conan profile update env.CXX= default +``` + ## How to build and test @@ -190,20 +221,52 @@ with their default values in parentheses. - `reporting` (OFF): Build the reporting mode feature. - `tests` (ON): Build tests. - `unity` (ON): Configure a [unity build][5]. +- `san` (): Enable a sanitizer with Clang. Choices are `thread` and `address`. ### Troubleshooting -If you get a linker error like the one below suggesting that you recompile +#### Conan + +If you find trouble building dependencies after changing Conan settings, +then you should retry after removing the Conan cache: + +``` +rm -rf ~/.conan/data +``` + + +#### no std::result_of + +If your compiler version is recent enough to have removed `std::result_of` as +part of C++20, e.g. 
Apple Clang 15.0, +then you might need to add a preprocessor definition to your bulid: + +``` +conan profile update 'env.CFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default +conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default +conan profile update 'tools.build:cflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default +conan profile update 'tools.build:cxxflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default +``` + + +#### recompile with -fPIC + +``` +/usr/bin/ld.gold: error: /home/username/.conan/data/boost/1.77.0/_/_/package/.../lib/libboost_container.a(alloc_lib.o): + requires unsupported dynamic reloc 11; recompile with -fPIC +``` + +If you get a linker error like the one above suggesting that you recompile Boost with position-independent code, the reason is most likely that Conan downloaded a bad binary distribution of the dependency. For now, this seems to be a [bug][1] in Conan just for Boost 1.77.0 compiled with GCC for Linux. The solution is to build the dependency locally by passing `--build boost` -when calling `conan install`. +when calling `conan install`: ``` -/usr/bin/ld.gold: error: /home/username/.conan/data/boost/1.77.0/_/_/package/dc8aedd23a0f0a773a5fcdcfe1ae3e89c4205978/lib/libboost_container.a(alloc_lib.o): requires unsupported dynamic reloc 11; recompile with -fPIC +conan install --build boost ... ``` @@ -336,9 +399,16 @@ For options, each package recipe defines its own defaults. You can pass every parameter to Conan on the command line, but it is more convenient to put them in a [profile][profile]. **All we must do to properly configure Conan is edit and pass the profile.** +By default, Conan will use the profile named "default". +You can let Conan create the default profile with this command: + +``` +conan profile new default --detect +``` [1]: https://github.com/conan-io/conan-center-index/issues/13168 +[2]: https://en.cppreference.com/w/cpp/compiler_support/20 [5]: https://en.wikipedia.org/wiki/Unity_build [build_type]: https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html [runtime]: https://cmake.org/cmake/help/latest/variable/CMAKE_MSVC_RUNTIME_LIBRARY.html From b0e0f319a1a125def7250149fad13fb85dc6dd20 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Thu, 5 Jan 2023 18:28:37 -0600 Subject: [PATCH 079/508] Make NodeToShardRPC a manual test (#4379) Right now it is flaky, which creates noise in our automated testing. 
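A manual suite is skipped by the automated `--unittest` run but can still be
executed on demand by naming it explicitly, for example (the exact suite
selector syntax is an assumption here, not part of this patch):

```
./rippled --unittest=NodeToShardRPC
```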
--- src/test/rpc/NodeToShardRPC_test.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/test/rpc/NodeToShardRPC_test.cpp b/src/test/rpc/NodeToShardRPC_test.cpp index 867f2cfe364..7736d776995 100644 --- a/src/test/rpc/NodeToShardRPC_test.cpp +++ b/src/test/rpc/NodeToShardRPC_test.cpp @@ -409,6 +409,6 @@ class NodeToShardRPC_test : public beast::unit_test::suite } }; -BEAST_DEFINE_TESTSUITE(NodeToShardRPC, rpc, ripple); +BEAST_DEFINE_TESTSUITE_MANUAL(NodeToShardRPC, rpc, ripple); } // namespace test } // namespace ripple From 0ce15e0e358996b5371d2adc83e911cc7a3a7225 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Wed, 11 Jan 2023 23:03:38 -0600 Subject: [PATCH 080/508] Update BUILD.md (#4383) Help readers of BUILD.md by moving the crash course to the top --- BUILD.md | 259 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 136 insertions(+), 123 deletions(-) diff --git a/BUILD.md b/BUILD.md index 76ed7f4595b..7bb2d4d8ebb 100644 --- a/BUILD.md +++ b/BUILD.md @@ -1,3 +1,122 @@ +## A crash course in CMake and Conan + +To better understand how to use Conan, +we should first understand _why_ we use Conan, +and to understand that, +we need to understand how we use CMake. + + +### CMake + +Technically, you don't need CMake to build this project. +You could manually compile every translation unit into an object file, +using the right compiler options, +and then manually link all those objects together, +using the right linker options. +However, that is very tedious and error-prone, +which is why we lean on tools like CMake. + +We have written CMake configuration files +([`CMakeLists.txt`](./CMakeLists.txt) and friends) +for this project so that CMake can be used to correctly compile and link +all of the translation units in it. +Or rather, CMake will generate files for a separate build system +(e.g. Make, Ninja, Visual Studio, Xcode, etc.) +that compile and link all of the translation units. +Even then, CMake has parameters, some of which are platform-specific. +In CMake's parlance, parameters are specially-named **variables** like +[`CMAKE_BUILD_TYPE`][build_type] or +[`CMAKE_MSVC_RUNTIME_LIBRARY`][runtime]. +Parameters include: + +- what build system to generate files for +- where to find the compiler and linker +- where to find dependencies, e.g. libraries and headers +- how to link dependencies, e.g. any special compiler or linker flags that + need to be used with them, including preprocessor definitions +- how to compile translation units, e.g. with optimizations, debug symbols, + position-independent code, etc. +- on Windows, which runtime library to link with + +For some of these parameters, like the build system and compiler, +CMake goes through a complicated search process to choose default values. +For others, like the dependencies, +_we_ had written in the CMake configuration files of this project +our own complicated process to choose defaults. +For most developers, things "just worked"... until they didn't, and then +you were left trying to debug one of these complicated processes, instead of +choosing and manually passing the parameter values yourself. + +You can pass every parameter to CMake on the command line, +but writing out these parameters every time we want to configure CMake is +a pain. +Most humans prefer to put them into a configuration file, once, that +CMake can read every time it is configured. +For CMake, that file is a [toolchain file][toolchain]. 
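+
+For example (an illustrative sketch; the generator output path shown here is
+an assumption and depends on how you invoke Conan), the toolchain file is
+passed when configuring CMake:
+
+```
+cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release ..
+```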
+ + +### Conan + +These next few paragraphs on Conan are going to read much like the ones above +for CMake. + +Technically, you don't need Conan to build this project. +You could manually download, configure, build, and install all of the +dependencies yourself, and then pass all of the parameters necessary for +CMake to link to those dependencies. +To guarantee ABI compatibility, you must be sure to use the same set of +compiler and linker options for all dependencies _and_ this project. +However, that is very tedious and error-prone, which is why we lean on tools +like Conan. + +We have written a Conan configuration file ([`conanfile.py`](./conanfile.py)) +so that Conan can be used to correctly download, configure, build, and install +all of the dependencies for this project, +using a single set of compiler and linker options for all of them. +It generates files that contain almost all of the parameters that CMake +expects. +Those files include: + +- A single toolchain file. +- For every dependency, a CMake [package configuration file][pcf], + [package version file][pvf], and for every build type, a package + targets file. + Together, these files implement version checking and define `IMPORTED` + targets for the dependencies. + +The toolchain file itself amends the search path +([`CMAKE_PREFIX_PATH`][prefix_path]) so that [`find_package()`][find_package] +will [discover][search] the generated package configuration files. + +**Nearly all we must do to properly configure CMake is pass the toolchain +file.** +What CMake parameters are left out? +You'll still need to pick a build system generator, +and if you choose a single-configuration generator, +you'll need to pass the `CMAKE_BUILD_TYPE`, +which should match the `build_type` setting you gave to Conan. + +Even then, Conan has parameters, some of which are platform-specific. +In Conan's parlance, parameters are either settings or options. +**Settings** are shared by all packages, e.g. the build type. +**Options** are specific to a given package, e.g. whether to build and link +OpenSSL as a shared library. + +For settings, Conan goes through a complicated search process to choose +defaults. +For options, each package recipe defines its own defaults. + +You can pass every parameter to Conan on the command line, +but it is more convenient to put them in a [profile][profile]. +**All we must do to properly configure Conan is edit and pass the profile.** +By default, Conan will use the profile named "default". +You can let Conan create the default profile with this command: + +``` +conan profile new default --detect +``` + + ## Branches For a stable release, choose the `master` branch or one of the [tagged @@ -52,9 +171,21 @@ Until then, we advise Windows developers to use Visual Studio 2019. ## Prerequisites To build this package, you will need Python (>= 3.7), -[Conan][] (>= 1.52), and [CMake][] (>= 3.16). -If you are unfamiliar with Conan, -there is a crash course at the end of this document. +[Conan][] (>= 1.55), and [CMake][] (>= 3.16). + +> **Warning** +> The commands in this document are not meant to be blindly copied and pasted. +> This document is written for multiple audiences, +> meaning that your particular circumstances may require some commands and not +> others. +> You should never run any commands without understanding what they do +> and why you are running them. +> +> These instructions assume a basic familiarity with Conan and CMake. 
+> If you are unfamiliar with Conan, +> then please read the [crash course](#a-crash-course-in-cmake-and-conan) +> at the beginning of this document, +> or the official [Getting Started][3] walkthrough. [Conan]: https://conan.io/downloads.html [CMake]: https://cmake.org/download/ @@ -137,7 +268,7 @@ cmake --build . --config Debug ./Debug/rippled --unittest ``` -Here we explain the individual steps: +Now to explain the individual steps in each example: 1. Export our [Conan recipe for RocksDB](./external/rocksdb). @@ -288,127 +419,9 @@ working: package. -## A crash course in CMake and Conan - -To better understand how to use Conan, -we should first understand _why_ we use Conan, -and to understand that, -we need to understand how we use CMake. - - -### CMake - -Technically, you don't need CMake to build this project. -You could manually compile every translation unit into an object file, -using the right compiler options, -and then manually link all those objects together, -using the right linker options. -However, that is very tedious and error-prone, -which is why we lean on tools like CMake. - -We have written CMake configuration files -([`CMakeLists.txt`](./CMakeLists.txt) and friends) -for this project so that CMake can be used to correctly compile and link -all of the translation units in it. -Or rather, CMake will generate files for a separate build system -(e.g. Make, Ninja, Visual Studio, Xcode, etc.) -that compile and link all of the translation units. -Even then, CMake has parameters, some of which are platform-specific. -In CMake's parlance, parameters are specially-named **variables** like -[`CMAKE_BUILD_TYPE`][build_type] or -[`CMAKE_MSVC_RUNTIME_LIBRARY`][runtime]. -Parameters include: - -- what build system to generate files for -- where to find the compiler and linker -- where to find dependencies, e.g. libraries and headers -- how to link dependencies, e.g. any special compiler or linker flags that - need to be used with them, including preprocessor definitions -- how to compile translation units, e.g. with optimizations, debug symbols, - position-independent code, etc. -- on Windows, which runtime library to link with - -For some of these parameters, like the build system and compiler, -CMake goes through a complicated search process to choose default values. -For others, like the dependencies, -_we_ had written in the CMake configuration files of this project -our own complicated process to choose defaults. -For most developers, things "just worked"... until they didn't, and then -you were left trying to debug one of these complicated processes, instead of -choosing and manually passing the parameter values yourself. - -You can pass every parameter to CMake on the command line, -but writing out these parameters every time we want to configure CMake is -a pain. -Most humans prefer to put them into a configuration file, once, that -CMake can read every time it is configured. -For CMake, that file is a [toolchain file][toolchain]. - - -### Conan - -These next few paragraphs on Conan are going to read much like the ones above -for CMake. - -Technically, you don't need Conan to build this project. -You could manually download, configure, build, and install all of the -dependencies yourself, and then pass all of the parameters necessary for -CMake to link to those dependencies. -To guarantee ABI compatibility, you must be sure to use the same set of -compiler and linker options for all dependencies _and_ this project. 
-However, that is very tedious and error-prone, which is why we lean on tools -like Conan. - -We have written a Conan configuration file ([`conanfile.py`](./conanfile.py)) -so that Conan can be used to correctly download, configure, build, and install -all of the dependencies for this project, -using a single set of compiler and linker options for all of them. -It generates files that contain almost all of the parameters that CMake -expects. -Those files include: - -- A single toolchain file. -- For every dependency, a CMake [package configuration file][pcf], - [package version file][pvf], and for every build type, a package - targets file. - Together, these files implement version checking and define `IMPORTED` - targets for the dependencies. - -The toolchain file itself amends the search path -([`CMAKE_PREFIX_PATH`][prefix_path]) so that [`find_package()`][find_package] -will [discover][search] the generated package configuration files. - -**Nearly all we must do to properly configure CMake is pass the toolchain -file.** -What CMake parameters are left out? -You'll still need to pick a build system generator, -and if you choose a single-configuration generator, -you'll need to pass the `CMAKE_BUILD_TYPE`, -which should match the `build_type` setting you gave to Conan. - -Even then, Conan has parameters, some of which are platform-specific. -In Conan's parlance, parameters are either settings or options. -**Settings** are shared by all packages, e.g. the build type. -**Options** are specific to a given package, e.g. whether to build and link -OpenSSL as a shared library. - -For settings, Conan goes through a complicated search process to choose -defaults. -For options, each package recipe defines its own defaults. - -You can pass every parameter to Conan on the command line, -but it is more convenient to put them in a [profile][profile]. -**All we must do to properly configure Conan is edit and pass the profile.** -By default, Conan will use the profile named "default". -You can let Conan create the default profile with this command: - -``` -conan profile new default --detect -``` - - [1]: https://github.com/conan-io/conan-center-index/issues/13168 [2]: https://en.cppreference.com/w/cpp/compiler_support/20 +[3]: https://docs.conan.io/en/latest/getting_started.html [5]: https://en.wikipedia.org/wiki/Unity_build [build_type]: https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html [runtime]: https://cmake.org/cmake/help/latest/variable/CMAKE_MSVC_RUNTIME_LIBRARY.html From e4b17d1cf2c43e33128dd70b6036fe10f38b0c0d Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Thu, 2 Feb 2023 16:20:35 -0800 Subject: [PATCH 081/508] `XRPFees`: Fee setting and handling improvements (#4247) * Introduces amendment `XRPFees` * Convert fee voting and protocol messages to use XRPAmounts * Includes Validations, Change transactions, the "Fees" ledger object, and subscription messages * Improve handling of 0 drop reference fee with TxQ. For use with networks that do not want to require fees * Note that fee escalation logic is still in place, which may cause the open ledger fee to rise if the network is busy. 
0 drop transactions will still queue, and fee escalation can be effectively disabled by modifying the configuration on all nodes * Change default network reserves to match Mainnet * Name the new SFields *Drops (not *XRP) * Reserve SField IDs for Hooks * Clarify comments explaining the ttFEE transaction field validation --- src/ripple/app/consensus/RCLConsensus.cpp | 3 +- src/ripple/app/ledger/Ledger.cpp | 71 ++++-- src/ripple/app/misc/FeeVote.h | 8 +- src/ripple/app/misc/FeeVoteImpl.cpp | 254 ++++++++++++++------- src/ripple/app/misc/LoadFeeTrack.h | 2 +- src/ripple/app/misc/NetworkOPs.cpp | 27 ++- src/ripple/app/misc/TxQ.h | 2 +- src/ripple/app/misc/impl/LoadFeeTrack.cpp | 60 +---- src/ripple/app/misc/impl/TxQ.cpp | 64 ++++-- src/ripple/app/tx/applySteps.h | 2 +- src/ripple/app/tx/impl/ApplyContext.cpp | 2 +- src/ripple/app/tx/impl/ApplyContext.h | 4 +- src/ripple/app/tx/impl/Change.cpp | 68 +++++- src/ripple/app/tx/impl/Change.h | 4 +- src/ripple/app/tx/impl/DeleteAccount.cpp | 15 +- src/ripple/app/tx/impl/DeleteAccount.h | 2 +- src/ripple/app/tx/impl/Escrow.cpp | 7 +- src/ripple/app/tx/impl/Escrow.h | 2 +- src/ripple/app/tx/impl/SetRegularKey.cpp | 4 +- src/ripple/app/tx/impl/SetRegularKey.h | 2 +- src/ripple/app/tx/impl/Transactor.cpp | 8 +- src/ripple/app/tx/impl/Transactor.h | 6 +- src/ripple/app/tx/impl/applySteps.cpp | 8 +- src/ripple/basics/FeeUnits.h | 5 - src/ripple/core/Config.h | 5 +- src/ripple/ledger/ReadView.h | 10 - src/ripple/protocol/Feature.h | 3 +- src/ripple/protocol/SField.h | 5 + src/ripple/protocol/SystemParameters.h | 8 + src/ripple/protocol/impl/Feature.cpp | 1 + src/ripple/protocol/impl/LedgerFormats.cpp | 13 +- src/ripple/protocol/impl/SField.cpp | 7 + src/ripple/protocol/impl/STValidation.cpp | 5 + src/ripple/protocol/impl/TxFormats.cpp | 13 +- src/ripple/protocol/jss.h | 3 + src/ripple/rpc/handlers/NoRippleCheck.cpp | 2 +- src/ripple/rpc/impl/TransactionSign.cpp | 9 +- src/test/app/AccountDelete_test.cpp | 5 +- src/test/app/FeeVote_test.cpp | 21 +- src/test/app/LoadFeeTrack_test.cpp | 25 +- src/test/app/PseudoTx_test.cpp | 34 ++- src/test/app/TxQ_test.cpp | 155 ++++++++++++- src/test/basics/FeeUnits_test.cpp | 40 ++-- src/test/ledger/Invariants_test.cpp | 2 +- src/test/rpc/AccountTx_test.cpp | 5 +- src/test/rpc/Subscribe_test.cpp | 31 ++- 46 files changed, 693 insertions(+), 339 deletions(-) diff --git a/src/ripple/app/consensus/RCLConsensus.cpp b/src/ripple/app/consensus/RCLConsensus.cpp index aec747e094c..12a150a4a54 100644 --- a/src/ripple/app/consensus/RCLConsensus.cpp +++ b/src/ripple/app/consensus/RCLConsensus.cpp @@ -839,7 +839,8 @@ RCLConsensus::Adaptor::validate( if (ledger.ledger_->isVotingLedger()) { // Fees: - feeVote_->doValidation(ledger.ledger_->fees(), v); + feeVote_->doValidation( + ledger.ledger_->fees(), ledger.ledger_->rules(), v); // Amendments // FIXME: pass `v` and have the function insert the array diff --git a/src/ripple/app/ledger/Ledger.cpp b/src/ripple/app/ledger/Ledger.cpp index 71311448505..7757dac53bf 100644 --- a/src/ripple/app/ledger/Ledger.cpp +++ b/src/ripple/app/ledger/Ledger.cpp @@ -591,29 +591,9 @@ Ledger::setup(Config const& config) { bool ret = true; - fees_.base = config.FEE_DEFAULT; - fees_.units = config.TRANSACTION_FEE_BASE; - fees_.reserve = config.FEE_ACCOUNT_RESERVE; - fees_.increment = config.FEE_OWNER_RESERVE; - try { - if (auto const sle = read(keylet::fees())) - { - // VFALCO NOTE Why getFieldIndex and not isFieldPresent? 
- - if (sle->getFieldIndex(sfBaseFee) != -1) - fees_.base = sle->getFieldU64(sfBaseFee); - - if (sle->getFieldIndex(sfReferenceFeeUnits) != -1) - fees_.units = sle->getFieldU32(sfReferenceFeeUnits); - - if (sle->getFieldIndex(sfReserveBase) != -1) - fees_.reserve = sle->getFieldU32(sfReserveBase); - - if (sle->getFieldIndex(sfReserveIncrement) != -1) - fees_.increment = sle->getFieldU32(sfReserveIncrement); - } + rules_ = makeRulesGivenLedger(*this, config.features); } catch (SHAMapMissingNode const&) { @@ -624,9 +604,56 @@ Ledger::setup(Config const& config) Rethrow(); } + fees_.base = config.FEE_DEFAULT; + fees_.reserve = config.FEE_ACCOUNT_RESERVE; + fees_.increment = config.FEE_OWNER_RESERVE; + try { - rules_ = makeRulesGivenLedger(*this, config.features); + if (auto const sle = read(keylet::fees())) + { + bool oldFees = false; + bool newFees = false; + { + auto const baseFee = sle->at(~sfBaseFee); + auto const reserveBase = sle->at(~sfReserveBase); + auto const reserveIncrement = sle->at(~sfReserveIncrement); + if (baseFee) + fees_.base = *baseFee; + if (reserveBase) + fees_.reserve = *reserveBase; + if (reserveIncrement) + fees_.increment = *reserveIncrement; + oldFees = baseFee || reserveBase || reserveIncrement; + } + { + auto const baseFeeXRP = sle->at(~sfBaseFeeDrops); + auto const reserveBaseXRP = sle->at(~sfReserveBaseDrops); + auto const reserveIncrementXRP = + sle->at(~sfReserveIncrementDrops); + auto assign = [&ret]( + XRPAmount& dest, + std::optional const& src) { + if (src) + { + if (src->native()) + dest = src->xrp(); + else + ret = false; + } + }; + assign(fees_.base, baseFeeXRP); + assign(fees_.reserve, reserveBaseXRP); + assign(fees_.increment, reserveIncrementXRP); + newFees = baseFeeXRP || reserveBaseXRP || reserveIncrementXRP; + } + if (oldFees && newFees) + // Should be all of one or the other, but not both + ret = false; + if (!rules_.enabled(featureXRPFees) && newFees) + // Can't populate the new fees before the amendment is enabled + ret = false; + } } catch (SHAMapMissingNode const&) { diff --git a/src/ripple/app/misc/FeeVote.h b/src/ripple/app/misc/FeeVote.h index d8948a150b3..4fff64f7de3 100644 --- a/src/ripple/app/misc/FeeVote.h +++ b/src/ripple/app/misc/FeeVote.h @@ -42,9 +42,6 @@ class FeeVote /** The cost of a reference transaction in drops. */ XRPAmount reference_fee{10}; - /** The cost of a reference transaction in fee units. */ - static constexpr FeeUnit32 reference_fee_units{10}; - /** The account reserve requirement in drops. */ XRPAmount account_reserve{10 * DROPS_PER_XRP}; @@ -60,7 +57,10 @@ class FeeVote @param baseValidation */ virtual void - doValidation(Fees const& lastFees, STValidation& val) = 0; + doValidation( + Fees const& lastFees, + Rules const& rules, + STValidation& val) = 0; /** Cast our local vote on the fee. 
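
The `Ledger::setup` hunk above encodes a precedence rule for the `FeeSettings` object: the legacy fields (`sfBaseFee`, `sfReferenceFeeUnits`, `sfReserveBase`, `sfReserveIncrement`) and the new drop-denominated fields (`sfBaseFeeDrops`, `sfReserveBaseDrops`, `sfReserveIncrementDrops`) are all optional, but a ledger may carry only one set, and the new set only once `featureXRPFees` is enabled. The following is a minimal standalone sketch of that rule, using plain `std::optional` integers in place of rippled's `SLE` fields and `XRPAmount` values; the struct and function names here are illustrative, not part of the patch.

```cpp
// Sketch only: plain integers stand in for the ledger's SLE fields and
// XRPAmount values; member names mirror the SFields used in the hunk above.
#include <cstdint>
#include <iostream>
#include <optional>

struct LegacyFeeFields
{
    std::optional<std::uint64_t> baseFee, reserveBase, reserveIncrement;
};

struct DropsFeeFields
{
    std::optional<std::uint64_t> baseFeeDrops, reserveBaseDrops, reserveIncrementDrops;
};

// Returns false for the same cases Ledger::setup treats as failures:
// mixing the two field sets, or using the new set before XRPFees is enabled.
bool
feeSettingsValid(LegacyFeeFields const& o, DropsFeeFields const& n, bool xrpFeesEnabled)
{
    bool const oldFees = o.baseFee || o.reserveBase || o.reserveIncrement;
    bool const newFees = n.baseFeeDrops || n.reserveBaseDrops || n.reserveIncrementDrops;

    if (oldFees && newFees)
        return false;  // should be all of one or the other, but not both
    if (newFees && !xrpFeesEnabled)
        return false;  // new fields cannot appear before the amendment
    return true;
}

int
main()
{
    // A pre-amendment ledger carrying only the legacy fields is accepted...
    std::cout << feeSettingsValid({10, 10'000'000, 2'000'000}, {}, false) << '\n';  // 1
    // ...while the drop-denominated fields are rejected until XRPFees is enabled.
    std::cout << feeSettingsValid({}, {10, 10'000'000, 2'000'000}, false) << '\n';  // 0
}
```

Keeping the two field sets mutually exclusive matches the rest of the patch: `Change::applyFee` later in this series removes the legacy fields when it writes the drop-denominated ones, so any given ledger exposes one consistent representation.
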
diff --git a/src/ripple/app/misc/FeeVoteImpl.cpp b/src/ripple/app/misc/FeeVoteImpl.cpp index faa8a259543..73d98fbd58a 100644 --- a/src/ripple/app/misc/FeeVoteImpl.cpp +++ b/src/ripple/app/misc/FeeVoteImpl.cpp @@ -33,51 +33,57 @@ class VotableValue { private: using value_type = XRPAmount; - value_type const mCurrent; // The current setting - value_type const mTarget; // The setting we want - std::map mVoteMap; + value_type const current_; // The current setting + value_type const target_; // The setting we want + std::map voteMap_; public: VotableValue(value_type current, value_type target) - : mCurrent(current), mTarget(target) + : current_(current), target_(target) { // Add our vote - ++mVoteMap[mTarget]; + ++voteMap_[target_]; } void addVote(value_type vote) { - ++mVoteMap[vote]; + ++voteMap_[vote]; } void noVote() { - addVote(mCurrent); + addVote(current_); } value_type + current() const + { + return current_; + } + + std::pair getVotes() const; }; auto -VotableValue::getVotes() const -> value_type +VotableValue::getVotes() const -> std::pair { - value_type ourVote = mCurrent; + value_type ourVote = current_; int weight = 0; - for (auto const& [key, val] : mVoteMap) + for (auto const& [key, val] : voteMap_) { // Take most voted value between current and target, inclusive - if ((key <= std::max(mTarget, mCurrent)) && - (key >= std::min(mTarget, mCurrent)) && (val > weight)) + if ((key <= std::max(target_, current_)) && + (key >= std::min(target_, current_)) && (val > weight)) { ourVote = key; weight = val; } } - return ourVote; + return {ourVote, ourVote != current_}; } } // namespace detail @@ -94,7 +100,8 @@ class FeeVoteImpl : public FeeVote FeeVoteImpl(Setup const& setup, beast::Journal journal); void - doValidation(Fees const& lastFees, STValidation& val) override; + doValidation(Fees const& lastFees, Rules const& rules, STValidation& val) + override; void doVoting( @@ -111,36 +118,78 @@ FeeVoteImpl::FeeVoteImpl(Setup const& setup, beast::Journal journal) } void -FeeVoteImpl::doValidation(Fees const& lastFees, STValidation& v) +FeeVoteImpl::doValidation( + Fees const& lastFees, + Rules const& rules, + STValidation& v) { // Values should always be in a valid range (because the voting process // will ignore out-of-range values) but if we detect such a case, we do // not send a value. 
- if (lastFees.base != target_.reference_fee) - { - JLOG(journal_.info()) - << "Voting for base fee of " << target_.reference_fee; - - if (auto const f = target_.reference_fee.dropsAs()) - v.setFieldU64(sfBaseFee, *f); - } - - if (lastFees.accountReserve(0) != target_.account_reserve) + if (rules.enabled(featureXRPFees)) { - JLOG(journal_.info()) - << "Voting for base reserve of " << target_.account_reserve; + auto vote = [&v, this]( + auto const current, + XRPAmount target, + const char* name, + auto const& sfield) { + if (current != target) + { + JLOG(journal_.info()) + << "Voting for " << name << " of " << target; - if (auto const f = target_.account_reserve.dropsAs()) - v.setFieldU32(sfReserveBase, *f); + v[sfield] = target; + } + }; + vote(lastFees.base, target_.reference_fee, "base fee", sfBaseFeeDrops); + vote( + lastFees.accountReserve(0), + target_.account_reserve, + "base reserve", + sfReserveBaseDrops); + vote( + lastFees.increment, + target_.owner_reserve, + "reserve increment", + sfReserveIncrementDrops); } - - if (lastFees.increment != target_.owner_reserve) + else { - JLOG(journal_.info()) - << "Voting for reserve increment of " << target_.owner_reserve; + auto to32 = [](XRPAmount target) { + return target.dropsAs(); + }; + auto to64 = [](XRPAmount target) { + return target.dropsAs(); + }; + auto vote = [&v, this]( + auto const current, + XRPAmount target, + auto const& convertCallback, + const char* name, + auto const& sfield) { + if (current != target) + { + JLOG(journal_.info()) + << "Voting for " << name << " of " << target; - if (auto const f = target_.owner_reserve.dropsAs()) - v.setFieldU32(sfReserveIncrement, *f); + if (auto const f = convertCallback(target)) + v[sfield] = *f; + } + }; + + vote(lastFees.base, target_.reference_fee, to64, "base fee", sfBaseFee); + vote( + lastFees.accountReserve(0), + target_.account_reserve, + to32, + "base reserve", + sfReserveBase); + vote( + lastFees.increment, + target_.owner_reserve, + to32, + "reserve increment", + sfReserveIncrement); } } @@ -151,7 +200,7 @@ FeeVoteImpl::doVoting( std::shared_ptr const& initialPosition) { // LCL must be flag ledger - assert(isFlagLedger(lastClosedLedger->seq())); + assert(lastClosedLedger && isFlagLedger(lastClosedLedger->seq())); detail::VotableValue baseFeeVote( lastClosedLedger->fees().base, target_.reference_fee); @@ -162,80 +211,111 @@ FeeVoteImpl::doVoting( detail::VotableValue incReserveVote( lastClosedLedger->fees().increment, target_.owner_reserve); - for (auto const& val : set) + auto const& rules = lastClosedLedger->rules(); + if (rules.enabled(featureXRPFees)) { - if (val->isTrusted()) + auto doVote = [](std::shared_ptr const& val, + detail::VotableValue& value, + SF_AMOUNT const& xrpField) { + if (auto const field = ~val->at(~xrpField); + field && field->native()) + { + auto const vote = field->xrp(); + if (isLegalAmountSigned(vote)) + value.addVote(vote); + else + value.noVote(); + } + else + { + value.noVote(); + } + }; + + for (auto const& val : set) { - if (val->isFieldPresent(sfBaseFee)) + if (!val->isTrusted()) + continue; + doVote(val, baseFeeVote, sfBaseFeeDrops); + doVote(val, baseReserveVote, sfReserveBaseDrops); + doVote(val, incReserveVote, sfReserveIncrementDrops); + } + } + else + { + auto doVote = [](std::shared_ptr const& val, + detail::VotableValue& value, + auto const& valueField) { + if (auto const field = val->at(~valueField)) { using xrptype = XRPAmount::value_type; - auto const vote = val->getFieldU64(sfBaseFee); + auto const vote = *field; if (vote <= 
std::numeric_limits::max() && - isLegalAmount(XRPAmount{unsafe_cast(vote)})) - baseFeeVote.addVote( + isLegalAmountSigned(XRPAmount{unsafe_cast(vote)})) + value.addVote( XRPAmount{unsafe_cast(vote)}); else // Invalid amounts will be treated as if they're // not provided. Don't throw because this value is // provided by an external entity. - baseFeeVote.noVote(); - } - else - { - baseFeeVote.noVote(); - } - - if (val->isFieldPresent(sfReserveBase)) - { - baseReserveVote.addVote( - XRPAmount{val->getFieldU32(sfReserveBase)}); + value.noVote(); } else { - baseReserveVote.noVote(); + value.noVote(); } + }; - if (val->isFieldPresent(sfReserveIncrement)) - { - incReserveVote.addVote( - XRPAmount{val->getFieldU32(sfReserveIncrement)}); - } - else - { - incReserveVote.noVote(); - } + for (auto const& val : set) + { + if (!val->isTrusted()) + continue; + doVote(val, baseFeeVote, sfBaseFee); + doVote(val, baseReserveVote, sfReserveBase); + doVote(val, incReserveVote, sfReserveIncrement); } } // choose our positions - // If any of the values are invalid, send the current values. - auto const baseFee = baseFeeVote.getVotes().dropsAs( - lastClosedLedger->fees().base); - auto const baseReserve = baseReserveVote.getVotes().dropsAs( - lastClosedLedger->fees().accountReserve(0)); - auto const incReserve = incReserveVote.getVotes().dropsAs( - lastClosedLedger->fees().increment); - constexpr FeeUnit32 feeUnits = Setup::reference_fee_units; + // TODO: Use structured binding once LLVM issue + // https://github.com/llvm/llvm-project/issues/48582 + // is fixed. + auto const baseFee = baseFeeVote.getVotes(); + auto const baseReserve = baseReserveVote.getVotes(); + auto const incReserve = incReserveVote.getVotes(); + auto const seq = lastClosedLedger->info().seq + 1; // add transactions to our position - if ((baseFee != lastClosedLedger->fees().base) || - (baseReserve != lastClosedLedger->fees().accountReserve(0)) || - (incReserve != lastClosedLedger->fees().increment)) + if (baseFee.second || baseReserve.second || incReserve.second) { - JLOG(journal_.warn()) << "We are voting for a fee change: " << baseFee - << "/" << baseReserve << "/" << incReserve; - - STTx feeTx( - ttFEE, - [seq, baseFee, baseReserve, incReserve, feeUnits](auto& obj) { - obj[sfAccount] = AccountID(); - obj[sfLedgerSequence] = seq; - obj[sfBaseFee] = baseFee; - obj[sfReserveBase] = baseReserve; - obj[sfReserveIncrement] = incReserve; - obj[sfReferenceFeeUnits] = feeUnits.fee(); - }); + JLOG(journal_.warn()) + << "We are voting for a fee change: " << baseFee.first << "/" + << baseReserve.first << "/" << incReserve.first; + + STTx feeTx(ttFEE, [=, &rules](auto& obj) { + obj[sfAccount] = AccountID(); + obj[sfLedgerSequence] = seq; + if (rules.enabled(featureXRPFees)) + { + obj[sfBaseFeeDrops] = baseFee.first; + obj[sfReserveBaseDrops] = baseReserve.first; + obj[sfReserveIncrementDrops] = incReserve.first; + } + else + { + // Without the featureXRPFees amendment, these fields are + // required. 
+ obj[sfBaseFee] = + baseFee.first.dropsAs(baseFeeVote.current()); + obj[sfReserveBase] = baseReserve.first.dropsAs( + baseReserveVote.current()); + obj[sfReserveIncrement] = + incReserve.first.dropsAs( + incReserveVote.current()); + obj[sfReferenceFeeUnits] = Config::FEE_UNITS_DEPRECATED; + } + }); uint256 txID = feeTx.getTransactionID(); diff --git a/src/ripple/app/misc/LoadFeeTrack.h b/src/ripple/app/misc/LoadFeeTrack.h index 0109468cb99..d670c0b7e11 100644 --- a/src/ripple/app/misc/LoadFeeTrack.h +++ b/src/ripple/app/misc/LoadFeeTrack.h @@ -161,7 +161,7 @@ class LoadFeeTrack final // Scale using load as well as base rate XRPAmount scaleFeeLoad( - FeeUnit64 fee, + XRPAmount fee, LoadFeeTrack const& feeTrack, Fees const& fees, bool bUnlimited); diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index 8dff1af7b2b..610b8e71adc 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -2171,15 +2171,30 @@ NetworkOPsImp::pubValidation(std::shared_ptr const& val) if (auto const loadFee = (*val)[~sfLoadFee]) jvObj[jss::load_fee] = *loadFee; - if (auto const baseFee = (*val)[~sfBaseFee]) + if (auto const baseFee = val->at(~sfBaseFee)) jvObj[jss::base_fee] = static_cast(*baseFee); - if (auto const reserveBase = (*val)[~sfReserveBase]) + if (auto const reserveBase = val->at(~sfReserveBase)) jvObj[jss::reserve_base] = *reserveBase; - if (auto const reserveInc = (*val)[~sfReserveIncrement]) + if (auto const reserveInc = val->at(~sfReserveIncrement)) jvObj[jss::reserve_inc] = *reserveInc; + // (The ~ operator converts the Proxy to a std::optional, which + // simplifies later operations) + if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops); + baseFeeXRP && baseFeeXRP->native()) + jvObj[jss::base_fee_drops] = baseFeeXRP->xrp().jsonClipped(); + + if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops); + reserveBaseXRP && reserveBaseXRP->native()) + jvObj[jss::reserve_base_drops] = + reserveBaseXRP->xrp().jsonClipped(); + + if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops); + reserveIncXRP && reserveIncXRP->native()) + jvObj[jss::reserve_inc_drops] = reserveIncXRP->xrp().jsonClipped(); + for (auto i = mStreamMaps[sValidations].begin(); i != mStreamMaps[sValidations].end();) { @@ -2883,7 +2898,8 @@ NetworkOPsImp::pubLedger(std::shared_ptr const& lpAccepted) jvObj[jss::ledger_time] = Json::Value::UInt( lpAccepted->info().closeTime.time_since_epoch().count()); - jvObj[jss::fee_ref] = lpAccepted->fees().units.jsonClipped(); + if (!lpAccepted->rules().enabled(featureXRPFees)) + jvObj[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED; jvObj[jss::fee_base] = lpAccepted->fees().base.jsonClipped(); jvObj[jss::reserve_base] = lpAccepted->fees().accountReserve(0).jsonClipped(); @@ -3889,7 +3905,8 @@ NetworkOPsImp::subLedger(InfoSub::ref isrListener, Json::Value& jvResult) jvResult[jss::ledger_hash] = to_string(lpClosed->info().hash); jvResult[jss::ledger_time] = Json::Value::UInt( lpClosed->info().closeTime.time_since_epoch().count()); - jvResult[jss::fee_ref] = lpClosed->fees().units.jsonClipped(); + if (!lpClosed->rules().enabled(featureXRPFees)) + jvResult[jss::fee_ref] = Config::FEE_UNITS_DEPRECATED; jvResult[jss::fee_base] = lpClosed->fees().base.jsonClipped(); jvResult[jss::reserve_base] = lpClosed->fees().accountReserve(0).jsonClipped(); diff --git a/src/ripple/app/misc/TxQ.h b/src/ripple/app/misc/TxQ.h index 7e004ec7267..69b6d264825 100644 --- a/src/ripple/app/misc/TxQ.h +++ b/src/ripple/app/misc/TxQ.h @@ -860,7 +860,7 
@@ setup_TxQ(Config const&); template XRPAmount -toDrops(FeeLevel const& level, XRPAmount const& baseFee) +toDrops(FeeLevel const& level, XRPAmount baseFee) { if (auto const drops = mulDiv(level, baseFee, TxQ::baseLevel); drops.first) return drops.second; diff --git a/src/ripple/app/misc/impl/LoadFeeTrack.cpp b/src/ripple/app/misc/impl/LoadFeeTrack.cpp index 01445d4e580..11679c9a66e 100644 --- a/src/ripple/app/misc/impl/LoadFeeTrack.cpp +++ b/src/ripple/app/misc/impl/LoadFeeTrack.cpp @@ -87,30 +87,13 @@ LoadFeeTrack::lowerLocalFee() // Scale using load as well as base rate XRPAmount scaleFeeLoad( - FeeUnit64 fee, + XRPAmount fee, LoadFeeTrack const& feeTrack, Fees const& fees, bool bUnlimited) { if (fee == 0) - return XRPAmount{0}; - - // Normally, types with different units wouldn't be mathematically - // compatible. This function is an exception. - auto lowestTerms = [](auto& a, auto& b) { - auto value = [](auto val) { - if constexpr (std::is_arithmetic_v) - return val; - else - return val.value(); - }; - - if (auto const g = std::gcd(value(a), value(b))) - { - a = value(a) / g; - b = value(b) / g; - } - }; + return fee; // Collect the fee rates auto [feeFactor, uRemFee] = feeTrack.getScalingFactors(); @@ -120,45 +103,12 @@ scaleFeeLoad( if (bUnlimited && (feeFactor > uRemFee) && (feeFactor < (4 * uRemFee))) feeFactor = uRemFee; - XRPAmount baseFee{fees.base}; // Compute: - // fee = fee * baseFee * feeFactor / (fees.units * lftNormalFee); + // fee = fee * feeFactor / (lftNormalFee); // without overflow, and as accurately as possible - // The denominator of the fraction we're trying to compute. - // fees.units and lftNormalFee are both 32 bit, - // so the multiplication can't overflow. - auto den = FeeUnit64{fees.units} * - safe_cast(feeTrack.getLoadBase()); - // Reduce fee * baseFee * feeFactor / (fees.units * lftNormalFee) - // to lowest terms. - lowestTerms(fee, den); - lowestTerms(baseFee, den); - lowestTerms(feeFactor, den); - - // fee and baseFee are 64 bit, feeFactor is 32 bit - // Order fee and baseFee largest first - // Normally, these types wouldn't be comparable or swappable. - // This function is an exception. - if (fee.value() < baseFee.value()) - { - auto tmp = fee.value(); - fee = baseFee.value(); - baseFee = tmp; - } - // double check - assert(fee.value() >= baseFee.value()); - - // If baseFee * feeFactor overflows, the final result will overflow - XRPAmount const baseFeeOverflow{ - std::numeric_limits::max() / feeFactor}; - if (baseFee > baseFeeOverflow) - { - Throw("scaleFeeLoad"); - } - baseFee *= feeFactor; - - auto const result = mulDiv(fee, baseFee, den); + auto const result = mulDiv( + fee, feeFactor, safe_cast(feeTrack.getLoadBase())); if (!result.first) Throw("scaleFeeLoad"); return result.second; diff --git a/src/ripple/app/misc/impl/TxQ.cpp b/src/ripple/app/misc/impl/TxQ.cpp index 59559cf24c6..8424b1d29af 100644 --- a/src/ripple/app/misc/impl/TxQ.cpp +++ b/src/ripple/app/misc/impl/TxQ.cpp @@ -38,14 +38,18 @@ static FeeLevel64 getFeeLevelPaid(ReadView const& view, STTx const& tx) { auto const [baseFee, effectiveFeePaid] = [&view, &tx]() { - XRPAmount baseFee = view.fees().toDrops(calculateBaseFee(view, tx)); + XRPAmount baseFee = calculateBaseFee(view, tx); XRPAmount feePaid = tx[sfFee].xrp(); - // If baseFee is 0 then the cost of a basic transaction is free. - XRPAmount const ref = baseFee.signum() > 0 - ? 
XRPAmount{0} - : calculateDefaultBaseFee(view, tx); - return std::pair{baseFee + ref, feePaid + ref}; + // If baseFee is 0 then the cost of a basic transaction is free, but we + // need the effective fee level to be non-zero. + XRPAmount const mod = [&view, &tx, baseFee]() { + if (baseFee.signum() > 0) + return XRPAmount{0}; + auto def = calculateDefaultBaseFee(view, tx); + return def.signum() == 0 ? XRPAmount{1} : def; + }(); + return std::pair{baseFee + mod, feePaid + mod}; }(); assert(baseFee.signum() > 0); @@ -1072,19 +1076,27 @@ TxQ::apply( LastLedgerSeq and MaybeTx::retriesRemaining. */ auto const balance = (*sleAccount)[sfBalance].xrp(); - /* Get the minimum possible reserve. If fees exceed + /* Get the minimum possible account reserve. If it + is at least 10 * the base fee, and fees exceed this amount, the transaction can't be queued. - Considering that typical fees are several orders + + Currently typical fees are several orders of magnitude smaller than any current or expected - future reserve, this calculation is simpler than + future reserve. This calculation is simpler than trying to figure out the potential changes to the ownerCount that may occur to the account - as a result of these transactions, and removes + as a result of these transactions, and removes any need to account for other transactions that may affect the owner count while these are queued. + + However, in case the account reserve is on a + comparable scale to the base fee, ignore the + reserve. Only check the account balance. */ auto const reserve = view.fees().accountReserve(0); - if (totalFee >= balance || totalFee >= reserve) + auto const base = view.fees().base; + if (totalFee >= balance || + (reserve > 10 * base && totalFee >= reserve)) { // Drop the current transaction JLOG(j_.trace()) << "Ignoring transaction " << transactionID @@ -1104,7 +1116,10 @@ TxQ::apply( // inserted in the middle from fouling up later transactions. auto const potentialTotalSpend = totalFee + std::min(balance - std::min(balance, reserve), potentialSpend); - assert(potentialTotalSpend > XRPAmount{0}); + assert( + potentialTotalSpend > XRPAmount{0} || + (potentialTotalSpend == XRPAmount{0} && + multiTxn->applyView.fees().base == 0)); sleBump->setFieldAmount(sfBalance, balance - potentialTotalSpend); // The transaction's sequence/ticket will be valid when the other // transactions in the queue have been processed. If the tx has a @@ -1758,7 +1773,7 @@ TxQ::getTxRequiredFeeAndSeq( std::lock_guard lock(mutex_); auto const snapshot = feeMetrics_.getSnapshot(); - auto const baseFee = view.fees().toDrops(calculateBaseFee(view, *tx)); + auto const baseFee = calculateBaseFee(view, *tx); auto const fee = FeeMetrics::scaleFeeLevel(snapshot, view); auto const sle = view.read(keylet::account(account)); @@ -1834,15 +1849,26 @@ TxQ::doRPC(Application& app) const levels[jss::open_ledger_level] = to_string(metrics.openLedgerFeeLevel); auto const baseFee = view->fees().base; + // If the base fee is 0 drops, but escalation has kicked in, treat the + // base fee as if it is 1 drop, which makes the rest of the math + // work. 
+ auto const effectiveBaseFee = [&baseFee, &metrics]() { + if (!baseFee && metrics.openLedgerFeeLevel != metrics.referenceFeeLevel) + return XRPAmount{1}; + return baseFee; + }(); auto& drops = ret[jss::drops] = Json::Value(); - drops[jss::base_fee] = - to_string(toDrops(metrics.referenceFeeLevel, baseFee)); - drops[jss::minimum_fee] = - to_string(toDrops(metrics.minProcessingFeeLevel, baseFee)); + drops[jss::base_fee] = to_string(baseFee); drops[jss::median_fee] = to_string(toDrops(metrics.medFeeLevel, baseFee)); - drops[jss::open_ledger_fee] = to_string( - toDrops(metrics.openLedgerFeeLevel - FeeLevel64{1}, baseFee) + 1); + drops[jss::minimum_fee] = to_string(toDrops( + metrics.minProcessingFeeLevel, + metrics.txCount >= metrics.txQMaxSize ? effectiveBaseFee : baseFee)); + auto openFee = toDrops(metrics.openLedgerFeeLevel, effectiveBaseFee); + if (effectiveBaseFee && + toFeeLevel(openFee, effectiveBaseFee) < metrics.openLedgerFeeLevel) + openFee += 1; + drops[jss::open_ledger_fee] = to_string(openFee); return ret; } diff --git a/src/ripple/app/tx/applySteps.h b/src/ripple/app/tx/applySteps.h index 9993ba0c525..ede7bd8cc09 100644 --- a/src/ripple/app/tx/applySteps.h +++ b/src/ripple/app/tx/applySteps.h @@ -300,7 +300,7 @@ preclaim( @return The base fee. */ -FeeUnit64 +XRPAmount calculateBaseFee(ReadView const& view, STTx const& tx); /** Return the minimum fee that an "ordinary" transaction would pay. diff --git a/src/ripple/app/tx/impl/ApplyContext.cpp b/src/ripple/app/tx/impl/ApplyContext.cpp index eb68fe5d39c..27287241637 100644 --- a/src/ripple/app/tx/impl/ApplyContext.cpp +++ b/src/ripple/app/tx/impl/ApplyContext.cpp @@ -33,7 +33,7 @@ ApplyContext::ApplyContext( OpenView& base, STTx const& tx_, TER preclaimResult_, - FeeUnit64 baseFee_, + XRPAmount baseFee_, ApplyFlags flags, beast::Journal journal_) : app(app_) diff --git a/src/ripple/app/tx/impl/ApplyContext.h b/src/ripple/app/tx/impl/ApplyContext.h index 1a47826610e..415054ef4bb 100644 --- a/src/ripple/app/tx/impl/ApplyContext.h +++ b/src/ripple/app/tx/impl/ApplyContext.h @@ -40,14 +40,14 @@ class ApplyContext OpenView& base, STTx const& tx, TER preclaimResult, - FeeUnit64 baseFee, + XRPAmount baseFee, ApplyFlags flags, beast::Journal = beast::Journal{beast::Journal::getNullSink()}); Application& app; STTx const& tx; TER const preclaimResult; - FeeUnit64 const baseFee; + XRPAmount const baseFee; beast::Journal const journal; ApplyView& diff --git a/src/ripple/app/tx/impl/Change.cpp b/src/ripple/app/tx/impl/Change.cpp index 93ed1a04f92..b36ae88a75e 100644 --- a/src/ripple/app/tx/impl/Change.cpp +++ b/src/ripple/app/tx/impl/Change.cpp @@ -90,8 +90,46 @@ Change::preclaim(PreclaimContext const& ctx) switch (ctx.tx.getTxnType()) { - case ttAMENDMENT: case ttFEE: + if (ctx.view.rules().enabled(featureXRPFees)) + { + // The ttFEE transaction format defines these fields as + // optional, but once the XRPFees feature is enabled, they are + // required. + if (!ctx.tx.isFieldPresent(sfBaseFeeDrops) || + !ctx.tx.isFieldPresent(sfReserveBaseDrops) || + !ctx.tx.isFieldPresent(sfReserveIncrementDrops)) + return temMALFORMED; + // The ttFEE transaction format defines these fields as + // optional, but once the XRPFees feature is enabled, they are + // forbidden. 
+ if (ctx.tx.isFieldPresent(sfBaseFee) || + ctx.tx.isFieldPresent(sfReferenceFeeUnits) || + ctx.tx.isFieldPresent(sfReserveBase) || + ctx.tx.isFieldPresent(sfReserveIncrement)) + return temMALFORMED; + } + else + { + // The ttFEE transaction format formerly defined these fields + // as required. When the XRPFees feature was implemented, they + // were changed to be optional. Until the feature has been + // enabled, they are required. + if (!ctx.tx.isFieldPresent(sfBaseFee) || + !ctx.tx.isFieldPresent(sfReferenceFeeUnits) || + !ctx.tx.isFieldPresent(sfReserveBase) || + !ctx.tx.isFieldPresent(sfReserveIncrement)) + return temMALFORMED; + // The ttFEE transaction format defines these fields as + // optional, but without the XRPFees feature, they are + // forbidden. + if (ctx.tx.isFieldPresent(sfBaseFeeDrops) || + ctx.tx.isFieldPresent(sfReserveBaseDrops) || + ctx.tx.isFieldPresent(sfReserveIncrementDrops)) + return temDISABLED; + } + return tesSUCCESS; + case ttAMENDMENT: case ttUNL_MODIFY: return tesSUCCESS; default: @@ -315,13 +353,27 @@ Change::applyFee() feeObject = std::make_shared(k); view().insert(feeObject); } - - feeObject->setFieldU64(sfBaseFee, ctx_.tx.getFieldU64(sfBaseFee)); - feeObject->setFieldU32( - sfReferenceFeeUnits, ctx_.tx.getFieldU32(sfReferenceFeeUnits)); - feeObject->setFieldU32(sfReserveBase, ctx_.tx.getFieldU32(sfReserveBase)); - feeObject->setFieldU32( - sfReserveIncrement, ctx_.tx.getFieldU32(sfReserveIncrement)); + auto set = [](SLE::pointer& feeObject, STTx const& tx, auto const& field) { + feeObject->at(field) = tx[field]; + }; + if (view().rules().enabled(featureXRPFees)) + { + set(feeObject, ctx_.tx, sfBaseFeeDrops); + set(feeObject, ctx_.tx, sfReserveBaseDrops); + set(feeObject, ctx_.tx, sfReserveIncrementDrops); + // Ensure the old fields are removed + feeObject->makeFieldAbsent(sfBaseFee); + feeObject->makeFieldAbsent(sfReferenceFeeUnits); + feeObject->makeFieldAbsent(sfReserveBase); + feeObject->makeFieldAbsent(sfReserveIncrement); + } + else + { + set(feeObject, ctx_.tx, sfBaseFee); + set(feeObject, ctx_.tx, sfReferenceFeeUnits); + set(feeObject, ctx_.tx, sfReserveBase); + set(feeObject, ctx_.tx, sfReserveIncrement); + } view().update(feeObject); diff --git a/src/ripple/app/tx/impl/Change.h b/src/ripple/app/tx/impl/Change.h index 0ee7067b323..f366a5754ce 100644 --- a/src/ripple/app/tx/impl/Change.h +++ b/src/ripple/app/tx/impl/Change.h @@ -46,10 +46,10 @@ class Change : public Transactor void preCompute() override; - static FeeUnit64 + static XRPAmount calculateBaseFee(ReadView const& view, STTx const& tx) { - return FeeUnit64{0}; + return XRPAmount{0}; } static TER diff --git a/src/ripple/app/tx/impl/DeleteAccount.cpp b/src/ripple/app/tx/impl/DeleteAccount.cpp index da2244bca5e..3d9d83c0d35 100644 --- a/src/ripple/app/tx/impl/DeleteAccount.cpp +++ b/src/ripple/app/tx/impl/DeleteAccount.cpp @@ -52,20 +52,11 @@ DeleteAccount::preflight(PreflightContext const& ctx) return preflight2(ctx); } -FeeUnit64 +XRPAmount DeleteAccount::calculateBaseFee(ReadView const& view, STTx const& tx) { - // The fee required for AccountDelete is one owner reserve. But the - // owner reserve is stored in drops. We need to convert it to fee units. - Fees const& fees{view.fees()}; - std::pair const mulDivResult{ - mulDiv(fees.increment, safe_cast(fees.units), fees.base)}; - if (mulDivResult.first) - return mulDivResult.second; - - // If mulDiv returns false then overflow happened. Punt by using the - // standard calculation. 
- return Transactor::calculateBaseFee(view, tx); + // The fee required for AccountDelete is one owner reserve. + return view.fees().increment; } namespace { diff --git a/src/ripple/app/tx/impl/DeleteAccount.h b/src/ripple/app/tx/impl/DeleteAccount.h index b0dbaa5bc7e..0f298bb8596 100644 --- a/src/ripple/app/tx/impl/DeleteAccount.h +++ b/src/ripple/app/tx/impl/DeleteAccount.h @@ -38,7 +38,7 @@ class DeleteAccount : public Transactor static NotTEC preflight(PreflightContext const& ctx); - static FeeUnit64 + static XRPAmount calculateBaseFee(ReadView const& view, STTx const& tx); static TER diff --git a/src/ripple/app/tx/impl/Escrow.cpp b/src/ripple/app/tx/impl/Escrow.cpp index 7486dfaca4b..f8860ae4b56 100644 --- a/src/ripple/app/tx/impl/Escrow.cpp +++ b/src/ripple/app/tx/impl/Escrow.cpp @@ -338,15 +338,14 @@ EscrowFinish::preflight(PreflightContext const& ctx) return tesSUCCESS; } -FeeUnit64 +XRPAmount EscrowFinish::calculateBaseFee(ReadView const& view, STTx const& tx) { - FeeUnit64 extraFee{0}; + XRPAmount extraFee{0}; if (auto const fb = tx[~sfFulfillment]) { - extraFee += - safe_cast(view.fees().units) * (32 + (fb->size() / 16)); + extraFee += view.fees().base * (32 + (fb->size() / 16)); } return Transactor::calculateBaseFee(view, tx) + extraFee; diff --git a/src/ripple/app/tx/impl/Escrow.h b/src/ripple/app/tx/impl/Escrow.h index 20e57c85f67..c0f6333604b 100644 --- a/src/ripple/app/tx/impl/Escrow.h +++ b/src/ripple/app/tx/impl/Escrow.h @@ -57,7 +57,7 @@ class EscrowFinish : public Transactor static NotTEC preflight(PreflightContext const& ctx); - static FeeUnit64 + static XRPAmount calculateBaseFee(ReadView const& view, STTx const& tx); TER diff --git a/src/ripple/app/tx/impl/SetRegularKey.cpp b/src/ripple/app/tx/impl/SetRegularKey.cpp index 1b5a3eedea0..34a8b7d238c 100644 --- a/src/ripple/app/tx/impl/SetRegularKey.cpp +++ b/src/ripple/app/tx/impl/SetRegularKey.cpp @@ -24,7 +24,7 @@ namespace ripple { -FeeUnit64 +XRPAmount SetRegularKey::calculateBaseFee(ReadView const& view, STTx const& tx) { auto const id = tx.getAccountID(sfAccount); @@ -39,7 +39,7 @@ SetRegularKey::calculateBaseFee(ReadView const& view, STTx const& tx) if (sle && (!(sle->getFlags() & lsfPasswordSpent))) { // flag is armed and they signed with the right account - return FeeUnit64{0}; + return XRPAmount{0}; } } } diff --git a/src/ripple/app/tx/impl/SetRegularKey.h b/src/ripple/app/tx/impl/SetRegularKey.h index 53d832c17cc..402ee436ed5 100644 --- a/src/ripple/app/tx/impl/SetRegularKey.h +++ b/src/ripple/app/tx/impl/SetRegularKey.h @@ -39,7 +39,7 @@ class SetRegularKey : public Transactor static NotTEC preflight(PreflightContext const& ctx); - static FeeUnit64 + static XRPAmount calculateBaseFee(ReadView const& view, STTx const& tx); TER diff --git a/src/ripple/app/tx/impl/Transactor.cpp b/src/ripple/app/tx/impl/Transactor.cpp index 9265d365647..4c1a7e726cd 100644 --- a/src/ripple/app/tx/impl/Transactor.cpp +++ b/src/ripple/app/tx/impl/Transactor.cpp @@ -137,7 +137,7 @@ Transactor::Transactor(ApplyContext& ctx) { } -FeeUnit64 +XRPAmount Transactor::calculateBaseFee(ReadView const& view, STTx const& tx) { // Returns the fee in fee units. @@ -145,7 +145,7 @@ Transactor::calculateBaseFee(ReadView const& view, STTx const& tx) // The computation has two parts: // * The base fee, which is the same for most transactions. // * The additional cost of each multisignature on the transaction. 
- FeeUnit64 const baseFee = safe_cast(view.fees().units); + XRPAmount const baseFee = view.fees().base; // Each signer adds one more baseFee to the minimum required fee // for the transaction. @@ -158,7 +158,7 @@ Transactor::calculateBaseFee(ReadView const& view, STTx const& tx) XRPAmount Transactor::minimumFee( Application& app, - FeeUnit64 baseFee, + XRPAmount baseFee, Fees const& fees, ApplyFlags flags) { @@ -166,7 +166,7 @@ Transactor::minimumFee( } TER -Transactor::checkFee(PreclaimContext const& ctx, FeeUnit64 baseFee) +Transactor::checkFee(PreclaimContext const& ctx, XRPAmount baseFee) { if (!ctx.tx[sfFee].native()) return temBAD_FEE; diff --git a/src/ripple/app/tx/impl/Transactor.h b/src/ripple/app/tx/impl/Transactor.h index c54f5a1a365..cc280e6141d 100644 --- a/src/ripple/app/tx/impl/Transactor.h +++ b/src/ripple/app/tx/impl/Transactor.h @@ -132,13 +132,13 @@ class Transactor checkPriorTxAndLastLedger(PreclaimContext const& ctx); static TER - checkFee(PreclaimContext const& ctx, FeeUnit64 baseFee); + checkFee(PreclaimContext const& ctx, XRPAmount baseFee); static NotTEC checkSign(PreclaimContext const& ctx); // Returns the fee in fee units, not scaled for load. - static FeeUnit64 + static XRPAmount calculateBaseFee(ReadView const& view, STTx const& tx); static TER @@ -182,7 +182,7 @@ class Transactor static XRPAmount minimumFee( Application& app, - FeeUnit64 baseFee, + XRPAmount baseFee, Fees const& fees, ApplyFlags flags); diff --git a/src/ripple/app/tx/impl/applySteps.cpp b/src/ripple/app/tx/impl/applySteps.cpp index 581a700cf75..85959862dba 100644 --- a/src/ripple/app/tx/impl/applySteps.cpp +++ b/src/ripple/app/tx/impl/applySteps.cpp @@ -254,7 +254,7 @@ invoke_preclaim(PreclaimContext const& ctx) } } -static FeeUnit64 +static XRPAmount invoke_calculateBaseFee(ReadView const& view, STTx const& tx) { switch (tx.getTxnType()) @@ -313,7 +313,7 @@ invoke_calculateBaseFee(ReadView const& view, STTx const& tx) return NFTokenAcceptOffer::calculateBaseFee(view, tx); default: assert(false); - return FeeUnit64{0}; + return XRPAmount{0}; } } @@ -535,7 +535,7 @@ preclaim( } } -FeeUnit64 +XRPAmount calculateBaseFee(ReadView const& view, STTx const& tx) { return invoke_calculateBaseFee(view, tx); @@ -544,7 +544,7 @@ calculateBaseFee(ReadView const& view, STTx const& tx) XRPAmount calculateDefaultBaseFee(ReadView const& view, STTx const& tx) { - return view.fees().toDrops(Transactor::calculateBaseFee(view, tx)); + return Transactor::calculateBaseFee(view, tx); } std::pair diff --git a/src/ripple/basics/FeeUnits.h b/src/ripple/basics/FeeUnits.h index 90116eed2a1..c74524c7c71 100644 --- a/src/ripple/basics/FeeUnits.h +++ b/src/ripple/basics/FeeUnits.h @@ -454,11 +454,6 @@ mulDivU(Source1 value, Dest mul, Source2 div) } // namespace feeunit -template -using FeeUnit = feeunit::TaggedFee; -using FeeUnit32 = FeeUnit; -using FeeUnit64 = FeeUnit; - template using FeeLevel = feeunit::TaggedFee; using FeeLevel64 = FeeLevel; diff --git a/src/ripple/core/Config.h b/src/ripple/core/Config.h index 2d440a1afd9..1e91f49263b 100644 --- a/src/ripple/core/Config.h +++ b/src/ripple/core/Config.h @@ -139,8 +139,9 @@ class Config : public BasicConfig // Network parameters - // The number of fee units a reference transaction costs - static constexpr FeeUnit32 TRANSACTION_FEE_BASE{10}; + // DEPRECATED - Fee units for a reference transction. 
+ // Only provided for backwards compatibility in a couple of places + static constexpr std::uint32_t FEE_UNITS_DEPRECATED = 10; // Note: The following parameters do not relate to the UNL or trust at all // Minimum number of nodes to consider the network present diff --git a/src/ripple/ledger/ReadView.h b/src/ripple/ledger/ReadView.h index 714f8dc945d..fb9e37c7458 100644 --- a/src/ripple/ledger/ReadView.h +++ b/src/ripple/ledger/ReadView.h @@ -49,7 +49,6 @@ namespace ripple { struct Fees { XRPAmount base{0}; // Reference tx cost (drops) - FeeUnit32 units{0}; // Reference fee units XRPAmount reserve{0}; // Reserve base (drops) XRPAmount increment{0}; // Reserve increment (drops) @@ -68,15 +67,6 @@ struct Fees { return reserve + ownerCount * increment; } - - XRPAmount - toDrops(FeeUnit64 const& fee) const - { - if (auto const resultPair = mulDiv(base, fee, units); resultPair.first) - return resultPair.second; - - return XRPAmount(STAmount::cMaxNativeN); - } }; //------------------------------------------------------------------------------ diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index d4e65a31af8..b3e1dba78bd 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -74,7 +74,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. A LogicError on startup will verify this. -static constexpr std::size_t numFeatures = 54; +static constexpr std::size_t numFeatures = 55; /** Amendments that this server supports and the default voting behavior. Whether they are enabled depends on the Rules defined in the validated @@ -341,6 +341,7 @@ extern uint256 const fixTrustLinesToSelf; extern uint256 const fixRemoveNFTokenAutoTrustLine; extern uint256 const featureImmediateOfferKilled; extern uint256 const featureDisallowIncoming; +extern uint256 const featureXRPFees; } // namespace ripple diff --git a/src/ripple/protocol/SField.h b/src/ripple/protocol/SField.h index 253d956408f..694eeef5cbb 100644 --- a/src/ripple/protocol/SField.h +++ b/src/ripple/protocol/SField.h @@ -483,6 +483,11 @@ extern SF_AMOUNT const sfRippleEscrow; extern SF_AMOUNT const sfDeliveredAmount; extern SF_AMOUNT const sfNFTokenBrokerFee; +// currency amount (fees) +extern SF_AMOUNT const sfBaseFeeDrops; +extern SF_AMOUNT const sfReserveBaseDrops; +extern SF_AMOUNT const sfReserveIncrementDrops; + // variable length (common) extern SF_VL const sfPublicKey; extern SF_VL const sfMessageKey; diff --git a/src/ripple/protocol/SystemParameters.h b/src/ripple/protocol/SystemParameters.h index 0620f5f66ca..db0c15dcad7 100644 --- a/src/ripple/protocol/SystemParameters.h +++ b/src/ripple/protocol/SystemParameters.h @@ -49,6 +49,14 @@ isLegalAmount(XRPAmount const& amount) return amount <= INITIAL_XRP; } +/** Returns true if the absolute value of the amount does not exceed the initial + * XRP in existence. */ +inline bool +isLegalAmountSigned(XRPAmount const& amount) +{ + return amount >= -INITIAL_XRP && amount <= INITIAL_XRP; +} + /* The currency code for the native currency. 
*/ static inline std::string const& systemCurrencyCode() diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index 5903603f975..2e141c11fd1 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -451,6 +451,7 @@ REGISTER_FIX (fixTrustLinesToSelf, Supported::yes, DefaultVote::no) REGISTER_FIX (fixRemoveNFTokenAutoTrustLine, Supported::yes, DefaultVote::yes); REGISTER_FEATURE(ImmediateOfferKilled, Supported::yes, DefaultVote::no); REGISTER_FEATURE(DisallowIncoming, Supported::yes, DefaultVote::no); +REGISTER_FEATURE(XRPFees, Supported::yes, DefaultVote::no); // The following amendments have been active for at least two years. Their // pre-amendment code has been removed and the identifiers are deprecated. diff --git a/src/ripple/protocol/impl/LedgerFormats.cpp b/src/ripple/protocol/impl/LedgerFormats.cpp index 7d5cf9d21aa..a540a5d80c0 100644 --- a/src/ripple/protocol/impl/LedgerFormats.cpp +++ b/src/ripple/protocol/impl/LedgerFormats.cpp @@ -146,10 +146,15 @@ LedgerFormats::LedgerFormats() add(jss::FeeSettings, ltFEE_SETTINGS, { - {sfBaseFee, soeREQUIRED}, - {sfReferenceFeeUnits, soeREQUIRED}, - {sfReserveBase, soeREQUIRED}, - {sfReserveIncrement, soeREQUIRED}, + // Old version uses raw numbers + {sfBaseFee, soeOPTIONAL}, + {sfReferenceFeeUnits, soeOPTIONAL}, + {sfReserveBase, soeOPTIONAL}, + {sfReserveIncrement, soeOPTIONAL}, + // New version uses Amounts + {sfBaseFeeDrops, soeOPTIONAL}, + {sfReserveBaseDrops, soeOPTIONAL}, + {sfReserveIncrementDrops, soeOPTIONAL}, }, commonFields); diff --git a/src/ripple/protocol/impl/SField.cpp b/src/ripple/protocol/impl/SField.cpp index 73098319b28..f458c2dfe54 100644 --- a/src/ripple/protocol/impl/SField.cpp +++ b/src/ripple/protocol/impl/SField.cpp @@ -234,6 +234,13 @@ CONSTRUCT_TYPED_SFIELD(sfRippleEscrow, "RippleEscrow", AMOUNT, CONSTRUCT_TYPED_SFIELD(sfDeliveredAmount, "DeliveredAmount", AMOUNT, 18); CONSTRUCT_TYPED_SFIELD(sfNFTokenBrokerFee, "NFTokenBrokerFee", AMOUNT, 19); +// Reserve 20 & 21 for Hooks + +// currency amount (fees) +CONSTRUCT_TYPED_SFIELD(sfBaseFeeDrops, "BaseFeeDrops", AMOUNT, 22); +CONSTRUCT_TYPED_SFIELD(sfReserveBaseDrops, "ReserveBaseDrops", AMOUNT, 23); +CONSTRUCT_TYPED_SFIELD(sfReserveIncrementDrops, "ReserveIncrementDrops", AMOUNT, 24); + // variable length (common) CONSTRUCT_TYPED_SFIELD(sfPublicKey, "PublicKey", VL, 1); CONSTRUCT_TYPED_SFIELD(sfMessageKey, "MessageKey", VL, 2); diff --git a/src/ripple/protocol/impl/STValidation.cpp b/src/ripple/protocol/impl/STValidation.cpp index 8d9b3563c35..e62a81733bd 100644 --- a/src/ripple/protocol/impl/STValidation.cpp +++ b/src/ripple/protocol/impl/STValidation.cpp @@ -58,9 +58,14 @@ STValidation::validationFormat() {sfSigningPubKey, soeREQUIRED}, {sfSignature, soeREQUIRED}, {sfConsensusHash, soeOPTIONAL}, + // featureHardenedValidations {sfCookie, soeDEFAULT}, {sfValidatedHash, soeOPTIONAL}, {sfServerVersion, soeOPTIONAL}, + // featureXRPFees + {sfBaseFeeDrops, soeOPTIONAL}, + {sfReserveBaseDrops, soeOPTIONAL}, + {sfReserveIncrementDrops, soeOPTIONAL}, }; // clang-format on diff --git a/src/ripple/protocol/impl/TxFormats.cpp b/src/ripple/protocol/impl/TxFormats.cpp index ce0d5db921f..fe42fd53e3c 100644 --- a/src/ripple/protocol/impl/TxFormats.cpp +++ b/src/ripple/protocol/impl/TxFormats.cpp @@ -155,10 +155,15 @@ TxFormats::TxFormats() ttFEE, { {sfLedgerSequence, soeOPTIONAL}, - {sfBaseFee, soeREQUIRED}, - {sfReferenceFeeUnits, soeREQUIRED}, - {sfReserveBase, soeREQUIRED}, - {sfReserveIncrement, 
soeREQUIRED}, + // Old version uses raw numbers + {sfBaseFee, soeOPTIONAL}, + {sfReferenceFeeUnits, soeOPTIONAL}, + {sfReserveBase, soeOPTIONAL}, + {sfReserveIncrement, soeOPTIONAL}, + // New version uses Amounts + {sfBaseFeeDrops, soeOPTIONAL}, + {sfReserveBaseDrops, soeOPTIONAL}, + {sfReserveIncrementDrops, soeOPTIONAL}, }, commonFields); diff --git a/src/ripple/protocol/jss.h b/src/ripple/protocol/jss.h index 1c5bf8463b0..01e30dd9327 100644 --- a/src/ripple/protocol/jss.h +++ b/src/ripple/protocol/jss.h @@ -151,6 +151,7 @@ JSS(balance); // out: AccountLines JSS(balances); // out: GatewayBalances JSS(base); // out: LogLevel JSS(base_fee); // out: NetworkOPs +JSS(base_fee_drops); // out: NetworkOPs JSS(base_fee_xrp); // out: NetworkOPs JSS(bids); // out: Subscribe JSS(binary); // in: AccountTX, LedgerEntry, @@ -495,8 +496,10 @@ JSS(request); // RPC JSS(requested); // out: Manifest JSS(reservations); // out: Reservations JSS(reserve_base); // out: NetworkOPs +JSS(reserve_base_drops); // out: NetworkOPs JSS(reserve_base_xrp); // out: NetworkOPs JSS(reserve_inc); // out: NetworkOPs +JSS(reserve_inc_drops); // out: NetworkOPs JSS(reserve_inc_xrp); // out: NetworkOPs JSS(response); // websocket JSS(result); // RPC diff --git a/src/ripple/rpc/handlers/NoRippleCheck.cpp b/src/ripple/rpc/handlers/NoRippleCheck.cpp index a2af9845fd7..18156ea4247 100644 --- a/src/ripple/rpc/handlers/NoRippleCheck.cpp +++ b/src/ripple/rpc/handlers/NoRippleCheck.cpp @@ -45,7 +45,7 @@ fillTransaction( // Convert the reference transaction cost in fee units to drops // scaled to represent the current fee load. txArray["Fee"] = - scaleFeeLoad(fees.units, context.app.getFeeTrack(), fees, false) + scaleFeeLoad(fees.base, context.app.getFeeTrack(), fees, false) .jsonClipped(); } diff --git a/src/ripple/rpc/impl/TransactionSign.cpp b/src/ripple/rpc/impl/TransactionSign.cpp index ca24b68740e..4cf372e6b63 100644 --- a/src/ripple/rpc/impl/TransactionSign.cpp +++ b/src/ripple/rpc/impl/TransactionSign.cpp @@ -720,8 +720,7 @@ checkFee( } } - // Default fee in fee units. - FeeUnit32 const feeDefault = config.TRANSACTION_FEE_BASE; + XRPAmount const feeDefault = config.FEE_DEFAULT; auto ledger = app.openLedger().current(); // Administrative and identified endpoints are exempt from local fees. @@ -738,11 +737,7 @@ checkFee( auto const limit = [&]() { // Scale fee units to drops: - auto const drops = - mulDiv(feeDefault, ledger->fees().base, ledger->fees().units); - if (!drops.first) - Throw("mulDiv"); - auto const result = mulDiv(drops.second, mult, div); + auto const result = mulDiv(feeDefault, mult, div); if (!result.first) Throw("mulDiv"); return result.second; diff --git a/src/test/app/AccountDelete_test.cpp b/src/test/app/AccountDelete_test.cpp index 73a0ccbf9e0..2ec0b876a64 100644 --- a/src/test/app/AccountDelete_test.cpp +++ b/src/test/app/AccountDelete_test.cpp @@ -515,7 +515,10 @@ class AccountDelete_test : public beast::unit_test::suite // All it takes is a large enough XRP payment to resurrect // becky's account. Try too small a payment. - env(pay(alice, becky, XRP(9)), ter(tecNO_DST_INSUF_XRP)); + env(pay(alice, + becky, + drops(env.current()->fees().accountReserve(0)) - XRP(1)), + ter(tecNO_DST_INSUF_XRP)); env.close(); // Actually resurrect becky's account. 
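
With fee units removed, the per-transaction fee arithmetic in the `Transactor` and `EscrowFinish` hunks above works directly in drops: each multisignature adds one more base fee to the minimum required fee, and an `EscrowFinish` carrying a fulfillment adds `base * (32 + fulfillmentSize / 16)` on top. The sketch below restates that arithmetic with plain 64-bit drop counts; the worked numbers assume the usual 10-drop reference fee, and the function names are illustrative only.

```cpp
// Sketch only: a plain 64-bit drop count stands in for XRPAmount, and
// signerCount / fulfillmentSize are the only transaction-specific inputs.
#include <cstddef>
#include <cstdint>
#include <iostream>

std::uint64_t
baseFeeDrops(std::uint64_t baseDrops, std::size_t signerCount)
{
    // Each multisignature adds one more base fee to the reference fee.
    return baseDrops * (1 + signerCount);
}

std::uint64_t
escrowFinishFeeDrops(
    std::uint64_t baseDrops,
    std::size_t signerCount,
    std::size_t fulfillmentSize)
{
    // A fulfillment costs an extra base * (32 + size / 16) drops.
    return baseFeeDrops(baseDrops, signerCount) +
        baseDrops * (32 + fulfillmentSize / 16);
}

int
main()
{
    // With a 10-drop reference fee: a plain single-signed transaction pays
    // 10 drops, and an EscrowFinish with a 32-byte fulfillment pays
    // 10 + 10 * (32 + 32 / 16) = 350 drops.
    std::cout << baseFeeDrops(10, 0) << '\n';              // 10
    std::cout << escrowFinishFeeDrops(10, 0, 32) << '\n';  // 350
}
```
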
diff --git a/src/test/app/FeeVote_test.cpp b/src/test/app/FeeVote_test.cpp index 4c2acf6297d..90dd8fa3dfc 100644 --- a/src/test/app/FeeVote_test.cpp +++ b/src/test/app/FeeVote_test.cpp @@ -29,13 +29,14 @@ class FeeVote_test : public beast::unit_test::suite void testSetup() { + FeeVote::Setup const defaultSetup; { // defaults Section config; auto setup = setup_FeeVote(config); - BEAST_EXPECT(setup.reference_fee == 10); - BEAST_EXPECT(setup.account_reserve == 10 * DROPS_PER_XRP); - BEAST_EXPECT(setup.owner_reserve == 2 * DROPS_PER_XRP); + BEAST_EXPECT(setup.reference_fee == defaultSetup.reference_fee); + BEAST_EXPECT(setup.account_reserve == defaultSetup.account_reserve); + BEAST_EXPECT(setup.owner_reserve == defaultSetup.owner_reserve); } { Section config; @@ -56,9 +57,9 @@ class FeeVote_test : public beast::unit_test::suite "owner_reserve = foo"}); // Illegal values are ignored, and the defaults left unchanged auto setup = setup_FeeVote(config); - BEAST_EXPECT(setup.reference_fee == 10); - BEAST_EXPECT(setup.account_reserve == 10 * DROPS_PER_XRP); - BEAST_EXPECT(setup.owner_reserve == 2 * DROPS_PER_XRP); + BEAST_EXPECT(setup.reference_fee == defaultSetup.reference_fee); + BEAST_EXPECT(setup.account_reserve == defaultSetup.account_reserve); + BEAST_EXPECT(setup.owner_reserve == defaultSetup.owner_reserve); } { Section config; @@ -68,7 +69,7 @@ class FeeVote_test : public beast::unit_test::suite "owner_reserve = -1234"}); // Illegal values are ignored, and the defaults left unchanged auto setup = setup_FeeVote(config); - BEAST_EXPECT(setup.reference_fee == 10); + BEAST_EXPECT(setup.reference_fee == defaultSetup.reference_fee); BEAST_EXPECT( setup.account_reserve == static_cast(-1234567)); BEAST_EXPECT( @@ -86,9 +87,9 @@ class FeeVote_test : public beast::unit_test::suite "owner_reserve = " + big64}); // Illegal values are ignored, and the defaults left unchanged auto setup = setup_FeeVote(config); - BEAST_EXPECT(setup.reference_fee == 10); - BEAST_EXPECT(setup.account_reserve == 10 * DROPS_PER_XRP); - BEAST_EXPECT(setup.owner_reserve == 2 * DROPS_PER_XRP); + BEAST_EXPECT(setup.reference_fee == defaultSetup.reference_fee); + BEAST_EXPECT(setup.account_reserve == defaultSetup.account_reserve); + BEAST_EXPECT(setup.owner_reserve == defaultSetup.owner_reserve); } } diff --git a/src/test/app/LoadFeeTrack_test.cpp b/src/test/app/LoadFeeTrack_test.cpp index d34531bd7bf..cc0b1c19529 100644 --- a/src/test/app/LoadFeeTrack_test.cpp +++ b/src/test/app/LoadFeeTrack_test.cpp @@ -36,55 +36,52 @@ class LoadFeeTrack_test : public beast::unit_test::suite Fees const fees = [&]() { Fees f; f.base = d.FEE_DEFAULT; - f.units = d.TRANSACTION_FEE_BASE; f.reserve = 200 * DROPS_PER_XRP; f.increment = 50 * DROPS_PER_XRP; return f; }(); BEAST_EXPECT( - scaleFeeLoad(FeeUnit64{0}, l, fees, false) == XRPAmount{0}); + scaleFeeLoad(XRPAmount{0}, l, fees, false) == XRPAmount{0}); BEAST_EXPECT( - scaleFeeLoad(FeeUnit64{10000}, l, fees, false) == + scaleFeeLoad(XRPAmount{10000}, l, fees, false) == XRPAmount{10000}); BEAST_EXPECT( - scaleFeeLoad(FeeUnit64{1}, l, fees, false) == XRPAmount{1}); + scaleFeeLoad(XRPAmount{1}, l, fees, false) == XRPAmount{1}); } { Fees const fees = [&]() { Fees f; f.base = d.FEE_DEFAULT * 10; - f.units = d.TRANSACTION_FEE_BASE; f.reserve = 200 * DROPS_PER_XRP; f.increment = 50 * DROPS_PER_XRP; return f; }(); BEAST_EXPECT( - scaleFeeLoad(FeeUnit64{0}, l, fees, false) == XRPAmount{0}); + scaleFeeLoad(XRPAmount{0}, l, fees, false) == XRPAmount{0}); BEAST_EXPECT( - scaleFeeLoad(FeeUnit64{10000}, l, 
fees, false) == - XRPAmount{100000}); + scaleFeeLoad(XRPAmount{10000}, l, fees, false) == + XRPAmount{10000}); BEAST_EXPECT( - scaleFeeLoad(FeeUnit64{1}, l, fees, false) == XRPAmount{10}); + scaleFeeLoad(XRPAmount{1}, l, fees, false) == XRPAmount{1}); } { Fees const fees = [&]() { Fees f; f.base = d.FEE_DEFAULT; - f.units = d.TRANSACTION_FEE_BASE * 10; f.reserve = 200 * DROPS_PER_XRP; f.increment = 50 * DROPS_PER_XRP; return f; }(); BEAST_EXPECT( - scaleFeeLoad(FeeUnit64{0}, l, fees, false) == XRPAmount{0}); + scaleFeeLoad(XRPAmount{0}, l, fees, false) == XRPAmount{0}); BEAST_EXPECT( - scaleFeeLoad(FeeUnit64{10000}, l, fees, false) == - XRPAmount{1000}); + scaleFeeLoad(XRPAmount{10000}, l, fees, false) == + XRPAmount{10000}); BEAST_EXPECT( - scaleFeeLoad(FeeUnit64{1}, l, fees, false) == XRPAmount{0}); + scaleFeeLoad(XRPAmount{1}, l, fees, false) == XRPAmount{1}); } } }; diff --git a/src/test/app/PseudoTx_test.cpp b/src/test/app/PseudoTx_test.cpp index d76b66f0a99..78ca7cc05b1 100644 --- a/src/test/app/PseudoTx_test.cpp +++ b/src/test/app/PseudoTx_test.cpp @@ -16,6 +16,7 @@ //============================================================================== #include +#include #include #include #include @@ -27,17 +28,26 @@ namespace test { struct PseudoTx_test : public beast::unit_test::suite { std::vector - getPseudoTxs(std::uint32_t seq) + getPseudoTxs(Rules const& rules, std::uint32_t seq) { std::vector res; res.emplace_back(STTx(ttFEE, [&](auto& obj) { obj[sfAccount] = AccountID(); obj[sfLedgerSequence] = seq; - obj[sfBaseFee] = 0; - obj[sfReserveBase] = 0; - obj[sfReserveIncrement] = 0; - obj[sfReferenceFeeUnits] = 0; + if (rules.enabled(featureXRPFees)) + { + obj[sfBaseFeeDrops] = XRPAmount{0}; + obj[sfReserveBaseDrops] = XRPAmount{0}; + obj[sfReserveIncrementDrops] = XRPAmount{0}; + } + else + { + obj[sfBaseFee] = 0; + obj[sfReserveBase] = 0; + obj[sfReserveIncrement] = 0; + obj[sfReferenceFeeUnits] = 0; + } })); res.emplace_back(STTx(ttAMENDMENT, [&](auto& obj) { @@ -66,12 +76,13 @@ struct PseudoTx_test : public beast::unit_test::suite } void - testPrevented() + testPrevented(FeatureBitset features) { using namespace jtx; - Env env(*this); + Env env(*this, features); - for (auto const& stx : getPseudoTxs(env.closed()->seq() + 1)) + for (auto const& stx : + getPseudoTxs(env.closed()->rules(), env.closed()->seq() + 1)) { std::string reason; BEAST_EXPECT(isPseudoTx(stx)); @@ -101,7 +112,12 @@ struct PseudoTx_test : public beast::unit_test::suite void run() override { - testPrevented(); + using namespace test::jtx; + FeatureBitset const all{supported_amendments()}; + FeatureBitset const xrpFees{featureXRPFees}; + + testPrevented(all - featureXRPFees); + testPrevented(all); testAllowed(); } }; diff --git a/src/test/app/TxQ_test.cpp b/src/test/app/TxQ_test.cpp index f3170c9a27b..e25c9f60de1 100644 --- a/src/test/app/TxQ_test.cpp +++ b/src/test/app/TxQ_test.cpp @@ -144,9 +144,15 @@ class TxQ1_test : public beast::unit_test::suite auto const& view = *env.current(); auto metrics = env.app().getTxQ().getMetrics(view); + auto const base = [&view]() { + auto base = view.fees().base; + if (!base) + base += 1; + return base; + }(); // Don't care about the overflow flag - return fee(toDrops(metrics.openLedgerFeeLevel, view.fees().base) + 1); + return fee(toDrops(metrics.openLedgerFeeLevel, base) + 1); } static std::unique_ptr @@ -189,7 +195,6 @@ class TxQ1_test : public beast::unit_test::suite std::size_t expectedPerLedger, std::size_t ledgersInQueue, std::uint32_t base, - std::uint32_t units, 
std::uint32_t reserve, std::uint32_t increment) { @@ -219,7 +224,6 @@ class TxQ1_test : public beast::unit_test::suite checkMetrics(__LINE__, env, 0, flagMaxQueue, 0, expectedPerLedger, 256); auto const fees = env.current()->fees(); BEAST_EXPECT(fees.base == XRPAmount{base}); - BEAST_EXPECT(fees.units == FeeUnit64{units}); BEAST_EXPECT(fees.reserve == XRPAmount{reserve}); BEAST_EXPECT(fees.increment == XRPAmount{increment}); @@ -1095,7 +1099,7 @@ class TxQ1_test : public beast::unit_test::suite checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3, 256); // ledgers in queue is 2 because of makeConfig - auto const initQueueMax = initFee(env, 3, 2, 10, 10, 200, 50); + auto const initQueueMax = initFee(env, 3, 2, 10, 200, 50); // Create several accounts while the fee is cheap so they all apply. env.fund(drops(2000), noripple(alice)); @@ -1742,7 +1746,7 @@ class TxQ1_test : public beast::unit_test::suite auto queued = ter(terQUEUED); // ledgers in queue is 2 because of makeConfig - auto const initQueueMax = initFee(env, 3, 2, 10, 10, 200, 50); + auto const initQueueMax = initFee(env, 3, 2, 10, 200, 50); BEAST_EXPECT(env.current()->fees().base == 10); @@ -2137,7 +2141,7 @@ class TxQ1_test : public beast::unit_test::suite // queued before the open ledger fee approached the reserve, // which would unnecessarily slow down this test. // ledgers in queue is 2 because of makeConfig - auto const initQueueMax = initFee(env, 3, 2, 10, 10, 200, 50); + auto const initQueueMax = initFee(env, 3, 2, 10, 200, 50); auto limit = 3; @@ -4785,6 +4789,144 @@ class TxQ1_test : public beast::unit_test::suite } } + void + testZeroReferenceFee() + { + testcase("Zero reference fee"); + using namespace jtx; + + Account const alice("alice"); + auto const queued = ter(terQUEUED); + + Env env( + *this, + makeConfig( + {{"minimum_txn_in_ledger_standalone", "3"}}, + {{"reference_fee", "0"}, + {"account_reserve", "0"}, + {"owner_reserve", "0"}})); + + BEAST_EXPECT(env.current()->fees().base == 10); + + checkMetrics(__LINE__, env, 0, std::nullopt, 0, 3, 256); + + // ledgers in queue is 2 because of makeConfig + auto const initQueueMax = initFee(env, 3, 2, 0, 0, 0); + + BEAST_EXPECT(env.current()->fees().base == 0); + + { + auto const fee = env.rpc("fee"); + + if (BEAST_EXPECT(fee.isMember(jss::result)) && + BEAST_EXPECT(!RPC::contains_error(fee[jss::result]))) + { + auto const& result = fee[jss::result]; + + BEAST_EXPECT(result.isMember(jss::levels)); + auto const& levels = result[jss::levels]; + BEAST_EXPECT( + levels.isMember(jss::median_level) && + levels[jss::median_level] == "128000"); + BEAST_EXPECT( + levels.isMember(jss::minimum_level) && + levels[jss::minimum_level] == "256"); + BEAST_EXPECT( + levels.isMember(jss::open_ledger_level) && + levels[jss::open_ledger_level] == "256"); + BEAST_EXPECT( + levels.isMember(jss::reference_level) && + levels[jss::reference_level] == "256"); + + auto const& drops = result[jss::drops]; + BEAST_EXPECT( + drops.isMember(jss::base_fee) && + drops[jss::base_fee] == "0"); + BEAST_EXPECT( + drops.isMember(jss::median_fee) && + drops[jss::base_fee] == "0"); + BEAST_EXPECT( + drops.isMember(jss::minimum_fee) && + drops[jss::base_fee] == "0"); + BEAST_EXPECT( + drops.isMember(jss::open_ledger_fee) && + drops[jss::base_fee] == "0"); + } + } + + checkMetrics(__LINE__, env, 0, initQueueMax, 0, 3, 256); + + // The noripple is to reduce the number of transactions required to + // fund the accounts. There is no rippling in this test. 
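+        // As with the "fee" RPC checks above, the metrics used throughout
+        // this test are fee *levels*, which are relative to the reference
+        // transaction: reference_level is always 256 and the default
+        // median escalation level is 500 * 256 = 128000, so those values
+        // are unchanged even though every drops field reports "0".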
+ env.fund(XRP(100000), noripple(alice)); + + checkMetrics(__LINE__, env, 0, initQueueMax, 1, 3, 256); + + env.close(); + + checkMetrics(__LINE__, env, 0, 6, 0, 3, 256); + + fillQueue(env, alice); + + checkMetrics(__LINE__, env, 0, 6, 4, 3, 256); + + env(noop(alice), openLedgerFee(env)); + + checkMetrics(__LINE__, env, 0, 6, 5, 3, 256); + + auto aliceSeq = env.seq(alice); + env(noop(alice), queued); + + checkMetrics(__LINE__, env, 1, 6, 5, 3, 256); + + env(noop(alice), seq(aliceSeq + 1), fee(10), queued); + + checkMetrics(__LINE__, env, 2, 6, 5, 3, 256); + + { + auto const fee = env.rpc("fee"); + + if (BEAST_EXPECT(fee.isMember(jss::result)) && + BEAST_EXPECT(!RPC::contains_error(fee[jss::result]))) + { + auto const& result = fee[jss::result]; + + BEAST_EXPECT(result.isMember(jss::levels)); + auto const& levels = result[jss::levels]; + BEAST_EXPECT( + levels.isMember(jss::median_level) && + levels[jss::median_level] == "128000"); + BEAST_EXPECT( + levels.isMember(jss::minimum_level) && + levels[jss::minimum_level] == "256"); + BEAST_EXPECT( + levels.isMember(jss::open_ledger_level) && + levels[jss::open_ledger_level] == "355555"); + BEAST_EXPECT( + levels.isMember(jss::reference_level) && + levels[jss::reference_level] == "256"); + + auto const& drops = result[jss::drops]; + BEAST_EXPECT( + drops.isMember(jss::base_fee) && + drops[jss::base_fee] == "0"); + BEAST_EXPECT( + drops.isMember(jss::median_fee) && + drops[jss::median_fee] == "0"); + BEAST_EXPECT( + drops.isMember(jss::minimum_fee) && + drops[jss::minimum_fee] == "0"); + BEAST_EXPECT( + drops.isMember(jss::open_ledger_fee) && + drops[jss::open_ledger_fee] == "1389"); + } + } + + env.close(); + + checkMetrics(__LINE__, env, 0, 10, 2, 5, 256); + } + void run() override { @@ -4825,6 +4967,7 @@ class TxQ1_test : public beast::unit_test::suite testReexecutePreflight(); testQueueFullDropPenalty(); testCancelQueuedOffers(); + testZeroReferenceFee(); } }; diff --git a/src/test/basics/FeeUnits_test.cpp b/src/test/basics/FeeUnits_test.cpp index bcb265c36b3..85527423c58 100644 --- a/src/test/basics/FeeUnits_test.cpp +++ b/src/test/basics/FeeUnits_test.cpp @@ -30,6 +30,8 @@ class feeunits_test : public beast::unit_test::suite void testTypes() { + using FeeLevel32 = FeeLevel; + { XRPAmount x{100}; BEAST_EXPECT(x.drops() == 100); @@ -45,8 +47,8 @@ class feeunits_test : public beast::unit_test::suite BEAST_EXPECT( (std::is_same_v)); - FeeUnit32 f{10}; - FeeUnit32 baseFee{100}; + FeeLevel32 f{10}; + FeeLevel32 baseFee{100}; auto drops = mulDiv(baseFee, x, f).second; @@ -65,8 +67,8 @@ class feeunits_test : public beast::unit_test::suite BEAST_EXPECT( (std::is_same_v)); - FeeUnit64 f{10}; - FeeUnit64 baseFee{100}; + FeeLevel64 f{10}; + FeeLevel64 baseFee{100}; auto drops = mulDiv(baseFee, x, f).second; @@ -102,22 +104,24 @@ class feeunits_test : public beast::unit_test::suite testJson() { // Json value functionality + using FeeLevel32 = FeeLevel; + { - FeeUnit32 x{std::numeric_limits::max()}; + FeeLevel32 x{std::numeric_limits::max()}; auto y = x.jsonClipped(); BEAST_EXPECT(y.type() == Json::uintValue); BEAST_EXPECT(y == Json::Value{x.fee()}); } { - FeeUnit32 x{std::numeric_limits::min()}; + FeeLevel32 x{std::numeric_limits::min()}; auto y = x.jsonClipped(); BEAST_EXPECT(y.type() == Json::uintValue); BEAST_EXPECT(y == Json::Value{x.fee()}); } { - FeeUnit64 x{std::numeric_limits::max()}; + FeeLevel64 x{std::numeric_limits::max()}; auto y = x.jsonClipped(); BEAST_EXPECT(y.type() == Json::uintValue); BEAST_EXPECT( @@ -125,7 +129,7 @@ class 
feeunits_test : public beast::unit_test::suite } { - FeeUnit64 x{std::numeric_limits::min()}; + FeeLevel64 x{std::numeric_limits::min()}; auto y = x.jsonClipped(); BEAST_EXPECT(y.type() == Json::uintValue); BEAST_EXPECT(y == Json::Value{0}); @@ -167,15 +171,17 @@ class feeunits_test : public beast::unit_test::suite { // Explicitly test every defined function for the TaggedFee class // since some of them are templated, but not used anywhere else. + using FeeLevel32 = FeeLevel; + { - auto make = [&](auto x) -> FeeUnit64 { return x; }; - auto explicitmake = [&](auto x) -> FeeUnit64 { - return FeeUnit64{x}; + auto make = [&](auto x) -> FeeLevel64 { return x; }; + auto explicitmake = [&](auto x) -> FeeLevel64 { + return FeeLevel64{x}; }; - FeeUnit64 defaulted; + FeeLevel64 defaulted; (void)defaulted; - FeeUnit64 test{0}; + FeeLevel64 test{0}; BEAST_EXPECT(test.fee() == 0); test = explicitmake(beast::zero); @@ -187,13 +193,13 @@ class feeunits_test : public beast::unit_test::suite test = explicitmake(100u); BEAST_EXPECT(test.fee() == 100); - FeeUnit64 const targetSame{200u}; - FeeUnit32 const targetOther{300u}; + FeeLevel64 const targetSame{200u}; + FeeLevel32 const targetOther{300u}; test = make(targetSame); BEAST_EXPECT(test.fee() == 200); BEAST_EXPECT(test == targetSame); - BEAST_EXPECT(test < FeeUnit64{1000}); - BEAST_EXPECT(test > FeeUnit64{100}); + BEAST_EXPECT(test < FeeLevel64{1000}); + BEAST_EXPECT(test > FeeLevel64{100}); test = make(targetOther); BEAST_EXPECT(test.fee() == 300); BEAST_EXPECT(test == targetOther); diff --git a/src/test/ledger/Invariants_test.cpp b/src/test/ledger/Invariants_test.cpp index f07f3392245..898376bdaf7 100644 --- a/src/test/ledger/Invariants_test.cpp +++ b/src/test/ledger/Invariants_test.cpp @@ -64,7 +64,7 @@ class Invariants_test : public beast::unit_test::suite ov, tx, tesSUCCESS, - safe_cast(env.current()->fees().units), + env.current()->fees().base, tapNONE, jlog}; diff --git a/src/test/rpc/AccountTx_test.cpp b/src/test/rpc/AccountTx_test.cpp index 1d537d47791..75147875d1a 100644 --- a/src/test/rpc/AccountTx_test.cpp +++ b/src/test/rpc/AccountTx_test.cpp @@ -547,7 +547,10 @@ class AccountTx_test : public beast::unit_test::suite // All it takes is a large enough XRP payment to resurrect // becky's account. Try too small a payment. - env(pay(alice, becky, XRP(9)), ter(tecNO_DST_INSUF_XRP)); + env(pay(alice, + becky, + drops(env.current()->fees().accountReserve(0)) - XRP(1)), + ter(tecNO_DST_INSUF_XRP)); env.close(); // Actually resurrect becky's account. 
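The `AccountTx_test` hunk above replaces a hard-coded 9 XRP payment with one derived from the ledger's own reserve, so the payment stays just below the amount needed to resurrect becky's account no matter how the reserve is configured. Below is a minimal sketch of the relationship being relied on; the 200 XRP base reserve and 50 XRP owner increment are assumptions taken from the test fixtures earlier in this series, and the `accountReserve` lambda is a stand-in for the helper on the ledger's `Fees`, not the project's implementation.

```
// Sketch only: how the XRP account reserve scales with owned objects.
// The concrete figures are assumptions taken from the test fixtures in
// this patch series (200 XRP base reserve, 50 XRP per-object increment).
#include <cassert>
#include <cstdint>

int main()
{
    constexpr std::int64_t dropsPerXrp = 1'000'000;
    constexpr std::int64_t reserve = 200 * dropsPerXrp;   // base reserve
    constexpr std::int64_t increment = 50 * dropsPerXrp;  // per owned object

    auto accountReserve = [=](std::int64_t ownerCount) {
        return reserve + ownerCount * increment;
    };

    // A brand-new account owns nothing, so it needs only the base reserve.
    // The updated test pays one XRP less than accountReserve(0), which is
    // insufficient to create the destination regardless of configuration.
    assert(accountReserve(0) == 200 * dropsPerXrp);
    assert(accountReserve(3) == 350 * dropsPerXrp);
    return 0;
}
```

Deriving the amount from `env.current()->fees().accountReserve(0)` rather than a literal keeps the expectation valid for any configured reserve, including the zero-reserve configuration exercised by the new TxQ test above.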
diff --git a/src/test/rpc/Subscribe_test.cpp b/src/test/rpc/Subscribe_test.cpp index 1a0773f26ec..783f5eb7e38 100644 --- a/src/test/rpc/Subscribe_test.cpp +++ b/src/test/rpc/Subscribe_test.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -326,11 +327,11 @@ class Subscribe_test : public beast::unit_test::suite } void - testValidations() + testValidations(FeatureBitset features) { using namespace jtx; - Env env{*this, envconfig(validator, "")}; + Env env{*this, envconfig(validator, ""), features}; auto& cfg = env.app().config(); if (!BEAST_EXPECT(cfg.section(SECTION_VALIDATION_SEED).empty())) return; @@ -410,10 +411,25 @@ class Subscribe_test : public beast::unit_test::suite if (jv.isMember(jss::server_version) != isFlagLedger) return false; - if (jv.isMember(jss::reserve_base) != isFlagLedger) + bool xrpFees = env.closed()->rules().enabled(featureXRPFees); + if ((!xrpFees && + jv.isMember(jss::reserve_base) != isFlagLedger) || + (xrpFees && jv.isMember(jss::reserve_base))) return false; - if (jv.isMember(jss::reserve_inc) != isFlagLedger) + if ((!xrpFees && + jv.isMember(jss::reserve_inc) != isFlagLedger) || + (xrpFees && jv.isMember(jss::reserve_inc))) + return false; + + if ((xrpFees && + jv.isMember(jss::reserve_base_drops) != isFlagLedger) || + (!xrpFees && jv.isMember(jss::reserve_base_drops))) + return false; + + if ((xrpFees && + jv.isMember(jss::reserve_inc_drops) != isFlagLedger) || + (!xrpFees && jv.isMember(jss::reserve_inc_drops))) return false; return true; @@ -1140,11 +1156,16 @@ class Subscribe_test : public beast::unit_test::suite void run() override { + using namespace test::jtx; + FeatureBitset const all{supported_amendments()}; + FeatureBitset const xrpFees{featureXRPFees}; + testServer(); testLedger(); testTransactions(); testManifests(); - testValidations(); + testValidations(all - xrpFees); + testValidations(all); testSubErrors(true); testSubErrors(false); testSubByUrl(); From 31e7e5a56e36ffeefaec39325dfcd04a94d80086 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Sun, 5 Feb 2023 18:08:26 -0600 Subject: [PATCH 082/508] Revise CONTRIBUTING (#4382) - Wrap text at 80 columns. - Match capitalization of GitHub usernames. - Prescribe more rules for pull requests. - Link more reference documentation. --- CONTRIBUTING.md | 185 +++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 151 insertions(+), 34 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 06b9d622cbb..4a09ad4b991 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,31 +1,129 @@ +The XRP Ledger has many and diverse stakeholders, and everyone deserves +a chance to contribute meaningful changes to the code that runs the XRPL. + # Contributing -The XRP Ledger has many and diverse stakeholders, and everyone deserves a chance to contribute meaningful changes to the code that runs the XRPL. -To contribute, please: -1. Fork the repository under your own user. -2. Create a new branch on which to write your changes. Please note that changes which alter transaction processing must be composed via and guarded using [Amendments](https://xrpl.org/amendments.html). Changes which are _read only_ i.e. RPC, or changes which are only refactors and maintain the existing behaviour do not need to be made through an Amendment. -3. Write and test your code. -4. Ensure that your code compiles with the provided build engine and update the provided build engine as part of your PR where needed and where appropriate. -5. 
Write test cases for your code and include those in `src/test` such that they are runnable from the command line using `./rippled -u`. (Some changes will not be able to be tested this way.) -6. Ensure your code passes automated checks (e.g. clang-format and levelization.) -7. Squash your commits (i.e. rebase) into as few commits as is reasonable to describe your changes at a high level (typically a single commit for a small change.) -8. Open a PR to the main repository onto the _develop_ branch, and follow the provided template. - -# Major Changes -If your code change is a major feature, a breaking change or in some other way makes a significant alteration to the way the XRPL will operate, then you must first write an XLS document (XRP Ledger Standard) describing your change. -To do this: -1. Go to [XLS Standards](https://github.com/XRPLF/XRPL-Standards/discussions). -2. Choose the next available standard number. -3. Open a discussion with the appropriate title to propose your draft standard. -4. Link your XLS in your PR. + +We assume you are familiar with the general practice of [making contributions +on GitHub][1]. +This file includes only special instructions specific to this project. + + +## Before you start + +All of your contributions must be developed in your personal +[fork](https://github.com/XRPLF/rippled/fork). +No personal branches may ever be pushed to the [main project][rippled]. +These are the only branches that may ever exist in the main project: + +- `develop`: The latest set of unreleased features, and the most common + starting point for contributions. +- `release`: The latest release candidate. +- `master`: The latest stable release. +- `gh-pages`: The documentation for this project, built by Doxygen. + +The tip of each branch must be signed. +In order for GitHub to sign a squashed commit that it builds from your pull +request, +all of your commits must be signed, +and GitHub must know your verifying key. +Please walk through the excellent documentation from GitHub to set +up [signature verification][signing]. + +[rippled]: https://github.com/XRPLF/rippled +[signing]: https://docs.github.com/en/authentication/managing-commit-signature-verification/about-commit-signature-verification + + +## Major contributions + +If your contribution is a major feature or breaking change, +then you must first write an XRP Ledger Standard (XLS) describing it. +Go to [XRPL-Standards](https://github.com/XRPLF/XRPL-Standards/discussions), +choose the next available standard number, and +open a discussion with an appropriate title to propose your draft standard. + +When you submit a pull request, please link the corresponding XLS in the +description. +An XLS still in draft status is considered a work-in-progress and open for +discussion. +Please do not submit a pull request before allowing due time for questions, +suggestions, and changes to the XLS draft. +It is the responsibility of the XLS author to update the draft to match the +final implementation when its corresponding pull request is merged. + + +## Before making a pull request + +Changes that alter transaction processing must be guarded by an +[Amendment](https://xrpl.org/amendments.html). +All other changes that maintain the existing behavior do not need an +Amendment. + +Ensure that your code compiles according to the build instructions in +[`BUILD.md`](./BUILD.md). +If you create new source files, they must go under `src/ripple`. +You will need to add them to one of the +[source lists](./Builds/CMake/RippledCore.cmake) in CMake. 
+ +Please write tests for your code. +If you create new test source files, they must go under `src/test`. +You will need to add them to one of the +[source lists](./Builds/CMake/RippledCore.cmake) in CMake. +If your test can be run offline, in under 60 seconds, then it can be an +automatic test run by `rippled --unittest`. +Otherwise, it must be a manual test. + +The source must be formatted according to the style guide below. + +Header includes must be [levelized](./Builds/levelization). + + +## Pull requests + +Pull requests must target the `develop` branch.[^1] + +[^1]: There are exceptions to this policy for hotfixes, but no one consulting + this document will be in that situation. + +Changes to pull requests must be added as new commits. +You may **never force push a branch in a pull request** (e.g. after a rebase). +This preserves the ability for reviewers to filter changes since their last +review. + +A pull request must obtain **approvals from at least two reviewers** before it +can be considered for merge by a Maintainer. +Maintainers retain discretion to require more approvals if they feel the +credibility of the existing approvals is insufficient. + +Pull requests must be merged by [squash-and-merge][2] +to preserve a linear history for the `develop` branch. + # Style guide -This is a non-exhaustive list of recommended style guidelines. These are not always strictly enforced and serve as a way to keep the codebase coherent rather than a set of _thou shalt not_ commandments. + +This is a non-exhaustive list of recommended style guidelines. +These are not always strictly enforced and serve as a way to keep the codebase coherent rather than a set of _thou shalt not_ commandments. + ## Formatting -All code must conform to `clang-format` version 10, unless the result would be unreasonably difficult to read or maintain. -To change your code to conform use `clang-format -i `. + +All code must conform to `clang-format` version 10, +according to the settings in [`.clang-format`](./.clang-format), +unless the result would be unreasonably difficult to read or maintain. +To demarcate lines that should be left as-is, surround them with comments like +this: + +``` +// clang-format off +... +// clang-format on +``` + +You can format individual files in place by running `clang-format -i ...` +from any directory within this project. + ## Avoid + 1. Proliferation of nearly identical code. 2. Proliferation of new files and classes. 3. Complex inheritance and complex OOP patterns. @@ -35,7 +133,9 @@ To change your code to conform use `clang-format -i `. 7. CPU or architecture-specific code unless there is a good reason to include it, and where it is used guard it with macros and provide explanatory comments. 8. Importing new libraries unless there is a very good reason to do so. + ## Seek to + 9. Extend functionality of existing code rather than creating new code. 10. Prefer readability over terseness where important logic is concerned. 11. Inline functions that are not used or are not likely to be used elsewhere in the codebase. @@ -43,25 +143,42 @@ To change your code to conform use `clang-format -i `. 13. Use TitleCase for classes, structs and filenames, camelCase for function and variable names, lower case for namespaces and folders. 14. Provide as many comments as you feel that a competent programmer would need to understand what your code does. + # Maintainers -Maintainers are ecosystem participants with elevated access to the repository. 
They are able to push new code, make decisions on when a release should be made, etc. -## Code Review -New contributors' PRs must be reviewed by at least two of the maintainers. Well established prior contributors can be reviewed by a single maintainer. +Maintainers are ecosystem participants with elevated access to the repository. +They are able to push new code, make decisions on when a release should be +made, etc. + -## Adding and Removing -New maintainers can be proposed by two existing maintainers, subject to a vote by a quorum of the existing maintainers. A minimum of 50% support and a 50% participation is required. In the event of a tie vote, the addition of the new maintainer will be rejected. +## Adding and removing -Existing maintainers can resign, or be subject to a vote for removal at the behest of two existing maintainers. A minimum of 60% agreement and 50% participation are required. The XRP Ledger Foundation will have the ability, for cause, to remove an existing maintainer without a vote. +New maintainers can be proposed by two existing maintainers, subject to a vote +by a quorum of the existing maintainers. +A minimum of 50% support and a 50% participation is required. +In the event of a tie vote, the addition of the new maintainer will be +rejected. + +Existing maintainers can resign, or be subject to a vote for removal at the +behest of two existing maintainers. +A minimum of 60% agreement and 50% participation are required. +The XRP Ledger Foundation will have the ability, for cause, to remove an +existing maintainer without a vote. + + +## Current Maintainers -## Existing Maintainers * [JoelKatz](https://github.com/JoelKatz) (Ripple) -* [Manojsdoshi](https://github.com/manojsdoshi) (Ripple) -* [N3tc4t](https://github.com/n3tc4t) (XRPL Labs) -* [Nikolaos D Bougalis](https://github.com/nbougalis) -* [Nixer89](https://github.com/nixer89) (XRP Ledger Foundation) +* [manojsdoshi](https://github.com/manojsdoshi) (Ripple) +* [n3tc4t](https://github.com/n3tc4t) (XRPL Labs) +* [Nik Bougalis](https://github.com/nbougalis) +* [nixer89](https://github.com/nixer89) (XRP Ledger Foundation) * [RichardAH](https://github.com/RichardAH) (XRPL Labs + XRP Ledger Foundation) -* [Seelabs](https://github.com/seelabs) (Ripple) +* [seelabs](https://github.com/seelabs) (Ripple) * [Silkjaer](https://github.com/Silkjaer) (XRP Ledger Foundation) * [WietseWind](https://github.com/WietseWind) (XRPL Labs + XRP Ledger Foundation) -* [Ximinez](https://github.com/ximinez) (Ripple) +* [Ed Hennis](https://github.com/ximinez) (Ripple) + + +[1]: https://docs.github.com/en/get-started/quickstart/contributing-to-projects +[2]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/incorporating-changes-from-a-pull-request/about-pull-request-merges#squash-and-merge-your-commits From 0ee63b7c7b3f0096316aa329b4405b90fae3f85d Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Wed, 13 Apr 2022 16:01:52 -0400 Subject: [PATCH 083/508] AMM Add Number class and associated algorithms --- Builds/CMake/RippledCore.cmake | 3 + src/ripple/basics/Number.h | 322 +++++++++++++++++ src/ripple/basics/impl/Number.cpp | 582 ++++++++++++++++++++++++++++++ src/test/basics/Number_test.cpp | 143 ++++++++ 4 files changed, 1050 insertions(+) create mode 100644 src/ripple/basics/Number.h create mode 100644 src/ripple/basics/impl/Number.cpp create mode 100644 src/test/basics/Number_test.cpp diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 4bad3a87b4a..dca4720e381 100644 --- 
a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -50,6 +50,7 @@ target_sources (xrpl_core PRIVATE src/ripple/basics/impl/FileUtilities.cpp src/ripple/basics/impl/IOUAmount.cpp src/ripple/basics/impl/Log.cpp + src/ripple/basics/impl/Number.cpp src/ripple/basics/impl/StringUtilities.cpp #[===============================[ main sources: @@ -153,6 +154,7 @@ install ( src/ripple/basics/LocalValue.h src/ripple/basics/Log.h src/ripple/basics/MathUtilities.h + src/ripple/basics/Number.h src/ripple/basics/safe_cast.h src/ripple/basics/Slice.h src/ripple/basics/spinlock.h @@ -737,6 +739,7 @@ if (tests) src/test/basics/FileUtilities_test.cpp src/test/basics/IOUAmount_test.cpp src/test/basics/KeyCache_test.cpp + src/test/basics/Number_test.cpp src/test/basics/PerfLog_test.cpp src/test/basics/RangeSet_test.cpp src/test/basics/scope_test.cpp diff --git a/src/ripple/basics/Number.h b/src/ripple/basics/Number.h new file mode 100644 index 00000000000..607db3ff889 --- /dev/null +++ b/src/ripple/basics/Number.h @@ -0,0 +1,322 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2022 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#ifndef RIPPLE_BASICS_NUMBER_H_INCLUDED +#define RIPPLE_BASICS_NUMBER_H_INCLUDED + +#include +#include +#include +#include + +namespace ripple { + +class Number; + +std::string +to_string(Number const& amount); + +class Number +{ + using rep = std::int64_t; + rep mantissa_{0}; + int exponent_{-2'147'483'648}; + +public: + struct unchecked + { + explicit unchecked() = default; + }; + + explicit Number() = default; + + Number(rep mantissa); + explicit Number(rep mantissa, int exponent); + explicit constexpr Number(rep mantissa, int exponent, unchecked) noexcept; + + Number(IOUAmount const& x); + + constexpr rep + mantissa() const noexcept; + constexpr int + exponent() const noexcept; + + constexpr Number + operator+() const noexcept; + constexpr Number + operator-() const noexcept; + Number& + operator++(); + Number + operator++(int); + Number& + operator--(); + Number + operator--(int); + + Number& + operator+=(Number const& x); + Number& + operator-=(Number const& x); + + Number& + operator*=(Number const& x); + Number& + operator/=(Number const& x); + + explicit operator IOUAmount() const; + + friend constexpr bool + operator==(Number const& x, Number const& y) noexcept + { + return x.mantissa_ == y.mantissa_ && x.exponent_ == y.exponent_; + } + + friend constexpr bool + operator!=(Number const& x, Number const& y) noexcept + { + return !(x == y); + } + + friend constexpr bool + operator<(Number const& x, Number const& y) noexcept + { + // If the two amounts have different signs (zero is treated as positive) + // then the comparison is true iff the left is negative. + bool const lneg = x.mantissa_ < 0; + bool const rneg = y.mantissa_ < 0; + + if (lneg != rneg) + return lneg; + + // Both have same sign and the left is zero: the right must be + // greater than 0. + if (x.mantissa_ == 0) + return y.mantissa_ > 0; + + // Both have same sign, the right is zero and the left is non-zero. 
+ if (y.mantissa_ == 0) + return false; + + // Both have the same sign, compare by exponents: + if (x.exponent_ > y.exponent_) + return lneg; + if (x.exponent_ < y.exponent_) + return !lneg; + + // If equal exponents, compare mantissas + return x.mantissa_ < y.mantissa_; + } + + friend constexpr bool + operator>(Number const& x, Number const& y) noexcept + { + return y < x; + } + + friend constexpr bool + operator<=(Number const& x, Number const& y) noexcept + { + return !(y < x); + } + + friend constexpr bool + operator>=(Number const& x, Number const& y) noexcept + { + return !(x < y); + } + + friend std::ostream& + operator<<(std::ostream& os, Number const& x) + { + return os << to_string(x); + } + +private: + void + normalize(); + constexpr bool + isnormal() const noexcept; + + // The range for the mantissa when normalized + constexpr static std::int64_t minMantissa = 1'000'000'000'000'000LL; + constexpr static std::int64_t maxMantissa = 9'999'999'999'999'999LL; + + // The range for the exponent when normalized + constexpr static int minExponent = -32768; + constexpr static int maxExponent = 32768; + + class guard; +}; + +inline constexpr Number::Number(rep mantissa, int exponent, unchecked) noexcept + : mantissa_{mantissa}, exponent_{exponent} +{ +} + +inline Number::Number(rep mantissa, int exponent) + : mantissa_{mantissa}, exponent_{exponent} +{ + normalize(); +} + +inline Number::Number(rep mantissa) : Number{mantissa, 0} +{ +} + +inline Number::Number(IOUAmount const& x) : Number{x.mantissa(), x.exponent()} +{ +} + +inline constexpr Number::rep +Number::mantissa() const noexcept +{ + return mantissa_; +} + +inline constexpr int +Number::exponent() const noexcept +{ + return exponent_; +} + +inline constexpr Number +Number::operator+() const noexcept +{ + return *this; +} + +inline constexpr Number +Number::operator-() const noexcept +{ + auto x = *this; + x.mantissa_ = -x.mantissa_; + return x; +} + +inline Number& +Number::operator++() +{ + *this += Number{1000000000000000, -15, unchecked{}}; + return *this; +} + +inline Number +Number::operator++(int) +{ + auto x = *this; + ++(*this); + return x; +} + +inline Number& +Number::operator--() +{ + *this -= Number{1000000000000000, -15, unchecked{}}; + return *this; +} + +inline Number +Number::operator--(int) +{ + auto x = *this; + --(*this); + return x; +} + +inline Number& +Number::operator-=(Number const& x) +{ + return *this += -x; +} + +inline Number +operator+(Number const& x, Number const& y) +{ + auto z = x; + z += y; + return z; +} + +inline Number +operator-(Number const& x, Number const& y) +{ + auto z = x; + z -= y; + return z; +} + +inline Number +operator*(Number const& x, Number const& y) +{ + auto z = x; + z *= y; + return z; +} + +inline Number +operator/(Number const& x, Number const& y) +{ + auto z = x; + z /= y; + return z; +} + +inline Number::operator IOUAmount() const +{ + return IOUAmount{mantissa(), exponent()}; +} + +inline constexpr bool +Number::isnormal() const noexcept +{ + auto const abs_m = mantissa_ < 0 ? 
-mantissa_ : mantissa_; + return minMantissa <= abs_m && abs_m <= maxMantissa && + minExponent <= exponent_ && exponent_ <= maxExponent; +} + +inline constexpr Number +abs(Number x) noexcept +{ + if (x < Number{}) + x = -x; + return x; +} + +// Returns f^n +// Uses a log_2(n) number of mulitiplications + +Number +power(Number f, unsigned n); + +// Returns f^(1/d) +// Uses Newton–Raphson iterations until the result stops changing +// to find the root of the polynomial g(x) = x^d - f + +Number +root(Number f, unsigned d); + +// Returns f^(n/d) + +Number +power(Number f, unsigned n, unsigned d); + +} // namespace ripple + +#endif // RIPPLE_BASICS_NUMBER_H_INCLUDED diff --git a/src/ripple/basics/impl/Number.cpp b/src/ripple/basics/impl/Number.cpp new file mode 100644 index 00000000000..1f056a2d93c --- /dev/null +++ b/src/ripple/basics/impl/Number.cpp @@ -0,0 +1,582 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2022 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include + +#ifdef _MSVC_LANG +#include +using uint128_t = boost::multiprecision::uint128_t; +#else // !defined(_MSVC_LANG) +using uint128_t = __uint128_t; +#endif // !defined(_MSVC_LANG) + +namespace ripple { + +// guard + +class Number::guard +{ + std::uint64_t digits_; + std::uint8_t xbit_ : 1; + std::uint8_t sbit_ : 1; // TODO : get rid of + +public: + explicit guard() : digits_{0}, xbit_{0}, sbit_{0} + { + } + + void + set_positive() noexcept; + void + set_negative() noexcept; + bool + is_negative() const noexcept; + void + push(unsigned d) noexcept; + unsigned + pop() noexcept; + int + round() noexcept; +}; + +inline void +Number::guard::set_positive() noexcept +{ + sbit_ = 0; +} + +inline void +Number::guard::set_negative() noexcept +{ + sbit_ = 1; +} + +inline bool +Number::guard::is_negative() const noexcept +{ + return sbit_ == 1; +} + +inline void +Number::guard::push(unsigned d) noexcept +{ + xbit_ = xbit_ || (digits_ & 0x0000'0000'0000'000F) != 0; + digits_ >>= 4; + digits_ |= (d & 0x0000'0000'0000'000FULL) << 60; +} + +inline unsigned +Number::guard::pop() noexcept +{ + unsigned d = (digits_ & 0xF000'0000'0000'0000) >> 60; + digits_ <<= 4; + return d; +} + +int +Number::guard::round() noexcept +{ + if (digits_ > 0x5000'0000'0000'0000) + return 1; + if (digits_ < 0x5000'0000'0000'0000) + return -1; + if (xbit_) + return 1; + return 0; +} + +// Number + +constexpr Number one{1000000000000000, -15, Number::unchecked{}}; + +void +Number::normalize() +{ + if (mantissa_ == 0) + { + *this = Number{}; + return; + } + bool const negative = (mantissa_ < 0); + if (negative) + mantissa_ = -mantissa_; + auto m = static_cast>(mantissa_); + while ((m < minMantissa) && (exponent_ > minExponent)) + { + m *= 10; + --exponent_; + } + while (m > maxMantissa) + { + if (exponent_ >= maxExponent) + throw std::overflow_error("Number::normalize 1"); + m /= 10; + ++exponent_; + } + mantissa_ = m; + if ((exponent_ < minExponent) || (mantissa_ < minMantissa)) + { + *this = Number{}; + return; + } + + if (exponent_ > maxExponent) + throw std::overflow_error("Number::normalize 2"); + + if (negative) + mantissa_ = -mantissa_; +} + +Number& +Number::operator+=(Number const& y) +{ + if (y == Number{}) + return *this; + if (*this == Number{}) + { + *this = y; + return *this; + } + if (*this == -y) + { + *this = Number{}; + return *this; + } + assert(isnormal() && y.isnormal()); + auto xm = mantissa(); + auto xe = exponent(); + int xn = 1; + if (xm < 0) + { + xm = -xm; + xn = -1; + } + auto ym = y.mantissa(); + auto ye = y.exponent(); + int yn = 1; + if (ym < 0) + { + ym = -ym; + yn = -1; + } + guard g; + if (xe < ye) + { + if (xn == -1) + g.set_negative(); + do + { + g.push(xm % 10); + xm /= 10; + ++xe; + } while (xe < ye); + } + else if (xe > ye) + { + if (yn == -1) + g.set_negative(); + do + { + g.push(ym % 10); + ym /= 10; + ++ye; + } while (xe > ye); + } + if (xn == yn) + { + xm += ym; + if (xm > maxMantissa) + { + g.push(xm % 10); + xm /= 10; + ++xe; + } + auto r = g.round(); + if (r == 1 || (r == 0 && (xm & 1) == 1)) + { + ++xm; + if (xm > maxMantissa) + { + xm /= 10; + ++xe; + } + } + if (xe > maxExponent) + throw std::overflow_error("Number::addition overflow"); + } + else + { + if (xm > ym) + { + xm = xm - ym; + } + else + { + xm = ym - xm; + xe = ye; + xn = yn; + } + while (xm < minMantissa) + { + xm *= 10; + xm -= g.pop(); + --xe; + } + auto r = g.round(); + if 
(r == 1 || (r == 0 && (xm & 1) == 1)) + { + --xm; + if (xm < minMantissa) + { + xm *= 10; + --xe; + } + } + if (xe < minExponent) + { + xm = 0; + xe = Number{}.exponent_; + } + } + mantissa_ = xm * xn; + exponent_ = xe; + assert(isnormal()); + return *this; +} + +Number& +Number::operator*=(Number const& y) +{ + if (*this == Number{}) + return *this; + if (y == Number{}) + { + *this = y; + return *this; + } + assert(isnormal() && y.isnormal()); + auto xm = mantissa(); + auto xe = exponent(); + int xn = 1; + if (xm < 0) + { + xm = -xm; + xn = -1; + } + auto ym = y.mantissa(); + auto ye = y.exponent(); + int yn = 1; + if (ym < 0) + { + ym = -ym; + yn = -1; + } + auto zm = uint128_t(xm) * uint128_t(ym); + auto ze = xe + ye; + auto zn = xn * yn; + guard g; + while (zm > maxMantissa) + { + g.push(static_cast(zm % 10)); + zm /= 10; + ++ze; + } + xm = static_cast(zm); + xe = ze; + auto r = g.round(); + if (r == 1 || (r == 0 && (xm & 1) == 1)) + { + ++xm; + if (xm > maxMantissa) + { + xm /= 10; + ++xe; + } + } + if (xe < minExponent) + { + xm = 0; + xe = Number{}.exponent_; + } + if (xe > maxExponent) + throw std::overflow_error( + "Number::multiplication overflow : exponent is " + + std::to_string(xe)); + mantissa_ = xm * zn; + exponent_ = xe; + assert(isnormal()); + return *this; +} + +Number& +Number::operator/=(Number const& y) +{ + if (y == Number{}) + throw std::overflow_error("Number: divide by 0"); + int np = 1; + auto nm = mantissa(); + if (nm < 0) + { + nm = -nm; + np = -1; + } + int dp = 1; + auto dm = y.mantissa(); + if (dm < 0) + { + dm = -dm; + dp = -1; + } + // Divide numerator and denominator such that the + // denominator is in the range [1, 10). + const int offset = -15 - y.exponent(); + Number n{nm * (np * dp), exponent() + offset}; + Number d{dm, y.exponent() + offset}; + // Quadratic least squares fit to 1/x in the range [1, 10] + constexpr Number a0{9178756872006464, -16, unchecked{}}; + constexpr Number a1{-2149215784206187, -16, unchecked{}}; + constexpr Number a2{1405502114116773, -17, unchecked{}}; + static_assert(a0.isnormal()); + static_assert(a1.isnormal()); + static_assert(a2.isnormal()); + Number rm2{}; + Number rm1{}; + Number r = a2; + r = (a2 * d + a1) * d + a0; + do + { + rm2 = rm1; + rm1 = r; + r = r + r * (one - d * r); + } while (r != rm1 && r != rm2); + *this = n * r; + return *this; +} + +std::string +to_string(Number const& amount) +{ + // keep full internal accuracy, but make more human friendly if possible + if (amount == Number{}) + return "0"; + + auto const exponent = amount.exponent(); + auto mantissa = amount.mantissa(); + + // Use scientific notation for exponents that are too small or too large + if (((exponent != 0) && ((exponent < -25) || (exponent > -5)))) + { + std::string ret = std::to_string(mantissa); + ret.append(1, 'e'); + ret.append(std::to_string(exponent)); + return ret; + } + + bool negative = false; + + if (mantissa < 0) + { + mantissa = -mantissa; + negative = true; + } + + assert(exponent + 43 > 0); + + size_t const pad_prefix = 27; + size_t const pad_suffix = 23; + + std::string const raw_value(std::to_string(mantissa)); + std::string val; + + val.reserve(raw_value.length() + pad_prefix + pad_suffix); + val.append(pad_prefix, '0'); + val.append(raw_value); + val.append(pad_suffix, '0'); + + size_t const offset(exponent + 43); + + auto pre_from(val.begin()); + auto const pre_to(val.begin() + offset); + + auto const post_from(val.begin() + offset); + auto post_to(val.end()); + + // Crop leading zeroes. 
Take advantage of the fact that there's always a + // fixed amount of leading zeroes and skip them. + if (std::distance(pre_from, pre_to) > pad_prefix) + pre_from += pad_prefix; + + assert(post_to >= post_from); + + pre_from = std::find_if(pre_from, pre_to, [](char c) { return c != '0'; }); + + // Crop trailing zeroes. Take advantage of the fact that there's always a + // fixed amount of trailing zeroes and skip them. + if (std::distance(post_from, post_to) > pad_suffix) + post_to -= pad_suffix; + + assert(post_to >= post_from); + + post_to = std::find_if( + std::make_reverse_iterator(post_to), + std::make_reverse_iterator(post_from), + [](char c) { return c != '0'; }) + .base(); + + std::string ret; + + if (negative) + ret.append(1, '-'); + + // Assemble the output: + if (pre_from == pre_to) + ret.append(1, '0'); + else + ret.append(pre_from, pre_to); + + if (post_to != post_from) + { + ret.append(1, '.'); + ret.append(post_from, post_to); + } + + return ret; +} + +// Returns f^n +// Uses a log_2(n) number of mulitiplications + +Number +power(Number f, unsigned n) +{ + if (n == 0) + return one; + if (n == 1) + return f; + auto r = power(f, n / 2); + r *= r; + if (n % 2 != 0) + r *= f; + return r; +} + +// Returns f^(1/d) +// Uses Newton–Raphson iterations until the result stops changing +// to find the non-negative root of the polynomial g(x) = x^d - f + +Number +root(Number f, unsigned d) +{ + if (f == one || d == 1) + return f; + if (d == 0) + { + if (f == -one) + return one; + if (abs(f) < one) + return Number{}; + throw std::overflow_error("Number::root infinity"); + } + if (f < Number{} && d % 2 == 0) + throw std::overflow_error("Number::root nan"); + if (f == Number{}) + return f; + + // Scale f into the range (0, 1) such that f's exponent is a multiple of d + auto e = f.exponent() + 16; + auto const di = static_cast(d); + auto ex = [e = e, di = di]() // Euclidean remainder of e/d + { + int k = (e >= 0 ? 
e : e - (di - 1)) / di; + int k2 = e - k * di; + if (k2 == 0) + return 0; + return di - k2; + }(); + e += ex; + f = Number{f.mantissa(), f.exponent() - e}; // f /= 10^e; + bool neg = false; + if (f < Number{}) + { + neg = true; + f = -f; + } + + // Quadratic least squares curve fit of f^(1/d) in the range [0, 1] + auto const D = ((6 * di + 11) * di + 6) * di + 1; + auto const a0 = 3 * di * ((2 * di - 3) * di + 1); + auto const a1 = 24 * di * (2 * di - 1); + auto const a2 = -30 * (di - 1) * di; + Number r = ((Number{a2} * f + Number{a1}) * f + Number{a0}) / Number{D}; + if (neg) + { + f = -f; + r = -r; + } + + // Newton–Raphson iteration of f^(1/d) with initial guess r + // halt when r stops changing, checking for bouncing on the last iteration + Number rm1{}; + Number rm2{}; + do + { + rm2 = rm1; + rm1 = r; + r = (Number(d - 1) * r + f / power(r, d - 1)) / Number(d); + } while (r != rm1 && r != rm2); + + // return r * 10^(e/d) to reverse scaling + return Number{r.mantissa(), r.exponent() + e / di}; +} + +// Returns f^(n/d) + +Number +power(Number f, unsigned n, unsigned d) +{ + if (f == one) + return f; + auto g = std::gcd(n, d); + if (g == 0) + throw std::overflow_error("Number::power nan"); + if (d == 0) + { + if (f == -one) + return one; + if (abs(f) < one) + return Number{}; + if (abs(f) > one) + throw std::overflow_error("Number::power infinity"); + throw std::overflow_error("Number::power nan"); + } + if (n == 0) + return one; + n /= g; + d /= g; + if ((n % 2) == 1 && (d % 2) == 0 && f < Number{}) + throw std::overflow_error("Number::power nan"); + return root(power(f, n), d); +} + +} // namespace ripple diff --git a/src/test/basics/Number_test.cpp b/src/test/basics/Number_test.cpp new file mode 100644 index 00000000000..570e162872e --- /dev/null +++ b/src/test/basics/Number_test.cpp @@ -0,0 +1,143 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2022 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include + +namespace ripple { + +class Number_test : public beast::unit_test::suite +{ +public: + void + testZero() + { + testcase("zero"); + + Number const z{0, 0}; + + BEAST_EXPECT(z.mantissa() == 0); + BEAST_EXPECT(z.exponent() == Number{}.exponent()); + + BEAST_EXPECT((z + z) == z); + BEAST_EXPECT((z - z) == z); + BEAST_EXPECT(z == -z); + } + + void + test_add() + { + testcase("test_add"); + Number x[]{ + Number{1'000'000'000'000'000, -15}, + Number{-1'000'000'000'000'000, -15}, + Number{-1'000'000'000'000'000, -15}, + Number{-6'555'555'555'555'555, -29}}; + Number y[]{ + Number{6'555'555'555'555'555, -29}, + Number{-6'555'555'555'555'555, -29}, + Number{6'555'555'555'555'555, -29}, + Number{1'000'000'000'000'000, -15}}; + Number z[]{ + Number{1'000'000'000'000'066, -15}, + Number{-1'000'000'000'000'066, -15}, + Number{-9'999'999'999'999'344, -16}, + Number{9'999'999'999'999'344, -16}}; + for (unsigned i = 0; i < std::size(x); ++i) + { + BEAST_EXPECT(x[i] + y[i] == z[i]); + } + } + + void + test_sub() + { + testcase("test_sub"); + Number x[]{ + Number{1'000'000'000'000'000, -15}, + Number{6'555'555'555'555'555, -29}}; + Number y[]{ + Number{6'555'555'555'555'555, -29}, + Number{1'000'000'000'000'000, -15}}; + Number z[]{ + Number{9'999'999'999'999'344, -16}, + Number{-9'999'999'999'999'344, -16}}; + for (unsigned i = 0; i < std::size(x); ++i) + { + BEAST_EXPECT(x[i] - y[i] == z[i]); + } + } + + void + test_div() + { + testcase("test_div"); + Number x[]{Number{1}, Number{1}, Number{0}}; + Number y[]{Number{2}, Number{10}, Number{100}}; + Number z[]{Number{5, -1}, Number{1, -1}, Number{0}}; + for (unsigned i = 0; i < std::size(x); ++i) + { + BEAST_EXPECT(x[i] / y[i] == z[i]); + } + } + + void + test_root() + { + testcase("test_root"); + Number x[]{Number{2}, Number{2'000'000}, Number{2, -30}}; + unsigned y[]{2, 2, 2}; + Number z[]{ + Number{1414213562373095, -15}, + Number{1414213562373095, -12}, + Number{1414213562373095, -30}}; + for (unsigned i = 0; i < std::size(x); ++i) + { + BEAST_EXPECT(root(x[i], y[i]) == z[i]); + } + } + + void + testConversions() + { + testcase("testConversions"); + + IOUAmount x{5, 6}; + Number y = x; + BEAST_EXPECT((y == Number{5, 6})); + IOUAmount z{y}; + BEAST_EXPECT(x == z); + } + + void + run() override + { + testZero(); + test_add(); + test_sub(); + test_div(); + test_root(); + testConversions(); + } +}; + +BEAST_DEFINE_TESTSUITE(Number, ripple_basics, ripple); + +} // namespace ripple From 476ee8a479fb431e21dbfbef1f13b003b49d5722 Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Fri, 15 Apr 2022 16:20:52 -0400 Subject: [PATCH 084/508] Add conversions between Number, XRPAmount and int64_t * Conversions to Number are implicit * Conversions away from Number are explicit and potentially lossy * If lossy, round to nearest, and to even on tie --- src/ripple/basics/Number.h | 8 ++++ src/ripple/basics/impl/Number.cpp | 39 +++++++++++++++++ src/test/basics/Number_test.cpp | 70 +++++++++++++++++++++++++++++++ 3 files changed, 117 insertions(+) diff --git a/src/ripple/basics/Number.h b/src/ripple/basics/Number.h index 607db3ff889..50975e7d052 100644 --- a/src/ripple/basics/Number.h +++ b/src/ripple/basics/Number.h @@ -21,6 +21,7 @@ #define RIPPLE_BASICS_NUMBER_H_INCLUDED #include +#include #include #include #include @@ -51,6 +52,7 @@ class Number explicit constexpr Number(rep mantissa, int exponent, unchecked) noexcept; Number(IOUAmount const& x); + 
Number(XRPAmount const& x); constexpr rep mantissa() const noexcept; @@ -81,6 +83,8 @@ class Number operator/=(Number const& x); explicit operator IOUAmount() const; + explicit operator XRPAmount() const; // round to nearest, even on tie + explicit operator rep() const; // round to nearest, even on tie friend constexpr bool operator==(Number const& x, Number const& y) noexcept @@ -184,6 +188,10 @@ inline Number::Number(IOUAmount const& x) : Number{x.mantissa(), x.exponent()} { } +inline Number::Number(XRPAmount const& x) : Number{x.drops()} +{ +} + inline constexpr Number::rep Number::mantissa() const noexcept { diff --git a/src/ripple/basics/impl/Number.cpp b/src/ripple/basics/impl/Number.cpp index 1f056a2d93c..c9e9c51ce9e 100644 --- a/src/ripple/basics/impl/Number.cpp +++ b/src/ripple/basics/impl/Number.cpp @@ -374,6 +374,45 @@ Number::operator/=(Number const& y) return *this; } +Number::operator rep() const +{ + std::int64_t drops = mantissa_; + int offset = exponent_; + guard g; + if (drops != 0) + { + if (drops < 0) + { + g.set_negative(); + drops = -drops; + } + for (; offset < 0; ++offset) + { + g.push(drops % 10); + drops /= 10; + } + for (; offset > 0; --offset) + { + if (drops > std::numeric_limits::max() / 10) + throw std::runtime_error("Number::operator rep() overflow"); + drops *= 10; + } + auto r = g.round(); + if (r == 1 || (r == 0 && (drops & 1) == 1)) + { + ++drops; + } + if (g.is_negative()) + drops = -drops; + } + return drops; +} + +Number::operator XRPAmount() const +{ + return XRPAmount{static_cast(*this)}; +} + std::string to_string(Number const& amount) { diff --git a/src/test/basics/Number_test.cpp b/src/test/basics/Number_test.cpp index 570e162872e..444095bfb39 100644 --- a/src/test/basics/Number_test.cpp +++ b/src/test/basics/Number_test.cpp @@ -126,6 +126,75 @@ class Number_test : public beast::unit_test::suite BEAST_EXPECT(x == z); } + void + test_to_integer() + { + Number x[]{ + Number{0}, + Number{1}, + Number{2}, + Number{3}, + Number{-1}, + Number{-2}, + Number{-3}, + Number{10}, + Number{99}, + Number{1155}, + Number{9'999'999'999'999'999, 0}, + Number{9'999'999'999'999'999, 1}, + Number{9'999'999'999'999'999, 2}, + Number{-9'999'999'999'999'999, 2}, + Number{15, -1}, + Number{14, -1}, + Number{16, -1}, + Number{25, -1}, + Number{6, -1}, + Number{5, -1}, + Number{4, -1}, + Number{-15, -1}, + Number{-14, -1}, + Number{-16, -1}, + Number{-25, -1}, + Number{-6, -1}, + Number{-5, -1}, + Number{-4, -1}}; + std::int64_t y[]{ + 0, + 1, + 2, + 3, + -1, + -2, + -3, + 10, + 99, + 1155, + 9'999'999'999'999'999, + 99'999'999'999'999'990, + 999'999'999'999'999'900, + -999'999'999'999'999'900, + 2, + 1, + 2, + 2, + 1, + 0, + 0, + -2, + -1, + -2, + -2, + -1, + 0, + 0}; + static_assert(std::size(x) == std::size(y)); + for (unsigned u = 0; u < std::size(x); ++u) + { + auto j = static_cast(x[u]); + BEAST_EXPECT(j == y[u]); + } + } + void run() override { @@ -135,6 +204,7 @@ class Number_test : public beast::unit_test::suite test_div(); test_root(); testConversions(); + test_to_integer(); } }; From 24fe5f9fd0606c39fb57240da9b42badc217b64e Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Mon, 18 Apr 2022 14:21:57 -0400 Subject: [PATCH 085/508] Add clip * Return 0 if abs(x) < limit, else returns x --- src/ripple/basics/Number.h | 10 ++++++++++ src/test/basics/Number_test.cpp | 15 +++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/src/ripple/basics/Number.h b/src/ripple/basics/Number.h index 50975e7d052..3832a84959b 100644 --- a/src/ripple/basics/Number.h +++ 
b/src/ripple/basics/Number.h @@ -325,6 +325,16 @@ root(Number f, unsigned d); Number power(Number f, unsigned n, unsigned d); +// Return 0 if abs(x) < limit, else returns x + +inline constexpr Number +clip(Number const& x, Number const& limit) noexcept +{ + if (abs(x) < limit) + return Number{}; + return x; +} + } // namespace ripple #endif // RIPPLE_BASICS_NUMBER_H_INCLUDED diff --git a/src/test/basics/Number_test.cpp b/src/test/basics/Number_test.cpp index 444095bfb39..43a8884c598 100644 --- a/src/test/basics/Number_test.cpp +++ b/src/test/basics/Number_test.cpp @@ -129,6 +129,7 @@ class Number_test : public beast::unit_test::suite void test_to_integer() { + testcase("test_to_integer"); Number x[]{ Number{0}, Number{1}, @@ -195,6 +196,19 @@ class Number_test : public beast::unit_test::suite } } + void + test_clip() + { + testcase("test_clip"); + Number limit{1, -6}; + BEAST_EXPECT((clip(Number{2, -6}, limit) == Number{2, -6})); + BEAST_EXPECT((clip(Number{1, -6}, limit) == Number{1, -6})); + BEAST_EXPECT((clip(Number{9, -7}, limit) == Number{0})); + BEAST_EXPECT((clip(Number{-2, -6}, limit) == Number{-2, -6})); + BEAST_EXPECT((clip(Number{-1, -6}, limit) == Number{-1, -6})); + BEAST_EXPECT((clip(Number{-9, -7}, limit) == Number{0})); + } + void run() override { @@ -205,6 +219,7 @@ class Number_test : public beast::unit_test::suite test_root(); testConversions(); test_to_integer(); + test_clip(); } }; From c9c54c97994b3729604a42c5b96bcf6c25d8ba8d Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Wed, 20 Apr 2022 17:10:04 -0400 Subject: [PATCH 086/508] Add implicit conversion from STAmount to Number --- src/ripple/basics/impl/Number.cpp | 9 +++++---- src/ripple/protocol/STAmount.h | 9 +++++++++ src/ripple/rpc/impl/ShardArchiveHandler.cpp | 9 +++++---- src/test/basics/Number_test.cpp | 5 +++++ 4 files changed, 24 insertions(+), 8 deletions(-) diff --git a/src/ripple/basics/impl/Number.cpp b/src/ripple/basics/impl/Number.cpp index c9e9c51ce9e..e59ca5fb63f 100644 --- a/src/ripple/basics/impl/Number.cpp +++ b/src/ripple/basics/impl/Number.cpp @@ -325,7 +325,7 @@ Number::operator*=(Number const& y) std::to_string(xe)); mantissa_ = xm * zn; exponent_ = xe; - assert(isnormal()); + assert(isnormal() || *this == Number{}); return *this; } @@ -362,8 +362,9 @@ Number::operator/=(Number const& y) static_assert(a2.isnormal()); Number rm2{}; Number rm1{}; - Number r = a2; - r = (a2 * d + a1) * d + a0; + Number r = (a2 * d + a1) * d + a0; + // Newton–Raphson iteration of 1/x - d with initial guess r + // halt when r stops changing, checking for bouncing on the last iteration do { rm2 = rm1; @@ -376,7 +377,7 @@ Number::operator/=(Number const& y) Number::operator rep() const { - std::int64_t drops = mantissa_; + rep drops = mantissa_; int offset = exponent_; guard g; if (drops != 0) diff --git a/src/ripple/protocol/STAmount.h b/src/ripple/protocol/STAmount.h index d0add30dbba..b6e0e3046ff 100644 --- a/src/ripple/protocol/STAmount.h +++ b/src/ripple/protocol/STAmount.h @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -144,6 +145,7 @@ class STAmount final : public STBase, public CountedObject // Legacy support for new-style amounts STAmount(IOUAmount const& amount, Issue const& issue); STAmount(XRPAmount const& amount); + operator Number() const; //-------------------------------------------------------------------------- // @@ -370,6 +372,13 @@ inline STAmount::operator bool() const noexcept return *this != beast::zero; } +inline STAmount::operator Number() const +{ + 
if (mIsNative) + return xrp(); + return iou(); +} + inline STAmount& STAmount::operator=(beast::Zero) { clear(); diff --git a/src/ripple/rpc/impl/ShardArchiveHandler.cpp b/src/ripple/rpc/impl/ShardArchiveHandler.cpp index 2284780c2b6..d05744f483a 100644 --- a/src/ripple/rpc/impl/ShardArchiveHandler.cpp +++ b/src/ripple/rpc/impl/ShardArchiveHandler.cpp @@ -37,11 +37,12 @@ using namespace std::chrono_literals; boost::filesystem::path ShardArchiveHandler::getDownloadDirectory(Config const& config) { - return get(config.section(ConfigSection::shardDatabase()), - "download_path", + return boost::filesystem::path{ get(config.section(ConfigSection::shardDatabase()), - "path", - "")) / + "download_path", + get(config.section(ConfigSection::shardDatabase()), + "path", + ""))} / "download"; } diff --git a/src/test/basics/Number_test.cpp b/src/test/basics/Number_test.cpp index 43a8884c598..b605ed434c6 100644 --- a/src/test/basics/Number_test.cpp +++ b/src/test/basics/Number_test.cpp @@ -20,6 +20,7 @@ #include #include #include +#include namespace ripple { @@ -124,6 +125,10 @@ class Number_test : public beast::unit_test::suite BEAST_EXPECT((y == Number{5, 6})); IOUAmount z{y}; BEAST_EXPECT(x == z); + XRPAmount xrp{500}; + STAmount st = xrp; + Number n = st; + BEAST_EXPECT(XRPAmount{n} == xrp); } void From 48e804c40c58342f7fc7a84b1782c2a9ffdce82f Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Thu, 21 Apr 2022 16:47:07 -0400 Subject: [PATCH 087/508] Add tests --- src/ripple/basics/Number.h | 14 +- src/ripple/basics/impl/Number.cpp | 112 +++++-- src/test/basics/Number_test.cpp | 475 ++++++++++++++++++++++-------- 3 files changed, 453 insertions(+), 148 deletions(-) diff --git a/src/ripple/basics/Number.h b/src/ripple/basics/Number.h index 3832a84959b..92b99bf0145 100644 --- a/src/ripple/basics/Number.h +++ b/src/ripple/basics/Number.h @@ -37,7 +37,7 @@ class Number { using rep = std::int64_t; rep mantissa_{0}; - int exponent_{-2'147'483'648}; + int exponent_{std::numeric_limits::lowest()}; public: struct unchecked @@ -45,7 +45,7 @@ class Number explicit unchecked() = default; }; - explicit Number() = default; + explicit constexpr Number() = default; Number(rep mantissa); explicit Number(rep mantissa, int exponent); @@ -166,7 +166,7 @@ class Number constexpr static int minExponent = -32768; constexpr static int maxExponent = 32768; - class guard; + class Guard; }; inline constexpr Number::Number(rep mantissa, int exponent, unchecked) noexcept @@ -308,10 +308,10 @@ abs(Number x) noexcept } // Returns f^n -// Uses a log_2(n) number of mulitiplications +// Uses a log_2(n) number of multiplications Number -power(Number f, unsigned n); +power(Number const& f, unsigned n); // Returns f^(1/d) // Uses Newton–Raphson iterations until the result stops changing @@ -323,12 +323,12 @@ root(Number f, unsigned d); // Returns f^(n/d) Number -power(Number f, unsigned n, unsigned d); +power(Number const& f, unsigned n, unsigned d); // Return 0 if abs(x) < limit, else returns x inline constexpr Number -clip(Number const& x, Number const& limit) noexcept +squelch(Number const& x, Number const& limit) noexcept { if (abs(x) < limit) return Number{}; diff --git a/src/ripple/basics/impl/Number.cpp b/src/ripple/basics/impl/Number.cpp index e59ca5fb63f..10834f08e3b 100644 --- a/src/ripple/basics/impl/Number.cpp +++ b/src/ripple/basics/impl/Number.cpp @@ -33,53 +33,66 @@ using uint128_t = __uint128_t; namespace ripple { -// guard +// Guard -class Number::guard +// The Guard class is used to tempoarily add extra digits of +// 
preicision to an operation. This enables the final result +// to be correctly rounded to the internal precision of Number. + +class Number::Guard { - std::uint64_t digits_; - std::uint8_t xbit_ : 1; - std::uint8_t sbit_ : 1; // TODO : get rid of + std::uint64_t digits_; // 16 decimal guard digits + std::uint8_t xbit_ : 1; // has a non-zero digit been shifted off the end + std::uint8_t sbit_ : 1; // the sign of the guard digits public: - explicit guard() : digits_{0}, xbit_{0}, sbit_{0} + explicit Guard() : digits_{0}, xbit_{0}, sbit_{0} { } + // set & test the sign bit void set_positive() noexcept; void set_negative() noexcept; bool is_negative() const noexcept; + + // add a digit void push(unsigned d) noexcept; + + // recover a digit unsigned pop() noexcept; + + // Indicate round direction: 1 is up, -1 is down, 0 is even + // This enables the client to round towards nearest, and on + // tie, round towards even. int round() noexcept; }; inline void -Number::guard::set_positive() noexcept +Number::Guard::set_positive() noexcept { sbit_ = 0; } inline void -Number::guard::set_negative() noexcept +Number::Guard::set_negative() noexcept { sbit_ = 1; } inline bool -Number::guard::is_negative() const noexcept +Number::Guard::is_negative() const noexcept { return sbit_ == 1; } inline void -Number::guard::push(unsigned d) noexcept +Number::Guard::push(unsigned d) noexcept { xbit_ = xbit_ || (digits_ & 0x0000'0000'0000'000F) != 0; digits_ >>= 4; @@ -87,7 +100,7 @@ Number::guard::push(unsigned d) noexcept } inline unsigned -Number::guard::pop() noexcept +Number::Guard::pop() noexcept { unsigned d = (digits_ & 0xF000'0000'0000'0000) >> 60; digits_ <<= 4; @@ -95,7 +108,7 @@ Number::guard::pop() noexcept } int -Number::guard::round() noexcept +Number::Guard::round() noexcept { if (digits_ > 0x5000'0000'0000'0000) return 1; @@ -127,10 +140,12 @@ Number::normalize() m *= 10; --exponent_; } + Guard g; while (m > maxMantissa) { if (exponent_ >= maxExponent) throw std::overflow_error("Number::normalize 1"); + g.push(m % 10); m /= 10; ++exponent_; } @@ -141,6 +156,16 @@ Number::normalize() return; } + auto r = g.round(); + if (r == 1 || (r == 0 && (mantissa_ & 1) == 1)) + { + ++mantissa_; + if (mantissa_ > maxMantissa) + { + mantissa_ /= 10; + ++exponent_; + } + } if (exponent_ > maxExponent) throw std::overflow_error("Number::normalize 2"); @@ -180,7 +205,7 @@ Number::operator+=(Number const& y) ym = -ym; yn = -1; } - guard g; + Guard g; if (xe < ye) { if (xn == -1) @@ -261,7 +286,6 @@ Number::operator+=(Number const& y) } mantissa_ = xm * xn; exponent_ = xe; - assert(isnormal()); return *this; } @@ -295,7 +319,7 @@ Number::operator*=(Number const& y) auto zm = uint128_t(xm) * uint128_t(ym); auto ze = xe + ye; auto zn = xn * yn; - guard g; + Guard g; while (zm > maxMantissa) { g.push(static_cast(zm % 10)); @@ -379,7 +403,7 @@ Number::operator rep() const { rep drops = mantissa_; int offset = exponent_; - guard g; + Guard g; if (drops != 0) { if (drops < 0) @@ -395,7 +419,7 @@ Number::operator rep() const for (; offset > 0; --offset) { if (drops > std::numeric_limits::max() / 10) - throw std::runtime_error("Number::operator rep() overflow"); + throw std::overflow_error("Number::operator rep() overflow"); drops *= 10; } auto r = g.round(); @@ -505,10 +529,10 @@ to_string(Number const& amount) } // Returns f^n -// Uses a log_2(n) number of mulitiplications +// Uses a log_2(n) number of multiplications Number -power(Number f, unsigned n) +power(Number const& f, unsigned n) { if (n == 0) return one; @@ -525,6 
+549,11 @@ power(Number f, unsigned n) // Uses Newton–Raphson iterations until the result stops changing // to find the non-negative root of the polynomial g(x) = x^d - f +// This function, and power(Number f, unsigned n, unsigned d) +// treat corner cases such as 0 roots as advised by Annex F of +// the C standard, which itself is consistent with the IEEE +// floating point standards. + Number root(Number f, unsigned d) { @@ -590,10 +619,48 @@ root(Number f, unsigned d) return Number{r.mantissa(), r.exponent() + e / di}; } +Number +root2(Number f) +{ + if (f == one) + return f; + if (f < Number{}) + throw std::overflow_error("Number::root nan"); + if (f == Number{}) + return f; + + // Scale f into the range (0, 1) such that f's exponent is a multiple of d + auto e = f.exponent() + 16; + if (e % 2 != 0) + ++e; + f = Number{f.mantissa(), f.exponent() - e}; // f /= 10^e; + + // Quadratic least squares curve fit of f^(1/d) in the range [0, 1] + auto const D = 105; + auto const a0 = 18; + auto const a1 = 144; + auto const a2 = -60; + Number r = ((Number{a2} * f + Number{a1}) * f + Number{a0}) / Number{D}; + + // Newton–Raphson iteration of f^(1/2) with initial guess r + // halt when r stops changing, checking for bouncing on the last iteration + Number rm1{}; + Number rm2{}; + do + { + rm2 = rm1; + rm1 = r; + r = (r + f / r) / Number(2); + } while (r != rm1 && r != rm2); + + // return r * 10^(e/2) to reverse scaling + return Number{r.mantissa(), r.exponent() + e / 2}; +} + // Returns f^(n/d) Number -power(Number f, unsigned n, unsigned d) +power(Number const& f, unsigned n, unsigned d) { if (f == one) return f; @@ -606,9 +673,8 @@ power(Number f, unsigned n, unsigned d) return one; if (abs(f) < one) return Number{}; - if (abs(f) > one) - throw std::overflow_error("Number::power infinity"); - throw std::overflow_error("Number::power nan"); + // abs(f) > one + throw std::overflow_error("Number::power infinity"); } if (n == 0) return one; diff --git a/src/test/basics/Number_test.cpp b/src/test/basics/Number_test.cpp index b605ed434c6..b5425a7bc06 100644 --- a/src/test/basics/Number_test.cpp +++ b/src/test/basics/Number_test.cpp @@ -21,6 +21,8 @@ #include #include #include +#include +#include namespace ripple { @@ -42,77 +44,267 @@ class Number_test : public beast::unit_test::suite BEAST_EXPECT(z == -z); } + void + test_limits() + { + testcase("test_limits"); + bool caught = false; + try + { + Number x{10'000'000'000'000'000, 32768}; + } + catch (std::overflow_error const&) + { + caught = true; + } + BEAST_EXPECT(caught); + Number x{10'000'000'000'000'000, 32767}; + BEAST_EXPECT((x == Number{1'000'000'000'000'000, 32768})); + Number z{1'000'000'000'000'000, -32769}; + BEAST_EXPECT(z == Number{}); + Number y{1'000'000'000'000'001'500, 32000}; + BEAST_EXPECT((y == Number{1'000'000'000'000'002, 32003})); + Number m{std::numeric_limits::min()}; + BEAST_EXPECT((m == Number{-9'223'372'036'854'776, 3})); + Number M{std::numeric_limits::max()}; + BEAST_EXPECT((M == Number{9'223'372'036'854'776, 3})); + caught = false; + try + { + Number q{99'999'999'999'999'999, 32767}; + } + catch (std::overflow_error const&) + { + caught = true; + } + BEAST_EXPECT(caught); + } + void test_add() { testcase("test_add"); - Number x[]{ - Number{1'000'000'000'000'000, -15}, - Number{-1'000'000'000'000'000, -15}, - Number{-1'000'000'000'000'000, -15}, - Number{-6'555'555'555'555'555, -29}}; - Number y[]{ - Number{6'555'555'555'555'555, -29}, - Number{-6'555'555'555'555'555, -29}, - Number{6'555'555'555'555'555, -29}, - 
Number{1'000'000'000'000'000, -15}}; - Number z[]{ - Number{1'000'000'000'000'066, -15}, - Number{-1'000'000'000'000'066, -15}, - Number{-9'999'999'999'999'344, -16}, - Number{9'999'999'999'999'344, -16}}; - for (unsigned i = 0; i < std::size(x); ++i) + using Case = std::tuple; + Case c[]{ + {Number{1'000'000'000'000'000, -15}, + Number{6'555'555'555'555'555, -29}, + Number{1'000'000'000'000'066, -15}}, + {Number{-1'000'000'000'000'000, -15}, + Number{-6'555'555'555'555'555, -29}, + Number{-1'000'000'000'000'066, -15}}, + {Number{-1'000'000'000'000'000, -15}, + Number{6'555'555'555'555'555, -29}, + Number{-9'999'999'999'999'344, -16}}, + {Number{-6'555'555'555'555'555, -29}, + Number{1'000'000'000'000'000, -15}, + Number{9'999'999'999'999'344, -16}}, + {Number{}, Number{5}, Number{5}}, + {Number{5'555'555'555'555'555, -32768}, + Number{-5'555'555'555'555'554, -32768}, + Number{0}}, + {Number{-9'999'999'999'999'999, -31}, + Number{1'000'000'000'000'000, -15}, + Number{9'999'999'999'999'990, -16}}}; + for (auto const& [x, y, z] : c) + BEAST_EXPECT(x + y == z); + bool caught = false; + try { - BEAST_EXPECT(x[i] + y[i] == z[i]); + Number{9'999'999'999'999'999, 32768} + + Number{5'000'000'000'000'000, 32767}; } + catch (std::overflow_error const&) + { + caught = true; + } + BEAST_EXPECT(caught); } void test_sub() { testcase("test_sub"); - Number x[]{ - Number{1'000'000'000'000'000, -15}, - Number{6'555'555'555'555'555, -29}}; - Number y[]{ - Number{6'555'555'555'555'555, -29}, - Number{1'000'000'000'000'000, -15}}; - Number z[]{ - Number{9'999'999'999'999'344, -16}, - Number{-9'999'999'999'999'344, -16}}; - for (unsigned i = 0; i < std::size(x); ++i) + using Case = std::tuple; + Case c[]{ + {Number{1'000'000'000'000'000, -15}, + Number{6'555'555'555'555'555, -29}, + Number{9'999'999'999'999'344, -16}}, + {Number{6'555'555'555'555'555, -29}, + Number{1'000'000'000'000'000, -15}, + Number{-9'999'999'999'999'344, -16}}, + {Number{1'000'000'000'000'000, -15}, + Number{1'000'000'000'000'000, -15}, + Number{0}}, + {Number{1'000'000'000'000'000, -15}, + Number{1'000'000'000'000'001, -15}, + Number{-1'000'000'000'000'000, -30}}, + {Number{1'000'000'000'000'001, -15}, + Number{1'000'000'000'000'000, -15}, + Number{1'000'000'000'000'000, -30}}}; + for (auto const& [x, y, z] : c) + BEAST_EXPECT(x - y == z); + } + + void + test_mul() + { + testcase("test_mul"); + using Case = std::tuple; + Case c[]{ + {Number{7}, Number{8}, Number{56}}, + {Number{1414213562373095, -15}, + Number{1414213562373095, -15}, + Number{2000000000000000, -15}}, + {Number{-1414213562373095, -15}, + Number{1414213562373095, -15}, + Number{-2000000000000000, -15}}, + {Number{-1414213562373095, -15}, + Number{-1414213562373095, -15}, + Number{2000000000000000, -15}}, + {Number{3214285714285706, -15}, + Number{3111111111111119, -15}, + Number{1000000000000000, -14}}, + {Number{1000000000000000, -32768}, + Number{1000000000000000, -32768}, + Number{0}}}; + for (auto const& [x, y, z] : c) + BEAST_EXPECT(x * y == z); + bool caught = false; + try { - BEAST_EXPECT(x[i] - y[i] == z[i]); + Number{9'999'999'999'999'999, 32768} * + Number{5'000'000'000'000'000, 32767}; } + catch (std::overflow_error const&) + { + caught = true; + } + BEAST_EXPECT(caught); } void test_div() { testcase("test_div"); - Number x[]{Number{1}, Number{1}, Number{0}}; - Number y[]{Number{2}, Number{10}, Number{100}}; - Number z[]{Number{5, -1}, Number{1, -1}, Number{0}}; - for (unsigned i = 0; i < std::size(x); ++i) + using Case = std::tuple; + Case c[]{ + {Number{1}, 
Number{2}, Number{5, -1}}, + {Number{1}, Number{10}, Number{1, -1}}, + {Number{1}, Number{-10}, Number{-1, -1}}, + {Number{0}, Number{100}, Number{0}}}; + for (auto const& [x, y, z] : c) + BEAST_EXPECT(x / y == z); + bool caught = false; + try + { + Number{1000000000000000, -15} / Number{0}; + } + catch (std::overflow_error const&) { - BEAST_EXPECT(x[i] / y[i] == z[i]); + caught = true; } + BEAST_EXPECT(caught); } void test_root() { testcase("test_root"); - Number x[]{Number{2}, Number{2'000'000}, Number{2, -30}}; - unsigned y[]{2, 2, 2}; - Number z[]{ - Number{1414213562373095, -15}, - Number{1414213562373095, -12}, - Number{1414213562373095, -30}}; - for (unsigned i = 0; i < std::size(x); ++i) + using Case = std::tuple; + Case c[]{ + {Number{2}, 2, Number{1414213562373095, -15}}, + {Number{2'000'000}, 2, Number{1414213562373095, -12}}, + {Number{2, -30}, 2, Number{1414213562373095, -30}}, + {Number{-27}, 3, Number{-3}}, + {Number{1}, 5, Number{1}}, + {Number{-1}, 0, Number{1}}, + {Number{5, -1}, 0, Number{0}}, + {Number{0}, 5, Number{0}}, + {Number{5625, -4}, 2, Number{75, -2}}}; + for (auto const& [x, y, z] : c) + BEAST_EXPECT((root(x, y) == z)); + bool caught = false; + try + { + (void)root(Number{-2}, 0); + } + catch (std::overflow_error const&) + { + caught = true; + } + BEAST_EXPECT(caught); + caught = false; + try + { + (void)root(Number{-2}, 4); + } + catch (std::overflow_error const&) + { + caught = true; + } + BEAST_EXPECT(caught); + } + + void + test_power1() + { + testcase("test_power1"); + using Case = std::tuple; + Case c[]{ + {Number{64}, 0, Number{1}}, + {Number{64}, 1, Number{64}}, + {Number{64}, 2, Number{4096}}, + {Number{-64}, 2, Number{4096}}, + {Number{64}, 3, Number{262144}}, + {Number{-64}, 3, Number{-262144}}}; + for (auto const& [x, y, z] : c) + BEAST_EXPECT((power(x, y) == z)); + } + + void + test_power2() + { + testcase("test_power2"); + using Case = std::tuple; + Case c[]{ + {Number{1}, 3, 7, Number{1}}, + {Number{-1}, 1, 0, Number{1}}, + {Number{-1, -1}, 1, 0, Number{0}}, + {Number{16}, 0, 5, Number{1}}, + {Number{34}, 3, 3, Number{34}}, + {Number{4}, 3, 2, Number{8}}}; + for (auto const& [x, n, d, z] : c) + BEAST_EXPECT((power(x, n, d) == z)); + bool caught = false; + try + { + (void)power(Number{7}, 0, 0); + } + catch (std::overflow_error const&) + { + caught = true; + } + BEAST_EXPECT(caught); + caught = false; + try { - BEAST_EXPECT(root(x[i], y[i]) == z[i]); + (void)power(Number{7}, 1, 0); } + catch (std::overflow_error const&) + { + caught = true; + } + BEAST_EXPECT(caught); + caught = false; + try + { + (void)power(Number{-1, -1}, 3, 2); + } + catch (std::overflow_error const&) + { + caught = true; + } + BEAST_EXPECT(caught); } void @@ -129,102 +321,149 @@ class Number_test : public beast::unit_test::suite STAmount st = xrp; Number n = st; BEAST_EXPECT(XRPAmount{n} == xrp); + IOUAmount x0{0, 0}; + Number y0 = x0; + BEAST_EXPECT((y0 == Number{0})); + IOUAmount z0{y0}; + BEAST_EXPECT(x0 == z0); + XRPAmount xrp0{0}; + Number n0 = xrp0; + BEAST_EXPECT(n0 == Number{0}); + XRPAmount xrp1{n0}; + BEAST_EXPECT(xrp1 == xrp0); } void test_to_integer() { testcase("test_to_integer"); - Number x[]{ - Number{0}, - Number{1}, - Number{2}, - Number{3}, - Number{-1}, - Number{-2}, - Number{-3}, - Number{10}, - Number{99}, - Number{1155}, - Number{9'999'999'999'999'999, 0}, - Number{9'999'999'999'999'999, 1}, - Number{9'999'999'999'999'999, 2}, - Number{-9'999'999'999'999'999, 2}, - Number{15, -1}, - Number{14, -1}, - Number{16, -1}, - Number{25, -1}, - Number{6, -1}, - 
Number{5, -1}, - Number{4, -1}, - Number{-15, -1}, - Number{-14, -1}, - Number{-16, -1}, - Number{-25, -1}, - Number{-6, -1}, - Number{-5, -1}, - Number{-4, -1}}; - std::int64_t y[]{ - 0, - 1, - 2, - 3, - -1, - -2, - -3, - 10, - 99, - 1155, - 9'999'999'999'999'999, - 99'999'999'999'999'990, - 999'999'999'999'999'900, - -999'999'999'999'999'900, - 2, - 1, - 2, - 2, - 1, - 0, - 0, - -2, - -1, - -2, - -2, - -1, - 0, - 0}; - static_assert(std::size(x) == std::size(y)); - for (unsigned u = 0; u < std::size(x); ++u) - { - auto j = static_cast(x[u]); - BEAST_EXPECT(j == y[u]); - } - } - - void - test_clip() - { - testcase("test_clip"); + using Case = std::tuple; + Case c[]{ + {Number{0}, 0}, + {Number{1}, 1}, + {Number{2}, 2}, + {Number{3}, 3}, + {Number{-1}, -1}, + {Number{-2}, -2}, + {Number{-3}, -3}, + {Number{10}, 10}, + {Number{99}, 99}, + {Number{1155}, 1155}, + {Number{9'999'999'999'999'999, 0}, 9'999'999'999'999'999}, + {Number{9'999'999'999'999'999, 1}, 99'999'999'999'999'990}, + {Number{9'999'999'999'999'999, 2}, 999'999'999'999'999'900}, + {Number{-9'999'999'999'999'999, 2}, -999'999'999'999'999'900}, + {Number{15, -1}, 2}, + {Number{14, -1}, 1}, + {Number{16, -1}, 2}, + {Number{25, -1}, 2}, + {Number{6, -1}, 1}, + {Number{5, -1}, 0}, + {Number{4, -1}, 0}, + {Number{-15, -1}, -2}, + {Number{-14, -1}, -1}, + {Number{-16, -1}, -2}, + {Number{-25, -1}, -2}, + {Number{-6, -1}, -1}, + {Number{-5, -1}, 0}, + {Number{-4, -1}, 0}}; + for (auto const& [x, y] : c) + { + auto j = static_cast(x); + BEAST_EXPECT(j == y); + } + bool caught = false; + try + { + (void)static_cast(Number{9223372036854776, 3}); + } + catch (std::overflow_error const&) + { + caught = true; + } + BEAST_EXPECT(caught); + } + + void + test_squelch() + { + testcase("test_squelch"); Number limit{1, -6}; - BEAST_EXPECT((clip(Number{2, -6}, limit) == Number{2, -6})); - BEAST_EXPECT((clip(Number{1, -6}, limit) == Number{1, -6})); - BEAST_EXPECT((clip(Number{9, -7}, limit) == Number{0})); - BEAST_EXPECT((clip(Number{-2, -6}, limit) == Number{-2, -6})); - BEAST_EXPECT((clip(Number{-1, -6}, limit) == Number{-1, -6})); - BEAST_EXPECT((clip(Number{-9, -7}, limit) == Number{0})); + BEAST_EXPECT((squelch(Number{2, -6}, limit) == Number{2, -6})); + BEAST_EXPECT((squelch(Number{1, -6}, limit) == Number{1, -6})); + BEAST_EXPECT((squelch(Number{9, -7}, limit) == Number{0})); + BEAST_EXPECT((squelch(Number{-2, -6}, limit) == Number{-2, -6})); + BEAST_EXPECT((squelch(Number{-1, -6}, limit) == Number{-1, -6})); + BEAST_EXPECT((squelch(Number{-9, -7}, limit) == Number{0})); + } + + void + testToString() + { + testcase("testToString"); + BEAST_EXPECT(to_string(Number(-2, 0)) == "-2"); + BEAST_EXPECT(to_string(Number(0, 0)) == "0"); + BEAST_EXPECT(to_string(Number(2, 0)) == "2"); + BEAST_EXPECT(to_string(Number(25, -3)) == "0.025"); + BEAST_EXPECT(to_string(Number(-25, -3)) == "-0.025"); + BEAST_EXPECT(to_string(Number(25, 1)) == "250"); + BEAST_EXPECT(to_string(Number(-25, 1)) == "-250"); + BEAST_EXPECT(to_string(Number(2, 20)) == "2000000000000000e5"); + BEAST_EXPECT(to_string(Number(-2, -20)) == "-2000000000000000e-35"); + } + + void + test_relationals() + { + testcase("test_relationals"); + BEAST_EXPECT(!(Number{100} < Number{10})); + BEAST_EXPECT(Number{100} > Number{10}); + BEAST_EXPECT(Number{100} >= Number{10}); + BEAST_EXPECT(!(Number{100} <= Number{10})); + } + + void + test_stream() + { + testcase("test_stream"); + Number x{100}; + std::ostringstream os; + os << x; + BEAST_EXPECT(os.str() == to_string(x)); + } + + void + 
test_inc_dec() + { + testcase("test_inc_dec"); + Number x{100}; + Number y = +x; + BEAST_EXPECT(x == y); + BEAST_EXPECT(x++ == y); + BEAST_EXPECT(x == Number{101}); + BEAST_EXPECT(x-- == Number{101}); + BEAST_EXPECT(x == y); } void run() override { testZero(); + test_limits(); test_add(); test_sub(); + test_mul(); test_div(); test_root(); + test_power1(); + test_power2(); testConversions(); test_to_integer(); - test_clip(); + test_squelch(); + testToString(); + test_relationals(); + test_stream(); + test_inc_dec(); } }; From a82ad5ba769bcf70bff1e69f7bfb6045f48e0830 Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Wed, 1 Jun 2022 15:51:00 -0400 Subject: [PATCH 088/508] Use Number for IOUAmount and STAmount arithmetic * Guarded by amendment fixUniversalNumber * Produces slightly better accuracy in some computations. --- src/ripple/app/misc/impl/TxQ.cpp | 2 + src/ripple/app/paths/impl/AmountSpec.h | 4 +- src/ripple/app/tx/impl/Transactor.cpp | 1 + src/ripple/app/tx/impl/apply.cpp | 1 + src/ripple/basics/IOUAmount.h | 166 ++++++++++++++------ src/ripple/basics/Number.h | 12 -- src/ripple/basics/impl/IOUAmount.cpp | 206 +++++++------------------ src/ripple/protocol/Feature.h | 6 +- src/ripple/protocol/STAmount.h | 6 + src/ripple/protocol/impl/Feature.cpp | 1 + src/ripple/protocol/impl/STAmount.cpp | 29 ++++ src/test/app/Offer_test.cpp | 2 +- src/test/app/Taker_test.cpp | 5 +- src/test/app/TrustAndBalance_test.cpp | 4 +- 14 files changed, 228 insertions(+), 217 deletions(-) diff --git a/src/ripple/app/misc/impl/TxQ.cpp b/src/ripple/app/misc/impl/TxQ.cpp index 8424b1d29af..bf278970bb8 100644 --- a/src/ripple/app/misc/impl/TxQ.cpp +++ b/src/ripple/app/misc/impl/TxQ.cpp @@ -293,6 +293,7 @@ TxQ::MaybeTx::apply(Application& app, OpenView& view, beast::Journal j) // If the rules or flags change, preflight again assert(pfresult); STAmountSO stAmountSO{view.rules().enabled(fixSTAmountCanonicalize)}; + NumberSO stNumberSO{view.rules().enabled(fixUniversalNumber)}; if (pfresult->rules != view.rules() || pfresult->flags != flags) { @@ -717,6 +718,7 @@ TxQ::apply( beast::Journal j) { STAmountSO stAmountSO{view.rules().enabled(fixSTAmountCanonicalize)}; + NumberSO stNumberSO{view.rules().enabled(fixUniversalNumber)}; // See if the transaction paid a high enough fee that it can go straight // into the ledger. 
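The two-line pattern added to TxQ::apply above recurs at every transaction entry point touched by this commit (TxQ, Transactor, apply): an RAII guard captures the amendment state and restores the previous setting when it goes out of scope. A minimal sketch of the gating idiom, assuming only that view exposes the ledger Rules and that NumberSO behaves as defined later in this patch:

    {
        // Route IOUAmount/STAmount arithmetic through Number only when the
        // fixUniversalNumber amendment is enabled in this ledger.
        NumberSO stNumberSO{view.rules().enabled(fixUniversalNumber)};

        // ... all amount arithmetic in this scope follows the selected path ...
    }
    // NumberSO's destructor has restored the previous switchover value here.
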
diff --git a/src/ripple/app/paths/impl/AmountSpec.h b/src/ripple/app/paths/impl/AmountSpec.h index 927c3d72fe5..ca814c7b3ac 100644 --- a/src/ripple/app/paths/impl/AmountSpec.h +++ b/src/ripple/app/paths/impl/AmountSpec.h @@ -36,7 +36,7 @@ struct AmountSpec union { XRPAmount xrp; - IOUAmount iou; + IOUAmount iou = {}; }; std::optional issuer; std::optional currency; @@ -64,7 +64,7 @@ struct EitherAmount union { - IOUAmount iou; + IOUAmount iou = {}; XRPAmount xrp; }; diff --git a/src/ripple/app/tx/impl/Transactor.cpp b/src/ripple/app/tx/impl/Transactor.cpp index 4c1a7e726cd..1eecccedb25 100644 --- a/src/ripple/app/tx/impl/Transactor.cpp +++ b/src/ripple/app/tx/impl/Transactor.cpp @@ -782,6 +782,7 @@ Transactor::operator()() JLOG(j_.trace()) << "apply: " << ctx_.tx.getTransactionID(); STAmountSO stAmountSO{view().rules().enabled(fixSTAmountCanonicalize)}; + NumberSO stNumberSO{view().rules().enabled(fixUniversalNumber)}; #ifdef DEBUG { diff --git a/src/ripple/app/tx/impl/apply.cpp b/src/ripple/app/tx/impl/apply.cpp index cc1e792c014..5144e05fea3 100644 --- a/src/ripple/app/tx/impl/apply.cpp +++ b/src/ripple/app/tx/impl/apply.cpp @@ -114,6 +114,7 @@ apply( beast::Journal j) { STAmountSO stAmountSO{view.rules().enabled(fixSTAmountCanonicalize)}; + NumberSO stNumberSO{view.rules().enabled(fixUniversalNumber)}; auto pfresult = preflight(app, view.rules(), tx, flags, j); auto pcresult = preclaim(pfresult, app, view); diff --git a/src/ripple/basics/IOUAmount.h b/src/ripple/basics/IOUAmount.h index 7e9e50d7ee9..c3ef1340a76 100644 --- a/src/ripple/basics/IOUAmount.h +++ b/src/ripple/basics/IOUAmount.h @@ -20,6 +20,8 @@ #ifndef RIPPLE_BASICS_IOUAMOUNT_H_INCLUDED #define RIPPLE_BASICS_IOUAMOUNT_H_INCLUDED +#include +#include #include #include #include @@ -56,84 +58,119 @@ class IOUAmount : private boost::totally_ordered, public: IOUAmount() = default; - IOUAmount(IOUAmount const& other) = default; - IOUAmount& - operator=(IOUAmount const& other) = default; - - IOUAmount(beast::Zero) - { - *this = beast::zero; - } + explicit IOUAmount(Number const& other); + IOUAmount(beast::Zero); + IOUAmount(std::int64_t mantissa, int exponent); - IOUAmount(std::int64_t mantissa, int exponent) - : mantissa_(mantissa), exponent_(exponent) - { - normalize(); - } + IOUAmount& operator=(beast::Zero); - IOUAmount& operator=(beast::Zero) - { - // The -100 is used to allow 0 to sort less than small positive values - // which will have a large negative exponent. - mantissa_ = 0; - exponent_ = -100; - return *this; - } + operator Number() const; IOUAmount& operator+=(IOUAmount const& other); IOUAmount& - operator-=(IOUAmount const& other) - { - *this += -other; - return *this; - } + operator-=(IOUAmount const& other); IOUAmount - operator-() const - { - return {-mantissa_, exponent_}; - } + operator-() const; bool - operator==(IOUAmount const& other) const - { - return exponent_ == other.exponent_ && mantissa_ == other.mantissa_; - } + operator==(IOUAmount const& other) const; bool operator<(IOUAmount const& other) const; /** Returns true if the amount is not zero */ - explicit operator bool() const noexcept - { - return mantissa_ != 0; - } + explicit operator bool() const noexcept; /** Return the sign of the amount */ int - signum() const noexcept - { - return (mantissa_ < 0) ? -1 : (mantissa_ ? 
1 : 0); - } + signum() const noexcept; int - exponent() const noexcept - { - return exponent_; - } + exponent() const noexcept; std::int64_t - mantissa() const noexcept - { - return mantissa_; - } + mantissa() const noexcept; static IOUAmount minPositiveAmount(); }; +inline IOUAmount::IOUAmount(beast::Zero) +{ + *this = beast::zero; +} + +inline IOUAmount::IOUAmount(std::int64_t mantissa, int exponent) + : mantissa_(mantissa), exponent_(exponent) +{ + normalize(); +} + +inline IOUAmount& IOUAmount::operator=(beast::Zero) +{ + // The -100 is used to allow 0 to sort less than small positive values + // which will have a large negative exponent. + mantissa_ = 0; + exponent_ = -100; + return *this; +} + +inline IOUAmount::operator Number() const +{ + return Number{mantissa_, exponent_}; +} + +inline IOUAmount& +IOUAmount::operator-=(IOUAmount const& other) +{ + *this += -other; + return *this; +} + +inline IOUAmount +IOUAmount::operator-() const +{ + return {-mantissa_, exponent_}; +} + +inline bool +IOUAmount::operator==(IOUAmount const& other) const +{ + return exponent_ == other.exponent_ && mantissa_ == other.mantissa_; +} + +inline bool +IOUAmount::operator<(IOUAmount const& other) const +{ + return Number{*this} < Number{other}; +} + +inline IOUAmount::operator bool() const noexcept +{ + return mantissa_ != 0; +} + +inline int +IOUAmount::signum() const noexcept +{ + return (mantissa_ < 0) ? -1 : (mantissa_ ? 1 : 0); +} + +inline int +IOUAmount::exponent() const noexcept +{ + return exponent_; +} + +inline std::int64_t +IOUAmount::mantissa() const noexcept +{ + return mantissa_; +} + std::string to_string(IOUAmount const& amount); @@ -149,6 +186,35 @@ mulRatio( std::uint32_t den, bool roundUp); +// Since IOUAmount and STAmount do not have access to a ledger, this +// is needed to put low-level routines on an amendment switch. Only +// transactions need to use this switchover. Outside of a transaction +// it's safe to unconditionally use the new behavior. +extern LocalValue stNumberSwitchover; + +/** RAII class to set and restore the Number switchover. 
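+
+    The constructor saves the current value of the stNumberSwitchover flag
+    and installs the given one; the destructor restores the saved value.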
+ */ + +class NumberSO +{ + bool saved_; + +public: + ~NumberSO() + { + *stNumberSwitchover = saved_; + } + + NumberSO(NumberSO const&) = delete; + NumberSO& + operator=(NumberSO const&) = delete; + + explicit NumberSO(bool v) : saved_(*stNumberSwitchover) + { + *stNumberSwitchover = v; + } +}; + } // namespace ripple #endif diff --git a/src/ripple/basics/Number.h b/src/ripple/basics/Number.h index 92b99bf0145..ead0e432186 100644 --- a/src/ripple/basics/Number.h +++ b/src/ripple/basics/Number.h @@ -20,7 +20,6 @@ #ifndef RIPPLE_BASICS_NUMBER_H_INCLUDED #define RIPPLE_BASICS_NUMBER_H_INCLUDED -#include #include #include #include @@ -51,7 +50,6 @@ class Number explicit Number(rep mantissa, int exponent); explicit constexpr Number(rep mantissa, int exponent, unchecked) noexcept; - Number(IOUAmount const& x); Number(XRPAmount const& x); constexpr rep @@ -82,7 +80,6 @@ class Number Number& operator/=(Number const& x); - explicit operator IOUAmount() const; explicit operator XRPAmount() const; // round to nearest, even on tie explicit operator rep() const; // round to nearest, even on tie @@ -184,10 +181,6 @@ inline Number::Number(rep mantissa) : Number{mantissa, 0} { } -inline Number::Number(IOUAmount const& x) : Number{x.mantissa(), x.exponent()} -{ -} - inline Number::Number(XRPAmount const& x) : Number{x.drops()} { } @@ -286,11 +279,6 @@ operator/(Number const& x, Number const& y) return z; } -inline Number::operator IOUAmount() const -{ - return IOUAmount{mantissa(), exponent()}; -} - inline constexpr bool Number::isnormal() const noexcept { diff --git a/src/ripple/basics/impl/IOUAmount.cpp b/src/ripple/basics/impl/IOUAmount.cpp index 27254662347..1fa5e4fd22d 100644 --- a/src/ripple/basics/impl/IOUAmount.cpp +++ b/src/ripple/basics/impl/IOUAmount.cpp @@ -27,12 +27,14 @@ namespace ripple { +LocalValue stNumberSwitchover(true); + /* The range for the mantissa when normalized */ -static std::int64_t const minMantissa = 1000000000000000ull; -static std::int64_t const maxMantissa = 9999999999999999ull; +static std::int64_t constexpr minMantissa = 1000000000000000ull; +static std::int64_t constexpr maxMantissa = 9999999999999999ull; /* The range for the exponent when normalized */ -static int const minExponent = -96; -static int const maxExponent = 80; +static int constexpr minExponent = -96; +static int constexpr maxExponent = 80; IOUAmount IOUAmount::minPositiveAmount() @@ -43,6 +45,17 @@ IOUAmount::minPositiveAmount() void IOUAmount::normalize() { + if (*stNumberSwitchover) + { + Number v{mantissa_, exponent_}; + mantissa_ = v.mantissa(); + exponent_ = v.exponent(); + if (exponent_ > maxExponent) + Throw("value overflow"); + if (exponent_ < minExponent) + *this = beast::zero; + return; + } if (mantissa_ == 0) { *this = beast::zero; @@ -82,166 +95,67 @@ IOUAmount::normalize() mantissa_ = -mantissa_; } +IOUAmount::IOUAmount(Number const& other) + : mantissa_(other.mantissa()), exponent_(other.exponent()) +{ + if (exponent_ > maxExponent) + Throw("value overflow"); + if (exponent_ < minExponent) + *this = beast::zero; +} + IOUAmount& IOUAmount::operator+=(IOUAmount const& other) { - if (other == beast::zero) - return *this; - - if (*this == beast::zero) + if (*stNumberSwitchover) { - *this = other; - return *this; + *this = IOUAmount{Number{*this} + Number{other}}; } - - auto m = other.mantissa_; - auto e = other.exponent_; - - while (exponent_ < e) - { - mantissa_ /= 10; - ++exponent_; - } - - while (e < exponent_) + else { - m /= 10; - ++e; - } - - // This addition cannot overflow an 
std::int64_t but we may throw from - // normalize if the result isn't representable. - mantissa_ += m; + if (other == beast::zero) + return *this; - if (mantissa_ >= -10 && mantissa_ <= 10) - { - *this = beast::zero; - return *this; - } - - normalize(); - - return *this; -} + if (*this == beast::zero) + { + *this = other; + return *this; + } -bool -IOUAmount::operator<(IOUAmount const& other) const -{ - // If the two amounts have different signs (zero is treated as positive) - // then the comparison is true iff the left is negative. - bool const lneg = mantissa_ < 0; - bool const rneg = other.mantissa_ < 0; + auto m = other.mantissa_; + auto e = other.exponent_; - if (lneg != rneg) - return lneg; + while (exponent_ < e) + { + mantissa_ /= 10; + ++exponent_; + } - // Both have same sign and the left is zero: the right must be - // greater than 0. - if (mantissa_ == 0) - return other.mantissa_ > 0; + while (e < exponent_) + { + m /= 10; + ++e; + } - // Both have same sign, the right is zero and the left is non-zero. - if (other.mantissa_ == 0) - return false; + // This addition cannot overflow an std::int64_t but we may throw from + // normalize if the result isn't representable. + mantissa_ += m; - // Both have the same sign, compare by exponents: - if (exponent_ > other.exponent_) - return lneg; - if (exponent_ < other.exponent_) - return !lneg; + if (mantissa_ >= -10 && mantissa_ <= 10) + { + *this = beast::zero; + return *this; + } - // If equal exponents, compare mantissas - return mantissa_ < other.mantissa_; + normalize(); + } + return *this; } std::string to_string(IOUAmount const& amount) { - // keep full internal accuracy, but make more human friendly if possible - if (amount == beast::zero) - return "0"; - - int const exponent = amount.exponent(); - auto mantissa = amount.mantissa(); - - // Use scientific notation for exponents that are too small or too large - if (((exponent != 0) && ((exponent < -25) || (exponent > -5)))) - { - std::string ret = std::to_string(mantissa); - ret.append(1, 'e'); - ret.append(std::to_string(exponent)); - return ret; - } - - bool negative = false; - - if (mantissa < 0) - { - mantissa = -mantissa; - negative = true; - } - - assert(exponent + 43 > 0); - - size_t const pad_prefix = 27; - size_t const pad_suffix = 23; - - std::string const raw_value(std::to_string(mantissa)); - std::string val; - - val.reserve(raw_value.length() + pad_prefix + pad_suffix); - val.append(pad_prefix, '0'); - val.append(raw_value); - val.append(pad_suffix, '0'); - - size_t const offset(exponent + 43); - - auto pre_from(val.begin()); - auto const pre_to(val.begin() + offset); - - auto const post_from(val.begin() + offset); - auto post_to(val.end()); - - // Crop leading zeroes. Take advantage of the fact that there's always a - // fixed amount of leading zeroes and skip them. - if (std::distance(pre_from, pre_to) > pad_prefix) - pre_from += pad_prefix; - - assert(post_to >= post_from); - - pre_from = std::find_if(pre_from, pre_to, [](char c) { return c != '0'; }); - - // Crop trailing zeroes. Take advantage of the fact that there's always a - // fixed amount of trailing zeroes and skip them. 
- if (std::distance(post_from, post_to) > pad_suffix) - post_to -= pad_suffix; - - assert(post_to >= post_from); - - post_to = std::find_if( - std::make_reverse_iterator(post_to), - std::make_reverse_iterator(post_from), - [](char c) { return c != '0'; }) - .base(); - - std::string ret; - - if (negative) - ret.append(1, '-'); - - // Assemble the output: - if (pre_from == pre_to) - ret.append(1, '0'); - else - ret.append(pre_from, pre_to); - - if (post_to != post_from) - { - ret.append(1, '.'); - ret.append(post_from, post_to); - } - - return ret; + return to_string(Number{amount}); } IOUAmount diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index b3e1dba78bd..6be2d4dfb68 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -74,7 +74,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. A LogicError on startup will verify this. -static constexpr std::size_t numFeatures = 55; +static constexpr std::size_t numFeatures = 56; /** Amendments that this server supports and the default voting behavior. Whether they are enabled depends on the Rules defined in the validated @@ -340,8 +340,12 @@ extern uint256 const featureNonFungibleTokensV1_1; extern uint256 const fixTrustLinesToSelf; extern uint256 const fixRemoveNFTokenAutoTrustLine; extern uint256 const featureImmediateOfferKilled; +<<<<<<< HEAD extern uint256 const featureDisallowIncoming; extern uint256 const featureXRPFees; +======= +extern uint256 const fixUniversalNumber; +>>>>>>> Use Number for IOUAmount and STAmount arithmetic } // namespace ripple diff --git a/src/ripple/protocol/STAmount.h b/src/ripple/protocol/STAmount.h index b6e0e3046ff..0b9ca953f1a 100644 --- a/src/ripple/protocol/STAmount.h +++ b/src/ripple/protocol/STAmount.h @@ -277,7 +277,13 @@ class STAmount final : public STBase, public CountedObject STBase* move(std::size_t n, void* buf) override; + STAmount& + operator=(IOUAmount const& iou); + friend class detail::STVar; + + friend STAmount + operator+(STAmount const& v1, STAmount const& v2); }; //------------------------------------------------------------------------------ diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index 2e141c11fd1..d650950da4b 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -452,6 +452,7 @@ REGISTER_FIX (fixRemoveNFTokenAutoTrustLine, Supported::yes, DefaultVote::yes REGISTER_FEATURE(ImmediateOfferKilled, Supported::yes, DefaultVote::no); REGISTER_FEATURE(DisallowIncoming, Supported::yes, DefaultVote::no); REGISTER_FEATURE(XRPFees, Supported::yes, DefaultVote::no); +REGISTER_FIX (fixUniversalNumber, Supported::yes, DefaultVote::yes); // The following amendments have been active for at least two years. Their // pre-amendment code has been removed and the identifiers are deprecated. 
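Where the switchover is observable at all, the difference is confined to the least-significant digit of a 16-significant-digit result: the Taker_test expectations updated below move from 2.666666666666666 under the legacy STAmount arithmetic to the correctly rounded 2.666666666666667 once IOU math goes through Number. A small sketch of the rounding behaviour itself (illustrative only, not code from this patch):

    Number const q = Number{8} / Number{3};
    // 8/3 = 2.666..., rounded to nearest at 16 significant digits:
    // q == Number{2'666'666'666'666'667, -15}, i.e. 2.666666666666667
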
diff --git a/src/ripple/protocol/impl/STAmount.cpp b/src/ripple/protocol/impl/STAmount.cpp index d764eb00d33..51e8adb568d 100644 --- a/src/ripple/protocol/impl/STAmount.cpp +++ b/src/ripple/protocol/impl/STAmount.cpp @@ -339,6 +339,19 @@ STAmount::iou() const return {mantissa, exponent}; } +STAmount& +STAmount::operator=(IOUAmount const& iou) +{ + assert(mIsNative == false); + mOffset = iou.exponent(); + mIsNegative = iou < beast::zero; + if (mIsNegative) + mValue = static_cast(-iou.mantissa()); + else + mValue = static_cast(iou.mantissa()); + return *this; +} + //------------------------------------------------------------------------------ // // Operators @@ -382,6 +395,13 @@ operator+(STAmount const& v1, STAmount const& v2) if (v1.native()) return {v1.getFName(), getSNValue(v1) + getSNValue(v2)}; + if (*stNumberSwitchover) + { + auto x = v1; + x = v1.iou() + v2.iou(); + return x; + } + int ov1 = v1.exponent(), ov2 = v2.exponent(); std::int64_t vv1 = static_cast(v1.mantissa()); std::int64_t vv2 = static_cast(v2.mantissa()); @@ -733,6 +753,12 @@ STAmount::canonicalize() mIsNative = false; + if (*stNumberSwitchover) + { + *this = iou(); + return; + } + if (mValue == 0) { mOffset = -100; @@ -1170,6 +1196,9 @@ multiply(STAmount const& v1, STAmount const& v2, Issue const& issue) return STAmount(v1.getFName(), minV * maxV); } + if (*stNumberSwitchover) + return {IOUAmount{Number{v1} * Number{v2}}, issue}; + std::uint64_t value1 = v1.mantissa(); std::uint64_t value2 = v2.mantissa(); int offset1 = v1.exponent(); diff --git a/src/test/app/Offer_test.cpp b/src/test/app/Offer_test.cpp index 9f6e165bc16..fc9a38cd28e 100644 --- a/src/test/app/Offer_test.cpp +++ b/src/test/app/Offer_test.cpp @@ -2122,7 +2122,7 @@ class Offer_test : public beast::unit_test::suite jrr = ledgerEntryState(env, bob, gw, "USD"); BEAST_EXPECT( jrr[jss::node][sfBalance.fieldName][jss::value] == - "-0.966500000033334"); + "-0.9665000000333333"); } void diff --git a/src/test/app/Taker_test.cpp b/src/test/app/Taker_test.cpp index ff0152408ff..c7474b6798e 100644 --- a/src/test/app/Taker_test.cpp +++ b/src/test/app/Taker_test.cpp @@ -905,6 +905,7 @@ class Taker_test : public beast::unit_test::suite { testcase("IOU to IOU"); + NumberSO stNumberSO{true}; Quality q1 = get_quality("1", "1"); // Highly exaggerated 50% transfer rate for the input and output: @@ -937,7 +938,7 @@ class Taker_test : public beast::unit_test::suite q1, {"4", "4"}, "4", - {"2.666666666666666", "2.666666666666666"}, + {"2.666666666666667", "2.666666666666667"}, eur(), usd(), rate, @@ -993,7 +994,7 @@ class Taker_test : public beast::unit_test::suite q1, {"2", "2"}, "10", - {"1.666666666666666", "1.666666666666666"}, + {"1.666666666666667", "1.666666666666667"}, eur(), usd(), rate, diff --git a/src/test/app/TrustAndBalance_test.cpp b/src/test/app/TrustAndBalance_test.cpp index e67f3ba327f..0a94138ad71 100644 --- a/src/test/app/TrustAndBalance_test.cpp +++ b/src/test/app/TrustAndBalance_test.cpp @@ -411,13 +411,11 @@ class TrustAndBalance_test : public beast::unit_test::suite if (with_rate) { - // 65.00000000000001 is correct. - // This is result of limited precision. 
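+            // With Number-based (fixUniversalNumber) arithmetic the
+            // computed balance is exactly 65.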
env.require(balance( alice, STAmount( carol["USD"].issue(), - 6500000000000001ull, + 6500000000000000ull, -14, false, true, From 3f3347122019a257fa4881c161f2f7f4e73e651b Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Fri, 12 Aug 2022 10:33:43 -0400 Subject: [PATCH 089/508] Introduce rounding modes for Number: You can set a thread-local flag to direct Number how to round non-exact results with the syntax: Number::rounding_mode prev_mode = Number::setround(Number::towards_zero); This flag will stay in effect for this thread only until another call to setround. The previously set rounding mode is returned. You can also retrieve the current rounding mode with: Number::rounding_mode current_mode = Number::getround(); The available rounding modes are: * to_nearest : Rounds to nearest representable value. On tie, rounds to even. * towards_zero : Rounds towards zero. * downward : Rounds towards negative infinity. * upward : Rounds towards positive infinity. The default rounding mode is to_nearest. --- src/ripple/basics/Number.h | 13 ++ src/ripple/basics/impl/Number.cpp | 53 ++++++-- src/test/basics/Number_test.cpp | 199 +++++++++++++++++++++++++----- 3 files changed, 226 insertions(+), 39 deletions(-) diff --git a/src/ripple/basics/Number.h b/src/ripple/basics/Number.h index ead0e432186..cc009fa2ed4 100644 --- a/src/ripple/basics/Number.h +++ b/src/ripple/basics/Number.h @@ -149,7 +149,17 @@ class Number return os << to_string(x); } + // Thread local rounding control. Default is to_nearest + enum rounding_mode { to_nearest, towards_zero, downward, upward }; + static rounding_mode + getround(); + // Returns previously set mode + static rounding_mode + setround(rounding_mode mode); + private: + static thread_local rounding_mode mode_; + void normalize(); constexpr bool @@ -308,6 +318,9 @@ power(Number const& f, unsigned n); Number root(Number f, unsigned d); +Number +root2(Number f); + // Returns f^(n/d) Number diff --git a/src/ripple/basics/impl/Number.cpp b/src/ripple/basics/impl/Number.cpp index 10834f08e3b..0690be0773f 100644 --- a/src/ripple/basics/impl/Number.cpp +++ b/src/ripple/basics/impl/Number.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #ifdef _MSVC_LANG #include @@ -33,6 +34,20 @@ using uint128_t = __uint128_t; namespace ripple { +thread_local Number::rounding_mode Number::mode_ = Number::to_nearest; + +Number::rounding_mode +Number::getround() +{ + return mode_; +} + +Number::rounding_mode +Number::setround(rounding_mode mode) +{ + return std::exchange(mode_, mode); +} + // Guard // The Guard class is used to tempoarily add extra digits of @@ -107,16 +122,40 @@ Number::Guard::pop() noexcept return d; } +// Returns: +// -1 if Guard is less than half +// 0 if Guard is exactly half +// 1 if Guard is greater than half int Number::Guard::round() noexcept { - if (digits_ > 0x5000'0000'0000'0000) - return 1; - if (digits_ < 0x5000'0000'0000'0000) - return -1; - if (xbit_) - return 1; - return 0; + auto mode = Number::getround(); + switch (mode) + { + case to_nearest: + if (digits_ > 0x5000'0000'0000'0000) + return 1; + if (digits_ < 0x5000'0000'0000'0000) + return -1; + if (xbit_) + return 1; + return 0; + case towards_zero: + return -1; + case downward: + if (sbit_) + { + if (digits_ > 0 || xbit_) + return 1; + } + return -1; + case upward: + if (sbit_) + return -1; + if (digits_ > 0 || xbit_) + return 1; + return -1; + } } // Number diff --git a/src/test/basics/Number_test.cpp b/src/test/basics/Number_test.cpp index b5425a7bc06..d7bd8264878 100644 --- 
a/src/test/basics/Number_test.cpp +++ b/src/test/basics/Number_test.cpp @@ -26,6 +26,24 @@ namespace ripple { +class saveNumberRoundMode +{ + Number::rounding_mode mode_; + +public: + ~saveNumberRoundMode() + { + Number::setround(mode_); + } + explicit saveNumberRoundMode(Number::rounding_mode mode) noexcept + : mode_{mode} + { + } + saveNumberRoundMode(saveNumberRoundMode const&) = delete; + saveNumberRoundMode& + operator=(saveNumberRoundMode const&) = delete; +}; + class Number_test : public beast::unit_test::suite { public: @@ -338,39 +356,156 @@ class Number_test : public beast::unit_test::suite { testcase("test_to_integer"); using Case = std::tuple; - Case c[]{ - {Number{0}, 0}, - {Number{1}, 1}, - {Number{2}, 2}, - {Number{3}, 3}, - {Number{-1}, -1}, - {Number{-2}, -2}, - {Number{-3}, -3}, - {Number{10}, 10}, - {Number{99}, 99}, - {Number{1155}, 1155}, - {Number{9'999'999'999'999'999, 0}, 9'999'999'999'999'999}, - {Number{9'999'999'999'999'999, 1}, 99'999'999'999'999'990}, - {Number{9'999'999'999'999'999, 2}, 999'999'999'999'999'900}, - {Number{-9'999'999'999'999'999, 2}, -999'999'999'999'999'900}, - {Number{15, -1}, 2}, - {Number{14, -1}, 1}, - {Number{16, -1}, 2}, - {Number{25, -1}, 2}, - {Number{6, -1}, 1}, - {Number{5, -1}, 0}, - {Number{4, -1}, 0}, - {Number{-15, -1}, -2}, - {Number{-14, -1}, -1}, - {Number{-16, -1}, -2}, - {Number{-25, -1}, -2}, - {Number{-6, -1}, -1}, - {Number{-5, -1}, 0}, - {Number{-4, -1}, 0}}; - for (auto const& [x, y] : c) + saveNumberRoundMode save{Number::setround(Number::to_nearest)}; + { + Case c[]{ + {Number{0}, 0}, + {Number{1}, 1}, + {Number{2}, 2}, + {Number{3}, 3}, + {Number{-1}, -1}, + {Number{-2}, -2}, + {Number{-3}, -3}, + {Number{10}, 10}, + {Number{99}, 99}, + {Number{1155}, 1155}, + {Number{9'999'999'999'999'999, 0}, 9'999'999'999'999'999}, + {Number{9'999'999'999'999'999, 1}, 99'999'999'999'999'990}, + {Number{9'999'999'999'999'999, 2}, 999'999'999'999'999'900}, + {Number{-9'999'999'999'999'999, 2}, -999'999'999'999'999'900}, + {Number{15, -1}, 2}, + {Number{14, -1}, 1}, + {Number{16, -1}, 2}, + {Number{25, -1}, 2}, + {Number{6, -1}, 1}, + {Number{5, -1}, 0}, + {Number{4, -1}, 0}, + {Number{-15, -1}, -2}, + {Number{-14, -1}, -1}, + {Number{-16, -1}, -2}, + {Number{-25, -1}, -2}, + {Number{-6, -1}, -1}, + {Number{-5, -1}, 0}, + {Number{-4, -1}, 0}}; + for (auto const& [x, y] : c) + { + auto j = static_cast(x); + BEAST_EXPECT(j == y); + } + } + auto prev_mode = Number::setround(Number::towards_zero); + BEAST_EXPECT(prev_mode == Number::to_nearest); + { + Case c[]{ + {Number{0}, 0}, + {Number{1}, 1}, + {Number{2}, 2}, + {Number{3}, 3}, + {Number{-1}, -1}, + {Number{-2}, -2}, + {Number{-3}, -3}, + {Number{10}, 10}, + {Number{99}, 99}, + {Number{1155}, 1155}, + {Number{9'999'999'999'999'999, 0}, 9'999'999'999'999'999}, + {Number{9'999'999'999'999'999, 1}, 99'999'999'999'999'990}, + {Number{9'999'999'999'999'999, 2}, 999'999'999'999'999'900}, + {Number{-9'999'999'999'999'999, 2}, -999'999'999'999'999'900}, + {Number{15, -1}, 1}, + {Number{14, -1}, 1}, + {Number{16, -1}, 1}, + {Number{25, -1}, 2}, + {Number{6, -1}, 0}, + {Number{5, -1}, 0}, + {Number{4, -1}, 0}, + {Number{-15, -1}, -1}, + {Number{-14, -1}, -1}, + {Number{-16, -1}, -1}, + {Number{-25, -1}, -2}, + {Number{-6, -1}, 0}, + {Number{-5, -1}, 0}, + {Number{-4, -1}, 0}}; + for (auto const& [x, y] : c) + { + auto j = static_cast(x); + BEAST_EXPECT(j == y); + } + } + prev_mode = Number::setround(Number::downward); + BEAST_EXPECT(prev_mode == Number::towards_zero); + { + Case c[]{ + 
{Number{0}, 0}, + {Number{1}, 1}, + {Number{2}, 2}, + {Number{3}, 3}, + {Number{-1}, -1}, + {Number{-2}, -2}, + {Number{-3}, -3}, + {Number{10}, 10}, + {Number{99}, 99}, + {Number{1155}, 1155}, + {Number{9'999'999'999'999'999, 0}, 9'999'999'999'999'999}, + {Number{9'999'999'999'999'999, 1}, 99'999'999'999'999'990}, + {Number{9'999'999'999'999'999, 2}, 999'999'999'999'999'900}, + {Number{-9'999'999'999'999'999, 2}, -999'999'999'999'999'900}, + {Number{15, -1}, 1}, + {Number{14, -1}, 1}, + {Number{16, -1}, 1}, + {Number{25, -1}, 2}, + {Number{6, -1}, 0}, + {Number{5, -1}, 0}, + {Number{4, -1}, 0}, + {Number{-15, -1}, -2}, + {Number{-14, -1}, -2}, + {Number{-16, -1}, -2}, + {Number{-25, -1}, -3}, + {Number{-6, -1}, -1}, + {Number{-5, -1}, -1}, + {Number{-4, -1}, -1}}; + for (auto const& [x, y] : c) + { + auto j = static_cast(x); + BEAST_EXPECT(j == y); + } + } + prev_mode = Number::setround(Number::upward); + BEAST_EXPECT(prev_mode == Number::downward); { - auto j = static_cast(x); - BEAST_EXPECT(j == y); + Case c[]{ + {Number{0}, 0}, + {Number{1}, 1}, + {Number{2}, 2}, + {Number{3}, 3}, + {Number{-1}, -1}, + {Number{-2}, -2}, + {Number{-3}, -3}, + {Number{10}, 10}, + {Number{99}, 99}, + {Number{1155}, 1155}, + {Number{9'999'999'999'999'999, 0}, 9'999'999'999'999'999}, + {Number{9'999'999'999'999'999, 1}, 99'999'999'999'999'990}, + {Number{9'999'999'999'999'999, 2}, 999'999'999'999'999'900}, + {Number{-9'999'999'999'999'999, 2}, -999'999'999'999'999'900}, + {Number{15, -1}, 2}, + {Number{14, -1}, 2}, + {Number{16, -1}, 2}, + {Number{25, -1}, 3}, + {Number{6, -1}, 1}, + {Number{5, -1}, 1}, + {Number{4, -1}, 1}, + {Number{-15, -1}, -1}, + {Number{-14, -1}, -1}, + {Number{-16, -1}, -1}, + {Number{-25, -1}, -2}, + {Number{-6, -1}, 0}, + {Number{-5, -1}, 0}, + {Number{-4, -1}, 0}}; + for (auto const& [x, y] : c) + { + auto j = static_cast(x); + BEAST_EXPECT(j == y); + } } bool caught = false; try From d275a2ab725879230618c687481caefac40efa93 Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Thu, 15 Sep 2022 08:17:48 -0400 Subject: [PATCH 090/508] Silence warnings --- src/ripple/basics/Number.h | 1 + src/ripple/basics/impl/Number.cpp | 8 +++++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/ripple/basics/Number.h b/src/ripple/basics/Number.h index cc009fa2ed4..ef45b3f7795 100644 --- a/src/ripple/basics/Number.h +++ b/src/ripple/basics/Number.h @@ -22,6 +22,7 @@ #include #include +#include #include #include diff --git a/src/ripple/basics/impl/Number.cpp b/src/ripple/basics/impl/Number.cpp index 0690be0773f..c4b6daffafb 100644 --- a/src/ripple/basics/impl/Number.cpp +++ b/src/ripple/basics/impl/Number.cpp @@ -132,6 +132,8 @@ Number::Guard::round() noexcept auto mode = Number::getround(); switch (mode) { + // round to nearest if mode is not one of the predefined values + default: case to_nearest: if (digits_ > 0x5000'0000'0000'0000) return 1; @@ -506,8 +508,8 @@ to_string(Number const& amount) assert(exponent + 43 > 0); - size_t const pad_prefix = 27; - size_t const pad_suffix = 23; + ptrdiff_t const pad_prefix = 27; + ptrdiff_t const pad_suffix = 23; std::string const raw_value(std::to_string(mantissa)); std::string val; @@ -517,7 +519,7 @@ to_string(Number const& amount) val.append(raw_value); val.append(pad_suffix, '0'); - size_t const offset(exponent + 43); + ptrdiff_t const offset(exponent + 43); auto pre_from(val.begin()); auto const pre_to(val.begin() + offset); From 6fcd654bee91827d287d7beaf6ba437b1a4e9fd4 Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Thu, 15 Sep 
2022 11:31:15 -0400 Subject: [PATCH 091/508] Remove undefined behavior * Taking the negative of a signed negative is UB, but taking the negative of an unsigned is not. --- src/ripple/basics/impl/IOUAmount.cpp | 75 +++++---- src/ripple/basics/impl/Number.cpp | 55 ++++--- src/test/app/Offer_test.cpp | 62 +++++--- src/test/app/Taker_test.cpp | 227 ++++++++++++++++----------- src/test/jtx/Env.h | 3 + src/test/jtx/impl/Env.cpp | 8 + 6 files changed, 251 insertions(+), 179 deletions(-) diff --git a/src/ripple/basics/impl/IOUAmount.cpp b/src/ripple/basics/impl/IOUAmount.cpp index 1fa5e4fd22d..76f4bbe9fcd 100644 --- a/src/ripple/basics/impl/IOUAmount.cpp +++ b/src/ripple/basics/impl/IOUAmount.cpp @@ -45,6 +45,12 @@ IOUAmount::minPositiveAmount() void IOUAmount::normalize() { + if (mantissa_ == 0) + { + *this = beast::zero; + return; + } + if (*stNumberSwitchover) { Number v{mantissa_, exponent_}; @@ -56,11 +62,6 @@ IOUAmount::normalize() *this = beast::zero; return; } - if (mantissa_ == 0) - { - *this = beast::zero; - return; - } bool const negative = (mantissa_ < 0); @@ -107,48 +108,46 @@ IOUAmount::IOUAmount(Number const& other) IOUAmount& IOUAmount::operator+=(IOUAmount const& other) { + if (other == beast::zero) + return *this; + + if (*this == beast::zero) + { + *this = other; + return *this; + } + if (*stNumberSwitchover) { *this = IOUAmount{Number{*this} + Number{other}}; + return *this; } - else - { - if (other == beast::zero) - return *this; - - if (*this == beast::zero) - { - *this = other; - return *this; - } + auto m = other.mantissa_; + auto e = other.exponent_; - auto m = other.mantissa_; - auto e = other.exponent_; - - while (exponent_ < e) - { - mantissa_ /= 10; - ++exponent_; - } - - while (e < exponent_) - { - m /= 10; - ++e; - } + while (exponent_ < e) + { + mantissa_ /= 10; + ++exponent_; + } - // This addition cannot overflow an std::int64_t but we may throw from - // normalize if the result isn't representable. - mantissa_ += m; + while (e < exponent_) + { + m /= 10; + ++e; + } - if (mantissa_ >= -10 && mantissa_ <= 10) - { - *this = beast::zero; - return *this; - } + // This addition cannot overflow an std::int64_t but we may throw from + // normalize if the result isn't representable. 
+ mantissa_ += m; - normalize(); + if (mantissa_ >= -10 && mantissa_ <= 10) + { + *this = beast::zero; + return *this; } + + normalize(); return *this; } diff --git a/src/ripple/basics/impl/Number.cpp b/src/ripple/basics/impl/Number.cpp index c4b6daffafb..a7a8159fed3 100644 --- a/src/ripple/basics/impl/Number.cpp +++ b/src/ripple/basics/impl/Number.cpp @@ -25,7 +25,7 @@ #include #include -#ifdef _MSVC_LANG +#ifdef BOOST_COMP_MSVC #include using uint128_t = boost::multiprecision::uint128_t; #else // !defined(_MSVC_LANG) @@ -130,34 +130,37 @@ int Number::Guard::round() noexcept { auto mode = Number::getround(); - switch (mode) + + if (mode == towards_zero) + return -1; + + if (mode == downward) { - // round to nearest if mode is not one of the predefined values - default: - case to_nearest: - if (digits_ > 0x5000'0000'0000'0000) - return 1; - if (digits_ < 0x5000'0000'0000'0000) - return -1; - if (xbit_) - return 1; - return 0; - case towards_zero: - return -1; - case downward: - if (sbit_) - { - if (digits_ > 0 || xbit_) - return 1; - } - return -1; - case upward: - if (sbit_) - return -1; + if (sbit_) + { if (digits_ > 0 || xbit_) return 1; + } + return -1; + } + + if (mode == upward) + { + if (sbit_) return -1; + if (digits_ > 0 || xbit_) + return 1; + return -1; } + + // assume round to nearest if mode is not one of the predefined values + if (digits_ > 0x5000'0000'0000'0000) + return 1; + if (digits_ < 0x5000'0000'0000'0000) + return -1; + if (xbit_) + return 1; + return 0; } // Number @@ -173,9 +176,9 @@ Number::normalize() return; } bool const negative = (mantissa_ < 0); - if (negative) - mantissa_ = -mantissa_; auto m = static_cast>(mantissa_); + if (negative) + m = -m; while ((m < minMantissa) && (exponent_ > minExponent)) { m *= 10; diff --git a/src/test/app/Offer_test.cpp b/src/test/app/Offer_test.cpp index fc9a38cd28e..1162612b733 100644 --- a/src/test/app/Offer_test.cpp +++ b/src/test/app/Offer_test.cpp @@ -2092,37 +2092,53 @@ class Offer_test : public beast::unit_test::suite using namespace jtx; - Env env{*this, features}; + for (auto NumberSwitchOver : {false, true}) + { + Env env{*this, features}; + if (NumberSwitchOver) + env.enableFeature(fixUniversalNumber); + else + env.disableFeature(fixUniversalNumber); - auto const gw = Account{"gateway"}; - auto const alice = Account{"alice"}; - auto const bob = Account{"bob"}; - auto const USD = gw["USD"]; + auto const gw = Account{"gateway"}; + auto const alice = Account{"alice"}; + auto const bob = Account{"bob"}; + auto const USD = gw["USD"]; - env.fund(XRP(10000), gw, alice, bob); + env.fund(XRP(10000), gw, alice, bob); - env(rate(gw, 1.005)); + env(rate(gw, 1.005)); - env(trust(alice, USD(1000))); - env(trust(bob, USD(1000))); - env(trust(gw, alice["USD"](50))); + env(trust(alice, USD(1000))); + env(trust(bob, USD(1000))); + env(trust(gw, alice["USD"](50))); - env(pay(gw, bob, bob["USD"](1))); - env(pay(alice, gw, USD(50))); + env(pay(gw, bob, bob["USD"](1))); + env(pay(alice, gw, USD(50))); - env(trust(gw, alice["USD"](0))); + env(trust(gw, alice["USD"](0))); - env(offer(alice, USD(50), XRP(150000))); - env(offer(bob, XRP(100), USD(0.1))); + env(offer(alice, USD(50), XRP(150000))); + env(offer(bob, XRP(100), USD(0.1))); - auto jrr = ledgerEntryState(env, alice, gw, "USD"); - BEAST_EXPECT( - jrr[jss::node][sfBalance.fieldName][jss::value] == - "49.96666666666667"); - jrr = ledgerEntryState(env, bob, gw, "USD"); - BEAST_EXPECT( - jrr[jss::node][sfBalance.fieldName][jss::value] == - "-0.9665000000333333"); + auto jrr = 
ledgerEntryState(env, alice, gw, "USD"); + BEAST_EXPECT( + jrr[jss::node][sfBalance.fieldName][jss::value] == + "49.96666666666667"); + jrr = ledgerEntryState(env, bob, gw, "USD"); + if (NumberSwitchOver) + { + BEAST_EXPECT( + jrr[jss::node][sfBalance.fieldName][jss::value] == + "-0.9665000000333333"); + } + else + { + BEAST_EXPECT( + jrr[jss::node][sfBalance.fieldName][jss::value] == + "-0.966500000033334"); + } + } } void diff --git a/src/test/app/Taker_test.cpp b/src/test/app/Taker_test.cpp index c7474b6798e..38735d8cfce 100644 --- a/src/test/app/Taker_test.cpp +++ b/src/test/app/Taker_test.cpp @@ -905,100 +905,143 @@ class Taker_test : public beast::unit_test::suite { testcase("IOU to IOU"); - NumberSO stNumberSO{true}; - Quality q1 = get_quality("1", "1"); + for (auto NumberSwitchOver : {false, true}) + { + NumberSO stNumberSO{NumberSwitchOver}; + Quality q1 = get_quality("1", "1"); - // Highly exaggerated 50% transfer rate for the input and output: - Rate const rate{parityRate.value + (parityRate.value / 2)}; + // Highly exaggerated 50% transfer rate for the input and output: + Rate const rate{parityRate.value + (parityRate.value / 2)}; - // TAKER OWNER - // QUAL OFFER FUNDS QUAL OFFER FUNDS - // EXPECTED - // EUR USD - attempt( - Sell, - "N:N", - q1, - {"2", "2"}, - "10", - q1, - {"2", "2"}, - "10", - {"2", "2"}, - eur(), - usd(), - rate, - rate); - attempt( - Sell, - "N:B", - q1, - {"4", "4"}, - "10", - q1, - {"4", "4"}, - "4", - {"2.666666666666667", "2.666666666666667"}, - eur(), - usd(), - rate, - rate); - attempt( - Buy, - "N:T", - q1, - {"1", "1"}, - "10", - q1, - {"2", "2"}, - "10", - {"1", "1"}, - eur(), - usd(), - rate, - rate); - attempt( - Buy, - "N:BT", - q1, - {"2", "2"}, - "10", - q1, - {"6", "6"}, - "5", - {"2", "2"}, - eur(), - usd(), - rate, - rate); - attempt( - Buy, - "N:TB", - q1, - {"2", "2"}, - "2", - q1, - {"6", "6"}, - "1", - {"0.6666666666666667", "0.6666666666666667"}, - eur(), - usd(), - rate, - rate); - attempt( - Sell, - "A:N", - q1, - {"2", "2"}, - "2.5", - q1, - {"2", "2"}, - "10", - {"1.666666666666667", "1.666666666666667"}, - eur(), - usd(), - rate, - rate); + // TAKER OWNER + // QUAL OFFER FUNDS QUAL OFFER FUNDS + // EXPECTED + // EUR USD + attempt( + Sell, + "N:N", + q1, + {"2", "2"}, + "10", + q1, + {"2", "2"}, + "10", + {"2", "2"}, + eur(), + usd(), + rate, + rate); + if (NumberSwitchOver) + { + attempt( + Sell, + "N:B", + q1, + {"4", "4"}, + "10", + q1, + {"4", "4"}, + "4", + {"2.666666666666667", "2.666666666666667"}, + eur(), + usd(), + rate, + rate); + } + else + { + attempt( + Sell, + "N:B", + q1, + {"4", "4"}, + "10", + q1, + {"4", "4"}, + "4", + {"2.666666666666666", "2.666666666666666"}, + eur(), + usd(), + rate, + rate); + } + attempt( + Buy, + "N:T", + q1, + {"1", "1"}, + "10", + q1, + {"2", "2"}, + "10", + {"1", "1"}, + eur(), + usd(), + rate, + rate); + attempt( + Buy, + "N:BT", + q1, + {"2", "2"}, + "10", + q1, + {"6", "6"}, + "5", + {"2", "2"}, + eur(), + usd(), + rate, + rate); + attempt( + Buy, + "N:TB", + q1, + {"2", "2"}, + "2", + q1, + {"6", "6"}, + "1", + {"0.6666666666666667", "0.6666666666666667"}, + eur(), + usd(), + rate, + rate); + if (NumberSwitchOver) + { + attempt( + Sell, + "A:N", + q1, + {"2", "2"}, + "2.5", + q1, + {"2", "2"}, + "10", + {"1.666666666666667", "1.666666666666667"}, + eur(), + usd(), + rate, + rate); + } + else + { + attempt( + Sell, + "A:N", + q1, + {"2", "2"}, + "2.5", + q1, + {"2", "2"}, + "10", + {"1.666666666666666", "1.666666666666666"}, + eur(), + usd(), + rate, + rate); + } + } } void diff 
--git a/src/test/jtx/Env.h b/src/test/jtx/Env.h index 8841d2d2f58..2a85a57e1db 100644 --- a/src/test/jtx/Env.h +++ b/src/test/jtx/Env.h @@ -543,6 +543,9 @@ class Env void enableFeature(uint256 const feature); + void + disableFeature(uint256 const feature); + private: void fund(bool setDefaultRipple, STAmount const& amount, Account const& account); diff --git a/src/test/jtx/impl/Env.cpp b/src/test/jtx/impl/Env.cpp index 900b9812dae..41eac3204ed 100644 --- a/src/test/jtx/impl/Env.cpp +++ b/src/test/jtx/impl/Env.cpp @@ -466,6 +466,14 @@ Env::enableFeature(uint256 const feature) app().config().features.insert(feature); } +void +Env::disableFeature(uint256 const feature) +{ + // Env::close() must be called for feature + // enable to take place. + app().config().features.erase(feature); +} + } // namespace jtx } // namespace test From e354497f6311ee9fbb00ad70657a2fd68a7ed87b Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Wed, 19 Oct 2022 16:00:41 -0400 Subject: [PATCH 092/508] Include rounding mode in XRPAmount to STAmount conversion. --- src/ripple/basics/IOUAmount.h | 4 - src/ripple/basics/Number.h | 18 + src/ripple/protocol/impl/STAmount.cpp | 38 +- src/test/app/NFToken_test.cpp | 22 +- src/test/app/Taker_test.cpp | 1557 +++++++++++++++---------- src/test/basics/Number_test.cpp | 42 +- 6 files changed, 1028 insertions(+), 653 deletions(-) diff --git a/src/ripple/basics/IOUAmount.h b/src/ripple/basics/IOUAmount.h index c3ef1340a76..764aa38aae3 100644 --- a/src/ripple/basics/IOUAmount.h +++ b/src/ripple/basics/IOUAmount.h @@ -186,10 +186,6 @@ mulRatio( std::uint32_t den, bool roundUp); -// Since IOUAmount and STAmount do not have access to a ledger, this -// is needed to put low-level routines on an amendment switch. Only -// transactions need to use this switchover. Outside of a transaction -// it's safe to unconditionally use the new behavior. extern LocalValue stNumberSwitchover; /** RAII class to set and restore the Number switchover. diff --git a/src/ripple/basics/Number.h b/src/ripple/basics/Number.h index ef45b3f7795..58d903579b5 100644 --- a/src/ripple/basics/Number.h +++ b/src/ripple/basics/Number.h @@ -337,6 +337,24 @@ squelch(Number const& x, Number const& limit) noexcept return x; } +class saveNumberRoundMode +{ + Number::rounding_mode mode_; + +public: + ~saveNumberRoundMode() + { + Number::setround(mode_); + } + explicit saveNumberRoundMode(Number::rounding_mode mode) noexcept + : mode_{mode} + { + } + saveNumberRoundMode(saveNumberRoundMode const&) = delete; + saveNumberRoundMode& + operator=(saveNumberRoundMode const&) = delete; +}; + } // namespace ripple #endif // RIPPLE_BASICS_NUMBER_H_INCLUDED diff --git a/src/ripple/protocol/impl/STAmount.cpp b/src/ripple/protocol/impl/STAmount.cpp index 51e8adb568d..d1a878c8b4f 100644 --- a/src/ripple/protocol/impl/STAmount.cpp +++ b/src/ripple/protocol/impl/STAmount.cpp @@ -725,24 +725,36 @@ STAmount::canonicalize() "Native currency amount out of range"); } - while (mOffset < 0) + if (*stNumberSwitchover && *stAmountCanonicalizeSwitchover) { - mValue /= 10; - ++mOffset; + Number num( + mIsNegative ? -mValue : mValue, mOffset, Number::unchecked{}); + XRPAmount xrp{num}; + mIsNegative = xrp.drops() < 0; + mValue = mIsNegative ? -xrp.drops() : xrp.drops(); + mOffset = 0; } - - while (mOffset > 0) + else { - if (*stAmountCanonicalizeSwitchover) + while (mOffset < 0) + { + mValue /= 10; + ++mOffset; + } + + while (mOffset > 0) { - // N.B. 
do not move the overflow check to after the - // multiplication - if (mValue > cMaxNativeN) - Throw( - "Native currency amount out of range"); + if (*stAmountCanonicalizeSwitchover) + { + // N.B. do not move the overflow check to after the + // multiplication + if (mValue > cMaxNativeN) + Throw( + "Native currency amount out of range"); + } + mValue *= 10; + --mOffset; } - mValue *= 10; - --mOffset; } if (mValue > cMaxNativeN) diff --git a/src/test/app/NFToken_test.cpp b/src/test/app/NFToken_test.cpp index 42a6eb4d3ce..842f3f76cc8 100644 --- a/src/test/app/NFToken_test.cpp +++ b/src/test/app/NFToken_test.cpp @@ -2349,7 +2349,13 @@ class NFToken_test : public beast::unit_test::suite // See the impact of rounding when the nft is sold for small amounts // of drops. + for (auto NumberSwitchOver : {true}) { + if (NumberSwitchOver) + env.enableFeature(fixUniversalNumber); + else + env.disableFeature(fixUniversalNumber); + // An nft with a transfer fee of 1 basis point. uint256 const nftID = token::getNextID(env, alice, 0u, tfTransferable, 1); @@ -2374,16 +2380,16 @@ class NFToken_test : public beast::unit_test::suite // minter sells to carol. The payment is just small enough that // alice does not get any transfer fee. + auto pmt = NumberSwitchOver ? drops(50000) : drops(99999); STAmount carolBalance = env.balance(carol); uint256 const minterSellOfferIndex = keylet::nftoffer(minter, env.seq(minter)).key; - env(token::createOffer(minter, nftID, drops(99999)), - txflags(tfSellNFToken)); + env(token::createOffer(minter, nftID, pmt), txflags(tfSellNFToken)); env.close(); env(token::acceptSellOffer(carol, minterSellOfferIndex)); env.close(); - minterBalance += drops(99999) - fee; - carolBalance -= drops(99999) + fee; + minterBalance += pmt - fee; + carolBalance -= pmt + fee; BEAST_EXPECT(env.balance(alice) == aliceBalance); BEAST_EXPECT(env.balance(minter) == minterBalance); BEAST_EXPECT(env.balance(carol) == carolBalance); @@ -2393,13 +2399,13 @@ class NFToken_test : public beast::unit_test::suite STAmount beckyBalance = env.balance(becky); uint256 const beckyBuyOfferIndex = keylet::nftoffer(becky, env.seq(becky)).key; - env(token::createOffer(becky, nftID, drops(100000)), - token::owner(carol)); + pmt = NumberSwitchOver ? 
drops(50001) : drops(100000); + env(token::createOffer(becky, nftID, pmt), token::owner(carol)); env.close(); env(token::acceptBuyOffer(carol, beckyBuyOfferIndex)); env.close(); - carolBalance += drops(99999) - fee; - beckyBalance -= drops(100000) + fee; + carolBalance += pmt - drops(1) - fee; + beckyBalance -= pmt + fee; aliceBalance += drops(1); BEAST_EXPECT(env.balance(alice) == aliceBalance); diff --git a/src/test/app/Taker_test.cpp b/src/test/app/Taker_test.cpp index 38735d8cfce..0b69b25f24b 100644 --- a/src/test/app/Taker_test.cpp +++ b/src/test/app/Taker_test.cpp @@ -273,314 +273,521 @@ class Taker_test : public beast::unit_test::suite Quality q1 = get_quality("1", "1"); - // TAKER OWNER - // QUAL OFFER FUNDS QUAL OFFER FUNDS - // EXPECTED - // XRP USD - attempt( - Sell, - "N:N", - q1, - {"2", "2"}, - "2", - q1, - {"2", "2"}, - "2", - {"2", "2"}, - xrp(), - usd()); - attempt( - Sell, - "N:B", - q1, - {"2", "2"}, - "2", - q1, - {"2", "2"}, - "1.8", - {"1", "1.8"}, - xrp(), - usd()); - attempt( - Buy, - "N:T", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "2", - {"1", "1"}, - xrp(), - usd()); - attempt( - Buy, - "N:BT", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "1.8", - {"1", "1"}, - xrp(), - usd()); - attempt( - Buy, - "N:TB", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "0.8", - {"0", "0.8"}, - xrp(), - usd()); - - attempt( - Sell, - "T:N", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "2", - {"1", "1"}, - xrp(), - usd()); - attempt( - Sell, - "T:B", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "1.8", - {"1", "1.8"}, - xrp(), - usd()); - attempt( - Buy, - "T:T", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "2", - {"1", "1"}, - xrp(), - usd()); - attempt( - Buy, - "T:BT", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "1.8", - {"1", "1"}, - xrp(), - usd()); - attempt( - Buy, - "T:TB", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "0.8", - {"0", "0.8"}, - xrp(), - usd()); + for (auto NumberSwitchOver : {false, true}) + { + NumberSO stNumberSO{NumberSwitchOver}; + // TAKER OWNER + // QUAL OFFER FUNDS QUAL OFFER FUNDS + // EXPECTED + // XRP USD + attempt( + Sell, + "N:N", + q1, + {"2", "2"}, + "2", + q1, + {"2", "2"}, + "2", + {"2", "2"}, + xrp(), + usd()); + if (NumberSwitchOver) + { + attempt( + Sell, + "N:B", + q1, + {"2", "2"}, + "2", + q1, + {"2", "2"}, + "1.8", + {"2", "1.8"}, + xrp(), + usd()); + } + else + { + attempt( + Sell, + "N:B", + q1, + {"2", "2"}, + "2", + q1, + {"2", "2"}, + "1.8", + {"1", "1.8"}, + xrp(), + usd()); + } + attempt( + Buy, + "N:T", + q1, + {"1", "1"}, + "2", + q1, + {"2", "2"}, + "2", + {"1", "1"}, + xrp(), + usd()); + attempt( + Buy, + "N:BT", + q1, + {"1", "1"}, + "2", + q1, + {"2", "2"}, + "1.8", + {"1", "1"}, + xrp(), + usd()); + if (NumberSwitchOver) + { + attempt( + Buy, + "N:TB", + q1, + {"1", "1"}, + "2", + q1, + {"2", "2"}, + "0.8", + {"1", "0.8"}, + xrp(), + usd()); + } + else + { + attempt( + Buy, + "N:TB", + q1, + {"1", "1"}, + "2", + q1, + {"2", "2"}, + "0.8", + {"0", "0.8"}, + xrp(), + usd()); + } + attempt( + Sell, + "T:N", + q1, + {"1", "1"}, + "2", + q1, + {"2", "2"}, + "2", + {"1", "1"}, + xrp(), + usd()); + if (NumberSwitchOver) + { + attempt( + Sell, + "T:B", + q1, + {"1", "1"}, + "2", + q1, + {"2", "2"}, + "1.8", + {"1", "1"}, + xrp(), + usd()); + } + else + { + attempt( + Sell, + "T:B", + q1, + {"1", "1"}, + "2", + q1, + {"2", "2"}, + "1.8", + {"1", "1.8"}, + xrp(), + usd()); + } + attempt( + Buy, + "T:T", + q1, + {"1", "1"}, + "2", + q1, + {"2", "2"}, + "2", + {"1", "1"}, + xrp(), + usd()); + attempt( + 
Buy, + "T:BT", + q1, + {"1", "1"}, + "2", + q1, + {"2", "2"}, + "1.8", + {"1", "1"}, + xrp(), + usd()); + if (NumberSwitchOver) + { + attempt( + Buy, + "T:TB", + q1, + {"1", "1"}, + "2", + q1, + {"2", "2"}, + "0.8", + {"1", "0.8"}, + xrp(), + usd()); + } + else + { + attempt( + Buy, + "T:TB", + q1, + {"1", "1"}, + "2", + q1, + {"2", "2"}, + "0.8", + {"0", "0.8"}, + xrp(), + usd()); + } - attempt( - Sell, - "A:N", - q1, - {"2", "2"}, - "1", - q1, - {"2", "2"}, - "2", - {"1", "1"}, - xrp(), - usd()); - attempt( - Sell, - "A:B", - q1, - {"2", "2"}, - "1", - q1, - {"2", "2"}, - "1.8", - {"1", "1.8"}, - xrp(), - usd()); - attempt( - Buy, - "A:T", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "3", - {"1", "1"}, - xrp(), - usd()); - attempt( - Buy, - "A:BT", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "2.4", - {"1", "1"}, - xrp(), - usd()); - attempt( - Buy, - "A:TB", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "0.8", - {"0", "0.8"}, - xrp(), - usd()); + attempt( + Sell, + "A:N", + q1, + {"2", "2"}, + "1", + q1, + {"2", "2"}, + "2", + {"1", "1"}, + xrp(), + usd()); + if (NumberSwitchOver) + { + attempt( + Sell, + "A:B", + q1, + {"2", "2"}, + "1", + q1, + {"2", "2"}, + "1.8", + {"1", "1"}, + xrp(), + usd()); + } + else + { + attempt( + Sell, + "A:B", + q1, + {"2", "2"}, + "1", + q1, + {"2", "2"}, + "1.8", + {"1", "1.8"}, + xrp(), + usd()); + } + attempt( + Buy, + "A:T", + q1, + {"2", "2"}, + "1", + q1, + {"3", "3"}, + "3", + {"1", "1"}, + xrp(), + usd()); + attempt( + Buy, + "A:BT", + q1, + {"2", "2"}, + "1", + q1, + {"3", "3"}, + "2.4", + {"1", "1"}, + xrp(), + usd()); + if (NumberSwitchOver) + { + attempt( + Buy, + "A:TB", + q1, + {"2", "2"}, + "1", + q1, + {"3", "3"}, + "0.8", + {"1", "0.8"}, + xrp(), + usd()); + } + else + { + attempt( + Buy, + "A:TB", + q1, + {"2", "2"}, + "1", + q1, + {"3", "3"}, + "0.8", + {"0", "0.8"}, + xrp(), + usd()); + } - attempt( - Sell, - "TA:N", - q1, - {"2", "2"}, - "1", - q1, - {"2", "2"}, - "2", - {"1", "1"}, - xrp(), - usd()); - attempt( - Sell, - "TA:B", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "1.8", - {"1", "1.8"}, - xrp(), - usd()); - attempt( - Buy, - "TA:T", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "3", - {"1", "1"}, - xrp(), - usd()); - attempt( - Buy, - "TA:BT", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "1.8", - {"1", "1.8"}, - xrp(), - usd()); - attempt( - Buy, - "TA:TB", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "1.8", - {"1", "1.8"}, - xrp(), - usd()); + attempt( + Sell, + "TA:N", + q1, + {"2", "2"}, + "1", + q1, + {"2", "2"}, + "2", + {"1", "1"}, + xrp(), + usd()); + if (NumberSwitchOver) + { + attempt( + Sell, + "TA:B", + q1, + {"2", "2"}, + "1", + q1, + {"3", "3"}, + "1.8", + {"1", "1"}, + xrp(), + usd()); + } + else + { + attempt( + Sell, + "TA:B", + q1, + {"2", "2"}, + "1", + q1, + {"3", "3"}, + "1.8", + {"1", "1.8"}, + xrp(), + usd()); + } + attempt( + Buy, + "TA:T", + q1, + {"2", "2"}, + "1", + q1, + {"3", "3"}, + "3", + {"1", "1"}, + xrp(), + usd()); + if (NumberSwitchOver) + { + attempt( + Buy, + "TA:BT", + q1, + {"2", "2"}, + "1", + q1, + {"3", "3"}, + "1.8", + {"1", "1"}, + xrp(), + usd()); + attempt( + Buy, + "TA:TB", + q1, + {"2", "2"}, + "1", + q1, + {"3", "3"}, + "1.8", + {"1", "1"}, + xrp(), + usd()); + } + else + { + attempt( + Buy, + "TA:BT", + q1, + {"2", "2"}, + "1", + q1, + {"3", "3"}, + "1.8", + {"1", "1.8"}, + xrp(), + usd()); + attempt( + Buy, + "TA:TB", + q1, + {"2", "2"}, + "1", + q1, + {"3", "3"}, + "1.8", + {"1", "1.8"}, + xrp(), + usd()); + } - attempt( - Sell, - "AT:N", - q1, - 
{"2", "2"}, - "1", - q1, - {"3", "3"}, - "3", - {"1", "1"}, - xrp(), - usd()); - attempt( - Sell, - "AT:B", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "1.8", - {"1", "1.8"}, - xrp(), - usd()); - attempt( - Buy, - "AT:T", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "3", - {"1", "1"}, - xrp(), - usd()); - attempt( - Buy, - "AT:BT", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "1.8", - {"1", "1.8"}, - xrp(), - usd()); - attempt( - Buy, - "AT:TB", - q1, - {"2", "2"}, - "1", - q1, - {"3", "3"}, - "0.8", - {"0", "0.8"}, - xrp(), - usd()); + attempt( + Sell, + "AT:N", + q1, + {"2", "2"}, + "1", + q1, + {"3", "3"}, + "3", + {"1", "1"}, + xrp(), + usd()); + if (NumberSwitchOver) + { + attempt( + Sell, + "AT:B", + q1, + {"2", "2"}, + "1", + q1, + {"3", "3"}, + "1.8", + {"1", "1"}, + xrp(), + usd()); + } + else + { + attempt( + Sell, + "AT:B", + q1, + {"2", "2"}, + "1", + q1, + {"3", "3"}, + "1.8", + {"1", "1.8"}, + xrp(), + usd()); + } + attempt( + Buy, + "AT:T", + q1, + {"2", "2"}, + "1", + q1, + {"3", "3"}, + "3", + {"1", "1"}, + xrp(), + usd()); + if (NumberSwitchOver) + { + attempt( + Buy, + "AT:BT", + q1, + {"2", "2"}, + "1", + q1, + {"3", "3"}, + "1.8", + {"1", "1"}, + xrp(), + usd()); + attempt( + Buy, + "AT:TB", + q1, + {"2", "2"}, + "1", + q1, + {"3", "3"}, + "0.8", + {"1", "0.8"}, + xrp(), + usd()); + } + else + { + attempt( + Buy, + "AT:BT", + q1, + {"2", "2"}, + "1", + q1, + {"3", "3"}, + "1.8", + {"1", "1.8"}, + xrp(), + usd()); + attempt( + Buy, + "AT:TB", + q1, + {"2", "2"}, + "1", + q1, + {"3", "3"}, + "0.8", + {"0", "0.8"}, + xrp(), + usd()); + } + } } void @@ -588,316 +795,446 @@ class Taker_test : public beast::unit_test::suite { testcase("XRP Quantization: output"); - Quality q1 = get_quality("1", "1"); + for (auto NumberSwitchOver : {false, true}) + { + NumberSO stNumberSO{NumberSwitchOver}; + Quality q1 = get_quality("1", "1"); - // TAKER OWNER - // QUAL OFFER FUNDS QUAL OFFER FUNDS - // EXPECTED - // USD XRP - attempt( - Sell, - "N:N", - q1, - {"3", "3"}, - "3", - q1, - {"3", "3"}, - "3", - {"3", "3"}, - usd(), - xrp()); - attempt( - Sell, - "N:B", - q1, - {"3", "3"}, - "3", - q1, - {"3", "3"}, - "2", - {"2", "2"}, - usd(), - xrp()); - attempt( - Buy, - "N:T", - q1, - {"3", "3"}, - "2.5", - q1, - {"5", "5"}, - "5", - {"2.5", "2"}, - usd(), - xrp()); - attempt( - Buy, - "N:BT", - q1, - {"3", "3"}, - "1.5", - q1, - {"5", "5"}, - "4", - {"1.5", "1"}, - usd(), - xrp()); - attempt( - Buy, - "N:TB", - q1, - {"3", "3"}, - "2.2", - q1, - {"5", "5"}, - "1", - {"1", "1"}, - usd(), - xrp()); + // TAKER OWNER + // QUAL OFFER FUNDS QUAL OFFER FUNDS + // EXPECTED + // USD XRP + attempt( + Sell, + "N:N", + q1, + {"3", "3"}, + "3", + q1, + {"3", "3"}, + "3", + {"3", "3"}, + usd(), + xrp()); + attempt( + Sell, + "N:B", + q1, + {"3", "3"}, + "3", + q1, + {"3", "3"}, + "2", + {"2", "2"}, + usd(), + xrp()); + if (NumberSwitchOver) + { + attempt( + Buy, + "N:T", + q1, + {"3", "3"}, + "2.5", + q1, + {"5", "5"}, + "5", + {"2.5", "3"}, + usd(), + xrp()); + attempt( + Buy, + "N:BT", + q1, + {"3", "3"}, + "1.5", + q1, + {"5", "5"}, + "4", + {"1.5", "2"}, + usd(), + xrp()); + } + else + { + attempt( + Buy, + "N:T", + q1, + {"3", "3"}, + "2.5", + q1, + {"5", "5"}, + "5", + {"2.5", "2"}, + usd(), + xrp()); + attempt( + Buy, + "N:BT", + q1, + {"3", "3"}, + "1.5", + q1, + {"5", "5"}, + "4", + {"1.5", "1"}, + usd(), + xrp()); + } + attempt( + Buy, + "N:TB", + q1, + {"3", "3"}, + "2.2", + q1, + {"5", "5"}, + "1", + {"1", "1"}, + usd(), + xrp()); - attempt( - Sell, - "T:N", - q1, - {"1", "1"}, - 
"2", - q1, - {"2", "2"}, - "2", - {"1", "1"}, - usd(), - xrp()); - attempt( - Sell, - "T:B", - q1, - {"2", "2"}, - "2", - q1, - {"3", "3"}, - "1", - {"1", "1"}, - usd(), - xrp()); - attempt( - Buy, - "T:T", - q1, - {"1", "1"}, - "2", - q1, - {"2", "2"}, - "2", - {"1", "1"}, - usd(), - xrp()); - attempt( - Buy, - "T:BT", - q1, - {"1", "1"}, - "2", - q1, - {"3", "3"}, - "2", - {"1", "1"}, - usd(), - xrp()); - attempt( - Buy, - "T:TB", - q1, - {"2", "2"}, - "2", - q1, - {"3", "3"}, - "1", - {"1", "1"}, - usd(), - xrp()); + attempt( + Sell, + "T:N", + q1, + {"1", "1"}, + "2", + q1, + {"2", "2"}, + "2", + {"1", "1"}, + usd(), + xrp()); + attempt( + Sell, + "T:B", + q1, + {"2", "2"}, + "2", + q1, + {"3", "3"}, + "1", + {"1", "1"}, + usd(), + xrp()); + attempt( + Buy, + "T:T", + q1, + {"1", "1"}, + "2", + q1, + {"2", "2"}, + "2", + {"1", "1"}, + usd(), + xrp()); + attempt( + Buy, + "T:BT", + q1, + {"1", "1"}, + "2", + q1, + {"3", "3"}, + "2", + {"1", "1"}, + usd(), + xrp()); + attempt( + Buy, + "T:TB", + q1, + {"2", "2"}, + "2", + q1, + {"3", "3"}, + "1", + {"1", "1"}, + usd(), + xrp()); - attempt( - Sell, - "A:N", - q1, - {"2", "2"}, - "1.5", - q1, - {"2", "2"}, - "2", - {"1.5", "1"}, - usd(), - xrp()); - attempt( - Sell, - "A:B", - q1, - {"2", "2"}, - "1.8", - q1, - {"3", "3"}, - "2", - {"1.8", "1"}, - usd(), - xrp()); - attempt( - Buy, - "A:T", - q1, - {"2", "2"}, - "1.2", - q1, - {"3", "3"}, - "3", - {"1.2", "1"}, - usd(), - xrp()); - attempt( - Buy, - "A:BT", - q1, - {"2", "2"}, - "1.5", - q1, - {"4", "4"}, - "3", - {"1.5", "1"}, - usd(), - xrp()); - attempt( - Buy, - "A:TB", - q1, - {"2", "2"}, - "1.5", - q1, - {"4", "4"}, - "1", - {"1", "1"}, - usd(), - xrp()); + if (NumberSwitchOver) + { + attempt( + Sell, + "A:N", + q1, + {"2", "2"}, + "1.5", + q1, + {"2", "2"}, + "2", + {"1.5", "2"}, + usd(), + xrp()); + attempt( + Sell, + "A:B", + q1, + {"2", "2"}, + "1.8", + q1, + {"3", "3"}, + "2", + {"1.8", "2"}, + usd(), + xrp()); + } + else + { + attempt( + Sell, + "A:N", + q1, + {"2", "2"}, + "1.5", + q1, + {"2", "2"}, + "2", + {"1.5", "1"}, + usd(), + xrp()); + attempt( + Sell, + "A:B", + q1, + {"2", "2"}, + "1.8", + q1, + {"3", "3"}, + "2", + {"1.8", "1"}, + usd(), + xrp()); + } + attempt( + Buy, + "A:T", + q1, + {"2", "2"}, + "1.2", + q1, + {"3", "3"}, + "3", + {"1.2", "1"}, + usd(), + xrp()); + if (NumberSwitchOver) + { + attempt( + Buy, + "A:BT", + q1, + {"2", "2"}, + "1.5", + q1, + {"4", "4"}, + "3", + {"1.5", "2"}, + usd(), + xrp()); + } + else + { + attempt( + Buy, + "A:BT", + q1, + {"2", "2"}, + "1.5", + q1, + {"4", "4"}, + "3", + {"1.5", "1"}, + usd(), + xrp()); + } + attempt( + Buy, + "A:TB", + q1, + {"2", "2"}, + "1.5", + q1, + {"4", "4"}, + "1", + {"1", "1"}, + usd(), + xrp()); - attempt( - Sell, - "TA:N", - q1, - {"2", "2"}, - "1.5", - q1, - {"2", "2"}, - "2", - {"1.5", "1"}, - usd(), - xrp()); - attempt( - Sell, - "TA:B", - q1, - {"2", "2"}, - "1.5", - q1, - {"3", "3"}, - "1", - {"1", "1"}, - usd(), - xrp()); - attempt( - Buy, - "TA:T", - q1, - {"2", "2"}, - "1.5", - q1, - {"3", "3"}, - "3", - {"1.5", "1"}, - usd(), - xrp()); - attempt( - Buy, - "TA:BT", - q1, - {"2", "2"}, - "1.8", - q1, - {"4", "4"}, - "3", - {"1.8", "1"}, - usd(), - xrp()); - attempt( - Buy, - "TA:TB", - q1, - {"2", "2"}, - "1.2", - q1, - {"3", "3"}, - "1", - {"1", "1"}, - usd(), - xrp()); + if (NumberSwitchOver) + { + attempt( + Sell, + "TA:N", + q1, + {"2", "2"}, + "1.5", + q1, + {"2", "2"}, + "2", + {"1.5", "2"}, + usd(), + xrp()); + } + else + { + attempt( + Sell, + "TA:N", + q1, + {"2", "2"}, + "1.5", + q1, 
+ {"2", "2"}, + "2", + {"1.5", "1"}, + usd(), + xrp()); + } + attempt( + Sell, + "TA:B", + q1, + {"2", "2"}, + "1.5", + q1, + {"3", "3"}, + "1", + {"1", "1"}, + usd(), + xrp()); + if (NumberSwitchOver) + { + attempt( + Buy, + "TA:T", + q1, + {"2", "2"}, + "1.5", + q1, + {"3", "3"}, + "3", + {"1.5", "2"}, + usd(), + xrp()); + attempt( + Buy, + "TA:BT", + q1, + {"2", "2"}, + "1.8", + q1, + {"4", "4"}, + "3", + {"1.8", "2"}, + usd(), + xrp()); + } + else + { + attempt( + Buy, + "TA:T", + q1, + {"2", "2"}, + "1.5", + q1, + {"3", "3"}, + "3", + {"1.5", "1"}, + usd(), + xrp()); + attempt( + Buy, + "TA:BT", + q1, + {"2", "2"}, + "1.8", + q1, + {"4", "4"}, + "3", + {"1.8", "1"}, + usd(), + xrp()); + } + attempt( + Buy, + "TA:TB", + q1, + {"2", "2"}, + "1.2", + q1, + {"3", "3"}, + "1", + {"1", "1"}, + usd(), + xrp()); - attempt( - Sell, - "AT:N", - q1, - {"2", "2"}, - "2.5", - q1, - {"4", "4"}, - "4", - {"2", "2"}, - usd(), - xrp()); - attempt( - Sell, - "AT:B", - q1, - {"2", "2"}, - "2.5", - q1, - {"3", "3"}, - "1", - {"1", "1"}, - usd(), - xrp()); - attempt( - Buy, - "AT:T", - q1, - {"2", "2"}, - "2.5", - q1, - {"3", "3"}, - "3", - {"2", "2"}, - usd(), - xrp()); - attempt( - Buy, - "AT:BT", - q1, - {"2", "2"}, - "2.5", - q1, - {"4", "4"}, - "3", - {"2", "2"}, - usd(), - xrp()); - attempt( - Buy, - "AT:TB", - q1, - {"2", "2"}, - "2.5", - q1, - {"3", "3"}, - "1", - {"1", "1"}, - usd(), - xrp()); + attempt( + Sell, + "AT:N", + q1, + {"2", "2"}, + "2.5", + q1, + {"4", "4"}, + "4", + {"2", "2"}, + usd(), + xrp()); + attempt( + Sell, + "AT:B", + q1, + {"2", "2"}, + "2.5", + q1, + {"3", "3"}, + "1", + {"1", "1"}, + usd(), + xrp()); + attempt( + Buy, + "AT:T", + q1, + {"2", "2"}, + "2.5", + q1, + {"3", "3"}, + "3", + {"2", "2"}, + usd(), + xrp()); + attempt( + Buy, + "AT:BT", + q1, + {"2", "2"}, + "2.5", + q1, + {"4", "4"}, + "3", + {"2", "2"}, + usd(), + xrp()); + attempt( + Buy, + "AT:TB", + q1, + {"2", "2"}, + "2.5", + q1, + {"3", "3"}, + "1", + {"1", "1"}, + usd(), + xrp()); + } } void diff --git a/src/test/basics/Number_test.cpp b/src/test/basics/Number_test.cpp index d7bd8264878..d3ece630e3e 100644 --- a/src/test/basics/Number_test.cpp +++ b/src/test/basics/Number_test.cpp @@ -26,24 +26,6 @@ namespace ripple { -class saveNumberRoundMode -{ - Number::rounding_mode mode_; - -public: - ~saveNumberRoundMode() - { - Number::setround(mode_); - } - explicit saveNumberRoundMode(Number::rounding_mode mode) noexcept - : mode_{mode} - { - } - saveNumberRoundMode(saveNumberRoundMode const&) = delete; - saveNumberRoundMode& - operator=(saveNumberRoundMode const&) = delete; -}; - class Number_test : public beast::unit_test::suite { public: @@ -580,6 +562,29 @@ class Number_test : public beast::unit_test::suite BEAST_EXPECT(x == y); } + void + test_toSTAmount() + { + NumberSO stNumberSO{true}; + Issue const issue; + Number const n{7'518'783'80596, -5}; + saveNumberRoundMode const save{Number::setround(Number::to_nearest)}; + auto res2 = STAmount{issue, n.mantissa(), n.exponent()}; + BEAST_EXPECT(res2 == STAmount{7518784}); + + Number::setround(Number::towards_zero); + res2 = STAmount{issue, n.mantissa(), n.exponent()}; + BEAST_EXPECT(res2 == STAmount{7518783}); + + Number::setround(Number::downward); + res2 = STAmount{issue, n.mantissa(), n.exponent()}; + BEAST_EXPECT(res2 == STAmount{7518783}); + + Number::setround(Number::upward); + res2 = STAmount{issue, n.mantissa(), n.exponent()}; + BEAST_EXPECT(res2 == STAmount{7518784}); + } + void run() override { @@ -599,6 +604,7 @@ class Number_test : public 
beast::unit_test::suite test_relationals(); test_stream(); test_inc_dec(); + test_toSTAmount(); } }; From 6eaaa7bcfa6e50cb596552b40cb00a3595b88eed Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Thu, 22 Dec 2022 12:20:56 -0500 Subject: [PATCH 093/508] Replace Number division algorithm * Replace division with faster algorithm. * Correct some rounding bugs in multiplication. * Add tests for rounding bugs. --- src/ripple/basics/impl/Number.cpp | 39 +++---- src/test/basics/Number_test.cpp | 186 +++++++++++++++++++++++++----- 2 files changed, 175 insertions(+), 50 deletions(-) diff --git a/src/ripple/basics/impl/Number.cpp b/src/ripple/basics/impl/Number.cpp index a7a8159fed3..52d2b556d44 100644 --- a/src/ripple/basics/impl/Number.cpp +++ b/src/ripple/basics/impl/Number.cpp @@ -185,6 +185,8 @@ Number::normalize() --exponent_; } Guard g; + if (negative) + g.set_negative(); while (m > maxMantissa) { if (exponent_ >= maxExponent) @@ -364,6 +366,8 @@ Number::operator*=(Number const& y) auto ze = xe + ye; auto zn = xn * yn; Guard g; + if (zn == -1) + g.set_negative(); while (zm > maxMantissa) { g.push(static_cast(zm % 10)); @@ -402,8 +406,11 @@ Number::operator/=(Number const& y) { if (y == Number{}) throw std::overflow_error("Number: divide by 0"); + if (*this == Number{}) + return *this; int np = 1; auto nm = mantissa(); + auto ne = exponent(); if (nm < 0) { nm = -nm; @@ -411,35 +418,19 @@ Number::operator/=(Number const& y) } int dp = 1; auto dm = y.mantissa(); + auto de = y.exponent(); if (dm < 0) { dm = -dm; dp = -1; } - // Divide numerator and denominator such that the - // denominator is in the range [1, 10). - const int offset = -15 - y.exponent(); - Number n{nm * (np * dp), exponent() + offset}; - Number d{dm, y.exponent() + offset}; - // Quadratic least squares fit to 1/x in the range [1, 10] - constexpr Number a0{9178756872006464, -16, unchecked{}}; - constexpr Number a1{-2149215784206187, -16, unchecked{}}; - constexpr Number a2{1405502114116773, -17, unchecked{}}; - static_assert(a0.isnormal()); - static_assert(a1.isnormal()); - static_assert(a2.isnormal()); - Number rm2{}; - Number rm1{}; - Number r = (a2 * d + a1) * d + a0; - // Newton–Raphson iteration of 1/x - d with initial guess r - // halt when r stops changing, checking for bouncing on the last iteration - do - { - rm2 = rm1; - rm1 = r; - r = r + r * (one - d * r); - } while (r != rm1 && r != rm2); - *this = n * r; + // Shift by 10^17 gives greatest precision while not overflowing uint128_t + // or the cast back to int64_t + const uint128_t f = 100'000'000'000'000'000; + mantissa_ = static_cast(uint128_t(nm) * f / uint128_t(dm)); + exponent_ = ne - de - 17; + mantissa_ *= np * dp; + normalize(); return *this; } diff --git a/src/test/basics/Number_test.cpp b/src/test/basics/Number_test.cpp index d3ece630e3e..8c12ff7c5e4 100644 --- a/src/test/basics/Number_test.cpp +++ b/src/test/basics/Number_test.cpp @@ -150,25 +150,94 @@ class Number_test : public beast::unit_test::suite { testcase("test_mul"); using Case = std::tuple; - Case c[]{ - {Number{7}, Number{8}, Number{56}}, - {Number{1414213562373095, -15}, - Number{1414213562373095, -15}, - Number{2000000000000000, -15}}, - {Number{-1414213562373095, -15}, - Number{1414213562373095, -15}, - Number{-2000000000000000, -15}}, - {Number{-1414213562373095, -15}, - Number{-1414213562373095, -15}, - Number{2000000000000000, -15}}, - {Number{3214285714285706, -15}, - Number{3111111111111119, -15}, - Number{1000000000000000, -14}}, - {Number{1000000000000000, -32768}, - 
Number{1000000000000000, -32768}, - Number{0}}}; - for (auto const& [x, y, z] : c) - BEAST_EXPECT(x * y == z); + saveNumberRoundMode save{Number::setround(Number::to_nearest)}; + { + Case c[]{ + {Number{7}, Number{8}, Number{56}}, + {Number{1414213562373095, -15}, + Number{1414213562373095, -15}, + Number{2000000000000000, -15}}, + {Number{-1414213562373095, -15}, + Number{1414213562373095, -15}, + Number{-2000000000000000, -15}}, + {Number{-1414213562373095, -15}, + Number{-1414213562373095, -15}, + Number{2000000000000000, -15}}, + {Number{3214285714285706, -15}, + Number{3111111111111119, -15}, + Number{1000000000000000, -14}}, + {Number{1000000000000000, -32768}, + Number{1000000000000000, -32768}, + Number{0}}}; + for (auto const& [x, y, z] : c) + BEAST_EXPECT(x * y == z); + } + Number::setround(Number::towards_zero); + { + Case c[]{ + {Number{7}, Number{8}, Number{56}}, + {Number{1414213562373095, -15}, + Number{1414213562373095, -15}, + Number{1999999999999999, -15}}, + {Number{-1414213562373095, -15}, + Number{1414213562373095, -15}, + Number{-1999999999999999, -15}}, + {Number{-1414213562373095, -15}, + Number{-1414213562373095, -15}, + Number{1999999999999999, -15}}, + {Number{3214285714285706, -15}, + Number{3111111111111119, -15}, + Number{9999999999999999, -15}}, + {Number{1000000000000000, -32768}, + Number{1000000000000000, -32768}, + Number{0}}}; + for (auto const& [x, y, z] : c) + BEAST_EXPECT(x * y == z); + } + Number::setround(Number::downward); + { + Case c[]{ + {Number{7}, Number{8}, Number{56}}, + {Number{1414213562373095, -15}, + Number{1414213562373095, -15}, + Number{1999999999999999, -15}}, + {Number{-1414213562373095, -15}, + Number{1414213562373095, -15}, + Number{-2000000000000000, -15}}, + {Number{-1414213562373095, -15}, + Number{-1414213562373095, -15}, + Number{1999999999999999, -15}}, + {Number{3214285714285706, -15}, + Number{3111111111111119, -15}, + Number{9999999999999999, -15}}, + {Number{1000000000000000, -32768}, + Number{1000000000000000, -32768}, + Number{0}}}; + for (auto const& [x, y, z] : c) + BEAST_EXPECT(x * y == z); + } + Number::setround(Number::upward); + { + Case c[]{ + {Number{7}, Number{8}, Number{56}}, + {Number{1414213562373095, -15}, + Number{1414213562373095, -15}, + Number{2000000000000000, -15}}, + {Number{-1414213562373095, -15}, + Number{1414213562373095, -15}, + Number{-1999999999999999, -15}}, + {Number{-1414213562373095, -15}, + Number{-1414213562373095, -15}, + Number{2000000000000000, -15}}, + {Number{3214285714285706, -15}, + Number{3111111111111119, -15}, + Number{1000000000000000, -14}}, + {Number{1000000000000000, -32768}, + Number{1000000000000000, -32768}, + Number{0}}}; + for (auto const& [x, y, z] : c) + BEAST_EXPECT(x * y == z); + } bool caught = false; try { @@ -187,13 +256,78 @@ class Number_test : public beast::unit_test::suite { testcase("test_div"); using Case = std::tuple; - Case c[]{ - {Number{1}, Number{2}, Number{5, -1}}, - {Number{1}, Number{10}, Number{1, -1}}, - {Number{1}, Number{-10}, Number{-1, -1}}, - {Number{0}, Number{100}, Number{0}}}; - for (auto const& [x, y, z] : c) - BEAST_EXPECT(x / y == z); + saveNumberRoundMode save{Number::setround(Number::to_nearest)}; + { + Case c[]{ + {Number{1}, Number{2}, Number{5, -1}}, + {Number{1}, Number{10}, Number{1, -1}}, + {Number{1}, Number{-10}, Number{-1, -1}}, + {Number{0}, Number{100}, Number{0}}, + {Number{1414213562373095, -10}, + Number{1414213562373095, -10}, + Number{1}}, + {Number{9'999'999'999'999'999}, + Number{1'000'000'000'000'000}, + 
Number{9'999'999'999'999'999, -15}}, + {Number{2}, Number{3}, Number{6'666'666'666'666'667, -16}}, + {Number{-2}, Number{3}, Number{-6'666'666'666'666'667, -16}}}; + for (auto const& [x, y, z] : c) + BEAST_EXPECT(x / y == z); + } + Number::setround(Number::towards_zero); + { + Case c[]{ + {Number{1}, Number{2}, Number{5, -1}}, + {Number{1}, Number{10}, Number{1, -1}}, + {Number{1}, Number{-10}, Number{-1, -1}}, + {Number{0}, Number{100}, Number{0}}, + {Number{1414213562373095, -10}, + Number{1414213562373095, -10}, + Number{1}}, + {Number{9'999'999'999'999'999}, + Number{1'000'000'000'000'000}, + Number{9'999'999'999'999'999, -15}}, + {Number{2}, Number{3}, Number{6'666'666'666'666'666, -16}}, + {Number{-2}, Number{3}, Number{-6'666'666'666'666'666, -16}}}; + for (auto const& [x, y, z] : c) + BEAST_EXPECT(x / y == z); + } + Number::setround(Number::downward); + { + Case c[]{ + {Number{1}, Number{2}, Number{5, -1}}, + {Number{1}, Number{10}, Number{1, -1}}, + {Number{1}, Number{-10}, Number{-1, -1}}, + {Number{0}, Number{100}, Number{0}}, + {Number{1414213562373095, -10}, + Number{1414213562373095, -10}, + Number{1}}, + {Number{9'999'999'999'999'999}, + Number{1'000'000'000'000'000}, + Number{9'999'999'999'999'999, -15}}, + {Number{2}, Number{3}, Number{6'666'666'666'666'666, -16}}, + {Number{-2}, Number{3}, Number{-6'666'666'666'666'667, -16}}}; + for (auto const& [x, y, z] : c) + BEAST_EXPECT(x / y == z); + } + Number::setround(Number::upward); + { + Case c[]{ + {Number{1}, Number{2}, Number{5, -1}}, + {Number{1}, Number{10}, Number{1, -1}}, + {Number{1}, Number{-10}, Number{-1, -1}}, + {Number{0}, Number{100}, Number{0}}, + {Number{1414213562373095, -10}, + Number{1414213562373095, -10}, + Number{1}}, + {Number{9'999'999'999'999'999}, + Number{1'000'000'000'000'000}, + Number{9'999'999'999'999'999, -15}}, + {Number{2}, Number{3}, Number{6'666'666'666'666'667, -16}}, + {Number{-2}, Number{3}, Number{-6'666'666'666'666'666, -16}}}; + for (auto const& [x, y, z] : c) + BEAST_EXPECT(x / y == z); + } bool caught = false; try { From 2f1f453052088a2e49e725efb758962d3b9faa3f Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Sat, 28 Jan 2023 20:26:59 -0500 Subject: [PATCH 094/508] Optimize uint128_t division by 10 within Number.cpp * Optimization includes computing remainder from division. * Used only within Number::operator*=. --- src/ripple/basics/impl/IOUAmount.cpp | 2 +- src/ripple/basics/impl/Number.cpp | 35 ++++++++++++++++++++++++++-- 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/src/ripple/basics/impl/IOUAmount.cpp b/src/ripple/basics/impl/IOUAmount.cpp index 76f4bbe9fcd..f011af19c01 100644 --- a/src/ripple/basics/impl/IOUAmount.cpp +++ b/src/ripple/basics/impl/IOUAmount.cpp @@ -53,7 +53,7 @@ IOUAmount::normalize() if (*stNumberSwitchover) { - Number v{mantissa_, exponent_}; + const Number v{mantissa_, exponent_}; mantissa_ = v.mantissa(); exponent_ = v.exponent(); if (exponent_ > maxExponent) diff --git a/src/ripple/basics/impl/Number.cpp b/src/ripple/basics/impl/Number.cpp index 52d2b556d44..9b3247536f9 100644 --- a/src/ripple/basics/impl/Number.cpp +++ b/src/ripple/basics/impl/Number.cpp @@ -18,6 +18,7 @@ //============================================================================== #include +#include #include #include #include @@ -335,6 +336,34 @@ Number::operator+=(Number const& y) return *this; } +// Optimization equivalent to: +// auto r = static_cast(u % 10); +// u /= 10; +// return r; +// Derived from Hacker's Delight Second Edition Chapter 10 +// by Henry S. 
Warren, Jr. +static inline unsigned +divu10(uint128_t& u) +{ + // q = u * 0.75 + auto q = (u >> 1) + (u >> 2); + // iterate towards q = u * 0.8 + q += q >> 4; + q += q >> 8; + q += q >> 16; + q += q >> 32; + q += q >> 64; + // q /= 8 approximately == u / 10 + q >>= 3; + // r = u - q * 10 approximately == u % 10 + auto r = static_cast(u - ((q << 3) + (q << 1))); + // correction c is 1 if r >= 10 else 0 + auto c = (r + 6) >> 4; + u = q + c; + r -= c * 10; + return r; +} + Number& Number::operator*=(Number const& y) { @@ -370,8 +399,10 @@ Number::operator*=(Number const& y) g.set_negative(); while (zm > maxMantissa) { - g.push(static_cast(zm % 10)); - zm /= 10; + // The following is optimization for: + // g.push(static_cast(zm % 10)); + // zm /= 10; + g.push(divu10(zm)); ++ze; } xm = static_cast(zm); From 5edaec2bd0eaaf95164ce07a98683f57635173fe Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Wed, 1 Feb 2023 11:20:13 -0500 Subject: [PATCH 095/508] Introduce min/max observers for Number Three static member functions are introduced with definitions consistent with std::numeric_limits: static constexpr Number min() noexcept; Returns: The minimum positive value. This is the value closest to zero. static constexpr Number max() noexcept; Returns: The maximum possible value. static constexpr Number lowest() noexcept; Returns: The negative value which is less than all other values. --- src/ripple/basics/Number.h | 25 +++++++++++++++++++++++++ src/ripple/basics/impl/IOUAmount.cpp | 2 +- src/ripple/protocol/Feature.h | 3 --- 3 files changed, 26 insertions(+), 4 deletions(-) diff --git a/src/ripple/basics/Number.h b/src/ripple/basics/Number.h index 58d903579b5..c308abec712 100644 --- a/src/ripple/basics/Number.h +++ b/src/ripple/basics/Number.h @@ -81,6 +81,13 @@ class Number Number& operator/=(Number const& x); + static constexpr Number + min() noexcept; + static constexpr Number + max() noexcept; + static constexpr Number + lowest() noexcept; + explicit operator XRPAmount() const; // round to nearest, even on tie explicit operator rep() const; // round to nearest, even on tie @@ -290,6 +297,24 @@ operator/(Number const& x, Number const& y) return z; } +inline constexpr Number +Number::min() noexcept +{ + return Number{minMantissa, minExponent, unchecked{}}; +} + +inline constexpr Number +Number::max() noexcept +{ + return Number{maxMantissa, maxExponent, unchecked{}}; +} + +inline constexpr Number +Number::lowest() noexcept +{ + return -Number{maxMantissa, maxExponent, unchecked{}}; +} + inline constexpr bool Number::isnormal() const noexcept { diff --git a/src/ripple/basics/impl/IOUAmount.cpp b/src/ripple/basics/impl/IOUAmount.cpp index f011af19c01..c9b52874abd 100644 --- a/src/ripple/basics/impl/IOUAmount.cpp +++ b/src/ripple/basics/impl/IOUAmount.cpp @@ -53,7 +53,7 @@ IOUAmount::normalize() if (*stNumberSwitchover) { - const Number v{mantissa_, exponent_}; + Number const v{mantissa_, exponent_}; mantissa_ = v.mantissa(); exponent_ = v.exponent(); if (exponent_ > maxExponent) diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index 6be2d4dfb68..78b5b152c87 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -340,12 +340,9 @@ extern uint256 const featureNonFungibleTokensV1_1; extern uint256 const fixTrustLinesToSelf; extern uint256 const fixRemoveNFTokenAutoTrustLine; extern uint256 const featureImmediateOfferKilled; -<<<<<<< HEAD extern uint256 const featureDisallowIncoming; extern uint256 const featureXRPFees; -======= extern uint256 const 
fixUniversalNumber; ->>>>>>> Use Number for IOUAmount and STAmount arithmetic } // namespace ripple From 36b34a7bd5a68650f5ee020f1852b969ada35eb6 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Tue, 7 Feb 2023 17:46:38 -0600 Subject: [PATCH 096/508] Update dependency: grpc (#4407) --- conanfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conanfile.py b/conanfile.py index 61e131f2f3d..51e8d7d553e 100644 --- a/conanfile.py +++ b/conanfile.py @@ -28,7 +28,7 @@ class Xrpl(ConanFile): 'date/3.0.1', 'libarchive/3.6.0', 'lz4/1.9.3', - 'grpc/1.44.0', + 'grpc/1.50.1', 'nudb/2.0.8', 'openssl/1.1.1m', 'protobuf/3.21.4', From 8f514937a41eba90d98fb99daf938925527f0c44 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Wed, 8 Feb 2023 15:14:09 -0500 Subject: [PATCH 097/508] Update documented pathfinding configuration defaults: (#4409) * Add recommended values for node which want to support "advanced" pathfinding, which is effectively the old default behavior. --- cfg/rippled-example.cfg | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/cfg/rippled-example.cfg b/cfg/rippled-example.cfg index fda1671e416..02ef02e920f 100644 --- a/cfg/rippled-example.cfg +++ b/cfg/rippled-example.cfg @@ -758,7 +758,9 @@ # When searching for paths, the default search aggressiveness. This can take # exponentially more resources as the size is increased. # -# The default is: 7 +# The recommended value to support advanced pathfinding is: 7 +# +# The default is: 2 # # [path_search_fast] # [path_search_max] @@ -767,12 +769,19 @@ # If you do not need pathfinding, you can set path_search_max to zero to # disable it and avoid some expensive bookkeeping. # -# The default for 'path_search_fast' is 2. The default for 'path_search_max' is 10. +# To support advanced pathfinding the recommended value for +# 'path_search_fast' is 2, and for 'path_search_max' is 10. +# +# The default for 'path_search_fast' is 2. The default for 'path_search_max' is 3. # # [path_search_old] # # For clients that use the legacy path finding interfaces, the search -# aggressiveness to use. The default is 7. +# aggressiveness to use. +# +# The recommended value to support advanced pathfinding is: 7. +# +# The default is: 2 # # # From 32559463efba03b90b67f51b489f217881f31599 Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Fri, 10 Feb 2023 00:01:48 -0500 Subject: [PATCH 098/508] Change default vote on fixUniversalNumber from yes to no (#4414) --- src/ripple/protocol/impl/Feature.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index d650950da4b..2f01c39888a 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -452,7 +452,7 @@ REGISTER_FIX (fixRemoveNFTokenAutoTrustLine, Supported::yes, DefaultVote::yes REGISTER_FEATURE(ImmediateOfferKilled, Supported::yes, DefaultVote::no); REGISTER_FEATURE(DisallowIncoming, Supported::yes, DefaultVote::no); REGISTER_FEATURE(XRPFees, Supported::yes, DefaultVote::no); -REGISTER_FIX (fixUniversalNumber, Supported::yes, DefaultVote::yes); +REGISTER_FIX (fixUniversalNumber, Supported::yes, DefaultVote::no); // The following amendments have been active for at least two years. Their // pre-amendment code has been removed and the identifiers are deprecated. 
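Patches 092 through 095 above all lean on the same switchable rounding machinery in Number: a test picks a mode with Number::setround and restores the previous one through the saveNumberRoundMode RAII guard that patch 092 moves into Number.h. The following is a minimal usage sketch rather than part of any patch in this series; it assumes only the API declared in those diffs (setround, the four rounding_mode values, operator/, and the min/max/lowest observers from patch 095), and the expected quotients are copied from the test_div cases shown above.

// Illustrative sketch only -- not part of the patch series.  It assumes
// the ripple::Number API exactly as declared in the diffs above.
#include <ripple/basics/Number.h>
#include <cassert>

using namespace ripple;

void
roundingModeSketch()
{
    // Save the caller's rounding mode and restore it on scope exit,
    // exactly as test_toSTAmount does above.
    saveNumberRoundMode const save{Number::setround(Number::to_nearest)};

    Number const two{2};
    Number const three{3};

    // Under to_nearest, 2/3 rounds the last retained digit up ...
    assert(two / three == Number{6'666'666'666'666'667, -16});

    // ... and under towards_zero it truncates (values from test_div).
    Number::setround(Number::towards_zero);
    assert(two / three == Number{6'666'666'666'666'666, -16});

    // The observers introduced in patch 095: min() is the smallest
    // positive value, lowest() the negative extreme.
    assert(Number::min() > Number{0});
    assert(Number::lowest() == -Number::max());

    // Leaving scope restores the caller's rounding mode.
}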
From f7a8d2de84c6968c8dad8c842d950dfae7224223 Mon Sep 17 00:00:00 2001 From: ledhed2222 Date: Wed, 18 Jan 2023 16:37:39 -0500 Subject: [PATCH 099/508] Add fixUnburnableNFToken feature (#4391) --- src/ripple/protocol/Feature.h | 3 ++- src/ripple/protocol/impl/Feature.cpp | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index 78b5b152c87..0bdfd224dda 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -74,7 +74,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. A LogicError on startup will verify this. -static constexpr std::size_t numFeatures = 56; +static constexpr std::size_t numFeatures = 57; /** Amendments that this server supports and the default voting behavior. Whether they are enabled depends on the Rules defined in the validated @@ -343,6 +343,7 @@ extern uint256 const featureImmediateOfferKilled; extern uint256 const featureDisallowIncoming; extern uint256 const featureXRPFees; extern uint256 const fixUniversalNumber; +extern uint256 const fixUnburnableNFToken; } // namespace ripple diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index 2f01c39888a..f021ea4674d 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -453,6 +453,7 @@ REGISTER_FEATURE(ImmediateOfferKilled, Supported::yes, DefaultVote::no) REGISTER_FEATURE(DisallowIncoming, Supported::yes, DefaultVote::no); REGISTER_FEATURE(XRPFees, Supported::yes, DefaultVote::no); REGISTER_FIX (fixUniversalNumber, Supported::yes, DefaultVote::no); +REGISTER_FIX (fixUnburnableNFToken, Supported::yes, DefaultVote::no); // The following amendments have been active for at least two years. Their // pre-amendment code has been removed and the identifiers are deprecated. From a828e24cf0061380524a8c7ef304c52ab3ea100d Mon Sep 17 00:00:00 2001 From: Shawn Xie <35279399+shawnxie999@users.noreply.github.com> Date: Thu, 2 Feb 2023 18:48:03 -0500 Subject: [PATCH 100/508] Allow NFT to be burned when number of offers is greater than 500 (#4346) * Allow offers to be removable * Delete sell offers first Signed-off-by: Shawn Xie --- src/ripple/app/tx/impl/NFTokenBurn.cpp | 46 ++- .../app/tx/impl/details/NFTokenUtils.cpp | 67 ++-- src/ripple/app/tx/impl/details/NFTokenUtils.h | 10 +- src/test/app/NFTokenBurn_test.cpp | 323 ++++++++++++++---- 4 files changed, 347 insertions(+), 99 deletions(-) diff --git a/src/ripple/app/tx/impl/NFTokenBurn.cpp b/src/ripple/app/tx/impl/NFTokenBurn.cpp index da23d78bdbd..e8693c7c6fb 100644 --- a/src/ripple/app/tx/impl/NFTokenBurn.cpp +++ b/src/ripple/app/tx/impl/NFTokenBurn.cpp @@ -77,9 +77,14 @@ NFTokenBurn::preclaim(PreclaimContext const& ctx) } } - // If there are too many offers, then burning the token would produce too - // much metadata. Disallow burning a token with too many offers. - return nft::notTooManyOffers(ctx.view, ctx.tx[sfNFTokenID]); + if (!ctx.view.rules().enabled(fixUnburnableNFToken)) + { + // If there are too many offers, then burning the token would produce + // too much metadata. Disallow burning a token with too many offers. + return nft::notTooManyOffers(ctx.view, ctx.tx[sfNFTokenID]); + } + + return tesSUCCESS; } TER @@ -104,9 +109,38 @@ NFTokenBurn::doApply() view().update(issuer); } - // Optimized deletion of all offers. 
- nft::removeAllTokenOffers(view(), keylet::nft_sells(ctx_.tx[sfNFTokenID])); - nft::removeAllTokenOffers(view(), keylet::nft_buys(ctx_.tx[sfNFTokenID])); + if (ctx_.view().rules().enabled(fixUnburnableNFToken)) + { + // Delete up to 500 offers in total. + // Because the number of sell offers is likely to be less than + // the number of buy offers, we prioritize the deletion of sell + // offers in order to clean up sell offer directory + std::size_t const deletedSellOffers = nft::removeTokenOffersWithLimit( + view(), + keylet::nft_sells(ctx_.tx[sfNFTokenID]), + maxDeletableTokenOfferEntries); + + if (maxDeletableTokenOfferEntries > deletedSellOffers) + { + nft::removeTokenOffersWithLimit( + view(), + keylet::nft_buys(ctx_.tx[sfNFTokenID]), + maxDeletableTokenOfferEntries - deletedSellOffers); + } + } + else + { + // Deletion of all offers. + nft::removeTokenOffersWithLimit( + view(), + keylet::nft_sells(ctx_.tx[sfNFTokenID]), + std::numeric_limits::max()); + + nft::removeTokenOffersWithLimit( + view(), + keylet::nft_buys(ctx_.tx[sfNFTokenID]), + std::numeric_limits::max()); + } return tesSUCCESS; } diff --git a/src/ripple/app/tx/impl/details/NFTokenUtils.cpp b/src/ripple/app/tx/impl/details/NFTokenUtils.cpp index d1214a98ee8..db2c3ae62f7 100644 --- a/src/ripple/app/tx/impl/details/NFTokenUtils.cpp +++ b/src/ripple/app/tx/impl/details/NFTokenUtils.cpp @@ -520,34 +520,55 @@ findTokenAndPage( } return std::nullopt; } -void -removeAllTokenOffers(ApplyView& view, Keylet const& directory) -{ - view.dirDelete(directory, [&view](uint256 const& id) { - auto offer = view.peek(Keylet{ltNFTOKEN_OFFER, id}); - if (!offer) - Throw( - "Offer " + to_string(id) + " not found in ledger!"); +std::size_t +removeTokenOffersWithLimit( + ApplyView& view, + Keylet const& directory, + std::size_t maxDeletableOffers) +{ + if (maxDeletableOffers == 0) + return 0; - auto const owner = (*offer)[sfOwner]; + std::optional pageIndex{0}; + std::size_t deletedOffersCount = 0; - if (!view.dirRemove( - keylet::ownerDir(owner), - (*offer)[sfOwnerNode], - offer->key(), - false)) - Throw( - "Offer " + to_string(id) + " not found in owner directory!"); + do + { + auto const page = view.peek(keylet::page(directory, *pageIndex)); + if (!page) + break; + + // We get the index of the next page in case the current + // page is deleted after all of its entries have been removed + pageIndex = (*page)[~sfIndexNext]; + + auto offerIndexes = page->getFieldV256(sfIndexes); + + // We reverse-iterate the offer directory page to delete all entries. + // Deleting an entry in a NFTokenOffer directory page won't cause + // entries from other pages to move to the current, so, it is safe to + // delete entries one by one in the page. It is required to iterate + // backwards to handle iterator invalidation for vector, as we are + // deleting during iteration. 
+ for (int i = offerIndexes.size() - 1; i >= 0; --i) + { + if (auto const offer = view.peek(keylet::nftoffer(offerIndexes[i]))) + { + if (deleteTokenOffer(view, offer)) + ++deletedOffersCount; + else + Throw( + "Offer " + to_string(offerIndexes[i]) + + " cannot be deleted!"); + } - adjustOwnerCount( - view, - view.peek(keylet::account(owner)), - -1, - beast::Journal{beast::Journal::getNullSink()}); + if (maxDeletableOffers == deletedOffersCount) + break; + } + } while (pageIndex.value_or(0) && maxDeletableOffers != deletedOffersCount); - view.erase(offer); - }); + return deletedOffersCount; } TER diff --git a/src/ripple/app/tx/impl/details/NFTokenUtils.h b/src/ripple/app/tx/impl/details/NFTokenUtils.h index fa8c43b5877..db7cf00be10 100644 --- a/src/ripple/app/tx/impl/details/NFTokenUtils.h +++ b/src/ripple/app/tx/impl/details/NFTokenUtils.h @@ -53,9 +53,13 @@ constexpr std::uint16_t const flagOnlyXRP = 0x0002; constexpr std::uint16_t const flagCreateTrustLines = 0x0004; constexpr std::uint16_t const flagTransferable = 0x0008; -/** Deletes all offers from the specified token offer directory. */ -void -removeAllTokenOffers(ApplyView& view, Keylet const& directory); +/** Delete up to a specified number of offers from the specified token offer + * directory. */ +std::size_t +removeTokenOffersWithLimit( + ApplyView& view, + Keylet const& directory, + std::size_t maxDeletableOffers); /** Returns tesSUCCESS if NFToken has few enough offers that it can be burned */ TER diff --git a/src/test/app/NFTokenBurn_test.cpp b/src/test/app/NFTokenBurn_test.cpp index 00124731cb9..4896932acd2 100644 --- a/src/test/app/NFTokenBurn_test.cpp +++ b/src/test/app/NFTokenBurn_test.cpp @@ -49,6 +49,37 @@ class NFTokenBurn_test : public beast::unit_test::suite return nfts[jss::result][jss::account_nfts].size(); }; + // Helper function that returns new nft id for an account and create + // specified number of sell offers + uint256 + createNftAndOffers( + test::jtx::Env& env, + test::jtx::Account const& owner, + std::vector& offerIndexes, + size_t const tokenCancelCount) + { + using namespace test::jtx; + uint256 const nftokenID = + token::getNextID(env, owner, 0, tfTransferable); + env(token::mint(owner, 0), + token::uri(std::string(maxTokenURILength, 'u')), + txflags(tfTransferable)); + env.close(); + + offerIndexes.reserve(tokenCancelCount); + + for (uint32_t i = 0; i < tokenCancelCount; ++i) + { + // Create sell offer + offerIndexes.push_back(keylet::nftoffer(owner, env.seq(owner)).key); + env(token::createOffer(owner, nftokenID, drops(1)), + txflags(tfSellNFToken)); + env.close(); + } + + return nftokenID; + }; + void testBurnRandom(FeatureBitset features) { @@ -492,94 +523,251 @@ class NFTokenBurn_test : public beast::unit_test::suite using namespace test::jtx; - Env env{*this, features}; + // Test what happens if a NFT is unburnable when there are + // more than 500 offers, before fixUnburnableNFToken goes live + if (!features[fixUnburnableNFToken]) + { + Env env{*this, features}; - Account const alice("alice"); - Account const becky("becky"); - env.fund(XRP(1000), alice, becky); - env.close(); + Account const alice("alice"); + Account const becky("becky"); + env.fund(XRP(1000), alice, becky); + env.close(); - // We structure the test to try and maximize the metadata produced. - // This verifies that we don't create too much metadata during a - // maximal burn operation. - // - // 1. alice mints an nft with a full-sized URI. - // 2. We create 1000 new accounts, each of which creates an offer for - // alice's nft. 
- // 3. becky creates one more offer for alice's NFT - // 4. Attempt to burn the nft which fails because there are too - // many offers. - // 5. Cancel becky's offer and the nft should become burnable. - uint256 const nftokenID = - token::getNextID(env, alice, 0, tfTransferable); - env(token::mint(alice, 0), - token::uri(std::string(maxTokenURILength, 'u')), - txflags(tfTransferable)); - env.close(); + // We structure the test to try and maximize the metadata produced. + // This verifies that we don't create too much metadata during a + // maximal burn operation. + // + // 1. alice mints an nft with a full-sized URI. + // 2. We create 500 new accounts, each of which creates an offer + // for alice's nft. + // 3. becky creates one more offer for alice's NFT + // 4. Attempt to burn the nft which fails because there are too + // many offers. + // 5. Cancel becky's offer and the nft should become burnable. + uint256 const nftokenID = + token::getNextID(env, alice, 0, tfTransferable); + env(token::mint(alice, 0), + token::uri(std::string(maxTokenURILength, 'u')), + txflags(tfTransferable)); + env.close(); - std::vector offerIndexes; - offerIndexes.reserve(maxTokenOfferCancelCount); - for (uint32_t i = 0; i < maxTokenOfferCancelCount; ++i) + std::vector offerIndexes; + offerIndexes.reserve(maxTokenOfferCancelCount); + for (std::uint32_t i = 0; i < maxTokenOfferCancelCount; ++i) + { + Account const acct(std::string("acct") + std::to_string(i)); + env.fund(XRP(1000), acct); + env.close(); + + offerIndexes.push_back( + keylet::nftoffer(acct, env.seq(acct)).key); + env(token::createOffer(acct, nftokenID, drops(1)), + token::owner(alice)); + env.close(); + } + + // Verify all offers are present in the ledger. + for (uint256 const& offerIndex : offerIndexes) + { + BEAST_EXPECT(env.le(keylet::nftoffer(offerIndex))); + } + + // Create one too many offers. + uint256 const beckyOfferIndex = + keylet::nftoffer(becky, env.seq(becky)).key; + env(token::createOffer(becky, nftokenID, drops(1)), + token::owner(alice)); + + // Attempt to burn the nft which should fail. + env(token::burn(alice, nftokenID), ter(tefTOO_BIG)); + + // Close enough ledgers that the burn transaction is no longer + // retried. + for (int i = 0; i < 10; ++i) + env.close(); + + // Cancel becky's offer, but alice adds a sell offer. The token + // should still not be burnable. + env(token::cancelOffer(becky, {beckyOfferIndex})); + env.close(); + + uint256 const aliceOfferIndex = + keylet::nftoffer(alice, env.seq(alice)).key; + env(token::createOffer(alice, nftokenID, drops(1)), + txflags(tfSellNFToken)); + env.close(); + + env(token::burn(alice, nftokenID), ter(tefTOO_BIG)); + env.close(); + + // Cancel alice's sell offer. Now the token should be burnable. + env(token::cancelOffer(alice, {aliceOfferIndex})); + env.close(); + + env(token::burn(alice, nftokenID)); + env.close(); + + // Burning the token should remove all the offers from the ledger. + for (uint256 const& offerIndex : offerIndexes) + { + BEAST_EXPECT(!env.le(keylet::nftoffer(offerIndex))); + } + + // Both alice and becky should have ownerCounts of zero. + BEAST_EXPECT(ownerCount(env, alice) == 0); + BEAST_EXPECT(ownerCount(env, becky) == 0); + } + + // Test that up to 499 buy/sell offers will be removed when NFT is + // burned after fixUnburnableNFToken is enabled. This is to test that we + // can successfully remove all offers if the number of offers is less + // than 500. 
+ if (features[fixUnburnableNFToken]) { - Account const acct(std::string("acct") + std::to_string(i)); - env.fund(XRP(1000), acct); + Env env{*this, features}; + + Account const alice("alice"); + Account const becky("becky"); + env.fund(XRP(100000), alice, becky); env.close(); - offerIndexes.push_back(keylet::nftoffer(acct, env.seq(acct)).key); - env(token::createOffer(acct, nftokenID, drops(1)), + // alice creates 498 sell offers and becky creates 1 buy offers. + // When the token is burned, 498 sell offers and 1 buy offer are + // removed. In total, 499 offers are removed + std::vector offerIndexes; + auto const nftokenID = createNftAndOffers( + env, alice, offerIndexes, maxDeletableTokenOfferEntries - 2); + + // Verify all sell offers are present in the ledger. + for (uint256 const& offerIndex : offerIndexes) + { + BEAST_EXPECT(env.le(keylet::nftoffer(offerIndex))); + } + + // Becky creates a buy offer + uint256 const beckyOfferIndex = + keylet::nftoffer(becky, env.seq(becky)).key; + env(token::createOffer(becky, nftokenID, drops(1)), token::owner(alice)); env.close(); + + // Burn the token + env(token::burn(alice, nftokenID)); + env.close(); + + // Burning the token should remove all 498 sell offers + // that alice created + for (uint256 const& offerIndex : offerIndexes) + { + BEAST_EXPECT(!env.le(keylet::nftoffer(offerIndex))); + } + + // Burning the token should also remove the one buy offer + // that becky created + BEAST_EXPECT(!env.le(keylet::nftoffer(beckyOfferIndex))); + + // alice and becky should have ownerCounts of zero + BEAST_EXPECT(ownerCount(env, alice) == 0); + BEAST_EXPECT(ownerCount(env, becky) == 0); } - // Verify all offers are present in the ledger. - for (uint256 const& offerIndex : offerIndexes) + // Test that up to 500 buy offers are removed when NFT is burned + // after fixUnburnableNFToken is enabled + if (features[fixUnburnableNFToken]) { - BEAST_EXPECT(env.le(keylet::nftoffer(offerIndex))); - } + Env env{*this, features}; - // Create one too many offers. - uint256 const beckyOfferIndex = - keylet::nftoffer(becky, env.seq(becky)).key; - env(token::createOffer(becky, nftokenID, drops(1)), - token::owner(alice)); + Account const alice("alice"); + Account const becky("becky"); + env.fund(XRP(100000), alice, becky); + env.close(); - // Attempt to burn the nft which should fail. - env(token::burn(alice, nftokenID), ter(tefTOO_BIG)); + // alice creates 501 sell offers for the token + // After we burn the token, 500 of the sell offers should be + // removed, and one is left over + std::vector offerIndexes; + auto const nftokenID = createNftAndOffers( + env, alice, offerIndexes, maxDeletableTokenOfferEntries + 1); - // Close enough ledgers that the burn transaction is no longer retried. - for (int i = 0; i < 10; ++i) - env.close(); + // Verify all sell offers are present in the ledger. + for (uint256 const& offerIndex : offerIndexes) + { + BEAST_EXPECT(env.le(keylet::nftoffer(offerIndex))); + } - // Cancel becky's offer, but alice adds a sell offer. The token - // should still not be burnable. 
- env(token::cancelOffer(becky, {beckyOfferIndex})); - env.close(); + // Burn the token + env(token::burn(alice, nftokenID)); + env.close(); - uint256 const aliceOfferIndex = - keylet::nftoffer(alice, env.seq(alice)).key; - env(token::createOffer(alice, nftokenID, drops(1)), - txflags(tfSellNFToken)); - env.close(); + uint32_t offerDeletedCount = 0; + // Count the number of sell offers that have been deleted + for (uint256 const& offerIndex : offerIndexes) + { + if (!env.le(keylet::nftoffer(offerIndex))) + offerDeletedCount++; + } - env(token::burn(alice, nftokenID), ter(tefTOO_BIG)); - env.close(); + BEAST_EXPECT(offerIndexes.size() == maxTokenOfferCancelCount + 1); - // Cancel alice's sell offer. Now the token should be burnable. - env(token::cancelOffer(alice, {aliceOfferIndex})); - env.close(); + // 500 sell offers should be removed + BEAST_EXPECT(offerDeletedCount == maxTokenOfferCancelCount); - env(token::burn(alice, nftokenID)); - env.close(); + // alice should have ownerCounts of one for the orphaned sell offer + BEAST_EXPECT(ownerCount(env, alice) == 1); + } - // Burning the token should remove all the offers from the ledger. - for (uint256 const& offerIndex : offerIndexes) + // Test that up to 500 buy/sell offers are removed when NFT is burned + // after fixUnburnableNFToken is enabled + if (features[fixUnburnableNFToken]) { - BEAST_EXPECT(!env.le(keylet::nftoffer(offerIndex))); - } + Env env{*this, features}; - // Both alice and becky should have ownerCounts of zero. - BEAST_EXPECT(ownerCount(env, alice) == 0); - BEAST_EXPECT(ownerCount(env, becky) == 0); + Account const alice("alice"); + Account const becky("becky"); + env.fund(XRP(100000), alice, becky); + env.close(); + + // alice creates 499 sell offers and becky creates 2 buy offers. + // When the token is burned, 499 sell offers and 1 buy offer + // are removed. + // In total, 500 offers are removed + std::vector offerIndexes; + auto const nftokenID = createNftAndOffers( + env, alice, offerIndexes, maxDeletableTokenOfferEntries - 1); + + // Verify all sell offers are present in the ledger. + for (uint256 const& offerIndex : offerIndexes) + { + BEAST_EXPECT(env.le(keylet::nftoffer(offerIndex))); + } + + // becky creates 2 buy offers + env(token::createOffer(becky, nftokenID, drops(1)), + token::owner(alice)); + env.close(); + env(token::createOffer(becky, nftokenID, drops(1)), + token::owner(alice)); + env.close(); + + // Burn the token + env(token::burn(alice, nftokenID)); + env.close(); + + // Burning the token should remove all 499 sell offers from the + // ledger. 
+ for (uint256 const& offerIndex : offerIndexes) + { + BEAST_EXPECT(!env.le(keylet::nftoffer(offerIndex))); + } + + // alice should have ownerCount of zero because all her + // sell offers have been deleted + BEAST_EXPECT(ownerCount(env, alice) == 0); + + // becky has ownerCount of one due to an orphaned buy offer + BEAST_EXPECT(ownerCount(env, becky) == 1); + } } void @@ -598,7 +786,8 @@ class NFTokenBurn_test : public beast::unit_test::suite FeatureBitset const all{supported_amendments()}; FeatureBitset const fixNFTDir{fixNFTokenDirV1}; - testWithFeats(all - fixNFTDir); + testWithFeats(all - fixUnburnableNFToken - fixNFTDir); + testWithFeats(all - fixUnburnableNFToken); testWithFeats(all); } }; From 89aa8b21ec14233399513b60c88abd2969f2cba7 Mon Sep 17 00:00:00 2001 From: ledhed2222 Date: Thu, 9 Feb 2023 16:57:51 -0500 Subject: [PATCH 101/508] Fix 3 issues around NFToken offer acceptance (#4380) Fixes 3 issues: In the following scenario, an account cannot perform NFTokenAcceptOffer even though it should be allowed to: - BROKER has < S - ALICE offers to sell token for S - BOB offers to buy token for > S - BROKER tries to bridge the two offers This currently results in `tecINSUFFICIENT_FUNDS`, but should not because BROKER is not spending any funds in this transaction, beyond the transaction fee. When trading an NFT using IOUs, and when the issuer of the IOU has any non-zero value set for TransferFee on their account via AccountSet (not a TransferFee on the NFT), and when the sale amount is equal to the total balance of that IOU that the buyer has, the resulting balance for the issuer of the IOU will become positive. This means that the buyer of the NFT was supposed to have caused a certain amount of IOU to be burned. That amount was unable to be burned because the buyer couldn't cover it. This results in the buyer owing this amount back to the issuer. In a real world scenario, this is appropriate and can be settled off-chain. Currency issuers could not make offers for NFTs using their own currency, receiving `tecINSUFFICIENT_FUNDS` if they tried to do so. With this fix, they are now able to buy/sell NFTs using their own currency. 
---
 src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp |   74 +-
 src/ripple/app/tx/impl/NFTokenCreateOffer.cpp |   31 +-
 src/ripple/ledger/View.h                      |    5 +
 src/test/app/NFToken_test.cpp                 | 1650 +++++++++++++----
 4 files changed, 1339 insertions(+), 421 deletions(-)

diff --git a/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp b/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp
index 07fe9957a76..c335f8d28fd 100644
--- a/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp
+++ b/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp
@@ -168,10 +168,21 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx)
                 dest.has_value() && *dest != ctx.tx[sfAccount])
                 return tecNO_PERMISSION;
         }

+        // The account offering to buy must have funds:
+        //
+        // After this amendment, we allow an IOU issuer to buy an NFT with their
+        // own currency
         auto const needed = bo->at(sfAmount);
-
-        if (accountHolds(
+        if (ctx.view.rules().enabled(fixUnburnableNFToken))
+        {
+            if (accountFunds(
+                    ctx.view, (*bo)[sfOwner], needed, fhZERO_IF_FROZEN, ctx.j) <
+                needed)
+                return tecINSUFFICIENT_FUNDS;
+        }
+        else if (
+            accountHolds(
                 ctx.view,
                 (*bo)[sfOwner],
                 needed.getCurrency(),
@@ -206,15 +217,39 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx)
         // The account offering to buy must have funds:
         auto const needed = so->at(sfAmount);
-
-        if (accountHolds(
-                ctx.view,
-                ctx.tx[sfAccount],
-                needed.getCurrency(),
-                needed.getIssuer(),
-                fhZERO_IF_FROZEN,
-                ctx.j) < needed)
-            return tecINSUFFICIENT_FUNDS;
+        if (!ctx.view.rules().enabled(fixUnburnableNFToken))
+        {
+            if (accountHolds(
+                    ctx.view,
+                    ctx.tx[sfAccount],
+                    needed.getCurrency(),
+                    needed.getIssuer(),
+                    fhZERO_IF_FROZEN,
+                    ctx.j) < needed)
+                return tecINSUFFICIENT_FUNDS;
+        }
+        else if (!bo)
+        {
+            // After this amendment, we allow buyers to buy with their own
+            // issued currency.
+            //
+            // In the case of brokered mode, this check is essentially
+            // redundant, since we have already confirmed that the buy offer is
+            // greater than the sell offer, and that the buyer can cover the
+            // buy offer.
+            //
+            // We also _must not_ check the tx submitter in brokered
+            // mode, because then we are confirming that the broker can
+            // cover what the buyer will pay, which doesn't make sense, causes
+            // an unnecessary tec, and is also resolved with this amendment.
+            if (accountFunds(
+                    ctx.view,
+                    ctx.tx[sfAccount],
+                    needed,
+                    fhZERO_IF_FROZEN,
+                    ctx.j) < needed)
+                return tecINSUFFICIENT_FUNDS;
+        }
     }

     return tesSUCCESS;
@@ -230,7 +265,22 @@ NFTokenAcceptOffer::pay(
     if (amount < beast::zero)
         return tecINTERNAL;

-    return accountSend(view(), from, to, amount, j_);
+    auto const result = accountSend(view(), from, to, amount, j_);
+
+    // After this amendment, if any payment would cause a non-IOU-issuer to
+    // have a negative balance, or an IOU-issuer to have a positive balance in
+    // their own currency, we know that something went wrong. This was
+    // originally found in the context of IOU transfer fees. Since there are
+    // several payouts in this tx, just confirm that the end state is OK.
+ if (!view().rules().enabled(fixUnburnableNFToken)) + return result; + if (result != tesSUCCESS) + return result; + if (accountFunds(view(), from, amount, fhZERO_IF_FROZEN, j_).signum() < 0) + return tecINSUFFICIENT_FUNDS; + if (accountFunds(view(), to, amount, fhZERO_IF_FROZEN, j_).signum() < 0) + return tecINSUFFICIENT_FUNDS; + return tesSUCCESS; } TER diff --git a/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp b/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp index 695efdd0aa4..ff8668e4488 100644 --- a/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp +++ b/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp @@ -153,15 +153,28 @@ NFTokenCreateOffer::preclaim(PreclaimContext const& ctx) // offer may later become unfunded. if (!isSellOffer) { - auto const funds = accountHolds( - ctx.view, - ctx.tx[sfAccount], - amount.getCurrency(), - amount.getIssuer(), - FreezeHandling::fhZERO_IF_FROZEN, - ctx.j); - - if (funds.signum() <= 0) + // After this amendment, we allow an IOU issuer to make a buy offer + // using their own currency. + if (ctx.view.rules().enabled(fixUnburnableNFToken)) + { + if (accountFunds( + ctx.view, + ctx.tx[sfAccount], + amount, + FreezeHandling::fhZERO_IF_FROZEN, + ctx.j) + .signum() <= 0) + return tecUNFUNDED_OFFER; + } + else if ( + accountHolds( + ctx.view, + ctx.tx[sfAccount], + amount.getCurrency(), + amount.getIssuer(), + FreezeHandling::fhZERO_IF_FROZEN, + ctx.j) + .signum() <= 0) return tecUNFUNDED_OFFER; } diff --git a/src/ripple/ledger/View.h b/src/ripple/ledger/View.h index ee917115515..24a647c768d 100644 --- a/src/ripple/ledger/View.h +++ b/src/ripple/ledger/View.h @@ -97,6 +97,11 @@ accountHolds( FreezeHandling zeroIfFrozen, beast::Journal j); +// Returns the amount an account can spend of the currency type saDefault, or +// returns saDefault if this account is the issuer of the the currency in +// question. Should be used in favor of accountHolds when questioning how much +// an account can spend while also allowing currency issuers to spend +// unlimited amounts of their own currency (since they can always issue more). [[nodiscard]] STAmount accountFunds( ReadView const& view, diff --git a/src/test/app/NFToken_test.cpp b/src/test/app/NFToken_test.cpp index 842f3f76cc8..33d725e5a17 100644 --- a/src/test/app/NFToken_test.cpp +++ b/src/test/app/NFToken_test.cpp @@ -3827,497 +3827,586 @@ class NFToken_test : public beast::unit_test::suite using namespace test::jtx; - Env env{*this, features}; - - // The most important thing to explore here is the way funds are - // assigned from the buyer to... - // o the Seller, - // o the Broker, and - // o the Issuer (in the case of a transfer fee). - - Account const issuer{"issuer"}; - Account const minter{"minter"}; - Account const buyer{"buyer"}; - Account const broker{"broker"}; - Account const gw{"gw"}; - IOU const gwXAU(gw["XAU"]); - - env.fund(XRP(1000), issuer, minter, buyer, broker, gw); - env.close(); - - env(trust(issuer, gwXAU(2000))); - env(trust(minter, gwXAU(2000))); - env(trust(buyer, gwXAU(2000))); - env(trust(broker, gwXAU(2000))); - env.close(); - - env(token::setMinter(issuer, minter)); - env.close(); - - // Lambda to check owner count of all accounts is one. - auto checkOwnerCountIsOne = - [this, &env]( - std::initializer_list> - accounts, - int line) { - for (Account const& acct : accounts) - { - if (std::uint32_t ownerCount = this->ownerCount(env, acct); - ownerCount != 1) - { - std::stringstream ss; - ss << "Account " << acct.human() - << " expected ownerCount == 1. 
Got " << ownerCount; - fail(ss.str(), __FILE__, line); - } - } - }; - - // Lambda that mints an NFT and returns the nftID. - auto mintNFT = [&env, &issuer, &minter](std::uint16_t xferFee = 0) { - uint256 const nftID = - token::getNextID(env, issuer, 0, tfTransferable, xferFee); - env(token::mint(minter, 0), - token::issuer(issuer), - token::xferFee(xferFee), - txflags(tfTransferable)); - env.close(); - return nftID; - }; - - // o Seller is selling for zero XRP. - // o Broker charges no fee. - // o No transfer fee. - // - // Since minter is selling for zero the currency must be XRP. + for (auto const& tweakedFeatures : + {features - fixUnburnableNFToken, features | fixUnburnableNFToken}) { - checkOwnerCountIsOne({issuer, minter, buyer, broker}, __LINE__); + Env env{*this, tweakedFeatures}; - uint256 const nftID = mintNFT(); + // The most important thing to explore here is the way funds are + // assigned from the buyer to... + // o the Seller, + // o the Broker, and + // o the Issuer (in the case of a transfer fee). - // minter creates their offer. - uint256 const minterOfferIndex = - keylet::nftoffer(minter, env.seq(minter)).key; - env(token::createOffer(minter, nftID, XRP(0)), - txflags(tfSellNFToken)); - env.close(); + Account const issuer{"issuer"}; + Account const minter{"minter"}; + Account const buyer{"buyer"}; + Account const broker{"broker"}; + Account const gw{"gw"}; + IOU const gwXAU(gw["XAU"]); - // buyer creates their offer. Note: a buy offer can never - // offer zero. - uint256 const buyOfferIndex = - keylet::nftoffer(buyer, env.seq(buyer)).key; - env(token::createOffer(buyer, nftID, XRP(1)), token::owner(minter)); + env.fund(XRP(1000), issuer, minter, buyer, broker, gw); env.close(); - auto const minterBalance = env.balance(minter); - auto const buyerBalance = env.balance(buyer); - auto const brokerBalance = env.balance(broker); - auto const issuerBalance = env.balance(issuer); - - // Broker charges no brokerFee. - env(token::brokerOffers(broker, buyOfferIndex, minterOfferIndex)); + env(trust(issuer, gwXAU(2000))); + env(trust(minter, gwXAU(2000))); + env(trust(buyer, gwXAU(2000))); + env(trust(broker, gwXAU(2000))); env.close(); - // Note that minter's XRP balance goes up even though they - // requested XRP(0). - BEAST_EXPECT(env.balance(minter) == minterBalance + XRP(1)); - BEAST_EXPECT(env.balance(buyer) == buyerBalance - XRP(1)); - BEAST_EXPECT(env.balance(broker) == brokerBalance - drops(10)); - BEAST_EXPECT(env.balance(issuer) == issuerBalance); - - // Burn the NFT so the next test starts with a clean state. - env(token::burn(buyer, nftID)); + env(token::setMinter(issuer, minter)); env.close(); - } - // o Seller is selling for zero XRP. - // o Broker charges a fee. - // o No transfer fee. - // - // Since minter is selling for zero the currency must be XRP. - { - checkOwnerCountIsOne({issuer, minter, buyer, broker}, __LINE__); + // Lambda to check owner count of all accounts is one. + auto checkOwnerCountIsOne = + [this, &env]( + std::initializer_list> + accounts, + int line) { + for (Account const& acct : accounts) + { + if (std::uint32_t ownerCount = + this->ownerCount(env, acct); + ownerCount != 1) + { + std::stringstream ss; + ss << "Account " << acct.human() + << " expected ownerCount == 1. Got " + << ownerCount; + fail(ss.str(), __FILE__, line); + } + } + }; - uint256 const nftID = mintNFT(); + // Lambda that mints an NFT and returns the nftID. 
+ auto mintNFT = [&env, &issuer, &minter](std::uint16_t xferFee = 0) { + uint256 const nftID = + token::getNextID(env, issuer, 0, tfTransferable, xferFee); + env(token::mint(minter, 0), + token::issuer(issuer), + token::xferFee(xferFee), + txflags(tfTransferable)); + env.close(); + return nftID; + }; - // minter creates their offer. - uint256 const minterOfferIndex = - keylet::nftoffer(minter, env.seq(minter)).key; - env(token::createOffer(minter, nftID, XRP(0)), - txflags(tfSellNFToken)); - env.close(); + // o Seller is selling for zero XRP. + // o Broker charges no fee. + // o No transfer fee. + // + // Since minter is selling for zero the currency must be XRP. + { + checkOwnerCountIsOne({issuer, minter, buyer, broker}, __LINE__); - // buyer creates their offer. Note: a buy offer can never - // offer zero. - uint256 const buyOfferIndex = - keylet::nftoffer(buyer, env.seq(buyer)).key; - env(token::createOffer(buyer, nftID, XRP(1)), token::owner(minter)); - env.close(); + uint256 const nftID = mintNFT(); - // Broker attempts to charge a 1.1 XRP brokerFee and fails. - env(token::brokerOffers(broker, buyOfferIndex, minterOfferIndex), - token::brokerFee(XRP(1.1)), - ter(tecINSUFFICIENT_PAYMENT)); - env.close(); + // minter creates their offer. + uint256 const minterOfferIndex = + keylet::nftoffer(minter, env.seq(minter)).key; + env(token::createOffer(minter, nftID, XRP(0)), + txflags(tfSellNFToken)); + env.close(); - auto const minterBalance = env.balance(minter); - auto const buyerBalance = env.balance(buyer); - auto const brokerBalance = env.balance(broker); - auto const issuerBalance = env.balance(issuer); + // buyer creates their offer. Note: a buy offer can never + // offer zero. + uint256 const buyOfferIndex = + keylet::nftoffer(buyer, env.seq(buyer)).key; + env(token::createOffer(buyer, nftID, XRP(1)), + token::owner(minter)); + env.close(); - // Broker charges a 0.5 XRP brokerFee. - env(token::brokerOffers(broker, buyOfferIndex, minterOfferIndex), - token::brokerFee(XRP(0.5))); - env.close(); + auto const minterBalance = env.balance(minter); + auto const buyerBalance = env.balance(buyer); + auto const brokerBalance = env.balance(broker); + auto const issuerBalance = env.balance(issuer); - // Note that minter's XRP balance goes up even though they - // requested XRP(0). - BEAST_EXPECT(env.balance(minter) == minterBalance + XRP(0.5)); - BEAST_EXPECT(env.balance(buyer) == buyerBalance - XRP(1)); - BEAST_EXPECT( - env.balance(broker) == brokerBalance + XRP(0.5) - drops(10)); - BEAST_EXPECT(env.balance(issuer) == issuerBalance); + // Broker charges no brokerFee. + env(token::brokerOffers( + broker, buyOfferIndex, minterOfferIndex)); + env.close(); - // Burn the NFT so the next test starts with a clean state. - env(token::burn(buyer, nftID)); - env.close(); - } + // Note that minter's XRP balance goes up even though they + // requested XRP(0). + BEAST_EXPECT(env.balance(minter) == minterBalance + XRP(1)); + BEAST_EXPECT(env.balance(buyer) == buyerBalance - XRP(1)); + BEAST_EXPECT(env.balance(broker) == brokerBalance - drops(10)); + BEAST_EXPECT(env.balance(issuer) == issuerBalance); - // o Seller is selling for zero XRP. - // o Broker charges no fee. - // o 50% transfer fee. - // - // Since minter is selling for zero the currency must be XRP. - { - checkOwnerCountIsOne({issuer, minter, buyer, broker}, __LINE__); + // Burn the NFT so the next test starts with a clean state. 
+ env(token::burn(buyer, nftID)); + env.close(); + } - uint256 const nftID = mintNFT(maxTransferFee); + // o Seller is selling for zero XRP. + // o Broker charges a fee. + // o No transfer fee. + // + // Since minter is selling for zero the currency must be XRP. + { + checkOwnerCountIsOne({issuer, minter, buyer, broker}, __LINE__); - // minter creates their offer. - uint256 const minterOfferIndex = - keylet::nftoffer(minter, env.seq(minter)).key; - env(token::createOffer(minter, nftID, XRP(0)), - txflags(tfSellNFToken)); - env.close(); + uint256 const nftID = mintNFT(); - // buyer creates their offer. Note: a buy offer can never - // offer zero. - uint256 const buyOfferIndex = - keylet::nftoffer(buyer, env.seq(buyer)).key; - env(token::createOffer(buyer, nftID, XRP(1)), token::owner(minter)); - env.close(); + // minter creates their offer. + uint256 const minterOfferIndex = + keylet::nftoffer(minter, env.seq(minter)).key; + env(token::createOffer(minter, nftID, XRP(0)), + txflags(tfSellNFToken)); + env.close(); - auto const minterBalance = env.balance(minter); - auto const buyerBalance = env.balance(buyer); - auto const brokerBalance = env.balance(broker); - auto const issuerBalance = env.balance(issuer); + // buyer creates their offer. Note: a buy offer can never + // offer zero. + uint256 const buyOfferIndex = + keylet::nftoffer(buyer, env.seq(buyer)).key; + env(token::createOffer(buyer, nftID, XRP(1)), + token::owner(minter)); + env.close(); - // Broker charges no brokerFee. - env(token::brokerOffers(broker, buyOfferIndex, minterOfferIndex)); - env.close(); + // Broker attempts to charge a 1.1 XRP brokerFee and fails. + env(token::brokerOffers( + broker, buyOfferIndex, minterOfferIndex), + token::brokerFee(XRP(1.1)), + ter(tecINSUFFICIENT_PAYMENT)); + env.close(); - // Note that minter's XRP balance goes up even though they - // requested XRP(0). - BEAST_EXPECT(env.balance(minter) == minterBalance + XRP(0.5)); - BEAST_EXPECT(env.balance(buyer) == buyerBalance - XRP(1)); - BEAST_EXPECT(env.balance(broker) == brokerBalance - drops(10)); - BEAST_EXPECT(env.balance(issuer) == issuerBalance + XRP(0.5)); + auto const minterBalance = env.balance(minter); + auto const buyerBalance = env.balance(buyer); + auto const brokerBalance = env.balance(broker); + auto const issuerBalance = env.balance(issuer); - // Burn the NFT so the next test starts with a clean state. - env(token::burn(buyer, nftID)); - env.close(); - } + // Broker charges a 0.5 XRP brokerFee. + env(token::brokerOffers( + broker, buyOfferIndex, minterOfferIndex), + token::brokerFee(XRP(0.5))); + env.close(); - // o Seller is selling for zero XRP. - // o Broker charges 0.5 XRP. - // o 50% transfer fee. - // - // Since minter is selling for zero the currency must be XRP. - { - checkOwnerCountIsOne({issuer, minter, buyer, broker}, __LINE__); + // Note that minter's XRP balance goes up even though they + // requested XRP(0). + BEAST_EXPECT(env.balance(minter) == minterBalance + XRP(0.5)); + BEAST_EXPECT(env.balance(buyer) == buyerBalance - XRP(1)); + BEAST_EXPECT( + env.balance(broker) == + brokerBalance + XRP(0.5) - drops(10)); + BEAST_EXPECT(env.balance(issuer) == issuerBalance); + + // Burn the NFT so the next test starts with a clean state. + env(token::burn(buyer, nftID)); + env.close(); + } - uint256 const nftID = mintNFT(maxTransferFee); + // o Seller is selling for zero XRP. + // o Broker charges no fee. + // o 50% transfer fee. + // + // Since minter is selling for zero the currency must be XRP. 
+ { + checkOwnerCountIsOne({issuer, minter, buyer, broker}, __LINE__); - // minter creates their offer. - uint256 const minterOfferIndex = - keylet::nftoffer(minter, env.seq(minter)).key; - env(token::createOffer(minter, nftID, XRP(0)), - txflags(tfSellNFToken)); - env.close(); + uint256 const nftID = mintNFT(maxTransferFee); - // buyer creates their offer. Note: a buy offer can never - // offer zero. - uint256 const buyOfferIndex = - keylet::nftoffer(buyer, env.seq(buyer)).key; - env(token::createOffer(buyer, nftID, XRP(1)), token::owner(minter)); - env.close(); + // minter creates their offer. + uint256 const minterOfferIndex = + keylet::nftoffer(minter, env.seq(minter)).key; + env(token::createOffer(minter, nftID, XRP(0)), + txflags(tfSellNFToken)); + env.close(); - auto const minterBalance = env.balance(minter); - auto const buyerBalance = env.balance(buyer); - auto const brokerBalance = env.balance(broker); - auto const issuerBalance = env.balance(issuer); + // buyer creates their offer. Note: a buy offer can never + // offer zero. + uint256 const buyOfferIndex = + keylet::nftoffer(buyer, env.seq(buyer)).key; + env(token::createOffer(buyer, nftID, XRP(1)), + token::owner(minter)); + env.close(); - // Broker charges a 0.75 XRP brokerFee. - env(token::brokerOffers(broker, buyOfferIndex, minterOfferIndex), - token::brokerFee(XRP(0.75))); - env.close(); + auto const minterBalance = env.balance(minter); + auto const buyerBalance = env.balance(buyer); + auto const brokerBalance = env.balance(broker); + auto const issuerBalance = env.balance(issuer); - // Note that, with a 50% transfer fee, issuer gets 1/2 of what's - // left _after_ broker takes their fee. minter gets the remainder - // after both broker and minter take their cuts - BEAST_EXPECT(env.balance(minter) == minterBalance + XRP(0.125)); - BEAST_EXPECT(env.balance(buyer) == buyerBalance - XRP(1)); - BEAST_EXPECT( - env.balance(broker) == brokerBalance + XRP(0.75) - drops(10)); - BEAST_EXPECT(env.balance(issuer) == issuerBalance + XRP(0.125)); + // Broker charges no brokerFee. + env(token::brokerOffers( + broker, buyOfferIndex, minterOfferIndex)); + env.close(); - // Burn the NFT so the next test starts with a clean state. - env(token::burn(buyer, nftID)); - env.close(); - } + // Note that minter's XRP balance goes up even though they + // requested XRP(0). + BEAST_EXPECT(env.balance(minter) == minterBalance + XRP(0.5)); + BEAST_EXPECT(env.balance(buyer) == buyerBalance - XRP(1)); + BEAST_EXPECT(env.balance(broker) == brokerBalance - drops(10)); + BEAST_EXPECT(env.balance(issuer) == issuerBalance + XRP(0.5)); - // Lambda to set the balance of all passed in accounts to gwXAU(1000). - auto setXAUBalance_1000 = - [this, &gw, &gwXAU, &env]( - std::initializer_list> - accounts, - int line) { - for (Account const& acct : accounts) - { - static const auto xau1000 = gwXAU(1000); - auto const balance = env.balance(acct, gwXAU); - if (balance < xau1000) - { - env(pay(gw, acct, xau1000 - balance)); - env.close(); - } - else if (balance > xau1000) - { - env(pay(acct, gw, balance - xau1000)); - env.close(); - } - if (env.balance(acct, gwXAU) != xau1000) - { - std::stringstream ss; - ss << "Unable to set " << acct.human() - << " account balance to gwXAU(1000)"; - this->fail(ss.str(), __FILE__, line); - } - } - }; + // Burn the NFT so the next test starts with a clean state. + env(token::burn(buyer, nftID)); + env.close(); + } - // The buyer and seller have identical amounts and there is no - // transfer fee. 
- { - checkOwnerCountIsOne({issuer, minter, buyer, broker}, __LINE__); - setXAUBalance_1000({issuer, minter, buyer, broker}, __LINE__); + // o Seller is selling for zero XRP. + // o Broker charges 0.5 XRP. + // o 50% transfer fee. + // + // Since minter is selling for zero the currency must be XRP. + { + checkOwnerCountIsOne({issuer, minter, buyer, broker}, __LINE__); - uint256 const nftID = mintNFT(); + uint256 const nftID = mintNFT(maxTransferFee); - // minter creates their offer. - uint256 const minterOfferIndex = - keylet::nftoffer(minter, env.seq(minter)).key; - env(token::createOffer(minter, nftID, gwXAU(1000)), - txflags(tfSellNFToken)); - env.close(); + // minter creates their offer. + uint256 const minterOfferIndex = + keylet::nftoffer(minter, env.seq(minter)).key; + env(token::createOffer(minter, nftID, XRP(0)), + txflags(tfSellNFToken)); + env.close(); - { - // buyer creates an offer for more XAU than they currently own. + // buyer creates their offer. Note: a buy offer can never + // offer zero. uint256 const buyOfferIndex = keylet::nftoffer(buyer, env.seq(buyer)).key; - env(token::createOffer(buyer, nftID, gwXAU(1001)), + env(token::createOffer(buyer, nftID, XRP(1)), token::owner(minter)); env.close(); - // broker attempts to broker the offers but cannot. + auto const minterBalance = env.balance(minter); + auto const buyerBalance = env.balance(buyer); + auto const brokerBalance = env.balance(broker); + auto const issuerBalance = env.balance(issuer); + + // Broker charges a 0.75 XRP brokerFee. env(token::brokerOffers( broker, buyOfferIndex, minterOfferIndex), - ter(tecINSUFFICIENT_FUNDS)); + token::brokerFee(XRP(0.75))); env.close(); - // Cancel buyer's bad offer so the next test starts in a - // clean state. - env(token::cancelOffer(buyer, {buyOfferIndex})); + // Note that, with a 50% transfer fee, issuer gets 1/2 of what's + // left _after_ broker takes their fee. minter gets the + // remainder after both broker and minter take their cuts + BEAST_EXPECT(env.balance(minter) == minterBalance + XRP(0.125)); + BEAST_EXPECT(env.balance(buyer) == buyerBalance - XRP(1)); + BEAST_EXPECT( + env.balance(broker) == + brokerBalance + XRP(0.75) - drops(10)); + BEAST_EXPECT(env.balance(issuer) == issuerBalance + XRP(0.125)); + + // Burn the NFT so the next test starts with a clean state. + env(token::burn(buyer, nftID)); env.close(); } + + // Lambda to set the balance of all passed in accounts to + // gwXAU(amount). + auto setXAUBalance = + [this, &gw, &gwXAU, &env]( + std::initializer_list> + accounts, + int amount, + int line) { + for (Account const& acct : accounts) + { + auto const xauAmt = gwXAU(amount); + auto const balance = env.balance(acct, gwXAU); + if (balance < xauAmt) + { + env(pay(gw, acct, xauAmt - balance)); + env.close(); + } + else if (balance > xauAmt) + { + env(pay(acct, gw, balance - xauAmt)); + env.close(); + } + if (env.balance(acct, gwXAU) != xauAmt) + { + std::stringstream ss; + ss << "Unable to set " << acct.human() + << " account balance to gwXAU(" << amount << ")"; + this->fail(ss.str(), __FILE__, line); + } + } + }; + + // The buyer and seller have identical amounts and there is no + // transfer fee. { - // buyer creates an offer for less that what minter is asking. + checkOwnerCountIsOne({issuer, minter, buyer, broker}, __LINE__); + setXAUBalance({issuer, minter, buyer, broker}, 1000, __LINE__); + + uint256 const nftID = mintNFT(); + + // minter creates their offer. 
+ uint256 const minterOfferIndex = + keylet::nftoffer(minter, env.seq(minter)).key; + env(token::createOffer(minter, nftID, gwXAU(1000)), + txflags(tfSellNFToken)); + env.close(); + + { + // buyer creates an offer for more XAU than they currently + // own. + uint256 const buyOfferIndex = + keylet::nftoffer(buyer, env.seq(buyer)).key; + env(token::createOffer(buyer, nftID, gwXAU(1001)), + token::owner(minter)); + env.close(); + + // broker attempts to broker the offers but cannot. + env(token::brokerOffers( + broker, buyOfferIndex, minterOfferIndex), + ter(tecINSUFFICIENT_FUNDS)); + env.close(); + + // Cancel buyer's bad offer so the next test starts in a + // clean state. + env(token::cancelOffer(buyer, {buyOfferIndex})); + env.close(); + } + { + // buyer creates an offer for less that what minter is + // asking. + uint256 const buyOfferIndex = + keylet::nftoffer(buyer, env.seq(buyer)).key; + env(token::createOffer(buyer, nftID, gwXAU(999)), + token::owner(minter)); + env.close(); + + // broker attempts to broker the offers but cannot. + env(token::brokerOffers( + broker, buyOfferIndex, minterOfferIndex), + ter(tecINSUFFICIENT_PAYMENT)); + env.close(); + + // Cancel buyer's bad offer so the next test starts in a + // clean state. + env(token::cancelOffer(buyer, {buyOfferIndex})); + env.close(); + } + + // buyer creates a large enough offer. uint256 const buyOfferIndex = keylet::nftoffer(buyer, env.seq(buyer)).key; - env(token::createOffer(buyer, nftID, gwXAU(999)), + env(token::createOffer(buyer, nftID, gwXAU(1000)), token::owner(minter)); env.close(); - // broker attempts to broker the offers but cannot. + // Broker attempts to charge a brokerFee but cannot. env(token::brokerOffers( broker, buyOfferIndex, minterOfferIndex), + token::brokerFee(gwXAU(0.1)), ter(tecINSUFFICIENT_PAYMENT)); env.close(); - // Cancel buyer's bad offer so the next test starts in a - // clean state. - env(token::cancelOffer(buyer, {buyOfferIndex})); + // broker charges no brokerFee and succeeds. + env(token::brokerOffers( + broker, buyOfferIndex, minterOfferIndex)); + env.close(); + + BEAST_EXPECT(ownerCount(env, issuer) == 1); + BEAST_EXPECT(ownerCount(env, minter) == 1); + BEAST_EXPECT(ownerCount(env, buyer) == 2); + BEAST_EXPECT(ownerCount(env, broker) == 1); + BEAST_EXPECT(env.balance(issuer, gwXAU) == gwXAU(1000)); + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(2000)); + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(0)); + BEAST_EXPECT(env.balance(broker, gwXAU) == gwXAU(1000)); + + // Burn the NFT so the next test starts with a clean state. + env(token::burn(buyer, nftID)); env.close(); } - // buyer creates a large enough offer. - uint256 const buyOfferIndex = - keylet::nftoffer(buyer, env.seq(buyer)).key; - env(token::createOffer(buyer, nftID, gwXAU(1000)), - token::owner(minter)); - env.close(); + // seller offers more than buyer is asking. + // There are both transfer and broker fees. + { + checkOwnerCountIsOne({issuer, minter, buyer, broker}, __LINE__); + setXAUBalance({issuer, minter, buyer, broker}, 1000, __LINE__); - // Broker attempts to charge a brokerFee but cannot. - env(token::brokerOffers(broker, buyOfferIndex, minterOfferIndex), - token::brokerFee(gwXAU(0.1)), - ter(tecINSUFFICIENT_PAYMENT)); - env.close(); + uint256 const nftID = mintNFT(maxTransferFee); - // broker charges no brokerFee and succeeds. 
- env(token::brokerOffers(broker, buyOfferIndex, minterOfferIndex)); - env.close(); - - BEAST_EXPECT(ownerCount(env, issuer) == 1); - BEAST_EXPECT(ownerCount(env, minter) == 1); - BEAST_EXPECT(ownerCount(env, buyer) == 2); - BEAST_EXPECT(ownerCount(env, broker) == 1); - BEAST_EXPECT(env.balance(issuer, gwXAU) == gwXAU(1000)); - BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(2000)); - BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(0)); - BEAST_EXPECT(env.balance(broker, gwXAU) == gwXAU(1000)); + // minter creates their offer. + uint256 const minterOfferIndex = + keylet::nftoffer(minter, env.seq(minter)).key; + env(token::createOffer(minter, nftID, gwXAU(900)), + txflags(tfSellNFToken)); + env.close(); + { + // buyer creates an offer for more XAU than they currently + // own. + uint256 const buyOfferIndex = + keylet::nftoffer(buyer, env.seq(buyer)).key; + env(token::createOffer(buyer, nftID, gwXAU(1001)), + token::owner(minter)); + env.close(); - // Burn the NFT so the next test starts with a clean state. - env(token::burn(buyer, nftID)); - env.close(); - } + // broker attempts to broker the offers but cannot. + env(token::brokerOffers( + broker, buyOfferIndex, minterOfferIndex), + ter(tecINSUFFICIENT_FUNDS)); + env.close(); - // seller offers more than buyer is asking. - // There are both transfer and broker fees. - { - checkOwnerCountIsOne({issuer, minter, buyer, broker}, __LINE__); - setXAUBalance_1000({issuer, minter, buyer, broker}, __LINE__); + // Cancel buyer's bad offer so the next test starts in a + // clean state. + env(token::cancelOffer(buyer, {buyOfferIndex})); + env.close(); + } + { + // buyer creates an offer for less that what minter is + // asking. + uint256 const buyOfferIndex = + keylet::nftoffer(buyer, env.seq(buyer)).key; + env(token::createOffer(buyer, nftID, gwXAU(899)), + token::owner(minter)); + env.close(); - uint256 const nftID = mintNFT(maxTransferFee); + // broker attempts to broker the offers but cannot. + env(token::brokerOffers( + broker, buyOfferIndex, minterOfferIndex), + ter(tecINSUFFICIENT_PAYMENT)); + env.close(); - // minter creates their offer. - uint256 const minterOfferIndex = - keylet::nftoffer(minter, env.seq(minter)).key; - env(token::createOffer(minter, nftID, gwXAU(900)), - txflags(tfSellNFToken)); - env.close(); - { - // buyer creates an offer for more XAU than they currently own. + // Cancel buyer's bad offer so the next test starts in a + // clean state. + env(token::cancelOffer(buyer, {buyOfferIndex})); + env.close(); + } + // buyer creates a large enough offer. uint256 const buyOfferIndex = keylet::nftoffer(buyer, env.seq(buyer)).key; - env(token::createOffer(buyer, nftID, gwXAU(1001)), + env(token::createOffer(buyer, nftID, gwXAU(1000)), token::owner(minter)); env.close(); - // broker attempts to broker the offers but cannot. + // Broker attempts to charge a brokerFee larger than the + // difference between the two offers but cannot. + env(token::brokerOffers( + broker, buyOfferIndex, minterOfferIndex), + token::brokerFee(gwXAU(101)), + ter(tecINSUFFICIENT_PAYMENT)); + env.close(); + + // broker charges the full difference between the two offers and + // succeeds. env(token::brokerOffers( broker, buyOfferIndex, minterOfferIndex), - ter(tecINSUFFICIENT_FUNDS)); + token::brokerFee(gwXAU(100))); env.close(); - // Cancel buyer's bad offer so the next test starts in a - // clean state. 
- env(token::cancelOffer(buyer, {buyOfferIndex})); + BEAST_EXPECT(ownerCount(env, issuer) == 1); + BEAST_EXPECT(ownerCount(env, minter) == 1); + BEAST_EXPECT(ownerCount(env, buyer) == 2); + BEAST_EXPECT(ownerCount(env, broker) == 1); + BEAST_EXPECT(env.balance(issuer, gwXAU) == gwXAU(1450)); + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(1450)); + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(0)); + BEAST_EXPECT(env.balance(broker, gwXAU) == gwXAU(1100)); + + // Burn the NFT so the next test starts with a clean state. + env(token::burn(buyer, nftID)); env.close(); } + // seller offers more than buyer is asking. + // There are both transfer and broker fees, but broker takes less + // than the maximum. { - // buyer creates an offer for less that what minter is asking. + checkOwnerCountIsOne({issuer, minter, buyer, broker}, __LINE__); + setXAUBalance({issuer, minter, buyer, broker}, 1000, __LINE__); + + uint256 const nftID = mintNFT(maxTransferFee / 2); // 25% + + // minter creates their offer. + uint256 const minterOfferIndex = + keylet::nftoffer(minter, env.seq(minter)).key; + env(token::createOffer(minter, nftID, gwXAU(900)), + txflags(tfSellNFToken)); + env.close(); + + // buyer creates a large enough offer. uint256 const buyOfferIndex = keylet::nftoffer(buyer, env.seq(buyer)).key; - env(token::createOffer(buyer, nftID, gwXAU(899)), + env(token::createOffer(buyer, nftID, gwXAU(1000)), token::owner(minter)); env.close(); - // broker attempts to broker the offers but cannot. + // broker charges half difference between the two offers and + // succeeds. 25% of the remaining difference goes to issuer. + // The rest goes to minter. env(token::brokerOffers( broker, buyOfferIndex, minterOfferIndex), - ter(tecINSUFFICIENT_PAYMENT)); + token::brokerFee(gwXAU(50))); env.close(); - // Cancel buyer's bad offer so the next test starts in a - // clean state. - env(token::cancelOffer(buyer, {buyOfferIndex})); + BEAST_EXPECT(ownerCount(env, issuer) == 1); + BEAST_EXPECT(ownerCount(env, minter) == 1); + BEAST_EXPECT(ownerCount(env, buyer) == 2); + BEAST_EXPECT(ownerCount(env, broker) == 1); + BEAST_EXPECT(env.balance(issuer, gwXAU) == gwXAU(1237.5)); + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(1712.5)); + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(0)); + BEAST_EXPECT(env.balance(broker, gwXAU) == gwXAU(1050)); + + // Burn the NFT so the next test starts with a clean state. + env(token::burn(buyer, nftID)); env.close(); } - // buyer creates a large enough offer. - uint256 const buyOfferIndex = - keylet::nftoffer(buyer, env.seq(buyer)).key; - env(token::createOffer(buyer, nftID, gwXAU(1000)), - token::owner(minter)); - env.close(); - - // Broker attempts to charge a brokerFee larger than the - // difference between the two offers but cannot. - env(token::brokerOffers(broker, buyOfferIndex, minterOfferIndex), - token::brokerFee(gwXAU(101)), - ter(tecINSUFFICIENT_PAYMENT)); - env.close(); - - // broker charges the full difference between the two offers and - // succeeds. 
- env(token::brokerOffers(broker, buyOfferIndex, minterOfferIndex), - token::brokerFee(gwXAU(100))); - env.close(); - - BEAST_EXPECT(ownerCount(env, issuer) == 1); - BEAST_EXPECT(ownerCount(env, minter) == 1); - BEAST_EXPECT(ownerCount(env, buyer) == 2); - BEAST_EXPECT(ownerCount(env, broker) == 1); - BEAST_EXPECT(env.balance(issuer, gwXAU) == gwXAU(1450)); - BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(1450)); - BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(0)); - BEAST_EXPECT(env.balance(broker, gwXAU) == gwXAU(1100)); - - // Burn the NFT so the next test starts with a clean state. - env(token::burn(buyer, nftID)); - env.close(); - } - // seller offers more than buyer is asking. - // There are both transfer and broker fees, but broker takes less than - // the maximum. - { - checkOwnerCountIsOne({issuer, minter, buyer, broker}, __LINE__); - setXAUBalance_1000({issuer, minter, buyer, broker}, __LINE__); - - uint256 const nftID = mintNFT(maxTransferFee / 2); // 25% - - // minter creates their offer. - uint256 const minterOfferIndex = - keylet::nftoffer(minter, env.seq(minter)).key; - env(token::createOffer(minter, nftID, gwXAU(900)), - txflags(tfSellNFToken)); - env.close(); - - // buyer creates a large enough offer. - uint256 const buyOfferIndex = - keylet::nftoffer(buyer, env.seq(buyer)).key; - env(token::createOffer(buyer, nftID, gwXAU(1000)), - token::owner(minter)); - env.close(); - - // broker charges half difference between the two offers and - // succeeds. 25% of the remaining difference goes to issuer. - // The rest goes to minter. - env(token::brokerOffers(broker, buyOfferIndex, minterOfferIndex), - token::brokerFee(gwXAU(50))); - env.close(); + // Broker has a balance less than the seller offer + { + checkOwnerCountIsOne({issuer, minter, buyer, broker}, __LINE__); + setXAUBalance({issuer, minter, buyer}, 1000, __LINE__); + setXAUBalance({broker}, 500, __LINE__); + uint256 const nftID = mintNFT(maxTransferFee / 2); // 25% + + // minter creates their offer. + uint256 const minterOfferIndex = + keylet::nftoffer(minter, env.seq(minter)).key; + env(token::createOffer(minter, nftID, gwXAU(900)), + txflags(tfSellNFToken)); + env.close(); - BEAST_EXPECT(ownerCount(env, issuer) == 1); - BEAST_EXPECT(ownerCount(env, minter) == 1); - BEAST_EXPECT(ownerCount(env, buyer) == 2); - BEAST_EXPECT(ownerCount(env, broker) == 1); - BEAST_EXPECT(env.balance(issuer, gwXAU) == gwXAU(1237.5)); - BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(1712.5)); - BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(0)); - BEAST_EXPECT(env.balance(broker, gwXAU) == gwXAU(1050)); + // buyer creates a large enough offer. + uint256 const buyOfferIndex = + keylet::nftoffer(buyer, env.seq(buyer)).key; + env(token::createOffer(buyer, nftID, gwXAU(1000)), + token::owner(minter)); + env.close(); - // Burn the NFT so the next test starts with a clean state. 
- env(token::burn(buyer, nftID)); - env.close(); + if (tweakedFeatures[fixUnburnableNFToken]) + { + env(token::brokerOffers( + broker, buyOfferIndex, minterOfferIndex), + token::brokerFee(gwXAU(50))); + env.close(); + BEAST_EXPECT(ownerCount(env, issuer) == 1); + BEAST_EXPECT(ownerCount(env, minter) == 1); + BEAST_EXPECT(ownerCount(env, buyer) == 2); + BEAST_EXPECT(ownerCount(env, broker) == 1); + BEAST_EXPECT(env.balance(issuer, gwXAU) == gwXAU(1237.5)); + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(1712.5)); + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(0)); + BEAST_EXPECT(env.balance(broker, gwXAU) == gwXAU(550)); + + // Burn the NFT so the next test starts with a clean state. + env(token::burn(buyer, nftID)); + env.close(); + } + else + { + env(token::brokerOffers( + broker, buyOfferIndex, minterOfferIndex), + token::brokerFee(gwXAU(50)), + ter(tecINSUFFICIENT_FUNDS)); + env.close(); + BEAST_EXPECT(ownerCount(env, issuer) == 1); + BEAST_EXPECT(ownerCount(env, minter) == 3); + BEAST_EXPECT(ownerCount(env, buyer) == 2); + BEAST_EXPECT(ownerCount(env, broker) == 1); + BEAST_EXPECT(env.balance(issuer, gwXAU) == gwXAU(1000)); + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(1000)); + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(1000)); + BEAST_EXPECT(env.balance(broker, gwXAU) == gwXAU(500)); + + // Burn the NFT so the next test starts with a clean state. + env(token::burn(minter, nftID)); + env.close(); + } + } } } @@ -4823,9 +4912,14 @@ class NFToken_test : public beast::unit_test::suite Account const gw{"gw"}; IOU const gwXAU(gw["XAU"]); - // Test both with and without fixNFTokenNegOffer + // Test both with and without fixNFTokenNegOffer and + // fixUnburnableNFToken. Need to turn off fixUnburnableNFToken as well + // because that amendment came later and addressed the acceptance + // side of this issue. for (auto const& tweakedFeatures : - {features - fixNFTokenNegOffer - featureNonFungibleTokensV1_1, + {features - fixNFTokenNegOffer - featureNonFungibleTokensV1_1 - + fixUnburnableNFToken, + features - fixNFTokenNegOffer - featureNonFungibleTokensV1_1, features | fixNFTokenNegOffer}) { // There was a bug in the initial NFT implementation that @@ -4914,8 +5008,10 @@ class NFToken_test : public beast::unit_test::suite env.close(); } { - // 1. If fixNFTokenNegOffer is NOT enabled get tecSUCCESS. - // 2. If fixNFTokenNegOffer IS enabled get tecOBJECT_NOT_FOUND. + // 1. If fixNFTokenNegOffer is enabled get tecOBJECT_NOT_FOUND + // 2. If it is not enabled, but fixUnburnableNFToken is + // enabled, get tecOBJECT_NOT_FOUND. + // 3. If neither are enabled, get tesSUCCESS. TER const offerAcceptTER = tweakedFeatures[fixNFTokenNegOffer] ? 
static_cast(tecOBJECT_NOT_FOUND) : static_cast(tesSUCCESS); @@ -5047,6 +5143,757 @@ class NFToken_test : public beast::unit_test::suite } } + void + testIOUWithTransferFee(FeatureBitset features) + { + using namespace test::jtx; + + testcase("Payments with IOU transfer fees"); + + for (auto const& tweakedFeatures : + {features - fixUnburnableNFToken, features | fixUnburnableNFToken}) + { + Env env{*this, tweakedFeatures}; + + Account const minter{"minter"}; + Account const secondarySeller{"seller"}; + Account const buyer{"buyer"}; + Account const gw{"gateway"}; + Account const broker{"broker"}; + IOU const gwXAU(gw["XAU"]); + IOU const gwXPB(gw["XPB"]); + + env.fund(XRP(1000), gw, minter, secondarySeller, buyer, broker); + env.close(); + + env(trust(minter, gwXAU(2000))); + env(trust(secondarySeller, gwXAU(2000))); + env(trust(broker, gwXAU(10000))); + env(trust(buyer, gwXAU(2000))); + env(trust(buyer, gwXPB(2000))); + env.close(); + + // The IOU issuer has a 2% transfer rate + env(rate(gw, 1.02)); + env.close(); + + auto expectInitialState = [this, + &env, + &buyer, + &minter, + &secondarySeller, + &broker, + &gw, + &gwXAU, + &gwXPB]() { + // Buyer should have XAU 1000, XPB 0 + // Minter should have XAU 0, XPB 0 + // Secondary seller should have XAU 0, XPB 0 + // Broker should have XAU 5000, XPB 0 + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(1000)); + BEAST_EXPECT(env.balance(buyer, gwXPB) == gwXPB(0)); + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(0)); + BEAST_EXPECT(env.balance(minter, gwXPB) == gwXPB(0)); + BEAST_EXPECT(env.balance(secondarySeller, gwXAU) == gwXAU(0)); + BEAST_EXPECT(env.balance(secondarySeller, gwXPB) == gwXPB(0)); + BEAST_EXPECT(env.balance(broker, gwXAU) == gwXAU(5000)); + BEAST_EXPECT(env.balance(broker, gwXPB) == gwXPB(0)); + BEAST_EXPECT(env.balance(gw, buyer["XAU"]) == gwXAU(-1000)); + BEAST_EXPECT(env.balance(gw, buyer["XPB"]) == gwXPB(0)); + BEAST_EXPECT(env.balance(gw, minter["XAU"]) == gwXAU(0)); + BEAST_EXPECT(env.balance(gw, minter["XPB"]) == gwXPB(0)); + BEAST_EXPECT( + env.balance(gw, secondarySeller["XAU"]) == gwXAU(0)); + BEAST_EXPECT( + env.balance(gw, secondarySeller["XPB"]) == gwXPB(0)); + BEAST_EXPECT(env.balance(gw, broker["XAU"]) == gwXAU(-5000)); + BEAST_EXPECT(env.balance(gw, broker["XPB"]) == gwXPB(0)); + }; + + auto reinitializeTrustLineBalances = [&expectInitialState, + &env, + &buyer, + &minter, + &secondarySeller, + &broker, + &gw, + &gwXAU, + &gwXPB]() { + if (auto const difference = + gwXAU(1000) - env.balance(buyer, gwXAU); + difference > gwXAU(0)) + env(pay(gw, buyer, difference)); + if (env.balance(buyer, gwXPB) > gwXPB(0)) + env(pay(buyer, gw, env.balance(buyer, gwXPB))); + if (env.balance(minter, gwXAU) > gwXAU(0)) + env(pay(minter, gw, env.balance(minter, gwXAU))); + if (env.balance(minter, gwXPB) > gwXPB(0)) + env(pay(minter, gw, env.balance(minter, gwXPB))); + if (env.balance(secondarySeller, gwXAU) > gwXAU(0)) + env( + pay(secondarySeller, + gw, + env.balance(secondarySeller, gwXAU))); + if (env.balance(secondarySeller, gwXPB) > gwXPB(0)) + env( + pay(secondarySeller, + gw, + env.balance(secondarySeller, gwXPB))); + auto brokerDiff = gwXAU(5000) - env.balance(broker, gwXAU); + if (brokerDiff > gwXAU(0)) + env(pay(gw, broker, brokerDiff)); + else if (brokerDiff < gwXAU(0)) + { + brokerDiff.negate(); + env(pay(broker, gw, brokerDiff)); + } + if (env.balance(broker, gwXPB) > gwXPB(0)) + env(pay(broker, gw, env.balance(broker, gwXPB))); + env.close(); + expectInitialState(); + }; + + auto mintNFT = [&env](Account const& 
minter, int transferFee = 0) { + uint256 const nftID = token::getNextID( + env, minter, 0, tfTransferable, transferFee); + env(token::mint(minter), + token::xferFee(transferFee), + txflags(tfTransferable)); + env.close(); + return nftID; + }; + + auto createBuyOffer = + [&env]( + Account const& offerer, + Account const& owner, + uint256 const& nftID, + STAmount const& amount, + std::optional const terCode = {}) { + uint256 const offerID = + keylet::nftoffer(offerer, env.seq(offerer)).key; + env(token::createOffer(offerer, nftID, amount), + token::owner(owner), + terCode ? ter(*terCode) + : ter(static_cast(tesSUCCESS))); + env.close(); + return offerID; + }; + + auto createSellOffer = + [&env]( + Account const& offerer, + uint256 const& nftID, + STAmount const& amount, + std::optional const terCode = {}) { + uint256 const offerID = + keylet::nftoffer(offerer, env.seq(offerer)).key; + env(token::createOffer(offerer, nftID, amount), + txflags(tfSellNFToken), + terCode ? ter(*terCode) + : ter(static_cast(tesSUCCESS))); + env.close(); + return offerID; + }; + + { + // Buyer attempts to send 100% of their balance of an IOU + // (sellside) + reinitializeTrustLineBalances(); + auto const nftID = mintNFT(minter); + auto const offerID = + createSellOffer(minter, nftID, gwXAU(1000)); + auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + ? static_cast(tecINSUFFICIENT_FUNDS) + : static_cast(tesSUCCESS); + env(token::acceptSellOffer(buyer, offerID), ter(sellTER)); + env.close(); + + if (tweakedFeatures[fixUnburnableNFToken]) + expectInitialState(); + else + { + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(1000)); + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(-20)); + BEAST_EXPECT( + env.balance(gw, minter["XAU"]) == gwXAU(-1000)); + BEAST_EXPECT(env.balance(gw, buyer["XAU"]) == gwXAU(20)); + } + } + { + // Buyer attempts to send 100% of their balance of an IOU + // (buyside) + reinitializeTrustLineBalances(); + auto const nftID = mintNFT(minter); + auto const offerID = + createBuyOffer(buyer, minter, nftID, gwXAU(1000)); + auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + ? static_cast(tecINSUFFICIENT_FUNDS) + : static_cast(tesSUCCESS); + env(token::acceptBuyOffer(minter, offerID), ter(sellTER)); + env.close(); + + if (tweakedFeatures[fixUnburnableNFToken]) + expectInitialState(); + else + { + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(1000)); + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(-20)); + BEAST_EXPECT( + env.balance(gw, minter["XAU"]) == gwXAU(-1000)); + BEAST_EXPECT(env.balance(gw, buyer["XAU"]) == gwXAU(20)); + } + } + { + // Buyer attempts to send an amount less than 100% of their + // balance of an IOU, but such that the addition of the transfer + // fee would be greater than the buyer's balance (sellside) + reinitializeTrustLineBalances(); + auto const nftID = mintNFT(minter); + auto const offerID = createSellOffer(minter, nftID, gwXAU(995)); + auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + ? 
static_cast(tecINSUFFICIENT_FUNDS) + : static_cast(tesSUCCESS); + env(token::acceptSellOffer(buyer, offerID), ter(sellTER)); + env.close(); + + if (tweakedFeatures[fixUnburnableNFToken]) + expectInitialState(); + else + { + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(995)); + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(-14.9)); + BEAST_EXPECT(env.balance(gw, minter["XAU"]) == gwXAU(-995)); + BEAST_EXPECT(env.balance(gw, buyer["XAU"]) == gwXAU(14.9)); + } + } + { + // Buyer attempts to send an amount less than 100% of their + // balance of an IOU, but such that the addition of the transfer + // fee would be greater than the buyer's balance (buyside) + reinitializeTrustLineBalances(); + auto const nftID = mintNFT(minter); + auto const offerID = + createBuyOffer(buyer, minter, nftID, gwXAU(995)); + auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + ? static_cast(tecINSUFFICIENT_FUNDS) + : static_cast(tesSUCCESS); + env(token::acceptBuyOffer(minter, offerID), ter(sellTER)); + env.close(); + + if (tweakedFeatures[fixUnburnableNFToken]) + expectInitialState(); + else + { + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(995)); + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(-14.9)); + BEAST_EXPECT(env.balance(gw, minter["XAU"]) == gwXAU(-995)); + BEAST_EXPECT(env.balance(gw, buyer["XAU"]) == gwXAU(14.9)); + } + } + { + // Buyer attempts to send an amount less than 100% of their + // balance of an IOU with a transfer fee, and such that the + // addition of the transfer fee is still less than their balance + // (sellside) + reinitializeTrustLineBalances(); + auto const nftID = mintNFT(minter); + auto const offerID = createSellOffer(minter, nftID, gwXAU(900)); + env(token::acceptSellOffer(buyer, offerID)); + env.close(); + + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(900)); + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(82)); + BEAST_EXPECT(env.balance(gw, minter["XAU"]) == gwXAU(-900)); + BEAST_EXPECT(env.balance(gw, buyer["XAU"]) == gwXAU(-82)); + } + { + // Buyer attempts to send an amount less than 100% of their + // balance of an IOU with a transfer fee, and such that the + // addition of the transfer fee is still less than their balance + // (buyside) + reinitializeTrustLineBalances(); + auto const nftID = mintNFT(minter); + auto const offerID = + createBuyOffer(buyer, minter, nftID, gwXAU(900)); + env(token::acceptBuyOffer(minter, offerID)); + env.close(); + + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(900)); + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(82)); + BEAST_EXPECT(env.balance(gw, minter["XAU"]) == gwXAU(-900)); + BEAST_EXPECT(env.balance(gw, buyer["XAU"]) == gwXAU(-82)); + } + { + // Buyer attempts to send an amount less than 100% of their + // balance of an IOU with a transfer fee, and such that the + // addition of the transfer fee is equal than their balance + // (sellside) + reinitializeTrustLineBalances(); + + // pay them an additional XAU 20 to cover transfer rate + env(pay(gw, buyer, gwXAU(20))); + env.close(); + + auto const nftID = mintNFT(minter); + auto const offerID = + createSellOffer(minter, nftID, gwXAU(1000)); + env(token::acceptSellOffer(buyer, offerID)); + env.close(); + + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(1000)); + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(0)); + BEAST_EXPECT(env.balance(gw, minter["XAU"]) == gwXAU(-1000)); + BEAST_EXPECT(env.balance(gw, buyer["XAU"]) == gwXAU(0)); + } + { + // Buyer attempts to send an amount less than 100% of their + // balance of an IOU with a transfer fee, and 
such that the + // addition of the transfer fee is equal than their balance + // (buyside) + reinitializeTrustLineBalances(); + + // pay them an additional XAU 20 to cover transfer rate + env(pay(gw, buyer, gwXAU(20))); + env.close(); + + auto const nftID = mintNFT(minter); + auto const offerID = + createBuyOffer(buyer, minter, nftID, gwXAU(1000)); + env(token::acceptBuyOffer(minter, offerID)); + env.close(); + + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(1000)); + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(0)); + BEAST_EXPECT(env.balance(gw, minter["XAU"]) == gwXAU(-1000)); + BEAST_EXPECT(env.balance(gw, buyer["XAU"]) == gwXAU(0)); + } + { + // Gateway attempts to buy NFT with their own IOU - no + // transfer fee is calculated here (sellside) + reinitializeTrustLineBalances(); + + auto const nftID = mintNFT(minter); + auto const offerID = + createSellOffer(minter, nftID, gwXAU(1000)); + auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + ? static_cast(tesSUCCESS) + : static_cast(tecINSUFFICIENT_FUNDS); + env(token::acceptSellOffer(gw, offerID), ter(sellTER)); + env.close(); + + if (tweakedFeatures[fixUnburnableNFToken]) + { + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(1000)); + BEAST_EXPECT( + env.balance(gw, minter["XAU"]) == gwXAU(-1000)); + } + else + expectInitialState(); + } + { + // Gateway attempts to buy NFT with their own IOU - no + // transfer fee is calculated here (buyside) + reinitializeTrustLineBalances(); + + auto const nftID = mintNFT(minter); + auto const offerTER = tweakedFeatures[fixUnburnableNFToken] + ? static_cast(tesSUCCESS) + : static_cast(tecUNFUNDED_OFFER); + auto const offerID = + createBuyOffer(gw, minter, nftID, gwXAU(1000), {offerTER}); + auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + ? static_cast(tesSUCCESS) + : static_cast(tecOBJECT_NOT_FOUND); + env(token::acceptBuyOffer(minter, offerID), ter(sellTER)); + env.close(); + + if (tweakedFeatures[fixUnburnableNFToken]) + { + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(1000)); + BEAST_EXPECT( + env.balance(gw, minter["XAU"]) == gwXAU(-1000)); + } + else + expectInitialState(); + } + { + // Gateway attempts to buy NFT with their own IOU for more + // than minter trusts (sellside) + reinitializeTrustLineBalances(); + auto const nftID = mintNFT(minter); + auto const offerID = + createSellOffer(minter, nftID, gwXAU(5000)); + auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + ? static_cast(tesSUCCESS) + : static_cast(tecINSUFFICIENT_FUNDS); + env(token::acceptSellOffer(gw, offerID), ter(sellTER)); + env.close(); + + if (tweakedFeatures[fixUnburnableNFToken]) + { + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(5000)); + BEAST_EXPECT( + env.balance(gw, minter["XAU"]) == gwXAU(-5000)); + } + else + expectInitialState(); + } + { + // Gateway attempts to buy NFT with their own IOU for more + // than minter trusts (buyside) + reinitializeTrustLineBalances(); + + auto const nftID = mintNFT(minter); + auto const offerTER = tweakedFeatures[fixUnburnableNFToken] + ? static_cast(tesSUCCESS) + : static_cast(tecUNFUNDED_OFFER); + auto const offerID = + createBuyOffer(gw, minter, nftID, gwXAU(5000), {offerTER}); + auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + ? 
static_cast(tesSUCCESS) + : static_cast(tecOBJECT_NOT_FOUND); + env(token::acceptBuyOffer(minter, offerID), ter(sellTER)); + env.close(); + + if (tweakedFeatures[fixUnburnableNFToken]) + { + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(5000)); + BEAST_EXPECT( + env.balance(gw, minter["XAU"]) == gwXAU(-5000)); + } + else + expectInitialState(); + } + { + // Gateway is the NFT minter and attempts to sell NFT for an + // amount that would be greater than a balance if there were a + // transfer fee calculated in this transaction. (sellside) + reinitializeTrustLineBalances(); + auto const nftID = mintNFT(gw); + auto const offerID = createSellOffer(gw, nftID, gwXAU(1000)); + env(token::acceptSellOffer(buyer, offerID)); + env.close(); + + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(0)); + BEAST_EXPECT(env.balance(gw, buyer["XAU"]) == gwXAU(0)); + } + { + // Gateway is the NFT minter and attempts to sell NFT for an + // amount that would be greater than a balance if there were a + // transfer fee calculated in this transaction. (buyside) + reinitializeTrustLineBalances(); + + auto const nftID = mintNFT(gw); + auto const offerID = + createBuyOffer(buyer, gw, nftID, gwXAU(1000)); + env(token::acceptBuyOffer(gw, offerID)); + env.close(); + + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(0)); + BEAST_EXPECT(env.balance(gw, buyer["XAU"]) == gwXAU(0)); + } + { + // Gateway is the NFT minter and attempts to sell NFT for an + // amount that is greater than a balance before transfer fees. + // (sellside) + reinitializeTrustLineBalances(); + auto const nftID = mintNFT(gw); + auto const offerID = createSellOffer(gw, nftID, gwXAU(2000)); + env(token::acceptSellOffer(buyer, offerID), + ter(static_cast(tecINSUFFICIENT_FUNDS))); + env.close(); + expectInitialState(); + } + { + // Gateway is the NFT minter and attempts to sell NFT for an + // amount that is greater than a balance before transfer fees. + // (buyside) + reinitializeTrustLineBalances(); + auto const nftID = mintNFT(gw); + auto const offerID = + createBuyOffer(buyer, gw, nftID, gwXAU(2000)); + env(token::acceptBuyOffer(gw, offerID), + ter(static_cast(tecINSUFFICIENT_FUNDS))); + env.close(); + expectInitialState(); + } + { + // Minter attempts to sell the token for XPB 10, which they + // have no trust line for and buyer has none of (sellside). + reinitializeTrustLineBalances(); + auto const nftID = mintNFT(minter); + auto const offerID = createSellOffer(minter, nftID, gwXPB(10)); + env(token::acceptSellOffer(buyer, offerID), + ter(static_cast(tecINSUFFICIENT_FUNDS))); + env.close(); + expectInitialState(); + } + { + // Minter attempts to sell the token for XPB 10, which they + // have no trust line for and buyer has none of (buyside). + reinitializeTrustLineBalances(); + auto const nftID = mintNFT(minter); + auto const offerID = createBuyOffer( + buyer, + minter, + nftID, + gwXPB(10), + {static_cast(tecUNFUNDED_OFFER)}); + env(token::acceptBuyOffer(minter, offerID), + ter(static_cast(tecOBJECT_NOT_FOUND))); + env.close(); + expectInitialState(); + } + { + // Minter attempts to sell the token for XPB 10 and the buyer + // has it but the minter has no trust line. Trust line is + // created as a result of the tx (sellside). 
+ reinitializeTrustLineBalances(); + env(pay(gw, buyer, gwXPB(100))); + env.close(); + + auto const nftID = mintNFT(minter); + auto const offerID = createSellOffer(minter, nftID, gwXPB(10)); + env(token::acceptSellOffer(buyer, offerID)); + env.close(); + + BEAST_EXPECT(env.balance(minter, gwXPB) == gwXPB(10)); + BEAST_EXPECT(env.balance(buyer, gwXPB) == gwXPB(89.8)); + BEAST_EXPECT(env.balance(gw, minter["XPB"]) == gwXPB(-10)); + BEAST_EXPECT(env.balance(gw, buyer["XPB"]) == gwXPB(-89.8)); + } + { + // Minter attempts to sell the token for XPB 10 and the buyer + // has it but the minter has no trust line. Trust line is + // created as a result of the tx (buyside). + reinitializeTrustLineBalances(); + env(pay(gw, buyer, gwXPB(100))); + env.close(); + + auto const nftID = mintNFT(minter); + auto const offerID = + createBuyOffer(buyer, minter, nftID, gwXPB(10)); + env(token::acceptBuyOffer(minter, offerID)); + env.close(); + + BEAST_EXPECT(env.balance(minter, gwXPB) == gwXPB(10)); + BEAST_EXPECT(env.balance(buyer, gwXPB) == gwXPB(89.8)); + BEAST_EXPECT(env.balance(gw, minter["XPB"]) == gwXPB(-10)); + BEAST_EXPECT(env.balance(gw, buyer["XPB"]) == gwXPB(-89.8)); + } + { + // There is a transfer fee on the NFT and buyer has exact + // amount (sellside) + reinitializeTrustLineBalances(); + + // secondarySeller has to sell it because transfer fees only + // happen on secondary sales + auto const nftID = mintNFT(minter, 3000); // 3% + auto const primaryOfferID = + createSellOffer(minter, nftID, XRP(0)); + env(token::acceptSellOffer(secondarySeller, primaryOfferID)); + env.close(); + + // now we can do a secondary sale + auto const offerID = + createSellOffer(secondarySeller, nftID, gwXAU(1000)); + auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + ? static_cast(tecINSUFFICIENT_FUNDS) + : static_cast(tesSUCCESS); + env(token::acceptSellOffer(buyer, offerID), ter(sellTER)); + env.close(); + + if (tweakedFeatures[fixUnburnableNFToken]) + expectInitialState(); + else + { + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(30)); + BEAST_EXPECT( + env.balance(secondarySeller, gwXAU) == gwXAU(970)); + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(-20)); + BEAST_EXPECT(env.balance(gw, minter["XAU"]) == gwXAU(-30)); + BEAST_EXPECT( + env.balance(gw, secondarySeller["XAU"]) == gwXAU(-970)); + BEAST_EXPECT(env.balance(gw, buyer["XAU"]) == gwXAU(20)); + } + } + { + // There is a transfer fee on the NFT and buyer has exact + // amount (buyside) + reinitializeTrustLineBalances(); + + // secondarySeller has to sell it because transfer fees only + // happen on secondary sales + auto const nftID = mintNFT(minter, 3000); // 3% + auto const primaryOfferID = + createSellOffer(minter, nftID, XRP(0)); + env(token::acceptSellOffer(secondarySeller, primaryOfferID)); + env.close(); + + // now we can do a secondary sale + auto const offerID = + createBuyOffer(buyer, secondarySeller, nftID, gwXAU(1000)); + auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + ? 
static_cast(tecINSUFFICIENT_FUNDS) + : static_cast(tesSUCCESS); + env(token::acceptBuyOffer(secondarySeller, offerID), + ter(sellTER)); + env.close(); + + if (tweakedFeatures[fixUnburnableNFToken]) + expectInitialState(); + else + { + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(30)); + BEAST_EXPECT( + env.balance(secondarySeller, gwXAU) == gwXAU(970)); + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(-20)); + BEAST_EXPECT(env.balance(gw, minter["XAU"]) == gwXAU(-30)); + BEAST_EXPECT( + env.balance(gw, secondarySeller["XAU"]) == gwXAU(-970)); + BEAST_EXPECT(env.balance(gw, buyer["XAU"]) == gwXAU(20)); + } + } + { + // There is a transfer fee on the NFT and buyer has enough + // (sellside) + reinitializeTrustLineBalances(); + + // secondarySeller has to sell it because transfer fees only + // happen on secondary sales + auto const nftID = mintNFT(minter, 3000); // 3% + auto const primaryOfferID = + createSellOffer(minter, nftID, XRP(0)); + env(token::acceptSellOffer(secondarySeller, primaryOfferID)); + env.close(); + + // now we can do a secondary sale + auto const offerID = + createSellOffer(secondarySeller, nftID, gwXAU(900)); + env(token::acceptSellOffer(buyer, offerID)); + env.close(); + + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(27)); + BEAST_EXPECT(env.balance(secondarySeller, gwXAU) == gwXAU(873)); + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(82)); + BEAST_EXPECT(env.balance(gw, minter["XAU"]) == gwXAU(-27)); + BEAST_EXPECT( + env.balance(gw, secondarySeller["XAU"]) == gwXAU(-873)); + BEAST_EXPECT(env.balance(gw, buyer["XAU"]) == gwXAU(-82)); + } + { + // There is a transfer fee on the NFT and buyer has enough + // (buyside) + reinitializeTrustLineBalances(); + + // secondarySeller has to sell it because transfer fees only + // happen on secondary sales + auto const nftID = mintNFT(minter, 3000); // 3% + auto const primaryOfferID = + createSellOffer(minter, nftID, XRP(0)); + env(token::acceptSellOffer(secondarySeller, primaryOfferID)); + env.close(); + + // now we can do a secondary sale + auto const offerID = + createBuyOffer(buyer, secondarySeller, nftID, gwXAU(900)); + env(token::acceptBuyOffer(secondarySeller, offerID)); + env.close(); + + // receives 3% of 900 - 27 + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(27)); + // receives 97% of 900 - 873 + BEAST_EXPECT(env.balance(secondarySeller, gwXAU) == gwXAU(873)); + // pays 900 plus 2% transfer fee on XAU - 918 + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(82)); + BEAST_EXPECT(env.balance(gw, minter["XAU"]) == gwXAU(-27)); + BEAST_EXPECT( + env.balance(gw, secondarySeller["XAU"]) == gwXAU(-873)); + BEAST_EXPECT(env.balance(gw, buyer["XAU"]) == gwXAU(-82)); + } + { + // There is a broker fee on the NFT. XAU transfer fee is only + // calculated from the buyer's output, not deducted from + // broker fee. 
+ // + // For a payment of 500 with a 2% IOU transfee fee and 100 + // broker fee: + // + // A) Total sale amount + IOU transfer fee is paid by buyer + // (Buyer pays (1.02 * 500) = 510) + // B) GW receives the additional IOU transfer fee + // (GW receives 10 from buyer calculated above) + // C) Broker receives broker fee (no IOU transfer fee) + // (Broker receives 100 from buyer) + // D) Seller receives balance (no IOU transfer fee) + // (Seller receives (510 - 10 - 100) = 400) + reinitializeTrustLineBalances(); + + auto const nftID = mintNFT(minter); + auto const sellOffer = + createSellOffer(minter, nftID, gwXAU(300)); + auto const buyOffer = + createBuyOffer(buyer, minter, nftID, gwXAU(500)); + env(token::brokerOffers(broker, buyOffer, sellOffer), + token::brokerFee(gwXAU(100))); + env.close(); + + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(400)); + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(490)); + BEAST_EXPECT(env.balance(broker, gwXAU) == gwXAU(5100)); + BEAST_EXPECT(env.balance(gw, minter["XAU"]) == gwXAU(-400)); + BEAST_EXPECT(env.balance(gw, buyer["XAU"]) == gwXAU(-490)); + BEAST_EXPECT(env.balance(gw, broker["XAU"]) == gwXAU(-5100)); + } + { + // There is broker and transfer fee on the NFT + // + // For a payment of 500 with a 2% IOU transfer fee, 3% NFT + // transfer fee, and 100 broker fee: + // + // A) Total sale amount + IOU transfer fee is paid by buyer + // (Buyer pays (1.02 * 500) = 510) + // B) GW receives the additional IOU transfer fee + // (GW receives 10 from buyer calculated above) + // C) Broker receives broker fee (no IOU transfer fee) + // (Broker receives 100 from buyer) + // D) Minter receives transfer fee (no IOU transfer fee) + // (Minter receives 0.03 * (510 - 10 - 100) = 12) + // E) Seller receives balance (no IOU transfer fee) + // (Seller receives (510 - 10 - 100 - 12) = 388) + reinitializeTrustLineBalances(); + + // secondarySeller has to sell it because transfer fees only + // happen on secondary sales + auto const nftID = mintNFT(minter, 3000); // 3% + auto const primaryOfferID = + createSellOffer(minter, nftID, XRP(0)); + env(token::acceptSellOffer(secondarySeller, primaryOfferID)); + env.close(); + + // now we can do a secondary sale + auto const sellOffer = + createSellOffer(secondarySeller, nftID, gwXAU(300)); + auto const buyOffer = + createBuyOffer(buyer, secondarySeller, nftID, gwXAU(500)); + env(token::brokerOffers(broker, buyOffer, sellOffer), + token::brokerFee(gwXAU(100))); + env.close(); + + BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(12)); + BEAST_EXPECT(env.balance(buyer, gwXAU) == gwXAU(490)); + BEAST_EXPECT(env.balance(secondarySeller, gwXAU) == gwXAU(388)); + BEAST_EXPECT(env.balance(broker, gwXAU) == gwXAU(5100)); + BEAST_EXPECT(env.balance(gw, minter["XAU"]) == gwXAU(-12)); + BEAST_EXPECT(env.balance(gw, buyer["XAU"]) == gwXAU(-490)); + BEAST_EXPECT( + env.balance(gw, secondarySeller["XAU"]) == gwXAU(-388)); + BEAST_EXPECT(env.balance(gw, broker["XAU"]) == gwXAU(-5100)); + } + } + } + void testWithFeats(FeatureBitset features) { @@ -5076,6 +5923,7 @@ class NFToken_test : public beast::unit_test::suite testNFTokenDeleteAccount(features); testNftXxxOffers(features); testFixNFTokenNegOffer(features); + testIOUWithTransferFee(features); } public: @@ -5086,6 +5934,8 @@ class NFToken_test : public beast::unit_test::suite FeatureBitset const all{supported_amendments()}; FeatureBitset const fixNFTDir{fixNFTokenDirV1}; + // TODO too many tests are being run - ths fixNFTDir check should be + // pushed into the tests that use 
it testWithFeats(all - fixNFTDir); testWithFeats(all - disallowIncoming); testWithFeats(all); From 39c32561bdd47e4c7feeaffe4c71f02c2fd8b5c0 Mon Sep 17 00:00:00 2001 From: Scott Schurr Date: Thu, 9 Feb 2023 21:15:22 -0800 Subject: [PATCH 102/508] Prevent brokered sale of NFToken to owner: (#4403) Fixes #4374 It was possible for a broker to combine a sell and a buy offer from an account that already owns an NFT. Such brokering extracts money from the NFT owner and provides no benefit in return. With this amendment, the code detects when a broker is returning an NFToken to its initial owner and prohibits the transaction. This forbids a broker from selling an NFToken to the account that already owns the token. This fixes a bug in the original implementation of XLS-20. Thanks to @nixer89 for suggesting this fix. --- src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp | 6 + src/test/app/NFToken_test.cpp | 119 +++++++++++++++++- 2 files changed, 120 insertions(+), 5 deletions(-) diff --git a/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp b/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp index c335f8d28fd..257bda5c051 100644 --- a/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp +++ b/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp @@ -107,6 +107,12 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) if ((*bo)[sfAmount].issue() != (*so)[sfAmount].issue()) return tecNFTOKEN_BUY_SELL_MISMATCH; + // The two offers may not form a loop. A broker may not sell the + // token to the current owner of the token. + if (ctx.view.rules().enabled(fixUnburnableNFToken) && + ((*bo)[sfOwner] == (*so)[sfOwner])) + return tecCANT_ACCEPT_OWN_NFTOKEN_OFFER; + // Ensure that the buyer is willing to pay at least as much as the // seller is requesting: if ((*so)[sfAmount] > (*bo)[sfAmount]) diff --git a/src/test/app/NFToken_test.cpp b/src/test/app/NFToken_test.cpp index 33d725e5a17..0c428e6fac9 100644 --- a/src/test/app/NFToken_test.cpp +++ b/src/test/app/NFToken_test.cpp @@ -4613,7 +4613,7 @@ class NFToken_test : public beast::unit_test::suite txflags(tfTransferable)); env.close(); - // At the momement issuer and minter cannot delete themselves. + // At the moment issuer and minter cannot delete themselves. // o issuer has an issued NFT in the ledger. // o minter owns an NFT. env(acctdelete(issuer, daria), fee(XRP(50)), ter(tecHAS_OBLIGATIONS)); @@ -5894,6 +5894,115 @@ class NFToken_test : public beast::unit_test::suite } } + void + testBrokeredSaleToSelf(FeatureBitset features) + { + // There was a bug that if an account had... + // + // 1. An NFToken, and + // 2. An offer on the ledger to buy that same token, and + // 3. Also an offer of the ledger to sell that same token, + // + // Then someone could broker the two offers. This would result in + // the NFToken being bought and returned to the original owner and + // the broker pocketing the profit. + // + // This unit test verifies that the fixUnburnableNFToken amendment + // fixes that bug. + testcase("Brokered sale to self"); + + using namespace test::jtx; + + Account const alice{"alice"}; + Account const bob{"bob"}; + Account const broker{"broker"}; + + Env env{*this, features}; + env.fund(XRP(10000), alice, bob, broker); + env.close(); + + // For this scenario to occur we need the following steps: + // + // 1. alice mints NFT. + // 2. bob creates a buy offer for it for 5 XRP. + // 3. alice decides to gift the NFT to bob for 0. + // creating a sell offer (hopefully using a destination too) + // 4. Bob accepts the sell offer, because it is better than + // paying 5 XRP. 
+ // 5. At this point, bob has the NFT and still has their buy + // offer from when they did not have the NFT! This is because + // the order book is not cleared when an NFT changes hands. + // 6. Now that Bob owns the NFT, he cannot create new buy offers. + // However he still has one left over from when he did not own + // it. He can create new sell offers and does. + // 7. Now that bob has both a buy and a sell offer for the same NFT, + // a broker can sell the NFT that bob owns to bob and pocket the + // difference. + uint256 const nftId{token::getNextID(env, alice, 0u, tfTransferable)}; + env(token::mint(alice, 0u), txflags(tfTransferable)); + env.close(); + + // Bob creates a buy offer for 5 XRP. Alice creates a sell offer + // for 0 XRP. + uint256 const bobBuyOfferIndex = + keylet::nftoffer(bob, env.seq(bob)).key; + env(token::createOffer(bob, nftId, XRP(5)), token::owner(alice)); + + uint256 const aliceSellOfferIndex = + keylet::nftoffer(alice, env.seq(alice)).key; + env(token::createOffer(alice, nftId, XRP(0)), + token::destination(bob), + txflags(tfSellNFToken)); + env.close(); + + // bob accepts alice's offer but forgets to remove the old buy offer. + env(token::acceptSellOffer(bob, aliceSellOfferIndex)); + env.close(); + + // Note that bob still has a buy offer on the books. + BEAST_EXPECT(env.le(keylet::nftoffer(bobBuyOfferIndex))); + + // Bob creates a sell offer for the gift NFT from alice. + uint256 const bobSellOfferIndex = + keylet::nftoffer(bob, env.seq(bob)).key; + env(token::createOffer(bob, nftId, XRP(4)), txflags(tfSellNFToken)); + env.close(); + + // bob now has a buy offer and a sell offer on the books. A broker + // spots this and swoops in to make a profit. + BEAST_EXPECT(nftCount(env, bob) == 1); + auto const bobsPriorBalance = env.balance(bob); + auto const brokersPriorBalance = env.balance(broker); + TER expectTer = features[fixUnburnableNFToken] + ? TER(tecCANT_ACCEPT_OWN_NFTOKEN_OFFER) + : TER(tesSUCCESS); + env(token::brokerOffers(broker, bobBuyOfferIndex, bobSellOfferIndex), + token::brokerFee(XRP(1)), + ter(expectTer)); + env.close(); + + if (expectTer == tesSUCCESS) + { + // bob should still have the NFT from alice, but be XRP(1) poorer. + // broker should be almost XRP(1) richer because they also paid a + // transaction fee. + BEAST_EXPECT(nftCount(env, bob) == 1); + BEAST_EXPECT(env.balance(bob) == bobsPriorBalance - XRP(1)); + BEAST_EXPECT( + env.balance(broker) == + brokersPriorBalance + XRP(1) - drops(10)); + } + else + { + // A tec result was returned, so no state should change other + // than the broker burning their transaction fee. 
+ BEAST_EXPECT(nftCount(env, bob) == 1); + BEAST_EXPECT(env.balance(bob) == bobsPriorBalance); + BEAST_EXPECT( + env.balance(broker) == brokersPriorBalance - drops(10)); + } + } + void testWithFeats(FeatureBitset features) { @@ -5924,6 +6033,7 @@ class NFToken_test : public beast::unit_test::suite testNftXxxOffers(features); testFixNFTokenNegOffer(features); testIOUWithTransferFee(features); + testBrokeredSaleToSelf(features); } public: @@ -5934,10 +6044,9 @@ class NFToken_test : public beast::unit_test::suite FeatureBitset const all{supported_amendments()}; FeatureBitset const fixNFTDir{fixNFTokenDirV1}; - // TODO too many tests are being run - ths fixNFTDir check should be - // pushed into the tests that use it - testWithFeats(all - fixNFTDir); - testWithFeats(all - disallowIncoming); + testWithFeats(all - fixNFTDir - fixUnburnableNFToken); + testWithFeats(all - disallowIncoming - fixUnburnableNFToken); + testWithFeats(all - fixUnburnableNFToken); testWithFeats(all); } }; From b72a87c7d3629c19e37b5f5fc86232e60540a80f Mon Sep 17 00:00:00 2001 From: Denis Angell Date: Mon, 13 Feb 2023 13:02:24 -0500 Subject: [PATCH 103/508] Only account specified as destination can settle through brokerage: (#4399) Without this amendment, for NFTs using broker mode, if the sell offer contains a destination and that destination is the buyer account, anyone can broker the transaction. Also, if a buy offer contains a destination and that destination is the seller account, anyone can broker the transaction. This is not ideal and is misleading. Instead, with this amendment: If you set a destination, that destination needs to be the account settling the transaction. So, the broker must be the destination if they want to settle. If the buyer is the destination, then the buyer must accept the sell offer, as you cannot broker your own offers. If users want their offers open to the public, then they should not set a destination. On the other hand, if users want to limit who can settle the offers, then they would set a destination. Unit tests: 1. The broker cannot broker a destination offer to the buyer and the buyer must accept the sell offer. (0 transfer) 2. If the broker is the destination, the broker will take the difference. (broker mode) --- src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp | 36 ++++-- src/test/app/NFToken_test.cpp | 113 +++++++++++------- 2 files changed, 101 insertions(+), 48 deletions(-) diff --git a/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp b/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp index 257bda5c051..c420bfc6197 100644 --- a/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp +++ b/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp @@ -118,20 +118,40 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) if ((*so)[sfAmount] > (*bo)[sfAmount]) return tecINSUFFICIENT_PAYMENT; - // If the buyer specified a destination, that destination must be - // the seller or the broker. + // If the buyer specified a destination if (auto const dest = bo->at(~sfDestination)) { - if (*dest != so->at(sfOwner) && *dest != ctx.tx[sfAccount]) - return tecNFTOKEN_BUY_SELL_MISMATCH; + // fixUnburnableNFToken + if (ctx.view.rules().enabled(fixUnburnableNFToken)) + { + // the destination may only be the account brokering the offer + if (*dest != ctx.tx[sfAccount]) + return tecNO_PERMISSION; + } + else + { + // the destination must be the seller or the broker. 
+ if (*dest != so->at(sfOwner) && *dest != ctx.tx[sfAccount]) + return tecNFTOKEN_BUY_SELL_MISMATCH; + } } - // If the seller specified a destination, that destination must be - // the buyer or the broker. + // If the seller specified a destination if (auto const dest = so->at(~sfDestination)) { - if (*dest != bo->at(sfOwner) && *dest != ctx.tx[sfAccount]) - return tecNFTOKEN_BUY_SELL_MISMATCH; + // fixUnburnableNFToken + if (ctx.view.rules().enabled(fixUnburnableNFToken)) + { + // the destination may only be the account brokering the offer + if (*dest != ctx.tx[sfAccount]) + return tecNO_PERMISSION; + } + else + { + // the destination must be the buyer or the broker. + if (*dest != bo->at(sfOwner) && *dest != ctx.tx[sfAccount]) + return tecNFTOKEN_BUY_SELL_MISMATCH; + } } // The broker can specify an amount that represents their cut; if they diff --git a/src/test/app/NFToken_test.cpp b/src/test/app/NFToken_test.cpp index 0c428e6fac9..d581a6d0d90 100644 --- a/src/test/app/NFToken_test.cpp +++ b/src/test/app/NFToken_test.cpp @@ -2878,15 +2878,20 @@ class NFToken_test : public beast::unit_test::suite BEAST_EXPECT(ownerCount(env, minter) == 2); BEAST_EXPECT(ownerCount(env, buyer) == 1); - // issuer cannot broker the offers, because they are not the - // Destination. - env(token::brokerOffers( - issuer, offerBuyerToMinter, offerMinterToBroker), - ter(tecNFTOKEN_BUY_SELL_MISMATCH)); - env.close(); - BEAST_EXPECT(ownerCount(env, issuer) == 0); - BEAST_EXPECT(ownerCount(env, minter) == 2); - BEAST_EXPECT(ownerCount(env, buyer) == 1); + { + // issuer cannot broker the offers, because they are not the + // Destination. + TER const expectTer = features[fixUnburnableNFToken] + ? tecNO_PERMISSION + : tecNFTOKEN_BUY_SELL_MISMATCH; + env(token::brokerOffers( + issuer, offerBuyerToMinter, offerMinterToBroker), + ter(expectTer)); + env.close(); + BEAST_EXPECT(ownerCount(env, issuer) == 0); + BEAST_EXPECT(ownerCount(env, minter) == 2); + BEAST_EXPECT(ownerCount(env, buyer) == 1); + } // Since broker is the sell offer's destination, they can broker // the two offers. @@ -2923,29 +2928,52 @@ class NFToken_test : public beast::unit_test::suite BEAST_EXPECT(ownerCount(env, minter) == 1); BEAST_EXPECT(ownerCount(env, buyer) == 2); - // Cannot broker offers when the sell destination is not the buyer. - env(token::brokerOffers( - broker, offerIssuerToBuyer, offerBuyerToMinter), - ter(tecNFTOKEN_BUY_SELL_MISMATCH)); - env.close(); - BEAST_EXPECT(ownerCount(env, issuer) == 1); - BEAST_EXPECT(ownerCount(env, minter) == 1); - BEAST_EXPECT(ownerCount(env, buyer) == 2); + { + // Cannot broker offers when the sell destination is not the + // buyer. + TER const expectTer = features[fixUnburnableNFToken] + ? tecNO_PERMISSION + : tecNFTOKEN_BUY_SELL_MISMATCH; + env(token::brokerOffers( + broker, offerIssuerToBuyer, offerBuyerToMinter), + ter(expectTer)); + env.close(); - // Broker is successful when destination is buyer. - env(token::brokerOffers( - broker, offerMinterToBuyer, offerBuyerToMinter)); - env.close(); - BEAST_EXPECT(ownerCount(env, issuer) == 1); - BEAST_EXPECT(ownerCount(env, minter) == 1); - BEAST_EXPECT(ownerCount(env, buyer) == 0); + BEAST_EXPECT(ownerCount(env, issuer) == 1); + BEAST_EXPECT(ownerCount(env, minter) == 1); + BEAST_EXPECT(ownerCount(env, buyer) == 2); - // Clean out the unconsumed offer. 
- env(token::cancelOffer(issuer, {offerIssuerToBuyer})); - env.close(); - BEAST_EXPECT(ownerCount(env, issuer) == 0); - BEAST_EXPECT(ownerCount(env, minter) == 1); - BEAST_EXPECT(ownerCount(env, buyer) == 0); + // amendment switch: When enabled the broker fails, when + // disabled the broker succeeds if the destination is the buyer. + TER const eexpectTer = features[fixUnburnableNFToken] + ? tecNO_PERMISSION + : TER(tesSUCCESS); + env(token::brokerOffers( + broker, offerMinterToBuyer, offerBuyerToMinter), + ter(eexpectTer)); + env.close(); + + if (features[fixUnburnableNFToken]) + // Buyer is successful with acceptOffer. + env(token::acceptBuyOffer(buyer, offerMinterToBuyer)); + env.close(); + + // Clean out the unconsumed offer. + env(token::cancelOffer(buyer, {offerBuyerToMinter})); + env.close(); + + BEAST_EXPECT(ownerCount(env, issuer) == 1); + BEAST_EXPECT(ownerCount(env, minter) == 1); + BEAST_EXPECT(ownerCount(env, buyer) == 0); + + // Clean out the unconsumed offer. + env(token::cancelOffer(issuer, {offerIssuerToBuyer})); + env.close(); + BEAST_EXPECT(ownerCount(env, issuer) == 0); + BEAST_EXPECT(ownerCount(env, minter) == 1); + BEAST_EXPECT(ownerCount(env, buyer) == 0); + return; + } } // Show that if a buy and a sell offer both have the same destination, @@ -2963,15 +2991,20 @@ class NFToken_test : public beast::unit_test::suite token::owner(minter), token::destination(broker)); - // Cannot broker offers when the sell destination is not the buyer - // or the broker. - env(token::brokerOffers( - issuer, offerBuyerToBroker, offerMinterToBroker), - ter(tecNFTOKEN_BUY_SELL_MISMATCH)); - env.close(); - BEAST_EXPECT(ownerCount(env, issuer) == 0); - BEAST_EXPECT(ownerCount(env, minter) == 2); - BEAST_EXPECT(ownerCount(env, buyer) == 1); + { + // Cannot broker offers when the sell destination is not the + // buyer or the broker. + TER const expectTer = features[fixUnburnableNFToken] + ? tecNO_PERMISSION + : tecNFTOKEN_BUY_SELL_MISMATCH; + env(token::brokerOffers( + issuer, offerBuyerToBroker, offerMinterToBroker), + ter(expectTer)); + env.close(); + BEAST_EXPECT(ownerCount(env, issuer) == 0); + BEAST_EXPECT(ownerCount(env, minter) == 2); + BEAST_EXPECT(ownerCount(env, buyer) == 1); + } // Broker is successful if they are the destination of both offers. env(token::brokerOffers( @@ -6053,4 +6086,4 @@ class NFToken_test : public beast::unit_test::suite BEAST_DEFINE_TESTSUITE_PRIO(NFToken, tx, ripple, 2); -} // namespace ripple +} // namespace ripple \ No newline at end of file From ac78b7a9a7beccfe397c3725ca18a680d7502278 Mon Sep 17 00:00:00 2001 From: ledhed2222 Date: Mon, 13 Feb 2023 15:30:01 -0500 Subject: [PATCH 104/508] Rename to fixNonFungibleTokensV1_2 and some cosmetic changes (#4419) --- src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp | 36 ++++---- src/ripple/app/tx/impl/NFTokenBurn.cpp | 4 +- src/ripple/app/tx/impl/NFTokenCreateOffer.cpp | 2 +- src/ripple/protocol/Feature.h | 2 +- src/ripple/protocol/impl/Feature.cpp | 2 +- src/test/app/NFTokenBurn_test.cpp | 24 +++--- src/test/app/NFToken_test.cpp | 84 ++++++++++--------- 7 files changed, 74 insertions(+), 80 deletions(-) diff --git a/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp b/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp index c420bfc6197..61aa7e0629a 100644 --- a/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp +++ b/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp @@ -109,7 +109,7 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) // The two offers may not form a loop. 
A broker may not sell the // token to the current owner of the token. - if (ctx.view.rules().enabled(fixUnburnableNFToken) && + if (ctx.view.rules().enabled(fixNonFungibleTokensV1_2) && ((*bo)[sfOwner] == (*so)[sfOwner])) return tecCANT_ACCEPT_OWN_NFTOKEN_OFFER; @@ -121,37 +121,29 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) // If the buyer specified a destination if (auto const dest = bo->at(~sfDestination)) { - // fixUnburnableNFToken - if (ctx.view.rules().enabled(fixUnburnableNFToken)) + // Before this fix the destination could be either the seller or + // a broker. After, it must be whoever is submitting the tx. + if (ctx.view.rules().enabled(fixNonFungibleTokensV1_2)) { - // the destination may only be the account brokering the offer if (*dest != ctx.tx[sfAccount]) return tecNO_PERMISSION; } - else - { - // the destination must be the seller or the broker. - if (*dest != so->at(sfOwner) && *dest != ctx.tx[sfAccount]) - return tecNFTOKEN_BUY_SELL_MISMATCH; - } + else if (*dest != so->at(sfOwner) && *dest != ctx.tx[sfAccount]) + return tecNFTOKEN_BUY_SELL_MISMATCH; } // If the seller specified a destination if (auto const dest = so->at(~sfDestination)) { - // fixUnburnableNFToken - if (ctx.view.rules().enabled(fixUnburnableNFToken)) + // Before this fix the destination could be either the seller or + // a broker. After, it must be whoever is submitting the tx. + if (ctx.view.rules().enabled(fixNonFungibleTokensV1_2)) { - // the destination may only be the account brokering the offer if (*dest != ctx.tx[sfAccount]) return tecNO_PERMISSION; } - else - { - // the destination must be the buyer or the broker. - if (*dest != bo->at(sfOwner) && *dest != ctx.tx[sfAccount]) - return tecNFTOKEN_BUY_SELL_MISMATCH; - } + else if (*dest != bo->at(sfOwner) && *dest != ctx.tx[sfAccount]) + return tecNFTOKEN_BUY_SELL_MISMATCH; } // The broker can specify an amount that represents their cut; if they @@ -200,7 +192,7 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) // After this amendment, we allow an IOU issuer to buy an NFT with their // own currency auto const needed = bo->at(sfAmount); - if (ctx.view.rules().enabled(fixUnburnableNFToken)) + if (ctx.view.rules().enabled(fixNonFungibleTokensV1_2)) { if (accountFunds( ctx.view, (*bo)[sfOwner], needed, fhZERO_IF_FROZEN, ctx.j) < @@ -243,7 +235,7 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) // The account offering to buy must have funds: auto const needed = so->at(sfAmount); - if (!ctx.view.rules().enabled(fixUnburnableNFToken)) + if (!ctx.view.rules().enabled(fixNonFungibleTokensV1_2)) { if (accountHolds( ctx.view, @@ -298,7 +290,7 @@ NFTokenAcceptOffer::pay( // their own currency, we know that something went wrong. This was // originally found in the context of IOU transfer fees. Since there are // several payouts in this tx, just confirm that the end state is OK. 
- if (!view().rules().enabled(fixUnburnableNFToken)) + if (!view().rules().enabled(fixNonFungibleTokensV1_2)) return result; if (result != tesSUCCESS) return result; diff --git a/src/ripple/app/tx/impl/NFTokenBurn.cpp b/src/ripple/app/tx/impl/NFTokenBurn.cpp index e8693c7c6fb..99acfd61dca 100644 --- a/src/ripple/app/tx/impl/NFTokenBurn.cpp +++ b/src/ripple/app/tx/impl/NFTokenBurn.cpp @@ -77,7 +77,7 @@ NFTokenBurn::preclaim(PreclaimContext const& ctx) } } - if (!ctx.view.rules().enabled(fixUnburnableNFToken)) + if (!ctx.view.rules().enabled(fixNonFungibleTokensV1_2)) { // If there are too many offers, then burning the token would produce // too much metadata. Disallow burning a token with too many offers. @@ -109,7 +109,7 @@ NFTokenBurn::doApply() view().update(issuer); } - if (ctx_.view().rules().enabled(fixUnburnableNFToken)) + if (ctx_.view().rules().enabled(fixNonFungibleTokensV1_2)) { // Delete up to 500 offers in total. // Because the number of sell offers is likely to be less than diff --git a/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp b/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp index ff8668e4488..6db31c69892 100644 --- a/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp +++ b/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp @@ -155,7 +155,7 @@ NFTokenCreateOffer::preclaim(PreclaimContext const& ctx) { // After this amendment, we allow an IOU issuer to make a buy offer // using their own currency. - if (ctx.view.rules().enabled(fixUnburnableNFToken)) + if (ctx.view.rules().enabled(fixNonFungibleTokensV1_2)) { if (accountFunds( ctx.view, diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index 0bdfd224dda..d53d992d242 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -343,7 +343,7 @@ extern uint256 const featureImmediateOfferKilled; extern uint256 const featureDisallowIncoming; extern uint256 const featureXRPFees; extern uint256 const fixUniversalNumber; -extern uint256 const fixUnburnableNFToken; +extern uint256 const fixNonFungibleTokensV1_2; } // namespace ripple diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index f021ea4674d..4fb79e4cc48 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -453,7 +453,7 @@ REGISTER_FEATURE(ImmediateOfferKilled, Supported::yes, DefaultVote::no) REGISTER_FEATURE(DisallowIncoming, Supported::yes, DefaultVote::no); REGISTER_FEATURE(XRPFees, Supported::yes, DefaultVote::no); REGISTER_FIX (fixUniversalNumber, Supported::yes, DefaultVote::no); -REGISTER_FIX (fixUnburnableNFToken, Supported::yes, DefaultVote::no); +REGISTER_FIX (fixNonFungibleTokensV1_2, Supported::yes, DefaultVote::no); // The following amendments have been active for at least two years. Their // pre-amendment code has been removed and the identifiers are deprecated. 
diff --git a/src/test/app/NFTokenBurn_test.cpp b/src/test/app/NFTokenBurn_test.cpp index 4896932acd2..096fd5ce1e8 100644 --- a/src/test/app/NFTokenBurn_test.cpp +++ b/src/test/app/NFTokenBurn_test.cpp @@ -524,8 +524,8 @@ class NFTokenBurn_test : public beast::unit_test::suite using namespace test::jtx; // Test what happens if a NFT is unburnable when there are - // more than 500 offers, before fixUnburnableNFToken goes live - if (!features[fixUnburnableNFToken]) + // more than 500 offers, before fixNonFungibleTokensV1_2 goes live + if (!features[fixNonFungibleTokensV1_2]) { Env env{*this, features}; @@ -620,10 +620,10 @@ class NFTokenBurn_test : public beast::unit_test::suite } // Test that up to 499 buy/sell offers will be removed when NFT is - // burned after fixUnburnableNFToken is enabled. This is to test that we - // can successfully remove all offers if the number of offers is less - // than 500. - if (features[fixUnburnableNFToken]) + // burned after fixNonFungibleTokensV1_2 is enabled. This is to test + // that we can successfully remove all offers if the number of offers is + // less than 500. + if (features[fixNonFungibleTokensV1_2]) { Env env{*this, features}; @@ -673,8 +673,8 @@ class NFTokenBurn_test : public beast::unit_test::suite } // Test that up to 500 buy offers are removed when NFT is burned - // after fixUnburnableNFToken is enabled - if (features[fixUnburnableNFToken]) + // after fixNonFungibleTokensV1_2 is enabled + if (features[fixNonFungibleTokensV1_2]) { Env env{*this, features}; @@ -718,8 +718,8 @@ class NFTokenBurn_test : public beast::unit_test::suite } // Test that up to 500 buy/sell offers are removed when NFT is burned - // after fixUnburnableNFToken is enabled - if (features[fixUnburnableNFToken]) + // after fixNonFungibleTokensV1_2 is enabled + if (features[fixNonFungibleTokensV1_2]) { Env env{*this, features}; @@ -786,8 +786,8 @@ class NFTokenBurn_test : public beast::unit_test::suite FeatureBitset const all{supported_amendments()}; FeatureBitset const fixNFTDir{fixNFTokenDirV1}; - testWithFeats(all - fixUnburnableNFToken - fixNFTDir); - testWithFeats(all - fixUnburnableNFToken); + testWithFeats(all - fixNonFungibleTokensV1_2 - fixNFTDir); + testWithFeats(all - fixNonFungibleTokensV1_2); testWithFeats(all); } }; diff --git a/src/test/app/NFToken_test.cpp b/src/test/app/NFToken_test.cpp index d581a6d0d90..40202e07dce 100644 --- a/src/test/app/NFToken_test.cpp +++ b/src/test/app/NFToken_test.cpp @@ -2881,7 +2881,7 @@ class NFToken_test : public beast::unit_test::suite { // issuer cannot broker the offers, because they are not the // Destination. - TER const expectTer = features[fixUnburnableNFToken] + TER const expectTer = features[fixNonFungibleTokensV1_2] ? tecNO_PERMISSION : tecNFTOKEN_BUY_SELL_MISMATCH; env(token::brokerOffers( @@ -2931,7 +2931,7 @@ class NFToken_test : public beast::unit_test::suite { // Cannot broker offers when the sell destination is not the // buyer. - TER const expectTer = features[fixUnburnableNFToken] + TER const expectTer = features[fixNonFungibleTokensV1_2] ? tecNO_PERMISSION : tecNFTOKEN_BUY_SELL_MISMATCH; env(token::brokerOffers( @@ -2945,7 +2945,7 @@ class NFToken_test : public beast::unit_test::suite // amendment switch: When enabled the broker fails, when // disabled the broker succeeds if the destination is the buyer. - TER const eexpectTer = features[fixUnburnableNFToken] + TER const eexpectTer = features[fixNonFungibleTokensV1_2] ? 
tecNO_PERMISSION : TER(tesSUCCESS); env(token::brokerOffers( @@ -2953,7 +2953,7 @@ class NFToken_test : public beast::unit_test::suite ter(eexpectTer)); env.close(); - if (features[fixUnburnableNFToken]) + if (features[fixNonFungibleTokensV1_2]) // Buyer is successful with acceptOffer. env(token::acceptBuyOffer(buyer, offerMinterToBuyer)); env.close(); @@ -2994,7 +2994,7 @@ class NFToken_test : public beast::unit_test::suite { // Cannot broker offers when the sell destination is not the // buyer or the broker. - TER const expectTer = features[fixUnburnableNFToken] + TER const expectTer = features[fixNonFungibleTokensV1_2] ? tecNO_PERMISSION : tecNFTOKEN_BUY_SELL_MISMATCH; env(token::brokerOffers( @@ -3861,7 +3861,8 @@ class NFToken_test : public beast::unit_test::suite using namespace test::jtx; for (auto const& tweakedFeatures : - {features - fixUnburnableNFToken, features | fixUnburnableNFToken}) + {features - fixNonFungibleTokensV1_2, + features | fixNonFungibleTokensV1_2}) { Env env{*this, tweakedFeatures}; @@ -4400,7 +4401,7 @@ class NFToken_test : public beast::unit_test::suite token::owner(minter)); env.close(); - if (tweakedFeatures[fixUnburnableNFToken]) + if (tweakedFeatures[fixNonFungibleTokensV1_2]) { env(token::brokerOffers( broker, buyOfferIndex, minterOfferIndex), @@ -4946,12 +4947,12 @@ class NFToken_test : public beast::unit_test::suite IOU const gwXAU(gw["XAU"]); // Test both with and without fixNFTokenNegOffer and - // fixUnburnableNFToken. Need to turn off fixUnburnableNFToken as well - // because that amendment came later and addressed the acceptance - // side of this issue. + // fixNonFungibleTokensV1_2. Need to turn off fixNonFungibleTokensV1_2 + // as well because that amendment came later and addressed the + // acceptance side of this issue. for (auto const& tweakedFeatures : {features - fixNFTokenNegOffer - featureNonFungibleTokensV1_1 - - fixUnburnableNFToken, + fixNonFungibleTokensV1_2, features - fixNFTokenNegOffer - featureNonFungibleTokensV1_1, features | fixNFTokenNegOffer}) { @@ -5042,7 +5043,7 @@ class NFToken_test : public beast::unit_test::suite } { // 1. If fixNFTokenNegOffer is enabled get tecOBJECT_NOT_FOUND - // 2. If it is not enabled, but fixUnburnableNFToken is + // 2. If it is not enabled, but fixNonFungibleTokensV1_2 is // enabled, get tecOBJECT_NOT_FOUND. // 3. If neither are enabled, get tesSUCCESS. TER const offerAcceptTER = tweakedFeatures[fixNFTokenNegOffer] @@ -5184,7 +5185,8 @@ class NFToken_test : public beast::unit_test::suite testcase("Payments with IOU transfer fees"); for (auto const& tweakedFeatures : - {features - fixUnburnableNFToken, features | fixUnburnableNFToken}) + {features - fixNonFungibleTokensV1_2, + features | fixNonFungibleTokensV1_2}) { Env env{*this, tweakedFeatures}; @@ -5336,13 +5338,13 @@ class NFToken_test : public beast::unit_test::suite auto const nftID = mintNFT(minter); auto const offerID = createSellOffer(minter, nftID, gwXAU(1000)); - auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + auto const sellTER = tweakedFeatures[fixNonFungibleTokensV1_2] ? 
static_cast(tecINSUFFICIENT_FUNDS) : static_cast(tesSUCCESS); env(token::acceptSellOffer(buyer, offerID), ter(sellTER)); env.close(); - if (tweakedFeatures[fixUnburnableNFToken]) + if (tweakedFeatures[fixNonFungibleTokensV1_2]) expectInitialState(); else { @@ -5360,13 +5362,13 @@ class NFToken_test : public beast::unit_test::suite auto const nftID = mintNFT(minter); auto const offerID = createBuyOffer(buyer, minter, nftID, gwXAU(1000)); - auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + auto const sellTER = tweakedFeatures[fixNonFungibleTokensV1_2] ? static_cast(tecINSUFFICIENT_FUNDS) : static_cast(tesSUCCESS); env(token::acceptBuyOffer(minter, offerID), ter(sellTER)); env.close(); - if (tweakedFeatures[fixUnburnableNFToken]) + if (tweakedFeatures[fixNonFungibleTokensV1_2]) expectInitialState(); else { @@ -5384,13 +5386,13 @@ class NFToken_test : public beast::unit_test::suite reinitializeTrustLineBalances(); auto const nftID = mintNFT(minter); auto const offerID = createSellOffer(minter, nftID, gwXAU(995)); - auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + auto const sellTER = tweakedFeatures[fixNonFungibleTokensV1_2] ? static_cast(tecINSUFFICIENT_FUNDS) : static_cast(tesSUCCESS); env(token::acceptSellOffer(buyer, offerID), ter(sellTER)); env.close(); - if (tweakedFeatures[fixUnburnableNFToken]) + if (tweakedFeatures[fixNonFungibleTokensV1_2]) expectInitialState(); else { @@ -5408,13 +5410,13 @@ class NFToken_test : public beast::unit_test::suite auto const nftID = mintNFT(minter); auto const offerID = createBuyOffer(buyer, minter, nftID, gwXAU(995)); - auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + auto const sellTER = tweakedFeatures[fixNonFungibleTokensV1_2] ? static_cast(tecINSUFFICIENT_FUNDS) : static_cast(tesSUCCESS); env(token::acceptBuyOffer(minter, offerID), ter(sellTER)); env.close(); - if (tweakedFeatures[fixUnburnableNFToken]) + if (tweakedFeatures[fixNonFungibleTokensV1_2]) expectInitialState(); else { @@ -5509,13 +5511,13 @@ class NFToken_test : public beast::unit_test::suite auto const nftID = mintNFT(minter); auto const offerID = createSellOffer(minter, nftID, gwXAU(1000)); - auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + auto const sellTER = tweakedFeatures[fixNonFungibleTokensV1_2] ? static_cast(tesSUCCESS) : static_cast(tecINSUFFICIENT_FUNDS); env(token::acceptSellOffer(gw, offerID), ter(sellTER)); env.close(); - if (tweakedFeatures[fixUnburnableNFToken]) + if (tweakedFeatures[fixNonFungibleTokensV1_2]) { BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(1000)); BEAST_EXPECT( @@ -5530,18 +5532,18 @@ class NFToken_test : public beast::unit_test::suite reinitializeTrustLineBalances(); auto const nftID = mintNFT(minter); - auto const offerTER = tweakedFeatures[fixUnburnableNFToken] + auto const offerTER = tweakedFeatures[fixNonFungibleTokensV1_2] ? static_cast(tesSUCCESS) : static_cast(tecUNFUNDED_OFFER); auto const offerID = createBuyOffer(gw, minter, nftID, gwXAU(1000), {offerTER}); - auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + auto const sellTER = tweakedFeatures[fixNonFungibleTokensV1_2] ? 
static_cast(tesSUCCESS) : static_cast(tecOBJECT_NOT_FOUND); env(token::acceptBuyOffer(minter, offerID), ter(sellTER)); env.close(); - if (tweakedFeatures[fixUnburnableNFToken]) + if (tweakedFeatures[fixNonFungibleTokensV1_2]) { BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(1000)); BEAST_EXPECT( @@ -5557,13 +5559,13 @@ class NFToken_test : public beast::unit_test::suite auto const nftID = mintNFT(minter); auto const offerID = createSellOffer(minter, nftID, gwXAU(5000)); - auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + auto const sellTER = tweakedFeatures[fixNonFungibleTokensV1_2] ? static_cast(tesSUCCESS) : static_cast(tecINSUFFICIENT_FUNDS); env(token::acceptSellOffer(gw, offerID), ter(sellTER)); env.close(); - if (tweakedFeatures[fixUnburnableNFToken]) + if (tweakedFeatures[fixNonFungibleTokensV1_2]) { BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(5000)); BEAST_EXPECT( @@ -5578,18 +5580,18 @@ class NFToken_test : public beast::unit_test::suite reinitializeTrustLineBalances(); auto const nftID = mintNFT(minter); - auto const offerTER = tweakedFeatures[fixUnburnableNFToken] + auto const offerTER = tweakedFeatures[fixNonFungibleTokensV1_2] ? static_cast(tesSUCCESS) : static_cast(tecUNFUNDED_OFFER); auto const offerID = createBuyOffer(gw, minter, nftID, gwXAU(5000), {offerTER}); - auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + auto const sellTER = tweakedFeatures[fixNonFungibleTokensV1_2] ? static_cast(tesSUCCESS) : static_cast(tecOBJECT_NOT_FOUND); env(token::acceptBuyOffer(minter, offerID), ter(sellTER)); env.close(); - if (tweakedFeatures[fixUnburnableNFToken]) + if (tweakedFeatures[fixNonFungibleTokensV1_2]) { BEAST_EXPECT(env.balance(minter, gwXAU) == gwXAU(5000)); BEAST_EXPECT( @@ -5731,13 +5733,13 @@ class NFToken_test : public beast::unit_test::suite // now we can do a secondary sale auto const offerID = createSellOffer(secondarySeller, nftID, gwXAU(1000)); - auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + auto const sellTER = tweakedFeatures[fixNonFungibleTokensV1_2] ? static_cast(tecINSUFFICIENT_FUNDS) : static_cast(tesSUCCESS); env(token::acceptSellOffer(buyer, offerID), ter(sellTER)); env.close(); - if (tweakedFeatures[fixUnburnableNFToken]) + if (tweakedFeatures[fixNonFungibleTokensV1_2]) expectInitialState(); else { @@ -5767,14 +5769,14 @@ class NFToken_test : public beast::unit_test::suite // now we can do a secondary sale auto const offerID = createBuyOffer(buyer, secondarySeller, nftID, gwXAU(1000)); - auto const sellTER = tweakedFeatures[fixUnburnableNFToken] + auto const sellTER = tweakedFeatures[fixNonFungibleTokensV1_2] ? static_cast(tecINSUFFICIENT_FUNDS) : static_cast(tesSUCCESS); env(token::acceptBuyOffer(secondarySeller, offerID), ter(sellTER)); env.close(); - if (tweakedFeatures[fixUnburnableNFToken]) + if (tweakedFeatures[fixNonFungibleTokensV1_2]) expectInitialState(); else { @@ -5940,7 +5942,7 @@ class NFToken_test : public beast::unit_test::suite // the NFToken being bought and returned to the original owner and // the broker pocketing the profit. // - // This unit test verifies that the fixUnburnableNFToken amendment + // This unit test verifies that the fixNonFungibleTokensV1_2 amendment // fixes that bug. 
testcase("Brokered sale to self"); @@ -6006,7 +6008,7 @@ class NFToken_test : public beast::unit_test::suite BEAST_EXPECT(nftCount(env, bob) == 1); auto const bobsPriorBalance = env.balance(bob); auto const brokersPriorBalance = env.balance(broker); - TER expectTer = features[fixUnburnableNFToken] + TER expectTer = features[fixNonFungibleTokensV1_2] ? TER(tecCANT_ACCEPT_OWN_NFTOKEN_OFFER) : TER(tesSUCCESS); env(token::brokerOffers(broker, bobBuyOfferIndex, bobSellOfferIndex), @@ -6077,13 +6079,13 @@ class NFToken_test : public beast::unit_test::suite FeatureBitset const all{supported_amendments()}; FeatureBitset const fixNFTDir{fixNFTokenDirV1}; - testWithFeats(all - fixNFTDir - fixUnburnableNFToken); - testWithFeats(all - disallowIncoming - fixUnburnableNFToken); - testWithFeats(all - fixUnburnableNFToken); + testWithFeats(all - fixNFTDir - fixNonFungibleTokensV1_2); + testWithFeats(all - disallowIncoming - fixNonFungibleTokensV1_2); + testWithFeats(all - fixNonFungibleTokensV1_2); testWithFeats(all); } }; BEAST_DEFINE_TESTSUITE_PRIO(NFToken, tx, ripple, 2); -} // namespace ripple \ No newline at end of file +} // namespace ripple From aebf2ac990729d6ed6c0d4c7b396903ab1e4c25d Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Wed, 15 Feb 2023 14:34:32 -0500 Subject: [PATCH 105/508] Resolve a couple of Github Action CI annoyances: (#4413) * Set "fail-fast: false" so that multiple jobs in one workflow can finish independently. By default, if one job fails, other running jobs will be aborted, even if the other jobs are working fine and are almost done. This leads to wasted time and resources if the failure is, for example, OS specific, or due to a flaky unit test, and the failed job needs to be re-run, because all the jobs end up re-running. * Put conditions back into the windows.yml job (manual, and for a specific branch name and that job). This prevents Github Actions from sending "No jobs were run" failure emails on every commit. --- .github/workflows/nix.yml | 1 + .github/workflows/windows.yml | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index 34030b3429b..ca351354d43 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -5,6 +5,7 @@ jobs: test: strategy: + fail-fast: false matrix: platform: - ubuntu-latest diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 1cc6bd5ef8e..637eba4ea51 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -7,11 +7,19 @@ name: windows # then, we leave this tombstone as a reminder that we have tried (but failed) # to write a reliable test for Windows. 
# on: [push, pull_request] +on: + workflow_dispatch: + push: + branches: + - 'action' + paths: + - '.github/workflow/windows.yml' jobs: test: strategy: + fail-fast: false matrix: generator: - Visual Studio 16 2019 From 96aab1288f48c67d1cd1cfa48a6c3634f226b281 Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Fri, 17 Feb 2023 22:27:27 -0800 Subject: [PATCH 106/508] README: Add a few source code starting points (#4421) Co-authored-by: John Freeman --- README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 267ecfb258b..fbbc9d29a02 100644 --- a/README.md +++ b/README.md @@ -34,8 +34,12 @@ The server software that powers the XRP Ledger is called `rippled` and is availa ## Source Code -[![travis-ci.com: Build Status](https://travis-ci.com/ripple/rippled.svg?branch=develop)](https://travis-ci.com/ripple/rippled) -[![codecov.io: Code Coverage](https://codecov.io/gh/ripple/rippled/branch/develop/graph/badge.svg)](https://codecov.io/gh/ripple/rippled) + +Here are some good places to start learning the source code: + +- Read the markdown files in the source tree: `src/ripple/**/*.md`. +- Read [the levelization document](./Builds/levelization) to get an idea of the internal dependency graph. +- In the big picture, the `main` function constructs an `ApplicationImp` object, which implements the `Application` virtual interface. Almost every component in the application takes an `Application&` parameter in its constructor, typically named `app` and stored as a member variable `app_`. This allows most components to depend on any other component. ### Repository Contents From 5ec8783d35a379b0811365e45758a863d3593609 Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Tue, 21 Feb 2023 15:40:09 -0800 Subject: [PATCH 107/508] README: Update "Build from source" section (#4426) Fix #2987 --- Builds/VisualStudio2017/README.md | 1 + Builds/linux/README.md | 270 +----------------------------- Builds/macos/README.md | 4 +- README.md | 4 +- 4 files changed, 4 insertions(+), 275 deletions(-) create mode 100644 Builds/VisualStudio2017/README.md diff --git a/Builds/VisualStudio2017/README.md b/Builds/VisualStudio2017/README.md new file mode 100644 index 00000000000..fce8e824a05 --- /dev/null +++ b/Builds/VisualStudio2017/README.md @@ -0,0 +1 @@ +[Build instructions are currently located in `BUILD.md`](../../BUILD.md) diff --git a/Builds/linux/README.md b/Builds/linux/README.md index 15a84a33e43..fce8e824a05 100644 --- a/Builds/linux/README.md +++ b/Builds/linux/README.md @@ -1,269 +1 @@ -# Linux Build Instructions - -This document focuses on building rippled for development purposes under recent -Ubuntu linux distributions. To build rippled for Redhat, Fedora or Centos -builds, including docker based builds for those distributions, please consult -the [rippled-package-builder](https://github.com/ripple/rippled-package-builder) -repository. - -Note: Ubuntu 16.04 users may need to update their compiler (see the dependencies -section). For non Ubuntu distributions, the steps below should work by -installing the appropriate dependencies using that distribution's package -management tools. - - -## Dependencies - -gcc-8 or later is required. 
- -Use `apt-get` to install the dependencies provided by the distribution - -``` -$ apt-get update -$ apt-get install -y gcc g++ wget git cmake pkg-config libprotoc-dev protobuf-compiler libprotobuf-dev libssl-dev -``` - -To build the software in reporting mode, install these additional dependencies: -``` -$ apt-get install -y autoconf flex bison -``` - -Advanced users can choose to install newer versions of gcc, or the clang compiler. - -### Build Boost - -Boost 1.70 or later is required. We recommend downloading and compiling boost -with the following process: After changing to the directory where -you wish to download and compile boost, run -``` -$ wget https://boostorg.jfrog.io/artifactory/main/release/1.70.0/source/boost_1_70_0.tar.gz -$ tar -xzf boost_1_70_0.tar.gz -$ cd boost_1_70_0 -$ ./bootstrap.sh -$ ./b2 headers -$ ./b2 -j $(echo $(nproc)-2 | bc) -``` - -### (Optional) Dependencies for Building Source Documentation - -Source code documentation is not required for running/debugging rippled. That -said, the documentation contains some helpful information about specific -components of the application. For more information on how to install and run -the necessary components, see [this document](../../docs/README.md) - -## Build - -### Clone the rippled repository - -From a shell: - -``` -git clone git@github.com:ripple/rippled.git -cd rippled -``` - -For a stable release, choose the `master` branch or one of the tagged releases -listed on [GitHub](https://github.com/ripple/rippled/releases). - -``` -git checkout master -``` - -or to test the latest release candidate, choose the `release` branch. - -``` -git checkout release -``` - -If you are doing development work and want the latest set of untested -features, you can consider using the `develop` branch instead. - -``` -git checkout develop -``` - -### Configure Library Paths - -If you didn't persistently set the `BOOST_ROOT` environment variable to the -directory in which you compiled boost, then you should set it temporarily. - -For example, if you built Boost in your home directory `~/boost_1_70_0`, you -would run the following shell command: - -``` -export BOOST_ROOT=~/boost_1_70_0 -``` - -Alternatively, you can add `DBOOST_ROOT=~/boost_1_70_0` to the command line when -invoking `cmake`. - -### Generate Configuration - -All builds should be done in a separate directory from the source tree root -(a subdirectory is fine). For example, from the root of the ripple source tree: - -``` -mkdir build -cd build -``` - -followed by: - -``` -cmake -DCMAKE_BUILD_TYPE=Debug .. -``` - -If your operating system does not provide static libraries (Arch Linux, and -Manjaro Linux, for example), you must configure a non-static build by adding -`-Dstatic=OFF` to the above cmake line. - -`CMAKE_BUILD_TYPE` can be changed as desired for `Debug` vs. -`Release` builds (all four standard cmake build types are supported). - -To select a different compiler (most likely gcc will be found by default), pass -`-DCMAKE_C_COMPILER=` and -`-DCMAKE_CXX_COMPILER=` when configuring. If you prefer, -you can instead set `CC` and `CXX` environment variables which cmake will honor. - -#### Options During Configuration: - -The CMake file defines a number of configure-time options which can be -examined by running `cmake-gui` or `ccmake` to generated the build. In -particular, the `unity` option allows you to select between the unity and -non-unity builds. 
`unity` builds are faster to compile since they combine -multiple sources into a single compiliation unit - this is the default if you -don't specify. `nounity` builds can be helpful for detecting include omissions -or for finding other build-related issues, but aren't generally needed for -testing and running. - -* `-Dunity=ON` to enable/disable unity builds (defaults to ON) -* `-Dassert=ON` to enable asserts -* `-Djemalloc=ON` to enable jemalloc support for heap checking -* `-Dsan=thread` to enable the thread sanitizer with clang -* `-Dsan=address` to enable the address sanitizer with clang -* `-Dstatic=ON` to enable static linking library dependencies -* `-Dreporting=ON` to build code necessary for reporting mode (defaults to OFF) - -Several other infrequently used options are available - run `ccmake` or -`cmake-gui` for a list of all options. - -### Build - -Once you have generated the build system, you can run the build via cmake: - -``` -cmake --build . -- -j $(echo $(nproc)-2 | bc) -``` - -the `-j` parameter in this example tells the build tool to compile several -files in parallel. This value should be chosen roughly based on the number of -cores you have available and/or want to use for building. - -When the build completes successfully, you will have a `rippled` executable in -the current directory, which can be used to connect to the network (when -properly configured) or to run unit tests. - - -#### Optional Installation - -The rippled cmake build supports an installation target that will install -rippled as well as a support library that can be used to sign transactions. In -order to build and install the files, specify the `install` target when -building, e.g.: - -``` -cmake -DCMAKE_BUILD_TYPE=Debug -DCMAKE_INSTALL_PREFIX=/opt/local .. -cmake --build . --target install -- -j $(echo $(nproc)-2 | bc) -``` - -We recommend specifying `CMAKE_INSTALL_PREFIX` when configuring in order to -explicitly control the install location for your files. Without this setting, -cmake will typically install in `/usr/local`. It is also possible to "rehome" -the installation by specifying the `DESTDIR` env variable during the install phase, -e.g.: - -``` -DESTDIR=~/mylibs cmake --build . --target install -- -j $(echo $(nproc)-2 | bc) -``` - -in which case, the files would be installed in the `CMAKE_INSTALL_PREFIX` within -the specified `DESTDIR` path. - -#### Signing Library - -If you want to use the signing support library to create an application, there -are two simple mechanisms with cmake + git that facilitate this. - -With either option below, you will have access to a library from the -rippled project that you can link to in your own project's CMakeLists.txt, e.g.: - -``` -target_link_libraries (my-signing-app Ripple::xrpl_core) -``` - -##### Option 1: git submodules + add_subdirectory - -First, add the rippled repo as a submodule to your project repo: - -``` -git submodule add -b master https://github.com/ripple/rippled.git vendor/rippled -``` - -change the `vendor/rippled` path as desired for your repo layout. Furthermore, -change the branch name if you want to track a different rippled branch, such -as `develop`. - -Second, to bring this submodule into your project, just add the rippled subdirectory: - -``` -add_subdirectory (vendor/rippled) -``` - -##### Option 2: installed rippled + find_package - -First, follow the "Optional Installation" instructions above to -build and install the desired version of rippled. 
- -To make use of the installed files, add the following to your CMakeLists.txt file: - -``` -set (CMAKE_MODULE_PATH /opt/local/lib/cmake/ripple ${CMAKE_MODULE_PATH}) -find_package(Ripple REQUIRED) -``` - -change the `/opt/local` module path above to match your chosen installation prefix. - -## Unit Tests (Recommended) - -`rippled` builds a set of unit tests into the server executable. To run these unit -tests after building, pass the `--unittest` option to the compiled `rippled` -executable. The executable will exit with summary info after running the unit tests. - -## Workaround for a compile error in soci - -Compilation errors have been observed with Apple Clang 13.1.6+ and soci v4.x. soci compiles with the `-Werror` flag which causes warnings to be treated as errors. These warnings pertain to style (not correctness). However, they cause the cmake process to fail. - -Here's an example of how this looks: -``` -.../rippled/.nih_c/unix_makefiles/AppleClang_13.1.6.13160021/Debug/src/soci/src/core/session.cpp:450:66: note: in instantiation of function template specialization 'soci::use' requested here - return prepare << backEnd_->get_column_descriptions_query(), use(table_name, "t"); - ^ -1 error generated. -``` - -Please apply the below patch (courtesy of Scott Determan) to remove these errors. `.nih_c/unix_makefiles/AppleClang_13.1.6.13160021/Debug/src/soci/cmake/SociConfig.cmake` file needs to be edited. This file is an example for Mac OS and it might be slightly different for other OS/Architectures. - -``` -diff --git a/cmake/SociConfig.cmake b/cmake/SociConfig.cmake -index 97d907e4..11bcd1f3 100644 ---- a/cmake/SociConfig.cmake -+++ b/cmake/SociConfig.cmake -@@ -58,8 +58,8 @@ if (MSVC) - - else() - -- set(SOCI_GCC_CLANG_COMMON_FLAGS -- "-pedantic -Werror -Wno-error=parentheses -Wall -Wextra -Wpointer-arith -Wcast-align -Wcast-qual -Wfloat-equal -Woverloaded-virtual -Wredundant-decls -Wno-long-long") -+ set(SOCI_GCC_CLANG_COMMON_FLAGS "") -+ # "-pedantic -Werror -Wno-error=parentheses -Wall -Wextra -Wpointer-arith -Wcast-align -Wcast-qual -Wfloat-equal -Woverloaded-virtual -Wredundant-decls -Wno-long-long") -``` +[Build instructions are currently located in `BUILD.md`](../../BUILD.md) diff --git a/Builds/macos/README.md b/Builds/macos/README.md index 2a4e28deb68..fce8e824a05 100644 --- a/Builds/macos/README.md +++ b/Builds/macos/README.md @@ -1,3 +1 @@ -# macOS Build Instructions - -[Build and Run rippled on macOS](https://xrpl.org/build-run-rippled-macos.html) +[Build instructions are currently located in `BUILD.md`](../../BUILD.md) diff --git a/README.md b/README.md index fbbc9d29a02..b2d0ff42a3d 100644 --- a/README.md +++ b/README.md @@ -10,9 +10,7 @@ The server software that powers the XRP Ledger is called `rippled` and is availa ### Build from Source -* [Linux](Builds/linux/README.md) -* [Mac](Builds/macos/README.md) (Not recommended for production) -* [Windows](Builds/VisualStudio2017/README.md) (Not recommended for production) +* [Read the build instructions in `BUILD.md`](BUILD.md) ## Key Features of the XRP Ledger From 292974889830d2c0a0c547236b458a291e7a2c84 Mon Sep 17 00:00:00 2001 From: Levin Winter <33220502+levinwinter@users.noreply.github.com> Date: Thu, 23 Feb 2023 00:20:15 +0100 Subject: [PATCH 108/508] Refactor getTrustedForLedger() (#4424) Look for validations associated with a specific ledger ID and sequence number. 
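
For illustration only (not part of this patch): callers now supply the ledger's sequence number alongside its hash, and only trusted, full validations whose sequence matches are returned. A minimal sketch of the new usage, where `app` and `ledger` stand in for the application and ledger objects already available at the updated call sites:

```cpp
// Sketch: fetch trusted full validations for one specific ledger.
// After this change, a validation is included only if it is trusted,
// full, and its seq() equals the sequence the caller passes in, which
// filters out validations that happen to share a hash but not a sequence.
auto const vals = app.getValidations().getTrustedForLedger(
    ledger->info().hash, ledger->info().seq);

for (auto const& val : vals)
{
    // Each val here validated exactly this ledger (same hash and sequence).
}
```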
--- src/ripple/app/consensus/RCLConsensus.cpp | 2 +- src/ripple/app/ledger/impl/LedgerMaster.cpp | 10 +++++---- src/ripple/app/misc/NegativeUNLVote.cpp | 2 +- src/ripple/consensus/Validations.h | 5 +++-- src/test/consensus/Validations_test.cpp | 24 ++++++++++++++------- 5 files changed, 27 insertions(+), 16 deletions(-) diff --git a/src/ripple/app/consensus/RCLConsensus.cpp b/src/ripple/app/consensus/RCLConsensus.cpp index 12a150a4a54..7ba059881c0 100644 --- a/src/ripple/app/consensus/RCLConsensus.cpp +++ b/src/ripple/app/consensus/RCLConsensus.cpp @@ -337,7 +337,7 @@ RCLConsensus::Adaptor::onClose( // pseudo-transactions auto validations = app_.validators().negativeUNLFilter( app_.getValidations().getTrustedForLedger( - prevLedger->info().parentHash)); + prevLedger->info().parentHash, prevLedger->seq() - 1)); if (validations.size() >= app_.validators().quorum()) { feeVote_->doVoting(prevLedger, validations, initialSet); diff --git a/src/ripple/app/ledger/impl/LedgerMaster.cpp b/src/ripple/app/ledger/impl/LedgerMaster.cpp index ad08b18dd58..7476698ee3a 100644 --- a/src/ripple/app/ledger/impl/LedgerMaster.cpp +++ b/src/ripple/app/ledger/impl/LedgerMaster.cpp @@ -341,7 +341,8 @@ LedgerMaster::setValidLedger(std::shared_ptr const& l) if (!standalone_) { auto validations = app_.validators().negativeUNLFilter( - app_.getValidations().getTrustedForLedger(l->info().hash)); + app_.getValidations().getTrustedForLedger( + l->info().hash, l->info().seq)); times.reserve(validations.size()); for (auto const& val : validations) times.push_back(val->getSignTime()); @@ -987,7 +988,7 @@ LedgerMaster::checkAccept(uint256 const& hash, std::uint32_t seq) return; auto validations = app_.validators().negativeUNLFilter( - app_.getValidations().getTrustedForLedger(hash)); + app_.getValidations().getTrustedForLedger(hash, seq)); valCount = validations.size(); if (valCount >= app_.validators().quorum()) { @@ -1053,7 +1054,8 @@ LedgerMaster::checkAccept(std::shared_ptr const& ledger) auto const minVal = getNeededValidations(); auto validations = app_.validators().negativeUNLFilter( - app_.getValidations().getTrustedForLedger(ledger->info().hash)); + app_.getValidations().getTrustedForLedger( + ledger->info().hash, ledger->info().seq)); auto const tvc = validations.size(); if (tvc < minVal) // nothing we can do { @@ -1128,7 +1130,7 @@ LedgerMaster::checkAccept(std::shared_ptr const& ledger) { // Have not printed the warning before, check if need to print. 
auto const vals = app_.getValidations().getTrustedForLedger( - ledger->info().parentHash); + ledger->info().parentHash, ledger->info().seq - 1); std::size_t higherVersionCount = 0; std::size_t rippledCount = 0; for (auto const& v : vals) diff --git a/src/ripple/app/misc/NegativeUNLVote.cpp b/src/ripple/app/misc/NegativeUNLVote.cpp index fba02637e51..5c133403b46 100644 --- a/src/ripple/app/misc/NegativeUNLVote.cpp +++ b/src/ripple/app/misc/NegativeUNLVote.cpp @@ -198,7 +198,7 @@ NegativeUNLVote::buildScoreTable( for (int i = 0; i < FLAG_LEDGER_INTERVAL; ++i) { for (auto const& v : validations.getTrustedForLedger( - ledgerAncestors[numAncestors - 1 - i])) + ledgerAncestors[numAncestors - 1 - i], seq - 2 - i)) { if (scoreTable.count(v->getNodeID())) ++scoreTable[v->getNodeID()]; diff --git a/src/ripple/consensus/Validations.h b/src/ripple/consensus/Validations.h index 46bf4322a5a..a9dbd5585e2 100644 --- a/src/ripple/consensus/Validations.h +++ b/src/ripple/consensus/Validations.h @@ -1049,10 +1049,11 @@ class Validations /** Get trusted full validations for a specific ledger @param ledgerID The identifier of ledger of interest + @param seq The sequence number of ledger of interest @return Trusted validations associated with ledger */ std::vector - getTrustedForLedger(ID const& ledgerID) + getTrustedForLedger(ID const& ledgerID, Seq const& seq) { std::vector res; std::lock_guard lock{mutex_}; @@ -1061,7 +1062,7 @@ class Validations ledgerID, [&](std::size_t numValidations) { res.reserve(numValidations); }, [&](NodeID const&, Validation const& v) { - if (v.trusted() && v.full()) + if (v.trusted() && v.full() && v.seq() == seq) res.emplace_back(v.unwrap()); }); diff --git a/src/test/consensus/Validations_test.cpp b/src/test/consensus/Validations_test.cpp index 79de1fc8063..7dc2086e55c 100644 --- a/src/test/consensus/Validations_test.cpp +++ b/src/test/consensus/Validations_test.cpp @@ -613,7 +613,8 @@ class Validations_test : public beast::unit_test::suite c.setLoadFee(12); e.setLoadFee(12); - hash_map> trustedValidations; + hash_map, std::vector> + trustedValidations; //---------------------------------------------------------------------- // checkers @@ -624,14 +625,15 @@ class Validations_test : public beast::unit_test::suite auto compare = [&]() { for (auto& it : trustedValidations) { - auto const& id = it.first; + auto const& id = it.first.first; + auto const& seq = it.first.second; auto const& expectedValidations = it.second; BEAST_EXPECT( harness.vals().numTrustedForLedger(id) == expectedValidations.size()); BEAST_EXPECT( - sorted(harness.vals().getTrustedForLedger(id)) == + sorted(harness.vals().getTrustedForLedger(id, seq)) == sorted(expectedValidations)); std::uint32_t baseFee = 0; @@ -653,7 +655,7 @@ class Validations_test : public beast::unit_test::suite Ledger ledgerAC = h["ac"]; // Add a dummy ID to cover unknown ledger identifiers - trustedValidations[Ledger::ID{100}] = {}; + trustedValidations[{Ledger::ID{100}, Ledger::Seq{100}}] = {}; // first round a,b,c agree for (auto const& node : {a, b, c}) @@ -661,13 +663,14 @@ class Validations_test : public beast::unit_test::suite auto const val = node.validate(ledgerA); BEAST_EXPECT(ValStatus::current == harness.add(val)); if (val.trusted()) - trustedValidations[val.ledgerID()].emplace_back(val); + trustedValidations[{val.ledgerID(), val.seq()}].emplace_back( + val); } // d disagrees { auto const val = d.validate(ledgerB); BEAST_EXPECT(ValStatus::current == harness.add(val)); - trustedValidations[val.ledgerID()].emplace_back(val); + 
trustedValidations[{val.ledgerID(), val.seq()}].emplace_back(val); } // e only issues partials { @@ -681,7 +684,8 @@ class Validations_test : public beast::unit_test::suite auto const val = node.validate(ledgerAC); BEAST_EXPECT(ValStatus::current == harness.add(val)); if (val.trusted()) - trustedValidations[val.ledgerID()].emplace_back(val); + trustedValidations[{val.ledgerID(), val.seq()}].emplace_back( + val); } // d now thinks ledger 1, but cannot re-issue a previously used seq // and attempting it should generate a conflict. @@ -1035,6 +1039,9 @@ class Validations_test : public beast::unit_test::suite std::vector const& trustedVals) { Ledger::ID testID = trustedVals.empty() ? this->genesisLedger.id() : trustedVals[0].ledgerID(); + Ledger::Seq testSeq = trustedVals.empty() + ? this->genesisLedger.seq() + : trustedVals[0].seq(); BEAST_EXPECT(vals.currentTrusted() == trustedVals); BEAST_EXPECT(vals.getCurrentNodeIDs() == listed); BEAST_EXPECT( @@ -1046,7 +1053,8 @@ class Validations_test : public beast::unit_test::suite else BEAST_EXPECT( vals.getPreferred(this->genesisLedger)->second == testID); - BEAST_EXPECT(vals.getTrustedForLedger(testID) == trustedVals); + BEAST_EXPECT( + vals.getTrustedForLedger(testID, testSeq) == trustedVals); BEAST_EXPECT( vals.numTrustedForLedger(testID) == trustedVals.size()); }; From 60c276d90bbc7ca6a88379fadf7dfdfae8f673a9 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Wed, 22 Feb 2023 21:54:51 -0600 Subject: [PATCH 109/508] Fix Conan version constraint in workflows (#4430) Conan 2.0 is not compatible with the current workflows. --- .github/workflows/nix.yml | 2 +- .github/workflows/windows.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index ca351354d43..2aa22bfd776 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -44,7 +44,7 @@ jobs: path: ${{ steps.pip-cache.outputs.dir }} key: ${{ runner.os }}-${{ hashFiles('.github/workflows/nix.yml') }} - name: install Conan - run: pip install wheel 'conan>=1.52.0' + run: pip install wheel 'conan~=1.52' - name: check environment run: | echo ${PATH} | tr ':' '\n' diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 637eba4ea51..b50f9ab550a 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -46,7 +46,7 @@ jobs: path: ${{ steps.pip-cache.outputs.dir }} key: ${{ runner.os }}-${{ hashFiles('.github/workflows/windows.yml') }} - name: install Conan - run: pip install wheel 'conan>=1.52.0' + run: pip install wheel 'conan~=1.52' - name: check environment run: | $env:PATH -split ';' From c2b03fecca19a304b37467b01fa78593d3dce3fb Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Thu, 23 Feb 2023 12:41:18 -0800 Subject: [PATCH 110/508] Remove recipe for RocksDB and add recipe for Snappy (#4431) - Copies the recipe for Snappy from Conan Center, but removes three lines that explicitly link the standard library, which prevents builders from statically linking it. - Removes the recipe for RocksDB now that an official recipe for version 6.27.3 is in Conan Center. 
Developers will likely need to remove cached versions of both RocksDB and Snappy: ``` conan remove -f rocksdb conan remove -f snappy ``` --------- Co-authored-by: John Freeman --- .github/workflows/nix.yml | 4 +- .github/workflows/windows.yml | 4 +- BUILD.md | 10 +- external/rocksdb/conanfile.py | 193 ------------------ external/rocksdb/thirdparty.inc | 62 ------ external/snappy/conandata.yml | 16 ++ external/snappy/conanfile.py | 89 ++++++++ .../1.1.9-0001-fix-inlining-failure.patch | 14 ++ .../snappy/patches/1.1.9-0002-no-Werror.patch | 12 ++ ...1.9-0003-fix-clobber-list-older-llvm.patch | 12 ++ .../patches/1.1.9-0004-rtti-by-default.patch | 20 ++ 11 files changed, 172 insertions(+), 264 deletions(-) delete mode 100644 external/rocksdb/conanfile.py delete mode 100644 external/rocksdb/thirdparty.inc create mode 100644 external/snappy/conandata.yml create mode 100644 external/snappy/conanfile.py create mode 100644 external/snappy/patches/1.1.9-0001-fix-inlining-failure.patch create mode 100644 external/snappy/patches/1.1.9-0002-no-Werror.patch create mode 100644 external/snappy/patches/1.1.9-0003-fix-clobber-list-older-llvm.patch create mode 100644 external/snappy/patches/1.1.9-0004-rtti-by-default.patch diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index 2aa22bfd776..8ccb1f52733 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -69,8 +69,8 @@ jobs: with: path: ${{ steps.conan-cache.outputs.dir }} key: ${{ hashFiles('~/.conan/profiles/default', 'conanfile.py', 'external/rocksdb/*', '.github/workflows/nix.yml') }} - - name: export RocksDB - run: conan export external/rocksdb + - name: export Snappy + run: conan export external/snappy snappy/1.1.9@ - name: install dependencies run: | mkdir ${build_dir} diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index b50f9ab550a..51fdc5d27cb 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -69,8 +69,8 @@ jobs: with: path: ${{ steps.conan-cache.outputs.dir }} key: ${{ hashFiles('~/.conan/profiles/default', 'conanfile.py', 'external/rocksdb/*', '.github/workflows/windows.yml') }} - - name: export RocksDB - run: conan export external/rocksdb + - name: export Snappy + run: conan export external/snappy snappy/1.1.9@ - name: install dependencies run: | mkdir $env:build_dir diff --git a/BUILD.md b/BUILD.md index 7bb2d4d8ebb..a1376bdc5df 100644 --- a/BUILD.md +++ b/BUILD.md @@ -243,7 +243,7 @@ The first is for a single-configuration generator (e.g. Unix Makefiles) on Linux or MacOS: ``` -conan export external/rocksdb +conan export external/snappy snappy/1.1.9@ mkdir .build cd .build conan install .. --output-folder . --build missing --settings build_type=Release @@ -256,7 +256,7 @@ The second is for a multi-configuration generator (e.g. Visual Studio) on Windows: ``` -conan export external/rocksdb +conan export external/snappy snappy/1.1.9@ mkdir .build cd .build conan install .. --output-folder . --build missing --settings build_type=Release --settings compiler.runtime=MT @@ -270,10 +270,10 @@ cmake --build . --config Debug Now to explain the individual steps in each example: -1. Export our [Conan recipe for RocksDB](./external/rocksdb). +1. Export our [Conan recipe for Snappy](./external/snappy). - It builds version 6.27.3, which, as of July 8, 2022, - is not available in [Conan Center](https://conan.io/center/rocksdb). + It does not explicitly link the C++ standard library, + which allows us to statically link it. 1. 
Create a build directory (and move into it). diff --git a/external/rocksdb/conanfile.py b/external/rocksdb/conanfile.py deleted file mode 100644 index a219f4f5547..00000000000 --- a/external/rocksdb/conanfile.py +++ /dev/null @@ -1,193 +0,0 @@ -import os -import shutil -from conans import ConanFile, CMake -from conan.tools import microsoft as ms - -class RocksDB(ConanFile): - name = 'rocksdb' - version = '6.27.3' - - license = ('GPL-2.0-only', 'Apache-2.0') - url = 'https://github.com/conan-io/conan-center-index' - description = 'A library that provides an embeddable, persistent key-value store for fast storage' - topics = ('rocksdb', 'database', 'leveldb', 'facebook', 'key-value') - - settings = 'os', 'compiler', 'build_type', 'arch' - options = { - 'enable_sse': [False, 'sse42', 'avx2'], - 'fPIC': [True, False], - 'lite': [True, False], - 'shared': [True, False], - 'use_rtti': [True, False], - 'with_gflags': [True, False], - 'with_jemalloc': [True, False], - 'with_lz4': [True, False], - 'with_snappy': [True, False], - 'with_tbb': [True, False], - 'with_zlib': [True, False], - 'with_zstd': [True, False], - } - default_options = { - 'enable_sse': False, - 'fPIC': True, - 'lite': False, - 'shared': False, - 'use_rtti': False, - 'with_gflags': False, - 'with_jemalloc': False, - 'with_lz4': False, - 'with_snappy': False, - 'with_tbb': False, - 'with_zlib': False, - 'with_zstd': False, - } - - def requirements(self): - if self.options.with_gflags: - self.requires('gflags/2.2.2') - if self.options.with_jemalloc: - self.requires('jemalloc/5.2.1') - if self.options.with_lz4: - self.requires('lz4/1.9.3') - if self.options.with_snappy: - self.requires('snappy/1.1.9') - if self.options.with_tbb: - self.requires('onetbb/2020.3') - if self.options.with_zlib: - self.requires('zlib/1.2.11') - if self.options.with_zstd: - self.requires('zstd/1.5.2') - - def config_options(self): - if self.settings.os == 'Windows': - del self.options.fPIC - - def configure(self): - if self.options.shared: - del self.options.fPIC - - generators = 'cmake', 'cmake_find_package' - - scm = { - 'type': 'git', - 'url': 'https://github.com/facebook/rocksdb.git', - 'revision': 'v6.27.3', - } - - exports_sources = 'thirdparty.inc' - # For out-of-source build. 
- no_copy_source = True - - _cmake = None - - def _configure_cmake(self): - if self._cmake: - return - - self._cmake = CMake(self) - - self._cmake.definitions['CMAKE_POSITION_INDEPENDENT_CODE'] = True - - self._cmake.definitions['DISABLE_STALL_NOTIF'] = False - self._cmake.definitions['FAIL_ON_WARNINGS'] = False - self._cmake.definitions['OPTDBG'] = True - self._cmake.definitions['WITH_TESTS'] = False - self._cmake.definitions['WITH_TOOLS'] = False - - self._cmake.definitions['WITH_GFLAGS'] = self.options.with_gflags - self._cmake.definitions['WITH_JEMALLOC'] = self.options.with_jemalloc - self._cmake.definitions['WITH_LZ4'] = self.options.with_lz4 - self._cmake.definitions['WITH_SNAPPY'] = self.options.with_snappy - self._cmake.definitions['WITH_TBB'] = self.options.with_tbb - self._cmake.definitions['WITH_ZLIB'] = self.options.with_zlib - self._cmake.definitions['WITH_ZSTD'] = self.options.with_zstd - - self._cmake.definitions['USE_RTTI'] = self.options.use_rtti - self._cmake.definitions['ROCKSDB_LITE'] = self.options.lite - self._cmake.definitions['ROCKSDB_INSTALL_ON_WINDOWS'] = ( - self.settings.os == 'Windows' - ) - - if not self.options.enable_sse: - self._cmake.definitions['PORTABLE'] = True - self._cmake.definitions['FORCE_SSE42'] = False - elif self.options.enable_sse == 'sse42': - self._cmake.definitions['PORTABLE'] = True - self._cmake.definitions['FORCE_SSE42'] = True - elif self.options.enable_sse == 'avx2': - self._cmake.definitions['PORTABLE'] = False - self._cmake.definitions['FORCE_SSE42'] = False - - self._cmake.definitions['WITH_ASAN'] = False - self._cmake.definitions['WITH_BZ2'] = False - self._cmake.definitions['WITH_JNI'] = False - self._cmake.definitions['WITH_LIBRADOS'] = False - if ms.is_msvc(self): - self._cmake.definitions['WITH_MD_LIBRARY'] = ( - ms.msvc_runtime_flag(self).startswith('MD') - ) - self._cmake.definitions['WITH_RUNTIME_DEBUG'] = ( - ms.msvc_runtime_flag(self).endswith('d') - ) - self._cmake.definitions['WITH_NUMA'] = False - self._cmake.definitions['WITH_TSAN'] = False - self._cmake.definitions['WITH_UBSAN'] = False - self._cmake.definitions['WITH_WINDOWS_UTF8_FILENAMES'] = False - self._cmake.definitions['WITH_XPRESS'] = False - self._cmake.definitions['WITH_FALLOCATE'] = True - - - def build(self): - if ms.is_msvc(self): - file = os.path.join( - self.recipe_folder, '..', 'export_source', 'thirdparty.inc' - ) - shutil.copy(file, self.build_folder) - self._configure_cmake() - self._cmake.configure() - self._cmake.build() - - def package(self): - self._configure_cmake() - self._cmake.install() - - def package_info(self): - self.cpp_info.filenames['cmake_find_package'] = 'RocksDB' - self.cpp_info.filenames['cmake_find_package_multi'] = 'RocksDB' - self.cpp_info.set_property('cmake_file_name', 'RocksDB') - - self.cpp_info.names['cmake_find_package'] = 'RocksDB' - self.cpp_info.names['cmake_find_package_multi'] = 'RocksDB' - - self.cpp_info.components['librocksdb'].names['cmake_find_package'] = 'rocksdb' - self.cpp_info.components['librocksdb'].names['cmake_find_package_multi'] = 'rocksdb' - self.cpp_info.components['librocksdb'].set_property( - 'cmake_target_name', 'RocksDB::rocksdb' - ) - - self.cpp_info.components['librocksdb'].libs = ['rocksdb'] - - if self.settings.os == "Windows": - self.cpp_info.components["librocksdb"].system_libs = ["shlwapi", "rpcrt4"] - if self.options.shared: - self.cpp_info.components["librocksdb"].defines = ["ROCKSDB_DLL"] - elif self.settings.os in ["Linux", "FreeBSD"]: - 
self.cpp_info.components["librocksdb"].system_libs = ["pthread", "m"] - - if self.options.lite: - self.cpp_info.components["librocksdb"].defines.append("ROCKSDB_LITE") - - if self.options.with_gflags: - self.cpp_info.components["librocksdb"].requires.append("gflags::gflags") - if self.options.with_jemalloc: - self.cpp_info.components["librocksdb"].requires.append("jemalloc::jemalloc") - if self.options.with_lz4: - self.cpp_info.components["librocksdb"].requires.append("lz4::lz4") - if self.options.with_snappy: - self.cpp_info.components["librocksdb"].requires.append("snappy::snappy") - if self.options.with_tbb: - self.cpp_info.components["librocksdb"].requires.append("onetbb::onetbb") - if self.options.with_zlib: - self.cpp_info.components["librocksdb"].requires.append("zlib::zlib") - if self.options.with_zstd: - self.cpp_info.components["librocksdb"].requires.append("zstd::zstd") diff --git a/external/rocksdb/thirdparty.inc b/external/rocksdb/thirdparty.inc deleted file mode 100644 index fce11784b92..00000000000 --- a/external/rocksdb/thirdparty.inc +++ /dev/null @@ -1,62 +0,0 @@ -if(WITH_GFLAGS) - # Config with namespace available since gflags 2.2.2 - find_package(gflags REQUIRED) - set(GFLAGS_LIB gflags::gflags) - list(APPEND THIRDPARTY_LIBS ${GFLAGS_LIB}) - add_definitions(-DGFLAGS=1) -endif() - -if(WITH_SNAPPY) - find_package(Snappy REQUIRED) - add_definitions(-DSNAPPY) - list(APPEND THIRDPARTY_LIBS Snappy::snappy) -endif() - -if(WITH_LZ4) - find_package(lz4 REQUIRED) - add_definitions(-DLZ4) - list(APPEND THIRDPARTY_LIBS lz4::lz4) -endif() - -if(WITH_ZLIB) - find_package(ZLIB REQUIRED) - add_definitions(-DZLIB) - list(APPEND THIRDPARTY_LIBS ZLIB::ZLIB) -endif() - -option(WITH_BZ2 "build with bzip2" OFF) -if(WITH_BZ2) - find_package(BZip2 REQUIRED) - add_definitions(-DBZIP2) - list(APPEND THIRDPARTY_LIBS BZip2::BZip2) -endif() - -if(WITH_ZSTD) - find_package(zstd REQUIRED) - add_definitions(-DZSTD) - list(APPEND THIRDPARTY_LIBS zstd::zstd) -endif() - -# ================================================== XPRESS ================================================== -# This makes use of built-in Windows API, no additional includes, links to a system lib - -if(WITH_XPRESS) - message(STATUS "XPRESS is enabled") - add_definitions(-DXPRESS) - # We are using the implementation provided by the system - list(APPEND SYSTEM_LIBS Cabinet.lib) -else() - message(STATUS "XPRESS is disabled") -endif() - -# ================================================== JEMALLOC ================================================== -if(WITH_JEMALLOC) - message(STATUS "JEMALLOC library is enabled") - add_definitions(-DROCKSDB_JEMALLOC -DJEMALLOC_EXPORT= -DJEMALLOC_NO_RENAME) - list(APPEND THIRDPARTY_LIBS jemalloc::jemalloc) - set(ARTIFACT_SUFFIX "_je") - -else () - set(ARTIFACT_SUFFIX "") - message(STATUS "JEMALLOC library is disabled") -endif () diff --git a/external/snappy/conandata.yml b/external/snappy/conandata.yml new file mode 100644 index 00000000000..6c12aed54a1 --- /dev/null +++ b/external/snappy/conandata.yml @@ -0,0 +1,16 @@ +sources: + "1.1.9": + url: "https://github.com/google/snappy/archive/1.1.9.tar.gz" + sha256: "75c1fbb3d618dd3a0483bff0e26d0a92b495bbe5059c8b4f1c962b478b6e06e7" + "1.1.8": + url: "https://github.com/google/snappy/archive/1.1.8.tar.gz" + sha256: "16b677f07832a612b0836178db7f374e414f94657c138e6993cbfc5dcc58651f" + "1.1.7": + url: "https://github.com/google/snappy/archive/1.1.7.tar.gz" + sha256: "3dfa02e873ff51a11ee02b9ca391807f0c8ea0529a4924afa645fbf97163f9d4" +patches: + "1.1.9": + - 
patch_file: "patches/1.1.9-0001-fix-inlining-failure.patch" + - patch_file: "patches/1.1.9-0002-no-Werror.patch" + - patch_file: "patches/1.1.9-0003-fix-clobber-list-older-llvm.patch" + - patch_file: "patches/1.1.9-0004-rtti-by-default.patch" diff --git a/external/snappy/conanfile.py b/external/snappy/conanfile.py new file mode 100644 index 00000000000..23558639f46 --- /dev/null +++ b/external/snappy/conanfile.py @@ -0,0 +1,89 @@ +from conan import ConanFile +from conan.tools.build import check_min_cppstd +from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout +from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir +from conan.tools.scm import Version +import os + +required_conan_version = ">=1.54.0" + + +class SnappyConan(ConanFile): + name = "snappy" + description = "A fast compressor/decompressor" + topics = ("google", "compressor", "decompressor") + url = "https://github.com/conan-io/conan-center-index" + homepage = "https://github.com/google/snappy" + license = "BSD-3-Clause" + + package_type = "library" + settings = "os", "arch", "compiler", "build_type" + options = { + "shared": [True, False], + "fPIC": [True, False], + } + default_options = { + "shared": False, + "fPIC": True, + } + + def export_sources(self): + export_conandata_patches(self) + + def config_options(self): + if self.settings.os == 'Windows': + del self.options.fPIC + + def configure(self): + if self.options.shared: + self.options.rm_safe("fPIC") + + def layout(self): + cmake_layout(self, src_folder="src") + + def validate(self): + if self.settings.compiler.get_safe("cppstd"): + check_min_cppstd(self, 11) + + def source(self): + get(self, **self.conan_data["sources"][self.version], strip_root=True) + + def generate(self): + tc = CMakeToolchain(self) + tc.variables["SNAPPY_BUILD_TESTS"] = False + if Version(self.version) >= "1.1.8": + tc.variables["SNAPPY_FUZZING_BUILD"] = False + tc.variables["SNAPPY_REQUIRE_AVX"] = False + tc.variables["SNAPPY_REQUIRE_AVX2"] = False + tc.variables["SNAPPY_INSTALL"] = True + if Version(self.version) >= "1.1.9": + tc.variables["SNAPPY_BUILD_BENCHMARKS"] = False + tc.generate() + + def build(self): + apply_conandata_patches(self) + cmake = CMake(self) + cmake.configure() + cmake.build() + + def package(self): + copy(self, "COPYING", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses")) + cmake = CMake(self) + cmake.install() + rmdir(self, os.path.join(self.package_folder, "lib", "cmake")) + + def package_info(self): + self.cpp_info.set_property("cmake_file_name", "Snappy") + self.cpp_info.set_property("cmake_target_name", "Snappy::snappy") + # TODO: back to global scope in conan v2 once cmake_find_package* generators removed + self.cpp_info.components["snappylib"].libs = ["snappy"] + if not self.options.shared: + if self.settings.os in ["Linux", "FreeBSD"]: + self.cpp_info.components["snappylib"].system_libs.append("m") + + # TODO: to remove in conan v2 once cmake_find_package* generators removed + self.cpp_info.names["cmake_find_package"] = "Snappy" + self.cpp_info.names["cmake_find_package_multi"] = "Snappy" + self.cpp_info.components["snappylib"].names["cmake_find_package"] = "snappy" + self.cpp_info.components["snappylib"].names["cmake_find_package_multi"] = "snappy" + self.cpp_info.components["snappylib"].set_property("cmake_target_name", "Snappy::snappy") diff --git a/external/snappy/patches/1.1.9-0001-fix-inlining-failure.patch b/external/snappy/patches/1.1.9-0001-fix-inlining-failure.patch new file mode 
100644 index 00000000000..cdc119c0d58 --- /dev/null +++ b/external/snappy/patches/1.1.9-0001-fix-inlining-failure.patch @@ -0,0 +1,14 @@ +Fixes the following error: +error: inlining failed in call to ‘always_inline’ ‘size_t snappy::AdvanceToNextTag(const uint8_t**, size_t*)’: function body can be overwritten at link time + +--- snappy-stubs-internal.h ++++ snappy-stubs-internal.h +@@ -100,7 +100,7 @@ + + // Inlining hints. + #ifdef HAVE_ATTRIBUTE_ALWAYS_INLINE +-#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline)) ++#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE + #else + #define SNAPPY_ATTRIBUTE_ALWAYS_INLINE + #endif diff --git a/external/snappy/patches/1.1.9-0002-no-Werror.patch b/external/snappy/patches/1.1.9-0002-no-Werror.patch new file mode 100644 index 00000000000..d86e4e0a9df --- /dev/null +++ b/external/snappy/patches/1.1.9-0002-no-Werror.patch @@ -0,0 +1,12 @@ +--- CMakeLists.txt ++++ CMakeLists.txt +@@ -69,7 +69,7 @@ +- # Use -Werror for clang only. ++if(0) + if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") + if(NOT CMAKE_CXX_FLAGS MATCHES "-Werror") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") + endif(NOT CMAKE_CXX_FLAGS MATCHES "-Werror") + endif(CMAKE_CXX_COMPILER_ID MATCHES "Clang") +- ++endif() diff --git a/external/snappy/patches/1.1.9-0003-fix-clobber-list-older-llvm.patch b/external/snappy/patches/1.1.9-0003-fix-clobber-list-older-llvm.patch new file mode 100644 index 00000000000..84bc674fdd5 --- /dev/null +++ b/external/snappy/patches/1.1.9-0003-fix-clobber-list-older-llvm.patch @@ -0,0 +1,12 @@ +asm clobbers do not work for clang < 9 and apple-clang < 11 (found by SpaceIm) +--- snappy.cc ++++ snappy.cc +@@ -1026,7 +1026,7 @@ + size_t literal_len = *tag >> 2; + size_t tag_type = *tag; + bool is_literal; +-#if defined(__GNUC__) && defined(__x86_64__) ++#if defined(__GNUC__) && defined(__x86_64__) && ( (!defined(__clang__) && !defined(__APPLE__)) || (!defined(__APPLE__) && defined(__clang__) && (__clang_major__ >= 9)) || (defined(__APPLE__) && defined(__clang__) && (__clang_major__ > 11)) ) + // TODO clang misses the fact that the (c & 3) already correctly + // sets the zero flag. + asm("and $3, %k[tag_type]\n\t" diff --git a/external/snappy/patches/1.1.9-0004-rtti-by-default.patch b/external/snappy/patches/1.1.9-0004-rtti-by-default.patch new file mode 100644 index 00000000000..c353a489d0e --- /dev/null +++ b/external/snappy/patches/1.1.9-0004-rtti-by-default.patch @@ -0,0 +1,20 @@ +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -53,8 +53,6 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + add_definitions(-D_HAS_EXCEPTIONS=0) + + # Disable RTTI. +- string(REGEX REPLACE "/GR" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") +- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /GR-") + else(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + # Use -Wall for clang and gcc. + if(NOT CMAKE_CXX_FLAGS MATCHES "-Wall") +@@ -78,8 +76,6 @@ endif() + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions") + + # Disable RTTI. 
+- string(REGEX REPLACE "-frtti" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") +- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti") + endif(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + + # BUILD_SHARED_LIBS is a standard CMake variable, but we declare it here to make From caf4827c0b47fa04b3c25c5ab0de42e1c3db7715 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Fri, 24 Feb 2023 08:41:28 -0800 Subject: [PATCH 111/508] Undo API changes introduced in XRPFees: (#4429) * Original changes: https://github.com/XRPLF/rippled/commit/e4b17d1cf2c43e33128dd70b6036fe10f38b0c0d * Resolve #4425 --- src/ripple/app/misc/NetworkOPs.cpp | 7 +++---- src/ripple/protocol/jss.h | 3 --- src/test/rpc/Subscribe_test.cpp | 19 ++----------------- 3 files changed, 5 insertions(+), 24 deletions(-) diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index 610b8e71adc..d6816b1d420 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -2184,16 +2184,15 @@ NetworkOPsImp::pubValidation(std::shared_ptr const& val) // simplifies later operations) if (auto const baseFeeXRP = ~val->at(~sfBaseFeeDrops); baseFeeXRP && baseFeeXRP->native()) - jvObj[jss::base_fee_drops] = baseFeeXRP->xrp().jsonClipped(); + jvObj[jss::base_fee] = baseFeeXRP->xrp().jsonClipped(); if (auto const reserveBaseXRP = ~val->at(~sfReserveBaseDrops); reserveBaseXRP && reserveBaseXRP->native()) - jvObj[jss::reserve_base_drops] = - reserveBaseXRP->xrp().jsonClipped(); + jvObj[jss::reserve_base] = reserveBaseXRP->xrp().jsonClipped(); if (auto const reserveIncXRP = ~val->at(~sfReserveIncrementDrops); reserveIncXRP && reserveIncXRP->native()) - jvObj[jss::reserve_inc_drops] = reserveIncXRP->xrp().jsonClipped(); + jvObj[jss::reserve_inc] = reserveIncXRP->xrp().jsonClipped(); for (auto i = mStreamMaps[sValidations].begin(); i != mStreamMaps[sValidations].end();) diff --git a/src/ripple/protocol/jss.h b/src/ripple/protocol/jss.h index 01e30dd9327..1c5bf8463b0 100644 --- a/src/ripple/protocol/jss.h +++ b/src/ripple/protocol/jss.h @@ -151,7 +151,6 @@ JSS(balance); // out: AccountLines JSS(balances); // out: GatewayBalances JSS(base); // out: LogLevel JSS(base_fee); // out: NetworkOPs -JSS(base_fee_drops); // out: NetworkOPs JSS(base_fee_xrp); // out: NetworkOPs JSS(bids); // out: Subscribe JSS(binary); // in: AccountTX, LedgerEntry, @@ -496,10 +495,8 @@ JSS(request); // RPC JSS(requested); // out: Manifest JSS(reservations); // out: Reservations JSS(reserve_base); // out: NetworkOPs -JSS(reserve_base_drops); // out: NetworkOPs JSS(reserve_base_xrp); // out: NetworkOPs JSS(reserve_inc); // out: NetworkOPs -JSS(reserve_inc_drops); // out: NetworkOPs JSS(reserve_inc_xrp); // out: NetworkOPs JSS(response); // websocket JSS(result); // RPC diff --git a/src/test/rpc/Subscribe_test.cpp b/src/test/rpc/Subscribe_test.cpp index 783f5eb7e38..3bb0cce611c 100644 --- a/src/test/rpc/Subscribe_test.cpp +++ b/src/test/rpc/Subscribe_test.cpp @@ -411,25 +411,10 @@ class Subscribe_test : public beast::unit_test::suite if (jv.isMember(jss::server_version) != isFlagLedger) return false; - bool xrpFees = env.closed()->rules().enabled(featureXRPFees); - if ((!xrpFees && - jv.isMember(jss::reserve_base) != isFlagLedger) || - (xrpFees && jv.isMember(jss::reserve_base))) + if (jv.isMember(jss::reserve_base) != isFlagLedger) return false; - if ((!xrpFees && - jv.isMember(jss::reserve_inc) != isFlagLedger) || - (xrpFees && jv.isMember(jss::reserve_inc))) - return false; - - if ((xrpFees && - jv.isMember(jss::reserve_base_drops) != 
isFlagLedger) || - (!xrpFees && jv.isMember(jss::reserve_base_drops))) - return false; - - if ((xrpFees && - jv.isMember(jss::reserve_inc_drops) != isFlagLedger) || - (!xrpFees && jv.isMember(jss::reserve_inc_drops))) + if (jv.isMember(jss::reserve_inc) != isFlagLedger) return false; return true; From 74594d5348c22276611cf7f70f560d70534ab44e Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Tue, 28 Feb 2023 14:12:28 -0800 Subject: [PATCH 112/508] Disable duplicate detector: (#4438) Partially revert the functionality introduced with #4195 / 5a15229 (part of 1.10.0-b1). Acknowledgements: Aaron Hook for responsibly disclosing this issue. Bug Bounties and Responsible Disclosures: We welcome reviews of the rippled code and urge researchers to responsibly disclose any issues they may find. To report a bug, please send a detailed report to: bugs@xrpl.org --------- Co-authored-by: Nik Bougalis --- src/ripple/overlay/impl/Handshake.cpp | 33 +++------------------------ 1 file changed, 3 insertions(+), 30 deletions(-) diff --git a/src/ripple/overlay/impl/Handshake.cpp b/src/ripple/overlay/impl/Handshake.cpp index 793dec19eef..9fe025787f8 100644 --- a/src/ripple/overlay/impl/Handshake.cpp +++ b/src/ripple/overlay/impl/Handshake.cpp @@ -301,36 +301,6 @@ verifyHandshake( throw std::runtime_error("Bad node public key"); }(); - if (publicKey == app.nodeIdentity().first) - { - auto const peerInstanceID = [&headers]() { - std::uint64_t iid = 0; - - if (auto const iter = headers.find("Instance-Cookie"); - iter != headers.end()) - { - if (!beast::lexicalCastChecked(iid, iter->value().to_string())) - throw std::runtime_error("Invalid instance cookie"); - - if (iid == 0) - throw std::runtime_error("Invalid instance cookie"); - } - - return iid; - }(); - - // Attempt to differentiate self-connections as opposed to accidental - // node identity reuse caused by accidental misconfiguration. When we - // detect this, we stop the process and log an error message. 
- if (peerInstanceID != app.instanceID()) - { - app.signalStop("Remote server is using our node identity"); - throw std::runtime_error("Node identity reuse detected"); - } - - throw std::runtime_error("Self connection"); - } - // This check gets two birds with one stone: // // 1) it verifies that the node we are talking to has access to the @@ -349,6 +319,9 @@ verifyHandshake( throw std::runtime_error("Failed to verify session"); } + if (publicKey == app.nodeIdentity().first) + throw std::runtime_error("Self connection"); + if (auto const iter = headers.find("Local-IP"); iter != headers.end()) { boost::system::error_code ec; From e13676f709c5f2edb21bf8527356dc60a0090970 Mon Sep 17 00:00:00 2001 From: Michael Legleux Date: Tue, 28 Feb 2023 14:17:07 -0800 Subject: [PATCH 113/508] Update package building scripts and images to use Conan (#4435) * Set version to 1.10.0-rc3 * Test on Fedora 37 * Dependency builds are handled by Conan --- .gitignore | 3 + Builds/CMake/RippledRelease.cmake | 39 +--- Builds/containers/centos-builder/Dockerfile | 29 +-- .../containers/centos-builder/centos_setup.sh | 25 +-- Builds/containers/centos-builder/extras.sh | 33 ---- Builds/containers/gitlab-ci/pkgbuild.yml | 164 ++-------------- .../gitlab-ci/push_to_artifactory.sh | 9 +- Builds/containers/gitlab-ci/smoketest.sh | 6 +- .../containers/packaging/dpkg/build_dpkg.sh | 10 +- .../containers/packaging/dpkg/debian/control | 8 - .../packaging/dpkg/debian/rippled.install | 2 +- Builds/containers/packaging/dpkg/debian/rules | 45 +++-- Builds/containers/packaging/rpm/build_rpm.sh | 7 +- Builds/containers/packaging/rpm/rippled.spec | 70 ++++++- Builds/containers/shared/build_deps.sh | 147 -------------- Builds/containers/shared/install_boost.sh | 93 --------- Builds/containers/shared/install_cmake.sh | 11 +- Builds/containers/ubuntu-builder/Dockerfile | 20 +- .../containers/ubuntu-builder/ubuntu_setup.sh | 181 +++++------------- src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 20 files changed, 211 insertions(+), 693 deletions(-) delete mode 100755 Builds/containers/centos-builder/extras.sh delete mode 100755 Builds/containers/shared/build_deps.sh delete mode 100755 Builds/containers/shared/install_boost.sh diff --git a/.gitignore b/.gitignore index 5c8b2443700..352d68e735e 100644 --- a/.gitignore +++ b/.gitignore @@ -106,3 +106,6 @@ compile_commands.json .clangd packages pkg_out +pkg +CMakeUserPresets.json +bld.rippled/ diff --git a/Builds/CMake/RippledRelease.cmake b/Builds/CMake/RippledRelease.cmake index 2b1cf3666fc..8e4dcfcd6d0 100644 --- a/Builds/CMake/RippledRelease.cmake +++ b/Builds/CMake/RippledRelease.cmake @@ -2,12 +2,6 @@ package/container targets - (optional) #]===================================================================] -# Early return if the `containers` directory is missing, -# e.g. when we are building a Conan package. 
-if(NOT EXISTS containers) - return() -endif() - if (is_root_project) if (NOT DOCKER) find_program (DOCKER docker) @@ -22,19 +16,7 @@ if (is_root_project) message (STATUS "using [${container_label}] as build container tag...") file (MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/packages) - if (is_linux) - execute_process (COMMAND id -u - OUTPUT_VARIABLE DOCKER_USER_ID - OUTPUT_STRIP_TRAILING_WHITESPACE) - message (STATUS "docker local user id: ${DOCKER_USER_ID}") - execute_process (COMMAND id -g - OUTPUT_VARIABLE DOCKER_GROUP_ID - OUTPUT_STRIP_TRAILING_WHITESPACE) - message (STATUS "docker local group id: ${DOCKER_GROUP_ID}") - endif () - if (DOCKER_USER_ID AND DOCKER_GROUP_ID) - set(map_user TRUE) - endif () + #[===================================================================[ rpm #]===================================================================] @@ -42,7 +24,7 @@ if (is_root_project) docker build --pull --build-arg GIT_COMMIT=${commit_hash} - -t rippled-rpm-builder:${container_label} + -t rippleci/rippled-rpm-builder:${container_label} $<$:--cache-from=${rpm_cache_from}> -f centos-builder/Dockerfile . WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/Builds/containers @@ -52,12 +34,10 @@ if (is_root_project) SOURCES Builds/containers/centos-builder/Dockerfile Builds/containers/centos-builder/centos_setup.sh - Builds/containers/centos-builder/extras.sh Builds/containers/shared/update-rippled.sh Builds/containers/shared/update_sources.sh Builds/containers/shared/rippled.service Builds/containers/shared/rippled-reporting.service - Builds/containers/shared/build_deps.sh Builds/containers/packaging/rpm/rippled.spec Builds/containers/packaging/rpm/build_rpm.sh Builds/containers/packaging/rpm/50-rippled.preset @@ -69,8 +49,7 @@ if (is_root_project) docker run -v ${CMAKE_CURRENT_SOURCE_DIR}:/opt/rippled_bld/pkg/rippled -v ${CMAKE_CURRENT_BINARY_DIR}/packages:/opt/rippled_bld/pkg/out - "$<$:--volume=/etc/passwd:/etc/passwd;--volume=/etc/group:/etc/group;--user=${DOCKER_USER_ID}:${DOCKER_GROUP_ID}>" - -t rippled-rpm-builder:${container_label} + -t rippleci/rippled-rpm-builder:${container_label} /bin/bash -c "cp -fpu rippled/Builds/containers/packaging/rpm/build_rpm.sh . && ./build_rpm.sh" VERBATIM USES_TERMINAL @@ -92,9 +71,9 @@ if (is_root_project) add_custom_target (dpkg_container docker build --pull - --build-arg DIST_TAG=18.04 + --build-arg DIST_TAG=20.04 --build-arg GIT_COMMIT=${commit_hash} - -t rippled-dpkg-builder:${container_label} + -t rippleci/rippled-dpkg-builder:${container_label} $<$:--cache-from=${dpkg_cache_from}> -f ubuntu-builder/Dockerfile . 
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/Builds/containers @@ -128,10 +107,8 @@ if (is_root_project) Builds/containers/ubuntu-builder/ubuntu_setup.sh bin/getRippledInfo Builds/containers/shared/install_cmake.sh - Builds/containers/shared/install_boost.sh Builds/containers/shared/update-rippled.sh Builds/containers/shared/update_sources.sh - Builds/containers/shared/build_deps.sh Builds/containers/shared/rippled.service Builds/containers/shared/rippled-reporting.service Builds/containers/shared/rippled-logrotate @@ -142,8 +119,7 @@ if (is_root_project) docker run -v ${CMAKE_CURRENT_SOURCE_DIR}:/opt/rippled_bld/pkg/rippled -v ${CMAKE_CURRENT_BINARY_DIR}/packages:/opt/rippled_bld/pkg/out - "$<$:--volume=/etc/passwd:/etc/passwd;--volume=/etc/group:/etc/group;--user=${DOCKER_USER_ID}:${DOCKER_GROUP_ID}>" - -t rippled-dpkg-builder:${container_label} + -t rippleci/rippled-dpkg-builder:${container_label} /bin/bash -c "cp -fpu rippled/Builds/containers/packaging/dpkg/build_dpkg.sh . && ./build_dpkg.sh" VERBATIM USES_TERMINAL @@ -183,7 +159,7 @@ if (is_root_project) add_custom_target (ci_container docker build --pull - --build-arg DIST_TAG=18.04 + --build-arg DIST_TAG=20.04 --build-arg GIT_COMMIT=${commit_hash} --build-arg CI_USE=true -t rippled-ci-builder:${container_label} @@ -196,7 +172,6 @@ if (is_root_project) SOURCES Builds/containers/ubuntu-builder/Dockerfile Builds/containers/ubuntu-builder/ubuntu_setup.sh - Builds/containers/shared/build_deps.sh ) exclude_from_default (ci_container) else () diff --git a/Builds/containers/centos-builder/Dockerfile b/Builds/containers/centos-builder/Dockerfile index 26da564b525..ef5f80f7bfd 100644 --- a/Builds/containers/centos-builder/Dockerfile +++ b/Builds/containers/centos-builder/Dockerfile @@ -1,43 +1,26 @@ -FROM centos:7 +FROM rippleci/centos:7 ARG GIT_COMMIT=unknown ARG CI_USE=false LABEL git-commit=$GIT_COMMIT COPY centos-builder/centos_setup.sh /tmp/ -COPY shared/build_deps.sh /tmp/ COPY shared/install_cmake.sh /tmp/ -COPY centos-builder/extras.sh /tmp/ -COPY shared/install_boost.sh /tmp/ RUN chmod +x /tmp/centos_setup.sh && \ - chmod +x /tmp/build_deps.sh && \ - chmod +x /tmp/install_boost.sh && \ - chmod +x /tmp/install_cmake.sh && \ - chmod +x /tmp/extras.sh + chmod +x /tmp/install_cmake.sh RUN /tmp/centos_setup.sh -RUN /tmp/install_cmake.sh 3.16.1 /opt/local/cmake-3.16 +RUN /tmp/install_cmake.sh 3.16.3 /opt/local/cmake-3.16 RUN ln -s /opt/local/cmake-3.16 /opt/local/cmake ENV PATH="/opt/local/cmake/bin:$PATH" -# also install min supported cmake for testing -RUN if [ "${CI_USE}" = true ] ; then /tmp/install_cmake.sh 3.9.0 /opt/local/cmake-3.9; fi +# TODO: Install latest CMake for testing +RUN if [ "${CI_USE}" = true ] ; then /tmp/install_cmake.sh 3.16.3 /opt/local/cmake-3.16; fi -RUN source scl_source enable devtoolset-7 python27 && \ - /tmp/build_deps.sh -ENV BOOST_ROOT="/opt/local/boost/_INSTALLED_" -ENV PLANTUML_JAR="/opt/plantuml/plantuml.jar" -ENV OPENSSL_ROOT="/opt/local/openssl" -ENV GDB_ROOT="/opt/local/gdb" -RUN source scl_source enable devtoolset-7 python27 && \ - /tmp/extras.sh - -# prep files for package building RUN mkdir -m 777 -p /opt/rippled_bld/pkg + WORKDIR /opt/rippled_bld/pkg RUN mkdir -m 777 ./rpmbuild RUN mkdir -m 777 ./rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS} COPY packaging/rpm/build_rpm.sh ./ CMD ./build_rpm.sh - - diff --git a/Builds/containers/centos-builder/centos_setup.sh b/Builds/containers/centos-builder/centos_setup.sh index a81226c2dd2..ea110058bb8 100755 --- a/Builds/containers/centos-builder/centos_setup.sh 
+++ b/Builds/containers/centos-builder/centos_setup.sh @@ -7,31 +7,16 @@ yum -y upgrade yum -y update yum -y install epel-release centos-release-scl yum -y install \ - wget curl time gcc-c++ time yum-utils autoconf automake pkgconfig libtool \ + wget curl time gcc-c++ yum-utils autoconf automake pkgconfig libtool \ libstdc++-static rpm-build gnupg which make cmake \ - devtoolset-7 devtoolset-7-gdb devtoolset-7-libasan-devel devtoolset-7-libtsan-devel devtoolset-7-libubsan-devel \ - devtoolset-8 devtoolset-8-gdb devtoolset-8-binutils devtoolset-8-libstdc++-devel \ - devtoolset-8-libasan-devel devtoolset-8-libtsan-devel devtoolset-8-libubsan-devel devtoolset-8-liblsan-devel \ + devtoolset-11 devtoolset-11-gdb devtoolset-11-binutils devtoolset-11-libstdc++-devel \ + devtoolset-11-libasan-devel devtoolset-11-libtsan-devel devtoolset-11-libubsan-devel devtoolset-11-liblsan-devel \ flex flex-devel bison bison-devel parallel \ ncurses ncurses-devel ncurses-libs graphviz graphviz-devel \ lzip p7zip bzip2 bzip2-devel lzma-sdk lzma-sdk-devel xz-devel \ zlib zlib-devel zlib-static texinfo openssl openssl-static \ jemalloc jemalloc-devel \ libicu-devel htop \ - python27-python rh-python35-python \ - python-devel python27-python-devel rh-python35-python-devel \ - python27 rh-python35 \ + rh-python38 \ ninja-build git svn \ - swig perl-Digest-MD5 python2-pip - -if [ "${CI_USE}" = true ] ; then - # TODO need permanent link - yum -y install ftp://ftp.pbone.net/mirror/archive.fedoraproject.org/fedora-secondary/updates/26/i386/Packages/p/python2-six-1.10.0-9.fc26.noarch.rpm - - yum -y install \ - llvm-toolset-7 llvm-toolset-7-runtime llvm-toolset-7-build llvm-toolset-7-clang \ - llvm-toolset-7-clang-analyzer llvm-toolset-7-clang-devel llvm-toolset-7-clang-libs \ - llvm-toolset-7-clang-tools-extra llvm-toolset-7-compiler-rt llvm-toolset-7-lldb \ - llvm-toolset-7-lldb-devel llvm-toolset-7-python-lldb - -fi + swig perl-Digest-MD5 diff --git a/Builds/containers/centos-builder/extras.sh b/Builds/containers/centos-builder/extras.sh deleted file mode 100755 index 8db373d0ca6..00000000000 --- a/Builds/containers/centos-builder/extras.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bash -set -ex - -if [ "${CI_USE}" = true ] ; then - cd /tmp - wget https://ftp.gnu.org/gnu/gdb/gdb-8.3.1.tar.xz - tar xf gdb-8.3.1.tar.xz - cd gdb-8.3 - ./configure CFLAGS="-w -O2" CXXFLAGS="-std=gnu++11 -g -O2 -w" --prefix=/opt/local/gdb-8.3 - make -j$(nproc) - make install - ln -s /opt/local/gdb-8.3 /opt/local/gdb - cd .. - rm -f gdb-8.3.tar.xz - rm -rf gdb-8.3 - - # clang from source - cd /tmp - git clone https://github.com/llvm/llvm-project.git - cd llvm-project - git checkout llvmorg-9.0.0 - INSTALL=/opt/llvm-9/ - mkdir mybuilddir && cd mybuilddir - # TODO figure out necessary options - cmake ../llvm -G Ninja \ - -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_ENABLE_PROJECTS='clang;clang-tools-extra;libcxx;libcxxabi;lldb;compiler-rt;lld;polly' \ - -DCMAKE_INSTALL_PREFIX=${INSTALL} \ - -DLLVM_LIBDIR_SUFFIX=64 - cmake --build . 
--parallel --target install - cd /tmp - rm -rf llvm-project -fi diff --git a/Builds/containers/gitlab-ci/pkgbuild.yml b/Builds/containers/gitlab-ci/pkgbuild.yml index 53828ab779c..8a602c84cdc 100644 --- a/Builds/containers/gitlab-ci/pkgbuild.yml +++ b/Builds/containers/gitlab-ci/pkgbuild.yml @@ -11,11 +11,11 @@ variables: # these containers are built manually using the rippled # cmake build (container targets) and tagged/pushed so they # can be used here - RPM_CONTAINER_TAG: "2020-02-10" - RPM_CONTAINER_NAME: "rippled-rpm-builder" + RPM_CONTAINER_TAG: "2023-02-13" + RPM_CONTAINER_NAME: "rippleci/rippled-rpm-builder" RPM_CONTAINER_FULLNAME: "${RPM_CONTAINER_NAME}:${RPM_CONTAINER_TAG}" - DPKG_CONTAINER_TAG: "2020-02-10" - DPKG_CONTAINER_NAME: "rippled-dpkg-builder" + DPKG_CONTAINER_TAG: "2023-02-13" + DPKG_CONTAINER_NAME: "rippleci/rippled-dpkg-builder" DPKG_CONTAINER_FULLNAME: "${DPKG_CONTAINER_NAME}:${DPKG_CONTAINER_TAG}" ARTIFACTORY_HOST: "artifactory.ops.ripple.com" ARTIFACTORY_HUB: "${ARTIFACTORY_HOST}:6555" @@ -83,6 +83,7 @@ stages: ######################################################################### rpm_build: + timeout: "1h 30m" stage: build_packages <<: *dind_param artifacts: @@ -92,6 +93,7 @@ rpm_build: - . ./Builds/containers/gitlab-ci/build_package.sh rpm dpkg_build: + timeout: "1h 30m" stage: build_packages <<: *dind_param artifacts: @@ -179,94 +181,54 @@ centos_7_smoketest: stage: smoketest dependencies: - rpm_build - - rpm_sign image: name: artifactory.ops.ripple.com/centos:7 <<: *run_local_smoketest -# TODO: Remove "allow_failure" when tests fixed rocky_8_smoketest: stage: smoketest dependencies: - rpm_build - - rpm_sign image: name: rockylinux/rockylinux:8 <<: *run_local_smoketest - allow_failure: true -fedora_34_smoketest: +fedora_37_smoketest: stage: smoketest dependencies: - rpm_build - - rpm_sign image: - name: artifactory.ops.ripple.com/fedora:34 + name: artifactory.ops.ripple.com/fedora:37 <<: *run_local_smoketest - allow_failure: true -fedora_35_smoketest: +fedora_38_smoketest: stage: smoketest dependencies: - rpm_build - - rpm_sign - image: - name: artifactory.ops.ripple.com/fedora:35 - <<: *run_local_smoketest - allow_failure: true - -ubuntu_18_smoketest: - stage: smoketest - dependencies: - - dpkg_build - - dpkg_sign image: - name: artifactory.ops.ripple.com/ubuntu:18.04 + name: artifactory.ops.ripple.com/fedora:38 <<: *run_local_smoketest ubuntu_20_smoketest: stage: smoketest dependencies: - dpkg_build - - dpkg_sign image: name: artifactory.ops.ripple.com/ubuntu:20.04 <<: *run_local_smoketest -# TODO: remove "allow_failure" when 22.04 released in 4/2022... 
ubuntu_22_smoketest: stage: smoketest dependencies: - dpkg_build - - dpkg_sign image: name: artifactory.ops.ripple.com/ubuntu:22.04 <<: *run_local_smoketest - allow_failure: true - -debian_9_smoketest: - stage: smoketest - dependencies: - - dpkg_build - - dpkg_sign - image: - name: artifactory.ops.ripple.com/debian:9 - <<: *run_local_smoketest - -debian_10_smoketest: - stage: smoketest - dependencies: - - dpkg_build - - dpkg_sign - image: - name: artifactory.ops.ripple.com/debian:10 - <<: *run_local_smoketest debian_11_smoketest: stage: smoketest dependencies: - dpkg_build - - dpkg_sign image: name: artifactory.ops.ripple.com/debian:11 <<: *run_local_smoketest @@ -377,43 +339,28 @@ rocky_8_verify_repo_test: - rpm_sign <<: *only_primary <<: *run_repo_smoketest - allow_failure: true -fedora_34_verify_repo_test: +fedora_37_verify_repo_test: stage: verify_from_test variables: RPM_REPO: "rippled-rpm-test-mirror" image: - name: artifactory.ops.ripple.com/fedora:34 + name: artifactory.ops.ripple.com/fedora:37 dependencies: - rpm_sign <<: *only_primary <<: *run_repo_smoketest - allow_failure: true -fedora_35_verify_repo_test: +fedora_38_verify_repo_test: stage: verify_from_test variables: RPM_REPO: "rippled-rpm-test-mirror" image: - name: artifactory.ops.ripple.com/fedora:35 + name: artifactory.ops.ripple.com/fedora:38 dependencies: - rpm_sign <<: *only_primary <<: *run_repo_smoketest - allow_failure: true - -ubuntu_18_verify_repo_test: - stage: verify_from_test - variables: - DISTRO: "bionic" - DEB_REPO: "rippled-deb-test-mirror" - image: - name: artifactory.ops.ripple.com/ubuntu:18.04 - dependencies: - - dpkg_sign - <<: *only_primary - <<: *run_repo_smoketest ubuntu_20_verify_repo_test: stage: verify_from_test @@ -427,7 +374,6 @@ ubuntu_20_verify_repo_test: <<: *only_primary <<: *run_repo_smoketest -# TODO: remove "allow_failure" when 22.04 released in 4/2022... 
ubuntu_22_verify_repo_test: stage: verify_from_test variables: @@ -439,31 +385,6 @@ ubuntu_22_verify_repo_test: - dpkg_sign <<: *only_primary <<: *run_repo_smoketest - allow_failure: true - -debian_9_verify_repo_test: - stage: verify_from_test - variables: - DISTRO: "stretch" - DEB_REPO: "rippled-deb-test-mirror" - image: - name: artifactory.ops.ripple.com/debian:9 - dependencies: - - dpkg_sign - <<: *only_primary - <<: *run_repo_smoketest - -debian_10_verify_repo_test: - stage: verify_from_test - variables: - DISTRO: "buster" - DEB_REPO: "rippled-deb-test-mirror" - image: - name: artifactory.ops.ripple.com/debian:10 - dependencies: - - dpkg_sign - <<: *only_primary - <<: *run_repo_smoketest debian_11_verify_repo_test: stage: verify_from_test @@ -542,53 +463,38 @@ centos_7_verify_repo_prod: <<: *only_primary <<: *run_repo_smoketest -rocky_8_verify_repo_test: - stage: verify_from_test +rocky_8_verify_repo_prod: + stage: verify_from_prod variables: - RPM_REPO: "rippled-rpm-test-mirror" + RPM_REPO: "rippled-rpm" image: name: rockylinux/rockylinux:8 dependencies: - rpm_sign <<: *only_primary <<: *run_repo_smoketest - allow_failure: true -fedora_34_verify_repo_prod: +fedora_37_verify_repo_prod: stage: verify_from_prod variables: RPM_REPO: "rippled-rpm" image: - name: artifactory.ops.ripple.com/fedora:34 + name: artifactory.ops.ripple.com/fedora:37 dependencies: - rpm_sign <<: *only_primary <<: *run_repo_smoketest - allow_failure: true -fedora_35_verify_repo_prod: +fedora_38_verify_repo_prod: stage: verify_from_prod variables: RPM_REPO: "rippled-rpm" image: - name: artifactory.ops.ripple.com/fedora:35 + name: artifactory.ops.ripple.com/fedora:38 dependencies: - rpm_sign <<: *only_primary <<: *run_repo_smoketest - allow_failure: true - -ubuntu_18_verify_repo_prod: - stage: verify_from_prod - variables: - DISTRO: "bionic" - DEB_REPO: "rippled-deb" - image: - name: artifactory.ops.ripple.com/ubuntu:18.04 - dependencies: - - dpkg_sign - <<: *only_primary - <<: *run_repo_smoketest ubuntu_20_verify_repo_prod: stage: verify_from_prod @@ -602,7 +508,6 @@ ubuntu_20_verify_repo_prod: <<: *only_primary <<: *run_repo_smoketest -# TODO: remove "allow_failure" when 22.04 released in 4/2022... ubuntu_22_verify_repo_prod: stage: verify_from_prod variables: @@ -614,31 +519,6 @@ ubuntu_22_verify_repo_prod: - dpkg_sign <<: *only_primary <<: *run_repo_smoketest - allow_failure: true - -debian_9_verify_repo_prod: - stage: verify_from_prod - variables: - DISTRO: "stretch" - DEB_REPO: "rippled-deb" - image: - name: artifactory.ops.ripple.com/debian:9 - dependencies: - - dpkg_sign - <<: *only_primary - <<: *run_repo_smoketest - -debian_10_verify_repo_prod: - stage: verify_from_prod - variables: - DISTRO: "buster" - DEB_REPO: "rippled-deb" - image: - name: artifactory.ops.ripple.com/debian:10 - dependencies: - - dpkg_sign - <<: *only_primary - <<: *run_repo_smoketest debian_11_verify_repo_prod: stage: verify_from_prod @@ -693,11 +573,9 @@ build_centos_container: <<: *dind_param script: - . ./Builds/containers/gitlab-ci/build_container.sh rpm - allow_failure: true build_ubuntu_container: stage: build_containers <<: *dind_param script: - . 
./Builds/containers/gitlab-ci/build_container.sh dpkg - allow_failure: true diff --git a/Builds/containers/gitlab-ci/push_to_artifactory.sh b/Builds/containers/gitlab-ci/push_to_artifactory.sh index fc60567056d..255f4247ae0 100644 --- a/Builds/containers/gitlab-ci/push_to_artifactory.sh +++ b/Builds/containers/gitlab-ci/push_to_artifactory.sh @@ -14,18 +14,17 @@ cd $TOPDIR cd build/dpkg/packages CURLARGS="-sk -X${action} -urippled:${ARTIFACTORY_DEPLOY_KEY_RIPPLED}" RIPPLED_PKG=$(ls rippled_*.deb) -RIPPLED_DEV_PKG=$(ls rippled-dev_*.deb) RIPPLED_REPORTING_PKG=$(ls rippled-reporting_*.deb) -RIPPLED_DBG_PKG=$(ls rippled-dbgsym_*.deb) -RIPPLED_REPORTING_DBG_PKG=$(ls rippled-reporting-dbgsym_*.deb) +RIPPLED_DBG_PKG=$(ls rippled-dbgsym_*.*deb) +RIPPLED_REPORTING_DBG_PKG=$(ls rippled-reporting-dbgsym_*.*deb) # TODO - where to upload src tgz? RIPPLED_SRC=$(ls rippled_*.orig.tar.gz) DEB_MATRIX=";deb.component=${COMPONENT};deb.architecture=amd64" -for dist in stretch buster bullseye bionic focal jammy; do +for dist in bullseye focal jammy; do DEB_MATRIX="${DEB_MATRIX};deb.distribution=${dist}" done echo "{ \"debs\": {" > "${TOPDIR}/files.info" -for deb in ${RIPPLED_PKG} ${RIPPLED_DEV_PKG} ${RIPPLED_DBG_PKG} ${RIPPLED_REPORTING_PKG} ${RIPPLED_REPORTING_DBG_PKG}; do +for deb in ${RIPPLED_PKG} ${RIPPLED_DBG_PKG} ${RIPPLED_REPORTING_PKG} ${RIPPLED_REPORTING_DBG_PKG}; do # first item doesn't get a comma separator if [ $deb != $RIPPLED_PKG ] ; then echo "," >> "${TOPDIR}/files.info" diff --git a/Builds/containers/gitlab-ci/smoketest.sh b/Builds/containers/gitlab-ci/smoketest.sh index 267c0aa7b53..32a8e3407f6 100644 --- a/Builds/containers/gitlab-ci/smoketest.sh +++ b/Builds/containers/gitlab-ci/smoketest.sh @@ -81,8 +81,10 @@ else elif [ "${install_from}" = "local" ] ; then # cached pkg install pkgs=("yum-utils openssl-static zlib-static") - if [ "$ID" = "rocky" ]; then - sed -i 's/enabled=0/enabled=1/g' /etc/yum.repos.d/Rocky-PowerTools.repo + if [[ "$ID" =~ rocky|fedora ]]; then + if [[ "$ID" =~ "rocky" ]]; then + sed -i 's/enabled=0/enabled=1/g' /etc/yum.repos.d/Rocky-PowerTools.repo + fi pkgs="${pkgs[@]/openssl-static}" fi yum install -y $pkgs diff --git a/Builds/containers/packaging/dpkg/build_dpkg.sh b/Builds/containers/packaging/dpkg/build_dpkg.sh index 3c73f1314e2..fb67ea2ee10 100755 --- a/Builds/containers/packaging/dpkg/build_dpkg.sh +++ b/Builds/containers/packaging/dpkg/build_dpkg.sh @@ -15,7 +15,7 @@ RIPPLED_DPKG_VERSION=$(echo "${RIPPLED_VERSION}" | sed 's!-!~!g') # TODO - decide how to handle the trailing/release # version here (hardcoded to 1). Does it ever need to change? RIPPLED_DPKG_FULL_VERSION="${RIPPLED_DPKG_VERSION}-1" - +git config --global --add safe.directory /opt/rippled_bld/pkg/rippled cd /opt/rippled_bld/pkg/rippled if [[ -n $(git status --porcelain) ]]; then git status @@ -45,8 +45,8 @@ CHANGELOG # PATH must be preserved for our more modern cmake in /opt/local # TODO : consider allowing lintian to run in future ? export DH_BUILD_DDEBS=1 -export CC=gcc-8 -export CXX=g++-8 +export CC=gcc-11 +export CXX=g++-11 debuild --no-lintian --preserve-envvar PATH --preserve-env -us -uc rc=$?; if [[ $rc != 0 ]]; then error "error building dpkg" @@ -54,7 +54,6 @@ fi cd .. 
# copy artifacts -cp rippled-dev_${RIPPLED_DPKG_FULL_VERSION}_amd64.deb ${PKG_OUTDIR} cp rippled-reporting_${RIPPLED_DPKG_FULL_VERSION}_amd64.deb ${PKG_OUTDIR} cp rippled_${RIPPLED_DPKG_FULL_VERSION}_amd64.deb ${PKG_OUTDIR} cp rippled_${RIPPLED_DPKG_FULL_VERSION}.dsc ${PKG_OUTDIR} @@ -84,15 +83,12 @@ DBG_SHA256=$(cat shasums | \ grep "rippled-dbgsym_${RIPPLED_DPKG_VERSION}-1_amd64.*" | cut -d " " -f 1) REPORTING_DBG_SHA256=$(cat shasums | \ grep "rippled-reporting-dbgsym_${RIPPLED_DPKG_VERSION}-1_amd64.*" | cut -d " " -f 1) -DEV_SHA256=$(cat shasums | \ - grep "rippled-dev_${RIPPLED_DPKG_VERSION}-1_amd64.deb" | cut -d " " -f 1) REPORTING_SHA256=$(cat shasums | \ grep "rippled-reporting_${RIPPLED_DPKG_VERSION}-1_amd64.deb" | cut -d " " -f 1) SRC_SHA256=$(cat shasums | \ grep "rippled_${RIPPLED_DPKG_VERSION}.orig.tar.gz" | cut -d " " -f 1) echo "deb_sha256=${DEB_SHA256}" >> ${PKG_OUTDIR}/build_vars echo "dbg_sha256=${DBG_SHA256}" >> ${PKG_OUTDIR}/build_vars -echo "dev_sha256=${DEV_SHA256}" >> ${PKG_OUTDIR}/build_vars echo "reporting_sha256=${REPORTING_SHA256}" >> ${PKG_OUTDIR}/build_vars echo "reporting_dbg_sha256=${REPORTING_DBG_SHA256}" >> ${PKG_OUTDIR}/build_vars echo "src_sha256=${SRC_SHA256}" >> ${PKG_OUTDIR}/build_vars diff --git a/Builds/containers/packaging/dpkg/debian/control b/Builds/containers/packaging/dpkg/debian/control index 7e55143334c..b840eb228be 100644 --- a/Builds/containers/packaging/dpkg/debian/control +++ b/Builds/containers/packaging/dpkg/debian/control @@ -17,11 +17,3 @@ Architecture: any Multi-Arch: foreign Depends: ${misc:Depends}, ${shlibs:Depends} Description: rippled reporting daemon - -Package: rippled-dev -Section: devel -Recommends: rippled (= ${binary:Version}) -Architecture: any -Multi-Arch: same -Depends: ${misc:Depends}, ${shlibs:Depends}, libprotobuf-dev, libprotoc-dev, protobuf-compiler -Description: development files for applications using xrpl core library (serialize + sign) diff --git a/Builds/containers/packaging/dpkg/debian/rippled.install b/Builds/containers/packaging/dpkg/debian/rippled.install index ac9d946294e..3ce9f60fb3a 100644 --- a/Builds/containers/packaging/dpkg/debian/rippled.install +++ b/Builds/containers/packaging/dpkg/debian/rippled.install @@ -5,4 +5,4 @@ opt/ripple/bin/getRippledInfo opt/ripple/etc/rippled.cfg opt/ripple/etc/validators.txt opt/ripple/etc/update-rippled-cron -etc/logrotate.d/rippled \ No newline at end of file +etc/logrotate.d/rippled diff --git a/Builds/containers/packaging/dpkg/debian/rules b/Builds/containers/packaging/dpkg/debian/rules index f7c8123bd1c..83af329cd6d 100755 --- a/Builds/containers/packaging/dpkg/debian/rules +++ b/Builds/containers/packaging/dpkg/debian/rules @@ -16,31 +16,46 @@ override_dh_systemd_start: override_dh_auto_configure: env - rm -rf bld && mkdir -p bld/rippled - cd bld/rippled && \ - cmake ../.. -G Ninja \ - -DCMAKE_INSTALL_PREFIX=/opt/ripple \ + rm -rf bld + + conan export external/snappy snappy/1.1.9@ + + conan install . \ + --install-folder bld/rippled \ + --build missing \ + --settings build_type=Release + + cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \ + -G Ninja \ -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PREFIX=/opt/ripple \ -Dstatic=ON \ -Dunity=OFF \ + -DCMAKE_VERBOSE_MAKEFILE=ON \ -Dvalidator_keys=ON \ - -Dunity=OFF \ - -DCMAKE_VERBOSE_MAKEFILE=OFF + -B bld/rippled + conan install . 
\ + --install-folder bld/rippled-reporting \ + --build missing \ + --settings build_type=Release \ + --settings compiler.cppstd=17 \ + --options reporting=True - cmake -S . \ - -B bld/rippled-reporting \ + cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \ -G Ninja \ - -DCMAKE_INSTALL_PREFIX=/opt/rippled-reporting \ -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PREFIX=/opt/rippled-reporting \ -Dstatic=ON \ -Dunity=OFF \ - -DCMAKE_VERBOSE_MAKEFILE=OFF \ - -Dreporting=ON + -DCMAKE_VERBOSE_MAKEFILE=ON \ + -Dreporting=ON \ + -B bld/rippled-reporting override_dh_auto_build: - cmake --build bld/rippled --target rippled --target validator-keys --parallel - cmake --build bld/rippled-reporting --target rippled --parallel + cmake --build bld/rippled --target rippled --target validator-keys -j${nproc} + + cmake --build bld/rippled-reporting --target rippled -j${nproc} override_dh_auto_install: cmake --install bld/rippled --prefix debian/tmp/opt/ripple @@ -52,10 +67,10 @@ override_dh_auto_install: rm -rf debian/tmp/opt/ripple/lib64/cmake/date mkdir -p debian/tmp/opt/rippled-reporting/etc + mkdir -p debian/tmp/opt/rippled-reporting/bin cp cfg/validators-example.txt debian/tmp/opt/rippled-reporting/etc/validators.txt - install -D bld/rippled/validator-keys/validator-keys debian/tmp/opt/rippled-reporting/bin/validator-keys sed -E 's/rippled?/rippled-reporting/g' Builds/containers/shared/update-rippled.sh > debian/tmp/opt/rippled-reporting/bin/update-rippled-reporting.sh sed -E 's/rippled?/rippled-reporting/g' bin/getRippledInfo > debian/tmp/opt/rippled-reporting/bin/getRippledReportingInfo sed -E 's/rippled?/rippled-reporting/g' Builds/containers/shared/update-rippled-cron > debian/tmp/opt/rippled-reporting/etc/update-rippled-reporting-cron - sed -E 's/rippled?/rippled-reporting/g' Builds/containers/shared/rippled-logrotate > debian/tmp/etc/logrotate.d/rippled-reporting \ No newline at end of file + sed -E 's/rippled?/rippled-reporting/g' Builds/containers/shared/rippled-logrotate > debian/tmp/etc/logrotate.d/rippled-reporting diff --git a/Builds/containers/packaging/rpm/build_rpm.sh b/Builds/containers/packaging/rpm/build_rpm.sh index f0141e9ff53..57e576b510b 100755 --- a/Builds/containers/packaging/rpm/build_rpm.sh +++ b/Builds/containers/packaging/rpm/build_rpm.sh @@ -29,17 +29,20 @@ if [[ $RPM_PATCH ]]; then fi cd /opt/rippled_bld/pkg/rippled + if [[ -n $(git status --porcelain) ]]; then git status error "Unstaged changes in this repo - please commit first" fi + git archive --format tar.gz --prefix rippled/ -o ../rpmbuild/SOURCES/rippled.tar.gz HEAD -# TODO include validator-keys sources + cd .. 
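The packaging rework in this patch replaces the hand-rolled dependency builds with a common two-step flow: Conan resolves and builds the C++ dependencies and writes out a CMake toolchain file, and CMake is then configured against that toolchain (this is the pattern used in the reworked `debian/rules` above and again in `rippled.spec` below). The sketch that follows condenses that flow into a single script; it assumes Conan 1.x and the repository's `conanfile.py` layout, so the install folder, profile settings, and toolchain path are illustrative rather than authoritative.

```sh
#!/usr/bin/env bash
# Minimal sketch of the Conan + CMake flow used by the updated packaging
# scripts. Paths and settings are illustrative; adjust them to the local
# conanfile layout.
set -ex

pip install "conan<2"                        # the packaging scripts target Conan 1.x
conan profile new default --detect || true   # ignore the error if a default profile already exists
conan profile update settings.compiler.libcxx=libstdc++11 default
conan profile update settings.compiler.cppstd=20 default

# Export the in-tree Snappy recipe, then resolve all dependencies and
# generate the CMake toolchain file.
conan export external/snappy snappy/1.1.9@
conan install . \
    --install-folder bld/rippled \
    --build missing \
    --settings build_type=Release

# Configure against the generated toolchain and build.
cmake -G Ninja \
    -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
    -DCMAKE_BUILD_TYPE=Release \
    -Dstatic=ON \
    -Dunity=OFF \
    -B bld/rippled
cmake --build bld/rippled --target rippled --parallel
```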
-source /opt/rh/devtoolset-8/enable +source /opt/rh/devtoolset-11/enable rpmbuild --define "_topdir ${PWD}/rpmbuild" -ba rippled.spec + rc=$?; if [[ $rc != 0 ]]; then error "error building rpm" fi diff --git a/Builds/containers/packaging/rpm/rippled.spec b/Builds/containers/packaging/rpm/rippled.spec index 1ad2e278bf8..fe451d645d4 100644 --- a/Builds/containers/packaging/rpm/rippled.spec +++ b/Builds/containers/packaging/rpm/rippled.spec @@ -36,16 +36,64 @@ History server for XRP Ledger %setup -c -n rippled %build +rm -rf ~/.conan/profiles/default + +cp /opt/libcstd/libstdc++.so.6.0.22 /usr/lib64 +cp /opt/libcstd/libstdc++.so.6.0.22 /lib64 +ln -sf /usr/lib64/libstdc++.so.6.0.22 /usr/lib64/libstdc++.so.6 +ln -sf /lib64/libstdc++.so.6.0.22 /usr/lib64/libstdc++.so.6 + +source /opt/rh/rh-python38/enable +pip install "conan<2" +conan profile new default --detect +conan profile update settings.compiler.libcxx=libstdc++11 default +conan profile update settings.compiler.cppstd=20 default + cd rippled + mkdir -p bld.rippled +conan export external/snappy snappy/1.1.9@ + pushd bld.rippled -cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=%{_prefix} -DCMAKE_BUILD_TYPE=Release -Dunity=OFF -Dstatic=true -DCMAKE_VERBOSE_MAKEFILE=OFF -Dvalidator_keys=ON +conan install .. \ + --settings build_type=Release \ + --output-folder . \ + --build missing + +cmake -G Ninja \ + -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \ + -DCMAKE_INSTALL_PREFIX=%{_prefix} \ + -DCMAKE_BUILD_TYPE=Release \ + -Dunity=OFF \ + -Dstatic=ON \ + -Dvalidator_keys=ON \ + -DCMAKE_VERBOSE_MAKEFILE=ON \ + .. + cmake --build . --parallel $(nproc) --target rippled --target validator-keys popd mkdir -p bld.rippled-reporting -cd bld.rippled-reporting -cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=%{_prefix}-reporting -DCMAKE_BUILD_TYPE=Release -Dunity=OFF -Dstatic=true -DCMAKE_VERBOSE_MAKEFILE=OFF -Dreporting=ON +pushd bld.rippled-reporting + +conan install .. \ + --settings build_type=Release \ + --output-folder . \ + --build missing \ + --settings compiler.cppstd=17 \ + --options reporting=True + +cmake -G Ninja \ + -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \ + -DCMAKE_INSTALL_PREFIX=%{_prefix} \ + -DCMAKE_BUILD_TYPE=Release \ + -Dunity=OFF \ + -Dstatic=ON \ + -Dvalidator_keys=ON \ + -Dreporting=ON \ + -DCMAKE_VERBOSE_MAKEFILE=ON \ + .. + cmake --build . 
--parallel $(nproc) --target rippled %pre @@ -53,13 +101,18 @@ test -e /etc/pki/tls || { mkdir -p /etc/pki; ln -s /usr/lib/ssl /etc/pki/tls; } %install rm -rf $RPM_BUILD_ROOT -DESTDIR=$RPM_BUILD_ROOT cmake --build rippled/bld.rippled --target install -- -v -rm -rf ${RPM_BUILD_ROOT}/%{_prefix}/lib64/cmake/date +DESTDIR=$RPM_BUILD_ROOT cmake --build rippled/bld.rippled --target install #-- -v +mkdir -p $RPM_BUILD_ROOT +rm -rf ${RPM_BUILD_ROOT}/%{_prefix}/lib64/ install -d ${RPM_BUILD_ROOT}/etc/opt/ripple install -d ${RPM_BUILD_ROOT}/usr/local/bin -ln -s %{_prefix}/etc/rippled.cfg ${RPM_BUILD_ROOT}/etc/opt/ripple/rippled.cfg -ln -s %{_prefix}/etc/validators.txt ${RPM_BUILD_ROOT}/etc/opt/ripple/validators.txt -ln -s %{_prefix}/bin/rippled ${RPM_BUILD_ROOT}/usr/local/bin/rippled + +install -D ./rippled/cfg/rippled-example.cfg ${RPM_BUILD_ROOT}/%{_prefix}/etc/rippled.cfg +install -D ./rippled/cfg/validators-example.txt ${RPM_BUILD_ROOT}/%{_prefix}/etc/validators.txt + +ln -sf %{_prefix}/etc/rippled.cfg ${RPM_BUILD_ROOT}/etc/opt/ripple/rippled.cfg +ln -sf %{_prefix}/etc/validators.txt ${RPM_BUILD_ROOT}/etc/opt/ripple/validators.txt +ln -sf %{_prefix}/bin/rippled ${RPM_BUILD_ROOT}/usr/local/bin/rippled install -D rippled/bld.rippled/validator-keys/validator-keys ${RPM_BUILD_ROOT}%{_bindir}/validator-keys install -D ./rippled/Builds/containers/shared/rippled.service ${RPM_BUILD_ROOT}/usr/lib/systemd/system/rippled.service install -D ./rippled/Builds/containers/packaging/rpm/50-rippled.preset ${RPM_BUILD_ROOT}/usr/lib/systemd/system-preset/50-rippled.preset @@ -141,6 +194,7 @@ chmod -x /usr/lib/systemd/system/rippled-reporting.service %config(noreplace) /etc/logrotate.d/rippled %config(noreplace) /usr/lib/systemd/system/rippled.service %config(noreplace) /usr/lib/systemd/system-preset/50-rippled.preset + %dir /var/log/rippled/ %dir /var/lib/rippled/ diff --git a/Builds/containers/shared/build_deps.sh b/Builds/containers/shared/build_deps.sh deleted file mode 100755 index 8e11d01508f..00000000000 --- a/Builds/containers/shared/build_deps.sh +++ /dev/null @@ -1,147 +0,0 @@ -#!/usr/bin/env bash -set -ex - -function build_boost() -{ - local boost_ver=$1 - local do_link=$2 - local boost_path=$(echo "${boost_ver}" | sed -e 's!\.!_!g') - mkdir -p /opt/local - cd /opt/local - BOOST_ROOT=/opt/local/boost_${boost_path} - BOOST_URL="https://boostorg.jfrog.io/artifactory/main/release/${boost_ver}/source/boost_${boost_path}.tar.gz" - BOOST_BUILD_ALL=true - . /tmp/install_boost.sh - if [ "$do_link" = true ] ; then - ln -s ./boost_${boost_path} boost - fi -} - -build_boost "1.70.0" true - -# installed in opt, so won't be used -# unless specified by OPENSSL_ROOT_DIR -cd /tmp -OPENSSL_VER=1.1.1d -wget https://www.openssl.org/source/openssl-${OPENSSL_VER}.tar.gz -tar xf openssl-${OPENSSL_VER}.tar.gz -cd openssl-${OPENSSL_VER} -# NOTE: add -g to the end of the following line if we want debug symbols for openssl -SSLDIR=$(openssl version -d | cut -d: -f2 | tr -d [:space:]\") -./config -fPIC --prefix=/opt/local/openssl --openssldir=${SSLDIR} zlib shared -make -j$(nproc) >> make_output.txt 2>&1 -make install >> make_output.txt 2>&1 -cd .. -rm -f openssl-${OPENSSL_VER}.tar.gz -rm -rf openssl-${OPENSSL_VER} -LD_LIBRARY_PATH=${LD_LIBRARY_PATH:-}:/opt/local/openssl/lib /opt/local/openssl/bin/openssl version -a - -cd /tmp -wget https://libarchive.org/downloads/libarchive-3.4.1.tar.gz -tar xzf libarchive-3.4.1.tar.gz -cd libarchive-3.4.1 -mkdir _bld && cd _bld -cmake -DCMAKE_BUILD_TYPE=Release .. 
-make -j$(nproc) >> make_output.txt 2>&1 -make install >> make_output.txt 2>&1 -cd ../.. -rm -f libarchive-3.4.1.tar.gz -rm -rf libarchive-3.4.1 - -cd /tmp -wget https://github.com/protocolbuffers/protobuf/releases/download/v3.10.1/protobuf-all-3.10.1.tar.gz -tar xf protobuf-all-3.10.1.tar.gz -cd protobuf-3.10.1 -./autogen.sh -./configure -make -j$(nproc) >> make_output.txt 2>&1 -make install >> make_output.txt 2>&1 -ldconfig -cd .. -rm -f protobuf-all-3.10.1.tar.gz -rm -rf protobuf-3.10.1 - -cd /tmp -wget https://c-ares.haxx.se/download/c-ares-1.15.0.tar.gz -tar xf c-ares-1.15.0.tar.gz -cd c-ares-1.15.0 -mkdir _bld && cd _bld -cmake \ - -DHAVE_LIBNSL=OFF \ - -DCMAKE_BUILD_TYPE=Release \ - -DCARES_STATIC=ON \ - -DCARES_SHARED=OFF \ - -DCARES_INSTALL=ON \ - -DCARES_STATIC_PIC=ON \ - -DCARES_BUILD_TOOLS=OFF \ - -DCARES_BUILD_TESTS=OFF \ - -DCARES_BUILD_CONTAINER_TESTS=OFF \ - .. -make -j$(nproc) >> make_output.txt 2>&1 -make install >> make_output.txt 2>&1 -cd ../.. -rm -f c-ares-1.15.0.tar.gz -rm -rf c-ares-1.15.0 - -cd /tmp -wget https://github.com/grpc/grpc/archive/v1.25.0.tar.gz -tar xf v1.25.0.tar.gz -cd grpc-1.25.0 -mkdir _bld && cd _bld -cmake \ - -DCMAKE_BUILD_TYPE=Release \ - -DBUILD_SHARED_LIBS=OFF \ - -DgRPC_ZLIB_PROVIDER=package \ - -DgRPC_CARES_PROVIDER=package \ - -DgRPC_SSL_PROVIDER=package \ - -DgRPC_PROTOBUF_PROVIDER=package \ - -DProtobuf_USE_STATIC_LIBS=ON \ - .. -make -j$(nproc) >> make_output.txt 2>&1 -make install >> make_output.txt 2>&1 -cd ../.. -rm -f xf v1.25.0.tar.gz -rm -rf grpc-1.25.0 - -if [ "${CI_USE}" = true ] ; then - - build_boost "1.71.0" false - - cd /tmp - wget https://github.com/doxygen/doxygen/archive/Release_1_8_16.tar.gz - tar xf Release_1_8_16.tar.gz - cd doxygen-Release_1_8_16 - mkdir build - cd build - cmake -G "Unix Makefiles" .. - make -j$(nproc) >> make_output.txt 2>&1 - make install >> make_output.txt 2>&1 - cd ../.. - rm -f Release_1_8_16.tar.gz - rm -rf doxygen-Release_1_8_16 - - mkdir -p /opt/plantuml - wget -O /opt/plantuml/plantuml.jar https://downloads.sourceforge.net/project/plantuml/plantuml.jar - - cd /tmp - wget https://github.com/linux-test-project/lcov/releases/download/v1.14/lcov-1.14.tar.gz - tar xfz lcov-1.14.tar.gz - cd lcov-1.14 - make install PREFIX=/usr/local - cd .. - rm -r lcov-1.14 lcov-1.14.tar.gz - - cd /tmp - wget https://github.com/ccache/ccache/releases/download/v3.7.6/ccache-3.7.6.tar.gz - tar xf ccache-3.7.6.tar.gz - cd ccache-3.7.6 - ./configure --prefix=/usr/local - make >> make_output.txt 2>&1 - make install >> make_output.txt 2>&1 - cd .. - rm -f ccache-3.7.6.tar.gz - rm -rf ccache-3.7.6 - - pip install requests - pip install https://github.com/codecov/codecov-python/archive/master.zip -fi diff --git a/Builds/containers/shared/install_boost.sh b/Builds/containers/shared/install_boost.sh deleted file mode 100755 index 08be7ee6e51..00000000000 --- a/Builds/containers/shared/install_boost.sh +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env bash -# Assumptions: -# 1) BOOST_ROOT and BOOST_URL are already defined, -# and contain valid values. BOOST_URL2 may be defined -# as a fallback. BOOST_WGET_OPTIONS may be defined with -# retry options if the download(s) fail on the first try. 
-# 2) The last namepart of BOOST_ROOT matches the -# folder name internal to boost's .tar.gz -# When testing you can force a boost build by clearing travis caches: -# https://travis-ci.org/ripple/rippled/caches -set -exu - -odir=$(pwd) -: ${BOOST_TOOLSET:=msvc-14.1} - -if [[ -d "$BOOST_ROOT/lib" || -d "${BOOST_ROOT}/stage/lib" ]] ; then - echo "Using cached boost at $BOOST_ROOT" - exit -fi - -#fetch/unpack: -fn=$(basename -- "$BOOST_URL") -ext="${fn##*.}" -wopt="--quiet" -wget ${wopt} $BOOST_URL -O /tmp/boost.tar.${ext} || \ - ( [ -n "${BOOST_URL2}" ] && \ - wget ${wopt} $BOOST_URL2 -O /tmp/boost.tar.${ext} ) || \ - ( [ -n "${BOOST_WGET_OPTIONS}" ] && - ( wget ${wopt} ${BOOST_WGET_OPTIONS} $BOOST_URL -O /tmp/boost.tar.${ext} || \ - ( [ -n "${BOOST_URL2}" ] && \ - wget ${wopt} ${BOOST_WGET_OPTIONS} $BOOST_URL2 -O /tmp/boost.tar.${ext} ) - ) - ) -cd $(dirname $BOOST_ROOT) -rm -fr ${BOOST_ROOT} -mkdir ${BOOST_ROOT} -tar xf /tmp/boost.tar.${ext} -C ${BOOST_ROOT} --strip-components 1 -cd $BOOST_ROOT - -BLDARGS=() -if [[ ${BOOST_BUILD_ALL:-false} == "true" ]]; then - # we never need boost-python...so even for ALL - # option we can skip it - BLDARGS+=(--without-python) -else - BLDARGS+=(--with-chrono) - BLDARGS+=(--with-container) - BLDARGS+=(--with-context) - BLDARGS+=(--with-coroutine) - BLDARGS+=(--with-date_time) - BLDARGS+=(--with-filesystem) - BLDARGS+=(--with-program_options) - BLDARGS+=(--with-regex) - BLDARGS+=(--with-system) - BLDARGS+=(--with-atomic) - BLDARGS+=(--with-thread) -fi -BLDARGS+=(-j$((2*${NUM_PROCESSORS:-2}))) -BLDARGS+=(--prefix=${BOOST_ROOT}/_INSTALLED_) -BLDARGS+=(-d0) # suppress messages/output - -if [[ -z ${COMSPEC:-} ]]; then - if [[ "$(uname)" == "Darwin" ]] ; then - BLDARGS+=(cxxflags="-std=c++14 -fvisibility=default") - else - BLDARGS+=(cxxflags="-std=c++14") - BLDARGS+=(runtime-link="static,shared") - fi - BLDARGS+=(--layout=tagged) - ./bootstrap.sh - ./b2 "${BLDARGS[@]}" stage - ./b2 "${BLDARGS[@]}" install -else - BLDARGS+=(runtime-link="static,shared") - BLDARGS+=(--layout=versioned) - BLDARGS+=(--toolset="${BOOST_TOOLSET}") - BLDARGS+=(address-model=64) - BLDARGS+=(architecture=x86) - BLDARGS+=(link=static) - BLDARGS+=(threading=multi) - cmd /E:ON /D /S /C"bootstrap.bat" - ./b2.exe "${BLDARGS[@]}" stage - ./b2.exe "${BLDARGS[@]}" install -fi - -if [[ ${CI:-false} == "true" ]]; then - # save some disk space...these are mostly - # obj files and don't need to be kept in CI contexts - rm -rf bin.v2 -fi - -cd $odir - diff --git a/Builds/containers/shared/install_cmake.sh b/Builds/containers/shared/install_cmake.sh index 2c9fa10d1c5..e6f84dd6c03 100755 --- a/Builds/containers/shared/install_cmake.sh +++ b/Builds/containers/shared/install_cmake.sh @@ -21,8 +21,13 @@ if [[ "${installed}" != "" && ${installed} =~ ${cm_maj}.${cm_min}.${cm_rel} ]] ; echo "cmake already installed: ${installed}" exit fi - -pkgname="cmake-${cm_maj}.${cm_min}.${cm_rel}-$(uname)-x86_64.tar.gz" +# From CMake 20+ "Linux" is lowercase so using `uname` won't create be the correct path +if [ ${cm_min} -gt 19 ]; then + linux="linux" +else + linux=$(uname) +fi +pkgname="cmake-${cm_maj}.${cm_min}.${cm_rel}-${linux}-x86_64.tar.gz" tmppkg="/tmp/cmake.tar.gz" wget --quiet https://cmake.org/files/v${cm_maj}.${cm_min}/${pkgname} -O ${tmppkg} mkdir -p ${CMAKE_ROOT} @@ -30,5 +35,3 @@ cd ${CMAKE_ROOT} tar --strip-components 1 -xf ${tmppkg} rm -f ${tmppkg} echo "installed: $(cmake_version)" - - diff --git a/Builds/containers/ubuntu-builder/Dockerfile b/Builds/containers/ubuntu-builder/Dockerfile index 
4550c64a096..478d9ad7590 100644 --- a/Builds/containers/ubuntu-builder/Dockerfile +++ b/Builds/containers/ubuntu-builder/Dockerfile @@ -1,4 +1,4 @@ -ARG DIST_TAG=18.04 +ARG DIST_TAG=20.04 FROM ubuntu:$DIST_TAG ARG GIT_COMMIT=unknown ARG CI_USE=false @@ -6,31 +6,19 @@ LABEL git-commit=$GIT_COMMIT # install/setup prerequisites: COPY ubuntu-builder/ubuntu_setup.sh /tmp/ -COPY shared/build_deps.sh /tmp/ COPY shared/install_cmake.sh /tmp/ -COPY shared/install_boost.sh /tmp/ RUN chmod +x /tmp/ubuntu_setup.sh && \ - chmod +x /tmp/build_deps.sh && \ - chmod +x /tmp/install_boost.sh && \ chmod +x /tmp/install_cmake.sh RUN /tmp/ubuntu_setup.sh -RUN /tmp/install_cmake.sh 3.16.1 /opt/local/cmake-3.16 +RUN /tmp/install_cmake.sh 3.16.3 /opt/local/cmake-3.16 RUN ln -s /opt/local/cmake-3.16 /opt/local/cmake ENV PATH="/opt/local/cmake/bin:$PATH" -# also install min supported cmake for testing -RUN if [ "${CI_USE}" = true ] ; then /tmp/install_cmake.sh 3.9.0 /opt/local/cmake-3.9; fi - -RUN /tmp/build_deps.sh -ENV PLANTUML_JAR="/opt/plantuml/plantuml.jar" -ENV BOOST_ROOT="/opt/local/boost/_INSTALLED_" -ENV OPENSSL_ROOT="/opt/local/openssl" # prep files for package building -RUN mkdir -m 777 -p /opt/rippled_bld/pkg/debian -RUN update-alternatives --set gcc /usr/bin/gcc-8 +RUN update-alternatives --set gcc /usr/bin/gcc-11 +RUN mkdir -m 777 -p /opt/rippled_bld/pkg/ WORKDIR /opt/rippled_bld/pkg COPY packaging/dpkg/build_dpkg.sh ./ CMD ./build_dpkg.sh - diff --git a/Builds/containers/ubuntu-builder/ubuntu_setup.sh b/Builds/containers/ubuntu-builder/ubuntu_setup.sh index c637faafdd1..379ca61e2b1 100755 --- a/Builds/containers/ubuntu-builder/ubuntu_setup.sh +++ b/Builds/containers/ubuntu-builder/ubuntu_setup.sh @@ -3,7 +3,7 @@ set -ex source /etc/os-release -if [[ ${VERSION_ID} =~ ^18\. || ${VERSION_ID} =~ ^16\. ]] ; then +if [[ ${VERSION_ID} =~ ^20\. || ${VERSION_ID} =~ ^22\. ]] ; then echo "setup for ${PRETTY_NAME}" else echo "${VERSION} not supported" @@ -19,12 +19,9 @@ apt-get update -o Acquire::CompressionTypes::Order::=gz apt-get -y update apt-get -y install apt-utils -apt-get -y install software-properties-common wget +apt-get -y install software-properties-common wget curl ca-certificates +apt-get -y install python3-pip apt-get -y upgrade -if [[ ${VERSION_ID} =~ ^18\. ]] ; then - apt-add-repository -y multiverse - apt-add-repository -y universe -fi add-apt-repository -y ppa:ubuntu-toolchain-r/test apt-get -y clean apt-get -y update @@ -34,7 +31,6 @@ apt-get -y --fix-missing install \ openssl libssl-dev \ liblzma-dev libbz2-dev zlib1g-dev \ libjemalloc-dev \ - python-pip \ gdb gdbserver \ libstdc++6 \ flex bison parallel \ @@ -42,148 +38,67 @@ apt-get -y --fix-missing install \ java-common javacc \ dpkg-dev debhelper devscripts fakeroot \ debmake git-buildpackage dh-make gitpkg debsums gnupg \ - dh-buildinfo dh-make dh-systemd \ + dh-buildinfo dh-make \ apt-transport-https -apt-get -y install gcc-7 g++-7 -update-alternatives --install \ - /usr/bin/gcc gcc /usr/bin/gcc-7 40 \ - --slave /usr/bin/g++ g++ /usr/bin/g++-7 \ - --slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-7 \ - --slave /usr/bin/gcc-nm gcc-nm /usr/bin/gcc-nm-7 \ - --slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-7 \ - --slave /usr/bin/gcov gcov /usr/bin/gcov-7 \ - --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-dump-7 \ - --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-tool-7 +if [[ ${VERSION_ID} =~ ^20\. 
]] ; then +apt-get install -y \ + dh-systemd +fi -apt-get -y install gcc-8 g++-8 +apt-get -y install gcc-11 g++-11 update-alternatives --install \ - /usr/bin/gcc gcc /usr/bin/gcc-8 20 \ - --slave /usr/bin/g++ g++ /usr/bin/g++-8 \ - --slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-8 \ - --slave /usr/bin/gcc-nm gcc-nm /usr/bin/gcc-nm-8 \ - --slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-8 \ - --slave /usr/bin/gcov gcov /usr/bin/gcov-8 \ - --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-dump-8 \ - --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-tool-8 + /usr/bin/gcc gcc /usr/bin/gcc-11 20 \ + --slave /usr/bin/g++ g++ /usr/bin/g++-11 \ + --slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-11 \ + --slave /usr/bin/gcc-nm gcc-nm /usr/bin/gcc-nm-11 \ + --slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-11 \ + --slave /usr/bin/gcov gcov /usr/bin/gcov-11 \ + --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-dump-11 \ + --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-tool-11 update-alternatives --auto gcc -update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-7 40 -update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-8 20 +update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-11 20 update-alternatives --auto cpp -if [ "${CI_USE}" = true ] ; then - apt-get -y install gcc-6 g++-6 - update-alternatives --install \ - /usr/bin/gcc gcc /usr/bin/gcc-6 10 \ - --slave /usr/bin/g++ g++ /usr/bin/g++-6 \ - --slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-6 \ - --slave /usr/bin/gcc-nm gcc-nm /usr/bin/gcc-nm-6 \ - --slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-6 \ - --slave /usr/bin/gcov gcov /usr/bin/gcov-6 \ - --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-dump-6 \ - --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-tool-6 - - apt-get -y install gcc-9 g++-9 - update-alternatives --install \ - /usr/bin/gcc gcc /usr/bin/gcc-9 15 \ - --slave /usr/bin/g++ g++ /usr/bin/g++-9 \ - --slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-9 \ - --slave /usr/bin/gcc-nm gcc-nm /usr/bin/gcc-nm-9 \ - --slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-9 \ - --slave /usr/bin/gcov gcov /usr/bin/gcov-9 \ - --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-dump-9 \ - --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-tool-9 -fi - -if [[ ${VERSION_ID} =~ ^18\. ]] ; then - apt-get -y install binutils -elif [[ ${VERSION_ID} =~ ^16\. ]] ; then - apt-get -y install python-software-properties binutils-gold -fi - wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - -if [[ ${VERSION_ID} =~ ^18\. ]] ; then - cat << EOF > /etc/apt/sources.list.d/llvm.list -deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic main -deb-src http://apt.llvm.org/bionic/ llvm-toolchain-bionic main -deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-7 main -deb-src http://apt.llvm.org/bionic/ llvm-toolchain-bionic-7 main -deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main -deb-src http://apt.llvm.org/bionic/ llvm-toolchain-bionic-8 main -deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main -deb-src http://apt.llvm.org/bionic/ llvm-toolchain-bionic-9 main -EOF -elif [[ ${VERSION_ID} =~ ^16\. ]] ; then + +if [[ ${VERSION_ID} =~ ^20\. 
]] ; then cat << EOF > /etc/apt/sources.list.d/llvm.list -deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial main -deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial main -deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-7 main -deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-7 main -deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-8 main -deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-8 main -deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-9 main -deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-9 main +deb http://apt.llvm.org/focal/ llvm-toolchain-focal main +deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal main +deb http://apt.llvm.org/focal/ llvm-toolchain-focal-13 main +deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-13 main +deb http://apt.llvm.org/focal/ llvm-toolchain-focal-14 main +deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-14 main EOF + apt-get -y install binutils clang-12 fi -apt-get -y update -apt-get -y install \ - clang-7 libclang-common-7-dev libclang-7-dev libllvm7 llvm-7 \ - llvm-7-dev llvm-7-runtime clang-format-7 python-clang-7 \ - lld-7 libfuzzer-7-dev libc++-7-dev -update-alternatives --install \ - /usr/bin/clang clang /usr/bin/clang-7 40 \ - --slave /usr/bin/clang++ clang++ /usr/bin/clang++-7 \ - --slave /usr/bin/llvm-profdata llvm-profdata /usr/bin/llvm-profdata-7 \ - --slave /usr/bin/asan-symbolize asan-symbolize /usr/bin/asan_symbolize-7 \ - --slave /usr/bin/llvm-symbolizer llvm-symbolizer /usr/bin/llvm-symbolizer-7 \ - --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-7 \ - --slave /usr/bin/llvm-ar llvm-ar /usr/bin/llvm-ar-7 \ - --slave /usr/bin/llvm-cov llvm-cov /usr/bin/llvm-cov-7 \ - --slave /usr/bin/llvm-nm llvm-nm /usr/bin/llvm-nm-7 -apt-get -y install \ - clang-8 libclang-common-8-dev libclang-8-dev libllvm8 llvm-8 \ - llvm-8-dev llvm-8-runtime clang-format-8 python-clang-8 \ - lld-8 libfuzzer-8-dev libc++-8-dev -update-alternatives --install \ - /usr/bin/clang clang /usr/bin/clang-8 20 \ - --slave /usr/bin/clang++ clang++ /usr/bin/clang++-8 \ - --slave /usr/bin/llvm-profdata llvm-profdata /usr/bin/llvm-profdata-8 \ - --slave /usr/bin/asan-symbolize asan-symbolize /usr/bin/asan_symbolize-8 \ - --slave /usr/bin/llvm-symbolizer llvm-symbolizer /usr/bin/llvm-symbolizer-8 \ - --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-8 \ - --slave /usr/bin/llvm-ar llvm-ar /usr/bin/llvm-ar-8 \ - --slave /usr/bin/llvm-cov llvm-cov /usr/bin/llvm-cov-8 \ - --slave /usr/bin/llvm-nm llvm-nm /usr/bin/llvm-nm-8 -update-alternatives --auto clang -if [ "${CI_USE}" = true ] ; then +apt-get -y update +if [[ ${VERSION_ID} =~ ^20\. 
]] ; then +for v in 12 14; do apt-get -y install \ - clang-9 libclang-common-9-dev libclang-9-dev libllvm9 llvm-9 \ - llvm-9-dev llvm-9-runtime clang-format-9 python-clang-9 \ - lld-9 libfuzzer-9-dev libc++-9-dev - update-alternatives --install \ - /usr/bin/clang clang /usr/bin/clang-9 20 \ - --slave /usr/bin/clang++ clang++ /usr/bin/clang++-9 \ - --slave /usr/bin/llvm-profdata llvm-profdata /usr/bin/llvm-profdata-9 \ - --slave /usr/bin/asan-symbolize asan-symbolize /usr/bin/asan_symbolize-9 \ - --slave /usr/bin/llvm-symbolizer llvm-symbolizer /usr/bin/llvm-symbolizer-9 \ - --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-9 \ - --slave /usr/bin/llvm-ar llvm-ar /usr/bin/llvm-ar-9 \ - --slave /usr/bin/llvm-cov llvm-cov /usr/bin/llvm-cov-9 \ - --slave /usr/bin/llvm-nm llvm-nm /usr/bin/llvm-nm-9 - - # only install latest lldb - apt-get -y install lldb-9 python-lldb-9 liblldb-9-dev + clang-$v libclang-common-$v-dev libclang-$v-dev libllvm$v llvm-$v \ + llvm-$v-dev llvm-$v-runtime clang-format-$v python3-clang-$v \ + lld-$v libfuzzer-$v-dev libc++-$v-dev python-is-python3 update-alternatives --install \ - /usr/bin/lldb lldb /usr/bin/lldb-9 50 \ - --slave /usr/bin/lldb-server lldb-server /usr/bin/lldb-server-9 \ - --slave /usr/bin/lldb-argdumper lldb-argdumper /usr/bin/lldb-argdumper-9 \ - --slave /usr/bin/lldb-instr lldb-instr /usr/bin/lldb-instr-9 \ - --slave /usr/bin/lldb-mi lldb-mi /usr/bin/lldb-mi-9 - update-alternatives --auto clang + /usr/bin/clang clang /usr/bin/clang-$v 40 \ + --slave /usr/bin/clang++ clang++ /usr/bin/clang++-$v \ + --slave /usr/bin/llvm-profdata llvm-profdata /usr/bin/llvm-profdata-$v \ + --slave /usr/bin/asan-symbolize asan-symbolize /usr/bin/asan_symbolize-$v \ + --slave /usr/bin/llvm-symbolizer llvm-symbolizer /usr/bin/llvm-symbolizer-$v \ + --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-$v \ + --slave /usr/bin/llvm-ar llvm-ar /usr/bin/llvm-ar-$v \ + --slave /usr/bin/llvm-cov llvm-cov /usr/bin/llvm-cov-$v \ + --slave /usr/bin/llvm-nm llvm-nm /usr/bin/llvm-nm-$v + done fi -apt-get -y autoremove +pip install "conan<2" && \ + conan profile new default --detect && \ + conan profile update settings.compiler.cppstd=20 default && \ + conan profile update settings.compiler.libcxx=libstdc++11 default +apt-get -y autoremove diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index 22f2f17be02..b3fabe41ed9 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.10.0-rc1" +char const* const versionString = "1.10.0-rc3" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From c77a8d5ec6ce4e23b68164fd94f86bf19e4bd1fb Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Thu, 2 Mar 2023 10:07:09 -0800 Subject: [PATCH 114/508] Update Docker.md (#4432) * Add links to some related resources that may be helpful. * Docker images can make testing easier to do. 
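As a concrete illustration of that point, a prebuilt build-environment image can compile and unit-test a checkout without installing the toolchain on the host. The snippet below is only a sketch: the image tag is a placeholder for one of the build/test images referenced in the updated `docs/Docker.md` (no specific published tag is implied), and the build commands assume the Conan-based flow shown earlier in this series.

```sh
# IMAGE is a placeholder -- substitute one of the rippled build/test images
# referenced in docs/Docker.md; no specific published tag is implied here.
IMAGE="rippled-build-env:latest"

docker run --rm -it -v "$PWD":/opt/rippled -w /opt/rippled "$IMAGE" bash -c '
    conan install . --build missing --settings build_type=Release &&
    cmake -B build \
          -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \
          -DCMAKE_BUILD_TYPE=Release &&
    cmake --build build --parallel &&
    ./build/rippled --unittest
'
```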
--- docs/Docker.md | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/docs/Docker.md b/docs/Docker.md index d0103fbd9ae..9f67c87ee51 100644 --- a/docs/Docker.md +++ b/docs/Docker.md @@ -1,16 +1,5 @@ -# Rippled Docker Image +# `rippled` Docker Image -Rippled has a continuous deployment pipeline that turns every git commit into a -docker image for quick testing and deployment. - -To run the tip of the latest release via docker: - -```$ docker run -P -v /srv/rippled/ ripple/rippled:latest``` - -To run the tip of active development: - -```$ docker run -P -v /srv/rippled/ ripple/rippled:develop``` - -Where ```/srv/rippled``` points to a directory containing a rippled.cfg and -database files. By default, port 5005/tcp maps to the RPC port and 51235/udp to -the peer port. +- Some info relating to Docker containers can be found here: [../Builds/containers](../Builds/containers) +- Images for building and testing rippled can be found here: [thejohnfreeman/rippled-docker](https://github.com/thejohnfreeman/rippled-docker/) + - These images do not have rippled. They have all the tools necessary to build rippled. From ecd49e15353e24d9675da4a6a2c6cee093a9adab Mon Sep 17 00:00:00 2001 From: Kenny Lei <3003853+kennyzlei@users.noreply.github.com> Date: Thu, 2 Mar 2023 13:01:50 -0800 Subject: [PATCH 115/508] Rename 'NFT' to 'NFToken' in DisallowIncoming flags (#4442) * Follow-up to #4336 * NFToken is the naming convention in the codebase (rather than NFT) * Rename `lsfDisallowIncomingNFTOffer` to `lsfDisallowIncomingNFTokenOffer` * Rename `asfDisallowIncomingNFTOffer` to `asfDisallowIncomingNFTokenOffer` --- src/ripple/app/tx/impl/NFTokenCreateOffer.cpp | 4 +- src/ripple/app/tx/impl/SetAccount.cpp | 8 +- src/ripple/protocol/LedgerFormats.h | 2 +- src/ripple/protocol/TxFlags.h | 96 ++++++++++--------- src/test/app/NFToken_test.cpp | 14 +-- src/test/rpc/AccountSet_test.cpp | 2 +- 6 files changed, 64 insertions(+), 62 deletions(-) diff --git a/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp b/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp index 6db31c69892..22eca2dffdd 100644 --- a/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp +++ b/src/ripple/app/tx/impl/NFTokenCreateOffer.cpp @@ -193,7 +193,7 @@ NFTokenCreateOffer::preclaim(PreclaimContext const& ctx) // flag cannot be set unless amendment is enabled but // out of an abundance of caution check anyway - if (sleDst->getFlags() & lsfDisallowIncomingNFTOffer) + if (sleDst->getFlags() & lsfDisallowIncomingNFTokenOffer) return tecNO_PERMISSION; } } @@ -210,7 +210,7 @@ NFTokenCreateOffer::preclaim(PreclaimContext const& ctx) if (!sleOwner) return tecNO_TARGET; - if (sleOwner->getFlags() & lsfDisallowIncomingNFTOffer) + if (sleOwner->getFlags() & lsfDisallowIncomingNFTokenOffer) return tecNO_PERMISSION; } } diff --git a/src/ripple/app/tx/impl/SetAccount.cpp b/src/ripple/app/tx/impl/SetAccount.cpp index 5c7d4369a76..167644f45fb 100644 --- a/src/ripple/app/tx/impl/SetAccount.cpp +++ b/src/ripple/app/tx/impl/SetAccount.cpp @@ -541,10 +541,10 @@ SetAccount::doApply() // Set or clear flags for disallowing various incoming instruments if (ctx_.view().rules().enabled(featureDisallowIncoming)) { - if (uSetFlag == asfDisallowIncomingNFTOffer) - uFlagsOut |= lsfDisallowIncomingNFTOffer; - else if (uClearFlag == asfDisallowIncomingNFTOffer) - uFlagsOut &= ~lsfDisallowIncomingNFTOffer; + if (uSetFlag == asfDisallowIncomingNFTokenOffer) + uFlagsOut |= lsfDisallowIncomingNFTokenOffer; + else if (uClearFlag == asfDisallowIncomingNFTokenOffer) + 
uFlagsOut &= ~lsfDisallowIncomingNFTokenOffer; if (uSetFlag == asfDisallowIncomingCheck) uFlagsOut |= lsfDisallowIncomingCheck; diff --git a/src/ripple/protocol/LedgerFormats.h b/src/ripple/protocol/LedgerFormats.h index 45258a3d02b..8245f352c8e 100644 --- a/src/ripple/protocol/LedgerFormats.h +++ b/src/ripple/protocol/LedgerFormats.h @@ -235,7 +235,7 @@ enum LedgerSpecificFlags { /* // reserved for Hooks amendment lsfTshCollect = 0x02000000, // True, allow TSH collect-calls to acc hooks */ - lsfDisallowIncomingNFTOffer = + lsfDisallowIncomingNFTokenOffer = 0x04000000, // True, reject new incoming NFT offers lsfDisallowIncomingCheck = 0x08000000, // True, reject new checks diff --git a/src/ripple/protocol/TxFlags.h b/src/ripple/protocol/TxFlags.h index c4218219868..c0dd080f6f7 100644 --- a/src/ripple/protocol/TxFlags.h +++ b/src/ripple/protocol/TxFlags.h @@ -51,81 +51,83 @@ namespace ripple { @ingroup protocol */ +// Formatting equals sign aligned 4 spaces after longest prefix, except for +// wrapped lines // clang-format off // Universal Transaction flags: -constexpr std::uint32_t tfFullyCanonicalSig = 0x80000000; -constexpr std::uint32_t tfUniversal = tfFullyCanonicalSig; -constexpr std::uint32_t tfUniversalMask = ~tfUniversal; +constexpr std::uint32_t tfFullyCanonicalSig = 0x80000000; +constexpr std::uint32_t tfUniversal = tfFullyCanonicalSig; +constexpr std::uint32_t tfUniversalMask = ~tfUniversal; // AccountSet flags: -constexpr std::uint32_t tfRequireDestTag = 0x00010000; -constexpr std::uint32_t tfOptionalDestTag = 0x00020000; -constexpr std::uint32_t tfRequireAuth = 0x00040000; -constexpr std::uint32_t tfOptionalAuth = 0x00080000; -constexpr std::uint32_t tfDisallowXRP = 0x00100000; -constexpr std::uint32_t tfAllowXRP = 0x00200000; +constexpr std::uint32_t tfRequireDestTag = 0x00010000; +constexpr std::uint32_t tfOptionalDestTag = 0x00020000; +constexpr std::uint32_t tfRequireAuth = 0x00040000; +constexpr std::uint32_t tfOptionalAuth = 0x00080000; +constexpr std::uint32_t tfDisallowXRP = 0x00100000; +constexpr std::uint32_t tfAllowXRP = 0x00200000; constexpr std::uint32_t tfAccountSetMask = ~(tfUniversal | tfRequireDestTag | tfOptionalDestTag | tfRequireAuth | tfOptionalAuth | tfDisallowXRP | tfAllowXRP); // AccountSet SetFlag/ClearFlag values -constexpr std::uint32_t asfRequireDest = 1; -constexpr std::uint32_t asfRequireAuth = 2; -constexpr std::uint32_t asfDisallowXRP = 3; -constexpr std::uint32_t asfDisableMaster = 4; -constexpr std::uint32_t asfAccountTxnID = 5; -constexpr std::uint32_t asfNoFreeze = 6; -constexpr std::uint32_t asfGlobalFreeze = 7; -constexpr std::uint32_t asfDefaultRipple = 8; -constexpr std::uint32_t asfDepositAuth = 9; -constexpr std::uint32_t asfAuthorizedNFTokenMinter = 10; +constexpr std::uint32_t asfRequireDest = 1; +constexpr std::uint32_t asfRequireAuth = 2; +constexpr std::uint32_t asfDisallowXRP = 3; +constexpr std::uint32_t asfDisableMaster = 4; +constexpr std::uint32_t asfAccountTxnID = 5; +constexpr std::uint32_t asfNoFreeze = 6; +constexpr std::uint32_t asfGlobalFreeze = 7; +constexpr std::uint32_t asfDefaultRipple = 8; +constexpr std::uint32_t asfDepositAuth = 9; +constexpr std::uint32_t asfAuthorizedNFTokenMinter = 10; /* // reserved for Hooks amendment -constexpr std::uint32_t asfTshCollect = 11; +constexpr std::uint32_t asfTshCollect = 11; */ -constexpr std::uint32_t asfDisallowIncomingNFTOffer = 12; -constexpr std::uint32_t asfDisallowIncomingCheck = 13; -constexpr std::uint32_t asfDisallowIncomingPayChan = 14; -constexpr std::uint32_t 
asfDisallowIncomingTrustline = 15; +constexpr std::uint32_t asfDisallowIncomingNFTokenOffer = 12; +constexpr std::uint32_t asfDisallowIncomingCheck = 13; +constexpr std::uint32_t asfDisallowIncomingPayChan = 14; +constexpr std::uint32_t asfDisallowIncomingTrustline = 15; // OfferCreate flags: -constexpr std::uint32_t tfPassive = 0x00010000; -constexpr std::uint32_t tfImmediateOrCancel = 0x00020000; -constexpr std::uint32_t tfFillOrKill = 0x00040000; -constexpr std::uint32_t tfSell = 0x00080000; +constexpr std::uint32_t tfPassive = 0x00010000; +constexpr std::uint32_t tfImmediateOrCancel = 0x00020000; +constexpr std::uint32_t tfFillOrKill = 0x00040000; +constexpr std::uint32_t tfSell = 0x00080000; constexpr std::uint32_t tfOfferCreateMask = ~(tfUniversal | tfPassive | tfImmediateOrCancel | tfFillOrKill | tfSell); // Payment flags: -constexpr std::uint32_t tfNoRippleDirect = 0x00010000; -constexpr std::uint32_t tfPartialPayment = 0x00020000; -constexpr std::uint32_t tfLimitQuality = 0x00040000; +constexpr std::uint32_t tfNoRippleDirect = 0x00010000; +constexpr std::uint32_t tfPartialPayment = 0x00020000; +constexpr std::uint32_t tfLimitQuality = 0x00040000; constexpr std::uint32_t tfPaymentMask = ~(tfUniversal | tfPartialPayment | tfLimitQuality | tfNoRippleDirect); // TrustSet flags: -constexpr std::uint32_t tfSetfAuth = 0x00010000; -constexpr std::uint32_t tfSetNoRipple = 0x00020000; -constexpr std::uint32_t tfClearNoRipple = 0x00040000; -constexpr std::uint32_t tfSetFreeze = 0x00100000; -constexpr std::uint32_t tfClearFreeze = 0x00200000; +constexpr std::uint32_t tfSetfAuth = 0x00010000; +constexpr std::uint32_t tfSetNoRipple = 0x00020000; +constexpr std::uint32_t tfClearNoRipple = 0x00040000; +constexpr std::uint32_t tfSetFreeze = 0x00100000; +constexpr std::uint32_t tfClearFreeze = 0x00200000; constexpr std::uint32_t tfTrustSetMask = ~(tfUniversal | tfSetfAuth | tfSetNoRipple | tfClearNoRipple | tfSetFreeze | tfClearFreeze); // EnableAmendment flags: -constexpr std::uint32_t tfGotMajority = 0x00010000; -constexpr std::uint32_t tfLostMajority = 0x00020000; +constexpr std::uint32_t tfGotMajority = 0x00010000; +constexpr std::uint32_t tfLostMajority = 0x00020000; // PaymentChannelClaim flags: -constexpr std::uint32_t tfRenew = 0x00010000; -constexpr std::uint32_t tfClose = 0x00020000; +constexpr std::uint32_t tfRenew = 0x00010000; +constexpr std::uint32_t tfClose = 0x00020000; constexpr std::uint32_t tfPayChanClaimMask = ~(tfUniversal | tfRenew | tfClose); // NFTokenMint flags: -constexpr std::uint32_t const tfBurnable = 0x00000001; -constexpr std::uint32_t const tfOnlyXRP = 0x00000002; -constexpr std::uint32_t const tfTrustLine = 0x00000004; -constexpr std::uint32_t const tfTransferable = 0x00000008; +constexpr std::uint32_t const tfBurnable = 0x00000001; +constexpr std::uint32_t const tfOnlyXRP = 0x00000002; +constexpr std::uint32_t const tfTrustLine = 0x00000004; +constexpr std::uint32_t const tfTransferable = 0x00000008; // Prior to fixRemoveNFTokenAutoTrustLine, transfer of an NFToken between // accounts allowed a TrustLine to be added to the issuer of that token @@ -147,15 +149,15 @@ constexpr std::uint32_t const tfNFTokenMintMask = ~(tfUniversal | tfBurnable | tfOnlyXRP | tfTransferable); // NFTokenCreateOffer flags: -constexpr std::uint32_t const tfSellNFToken = 0x00000001; +constexpr std::uint32_t const tfSellNFToken = 0x00000001; constexpr std::uint32_t const tfNFTokenCreateOfferMask = ~(tfUniversal | tfSellNFToken); // NFTokenCancelOffer flags: -constexpr std::uint32_t const 
tfNFTokenCancelOfferMask = ~(tfUniversal); +constexpr std::uint32_t const tfNFTokenCancelOfferMask = ~(tfUniversal); // NFTokenAcceptOffer flags: -constexpr std::uint32_t const tfNFTokenAcceptOfferMask = ~tfUniversal; +constexpr std::uint32_t const tfNFTokenAcceptOfferMask = ~tfUniversal; // clang-format on diff --git a/src/test/app/NFToken_test.cpp b/src/test/app/NFToken_test.cpp index 40202e07dce..150622c731c 100644 --- a/src/test/app/NFToken_test.cpp +++ b/src/test/app/NFToken_test.cpp @@ -3028,11 +3028,11 @@ class NFToken_test : public beast::unit_test::suite Env env{*this, features - disallowIncoming}; Account const alice{"alice"}; env.fund(XRP(10000), alice); - env(fset(alice, asfDisallowIncomingNFTOffer)); + env(fset(alice, asfDisallowIncomingNFTokenOffer)); env.close(); auto const sle = env.le(alice); uint32_t flags = sle->getFlags(); - BEAST_EXPECT(!(flags & lsfDisallowIncomingNFTOffer)); + BEAST_EXPECT(!(flags & lsfDisallowIncomingNFTokenOffer)); } Env env{*this, features | disallowIncoming}; @@ -3055,7 +3055,7 @@ class NFToken_test : public beast::unit_test::suite env.close(); // enable flag - env(fset(buyer, asfDisallowIncomingNFTOffer)); + env(fset(buyer, asfDisallowIncomingNFTokenOffer)); env.close(); // a sell offer from the minter to the buyer should be rejected @@ -3071,7 +3071,7 @@ class NFToken_test : public beast::unit_test::suite } // disable the flag - env(fclear(buyer, asfDisallowIncomingNFTOffer)); + env(fclear(buyer, asfDisallowIncomingNFTokenOffer)); env.close(); // create offer (allowed now) then cancel @@ -3098,13 +3098,13 @@ class NFToken_test : public beast::unit_test::suite txflags(tfSellNFToken)); env.close(); - env(fset(buyer, asfDisallowIncomingNFTOffer)); + env(fset(buyer, asfDisallowIncomingNFTokenOffer)); env.close(); env(token::cancelOffer(minter, {offerIndex})); env.close(); - env(fclear(buyer, asfDisallowIncomingNFTOffer)); + env(fclear(buyer, asfDisallowIncomingNFTokenOffer)); env.close(); } @@ -3125,7 +3125,7 @@ class NFToken_test : public beast::unit_test::suite // buyer now owns the token // enable flag again - env(fset(buyer, asfDisallowIncomingNFTOffer)); + env(fset(buyer, asfDisallowIncomingNFTokenOffer)); env.close(); // a random offer to buy the token diff --git a/src/test/rpc/AccountSet_test.cpp b/src/test/rpc/AccountSet_test.cpp index b3ca4c9f017..f935e0a846f 100644 --- a/src/test/rpc/AccountSet_test.cpp +++ b/src/test/rpc/AccountSet_test.cpp @@ -86,7 +86,7 @@ class AccountSet_test : public beast::unit_test::suite if (flag == asfDisallowIncomingCheck || flag == asfDisallowIncomingPayChan || - flag == asfDisallowIncomingNFTOffer || + flag == asfDisallowIncomingNFTokenOffer || flag == asfDisallowIncomingTrustline) { // These flags are part of the DisallowIncoming amendment From 8687b5c3c993a89ff9a1731e187b99ef71616701 Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Thu, 2 Mar 2023 14:28:08 -0800 Subject: [PATCH 116/508] Set version to 1.10.0-rc4 --- src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index b3fabe41ed9..5e112371969 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.10.0-rc3" +char const* const versionString = 
"1.10.0-rc4" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 07f047b1e260e5b937af28e8f8da9c360ea832b9 Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Tue, 14 Mar 2023 09:30:22 -0700 Subject: [PATCH 117/508] Set version to 1.10.0 Merge #4451 --- RELEASENOTES.md | 149 ++++++++++++++++++++++++- src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 2 files changed, 149 insertions(+), 2 deletions(-) diff --git a/RELEASENOTES.md b/RELEASENOTES.md index 4403110e052..da81d2a5489 100644 --- a/RELEASENOTES.md +++ b/RELEASENOTES.md @@ -5,7 +5,154 @@ This document contains the release notes for `rippled`, the reference server implementation of the XRP Ledger protocol. To learn more about how to build, run or update a `rippled` server, visit https://xrpl.org/install-rippled.html -Have new ideas? Need help with setting up your node? Come visit us [here](https://github.com/xrplf/rippled/issues/new/choose) +Have new ideas? Need help with setting up your node? [Please open an issue here](https://github.com/xrplf/rippled/issues/new/choose). + +# Introducing XRP Ledger version 1.10.0 + +Version 1.10.0 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release introduces six new amendments, detailed below, and cleans up code to improve performance. + +[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) + + + +## Action Required + +Six new amendments are now open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. + +If you operate an XRP Ledger server, upgrade to version 1.10.0 by March 21 to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. + + +## Install / Upgrade + +On supported platforms, see the [instructions on installing or updating `rippled`](https://xrpl.org/install-rippled.html). + + +## New Amendments + +- **`featureImmediateOfferKilled`**: Changes the response code of an `OfferCreate` transaction with the `tfImmediateOrCancel` flag to return `tecKILLED` when no funds are moved. The previous return code of `tecSUCCESS` was unintuitive. [#4157](https://github.com/XRPLF/rippled/pull/4157) + +- **`featureDisallowIncoming`**: Enables an account to block incoming checks, payment channels, NFToken offers, and trust lines. [#4336](https://github.com/XRPLF/rippled/pull/4336) + +- **`featureXRPFees`**: Simplifies transaction cost calculations to use XRP directly, rather than calculating indirectly in "fee units" and translating the results to XRP. Updates all instances of "fee units" in the protocol and ledger data to be drops of XRP instead. [#4247](https://github.com/XRPLF/rippled/pull/4247) + +- **`fixUniversalNumber`**: Simplifies and unifies the code for decimal floating point math. In some cases, this provides slightly better accuracy than the previous code, resulting in calculations whose least significant digits are different than when calculated with the previous code. The different results may cause other edge case differences where precise calculations are used, such as ranking of offers or processing of payments that use several different paths. [#4192](https://github.com/XRPLF/rippled/pull/4192) + +- **`fixNonFungibleTokensV1_2`**: This amendment is a combination of NFToken fixes. 
[#4417](https://github.com/XRPLF/rippled/pull/4417) + - Fixes unburnable NFTokens when it has over 500 offers. [#4346](https://github.com/XRPLF/rippled/pull/4346) + - Fixes 3 NFToken offer acceptance issues. [#4380](https://github.com/XRPLF/rippled/pull/4380) + - Prevents brokered sales of NFTokens to owners. [#4403](https://github.com/XRPLF/rippled/pull/4403) + - Only allows the destination to settle NFToken offers through brokerage. [#4399](https://github.com/XRPLF/rippled/pull/4399) + +- **`fixTrustLinesToSelf`**: Trust lines must be between two different accounts, but two exceptions exist because of a bug that briefly existed. This amendment removes those trust lines. [69bb2be](https://github.com/XRPLF/rippled/pull/4270/commits/69bb2be446e3cc24c694c0835b48bd2ecd3d119e) + + +## Changelog + + +### New Features and Improvements + +- **Improve Handshake in the peer protocol**: Switched to using a cryptographically secure PRNG for the Instance Cookie. `rippled` now uses hex encoding for the `Closed-Ledger` and `Previous-Ledger` fields in the Handshake. Also added `--newnodeid` and `--nodeid` command line options. [5a15229](https://github.com/XRPLF/rippled/pull/4270/commits/5a15229eeb13b69c8adf1f653b88a8f8b9480546) + +- **RPC tooBusy response now has 503 HTTP status code**: Added ripplerpc 3.0, enabling RPC tooBusy responses to return relevant HTTP status codes. This is a non-breaking change that only applies to JSON-RPC when you include `"ripplerpc": "3.0"` in the request. [#4143](https://github.com/XRPLF/rippled/pull/4143) + +- **Use the Conan package manager**: Added a `conanfile.py` and Conan recipe for Snappy. Removed the RocksDB recipe from the repo; you can now get it from Conan Center. [#4367](https://github.com/XRPLF/rippled/pull/4367), [c2b03fe](https://github.com/XRPLF/rippled/commit/c2b03fecca19a304b37467b01fa78593d3dce3fb) + +- **Update Build Instructions**: Updated the build instructions to build with the Conan package manager and restructured info for easier comprehension. [#4376](https://github.com/XRPLF/rippled/pull/4376), [#4383](https://github.com/XRPLF/rippled/pull/4383) + +- **Revise CONTRIBUTING**: Updated code contribution guidelines. `rippled` is an open source project and contributions are very welcome. [#4382](https://github.com/XRPLF/rippled/pull/4382) + +- **Update documented pathfinding configuration defaults**: `417cfc2` changed the default Path Finding configuration values, but missed updating the values documented in rippled-example.cfg. Updated those defaults and added recommended values for nodes that want to support advanced pathfinding. [#4409](https://github.com/XRPLF/rippled/pull/4409) + +- **Remove gRPC code previously used for the Xpring SDK**: Removed gRPC code used for the Xpring SDK. The gRPC API is also enabled locally by default in `rippled-example.cfg`. This API is used for [Reporting Mode](https://xrpl.org/build-run-rippled-in-reporting-mode.html) and [Clio](https://github.com/XRPLF/clio). [28f4cc7](https://github.com/XRPLF/rippled/pull/4321/commits/28f4cc7817c2e477f0d7e9ade8f07a45ff2b81f1) + +- **Switch from C++17 to C++20**: Updated `rippled` to use C++20. [92d35e5](https://github.com/XRPLF/rippled/pull/4270/commits/92d35e54c7de6bbe44ff6c7c52cc0765b3f78258) + +- **Support for Boost 1.80.0:**: [04ef885](https://github.com/XRPLF/rippled/pull/4321/commits/04ef8851081f6ee9176783ad3725960b8a931ebb) + +- **Reduce default reserves to 10/2**: Updated the hard-coded default reserves to match the current settings on Mainnet. 
[#4329](https://github.com/XRPLF/rippled/pull/4329) + +- **Improve self-signed certificate generation**: Improved speed and security of TLS certificate generation on fresh startup. [0ecfc7c](https://github.com/XRPLF/rippled/pull/4270/commits/0ecfc7cb1a958b731e5f184876ea89ae2d4214ee) + + +### Bug Fixes + +- **Update command-line usage help message**: Added `manifest` and `validator_info` to the `rippled` CLI usage statement. [b88ed5a](https://github.com/XRPLF/rippled/pull/4270/commits/b88ed5a8ec2a0735031ca23dc6569d54787dc2f2) + +- **Work around gdb bug by changing a template parameter**: Added a workaround for a bug in gdb, where unsigned template parameters caused issues with RTTI. [#4332](https://github.com/XRPLF/rippled/pull/4332) + +- **Fix clang 15 warnings**: [#4325](https://github.com/XRPLF/rippled/pull/4325) + +- **Catch transaction deserialization error in doLedgerGrpc**: Fixed an issue in the gRPC API, so `Clio` can extract ledger headers and state objects from specific transactions that can't be deserialized by `rippled` code. [#4323](https://github.com/XRPLF/rippled/pull/4323) + +- **Update dependency: gRPC**: New Conan recipes broke the old version of gRPC, so the dependency was updated. [#4407](https://github.com/XRPLF/rippled/pull/4407) + +- **Fix Doxygen workflow**: Added options to build documentation that don't depend on the library dependencies of `rippled`. [#4372](https://github.com/XRPLF/rippled/pull/4372) + +- **Don't try to read SLE with key 0 from the ledger**: Fixed the `preclaim` function to check for 0 in `NFTokenSellOffer` and `NFTokenBuyOffer` before calling `Ledger::read`. This issue only affected debug builds. [#4351](https://github.com/XRPLF/rippled/pull/4351) + +- **Update broken link to hosted Doxygen content**: [5e1cb09](https://github.com/XRPLF/rippled/pull/4270/commits/5e1cb09b8892e650f6c34a66521b6b1673bd6b65) + + +### Code Cleanup + +- **Prevent unnecessary `shared_ptr` copies by accepting a value in `SHAMapInnerNode::setChild`**: [#4266](https://github.com/XRPLF/rippled/pull/4266) + +- **Release TaggedCache object memory outside the lock**: [3726f8b](https://github.com/XRPLF/rippled/pull/4321/commits/3726f8bf31b3eab8bab39dce139656fd705ae9a0) + +- **Rename SHAMapStoreImp::stopping() to healthWait()**: [7e9e910](https://github.com/XRPLF/rippled/pull/4321/commits/7e9e9104eabbf0391a0837de5630af17a788e233) + +- **Improve wrapper around OpenSSL RAND**: [7b3507b](https://github.com/XRPLF/rippled/pull/4270/commits/7b3507bb873495a974db33c57a888221ddabcacc) + +- **Improve AccountID string conversion caching**: Improved memory cache usage. 
[e2eed96](https://github.com/XRPLF/rippled/pull/4270/commits/e2eed966b0ecb6445027e6a023b48d702c5f4832) + +- **Build the command map at compile time**: [9aaa0df](https://github.com/XRPLF/rippled/pull/4270/commits/9aaa0dff5fd422e5f6880df8e20a1fd5ad3b4424) + +- **Avoid unnecessary copying and dynamic memory allocations**: [d318ab6](https://github.com/XRPLF/rippled/pull/4270/commits/d318ab612adc86f1fd8527a50af232f377ca89ef) + +- **Use constexpr to check memo validity**: [e67f905](https://github.com/XRPLF/rippled/pull/4270/commits/e67f90588a9050162881389d7e7d1d0fb31066b0) + +- **Remove charUnHex**: [83ac141](https://github.com/XRPLF/rippled/pull/4270/commits/83ac141f656b1a95b5661853951ebd95b3ffba99) + +- **Remove deprecated AccountTxOld.cpp**: [ce64f7a](https://github.com/XRPLF/rippled/pull/4270/commits/ce64f7a90f99c6b5e68d3c3d913443023de061a6) + +- **Remove const_cast usage**: [23ce431](https://github.com/XRPLF/rippled/pull/4321/commits/23ce4318768b718c82e01004d23f1abc9a9549ff) + +- **Remove inaccessible code paths and outdated data format wchar_t**: [95fabd5](https://github.com/XRPLF/rippled/pull/4321/commits/95fabd5762a4917753c06268192e4d4e4baef8e4) + +- **Improve move semantics in Expected**: [#4326](https://github.com/XRPLF/rippled/pull/4326) + + +### GitHub + +The public source code repository for `rippled` is hosted on GitHub at . + +We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. + +### Credits + +The following people contributed directly to this release: + +- Alexander Kremer +- Alloy Networks <45832257+alloynetworks@users.noreply.github.com> +- CJ Cobb <46455409+cjcobb23@users.noreply.github.com> +- Chenna Keshava B S +- Crypto Brad Garlinghouse +- Denis Angell +- Ed Hennis +- Elliot Lee +- Gregory Popovitch +- Howard Hinnant +- J. Scott Branson <18340247+crypticrabbit@users.noreply.github.com> +- John Freeman +- ledhed2222 +- manojsdoshi +- Nik Bougalis +- RichardAH +- Scott Determan +- Scott Schurr +- seelabs +- Shawn Xie <35279399+shawnxie999@users.noreply.github.com> + # Introducing XRP Ledger version 1.9.4 diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index 5e112371969..03e83dd36c9 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.10.0-rc4" +char const* const versionString = "1.10.0" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 1e7710eee25a3e0b7318f7a3a8682cff30a5e668 Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Tue, 14 Mar 2023 13:08:56 -0700 Subject: [PATCH 118/508] docs: security bug bounty acknowledgements (#4460) --- RELEASENOTES.md | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/RELEASENOTES.md b/RELEASENOTES.md index da81d2a5489..e814019b411 100644 --- a/RELEASENOTES.md +++ b/RELEASENOTES.md @@ -145,14 +145,26 @@ The following people contributed directly to this release: - J. 
Scott Branson <18340247+crypticrabbit@users.noreply.github.com> - John Freeman - ledhed2222 +- Levin Winter <33220502+levinwinter@users.noreply.github.com> - manojsdoshi - Nik Bougalis - RichardAH - Scott Determan - Scott Schurr -- seelabs - Shawn Xie <35279399+shawnxie999@users.noreply.github.com> +Security Bug Bounty Acknowledgements: +- Aaron Hook +- Levin Winter + +Bug Bounties and Responsible Disclosures: +We welcome reviews of the rippled code and urge researchers to +responsibly disclose any issues they may find. + +To report a bug, please send a detailed report to: + + bugs@xrpl.org + # Introducing XRP Ledger version 1.9.4 From f7b3ddd87b8ef093a06ab1420bea57ed1e77643a Mon Sep 17 00:00:00 2001 From: Mark Travis Date: Tue, 14 Mar 2023 20:49:40 -0700 Subject: [PATCH 119/508] Reporting Mode: Do not attempt to acquire missing data from peer network (#4458) In Reporting Mode, a server would core dump when it is not able to read from Cassandra. This patch prevents the core dump when Cassandra is down for reporting mode servers. This does not fix the root cause, but it cuts down on some of the resulting noise. --- src/ripple/app/ledger/Ledger.cpp | 2 +- src/ripple/shamap/Family.h | 16 +++++++++++-- src/ripple/shamap/NodeFamily.h | 4 ++-- src/ripple/shamap/ShardFamily.h | 5 ++-- src/ripple/shamap/impl/NodeFamily.cpp | 10 +++++++- src/ripple/shamap/impl/SHAMap.cpp | 32 +++++++++++++++++--------- src/ripple/shamap/impl/ShardFamily.cpp | 4 +++- src/test/shamap/common.h | 6 +++-- 8 files changed, 57 insertions(+), 22 deletions(-) diff --git a/src/ripple/app/ledger/Ledger.cpp b/src/ripple/app/ledger/Ledger.cpp index 7757dac53bf..7552f755c6e 100644 --- a/src/ripple/app/ledger/Ledger.cpp +++ b/src/ripple/app/ledger/Ledger.cpp @@ -266,7 +266,7 @@ Ledger::Ledger( { info_.hash = calculateLedgerHash(info_); if (acquire && !config.reporting()) - family.missingNode(info_.hash, info_.seq); + family.missingNodeAcquireByHash(info_.hash, info_.seq); } } diff --git a/src/ripple/shamap/Family.h b/src/ripple/shamap/Family.h index 72c9a6cb07a..fea5545d31c 100644 --- a/src/ripple/shamap/Family.h +++ b/src/ripple/shamap/Family.h @@ -75,11 +75,23 @@ class Family virtual bool isShardBacked() const = 0; + /** Acquire ledger that has a missing node by ledger sequence + * + * Throw if in reporting mode. + * + * @param refNum Sequence of ledger to acquire. + * @param nodeHash Hash of missing node to report in throw. + */ virtual void - missingNode(std::uint32_t refNum) = 0; + missingNodeAcquireBySeq(std::uint32_t refNum, uint256 const& nodeHash) = 0; + /** Acquire ledger that has a missing node by ledger hash + * + * @param refHash Hash of ledger to acquire. + * @param refNum Ledger sequence with missing node. 
+ */ virtual void - missingNode(uint256 const& refHash, std::uint32_t refNum) = 0; + missingNodeAcquireByHash(uint256 const& refHash, std::uint32_t refNum) = 0; virtual void reset() = 0; diff --git a/src/ripple/shamap/NodeFamily.h b/src/ripple/shamap/NodeFamily.h index 2d8236705b5..f20abccce9d 100644 --- a/src/ripple/shamap/NodeFamily.h +++ b/src/ripple/shamap/NodeFamily.h @@ -83,10 +83,10 @@ class NodeFamily : public Family reset() override; void - missingNode(std::uint32_t seq) override; + missingNodeAcquireBySeq(std::uint32_t seq, uint256 const& hash) override; void - missingNode(uint256 const& hash, std::uint32_t seq) override + missingNodeAcquireByHash(uint256 const& hash, std::uint32_t seq) override { acquire(hash, seq); } diff --git a/src/ripple/shamap/ShardFamily.h b/src/ripple/shamap/ShardFamily.h index 550efeb5b81..de809cf589c 100644 --- a/src/ripple/shamap/ShardFamily.h +++ b/src/ripple/shamap/ShardFamily.h @@ -89,10 +89,11 @@ class ShardFamily : public Family reset() override; void - missingNode(std::uint32_t seq) override; + missingNodeAcquireBySeq(std::uint32_t seq, uint256 const& nodeHash) + override; void - missingNode(uint256 const& hash, std::uint32_t seq) override + missingNodeAcquireByHash(uint256 const& hash, std::uint32_t seq) override { acquire(hash, seq); } diff --git a/src/ripple/shamap/impl/NodeFamily.cpp b/src/ripple/shamap/impl/NodeFamily.cpp index f9c6dedb265..1752db06a8e 100644 --- a/src/ripple/shamap/impl/NodeFamily.cpp +++ b/src/ripple/shamap/impl/NodeFamily.cpp @@ -21,6 +21,7 @@ #include #include #include +#include namespace ripple { @@ -65,9 +66,16 @@ NodeFamily::reset() } void -NodeFamily::missingNode(std::uint32_t seq) +NodeFamily::missingNodeAcquireBySeq(std::uint32_t seq, uint256 const& nodeHash) { JLOG(j_.error()) << "Missing node in " << seq; + if (app_.config().reporting()) + { + std::stringstream ss; + ss << "Node not read, likely a Cassandra error in ledger seq " << seq + << " object hash " << nodeHash; + Throw(ss.str()); + } std::unique_lock lock(maxSeqMutex_); if (maxSeq_ == 0) diff --git a/src/ripple/shamap/impl/SHAMap.cpp b/src/ripple/shamap/impl/SHAMap.cpp index 1a5a283dd3c..fa42c8e8f82 100644 --- a/src/ripple/shamap/impl/SHAMap.cpp +++ b/src/ripple/shamap/impl/SHAMap.cpp @@ -173,30 +173,40 @@ SHAMap::finishFetch( std::shared_ptr const& object) const { assert(backed_); - if (!object) - { - if (full_) - { - full_ = false; - f_.missingNode(ledgerSeq_); - } - return {}; - } std::shared_ptr node; try { + if (!object) + { + if (full_) + { + full_ = false; + f_.missingNodeAcquireBySeq(ledgerSeq_, hash.as_uint256()); + } + return {}; + } + node = SHAMapTreeNode::makeFromPrefix(makeSlice(object->getData()), hash); if (node) canonicalize(hash, node); return node; } - catch (std::exception const&) + catch (SHAMapMissingNode const& e) + { + JLOG(journal_.warn()) << "Missing node: " << hash << " : " << e.what(); + } + catch (std::runtime_error const& e) + { + JLOG(journal_.warn()) << e.what(); + } + catch (...) 
{ JLOG(journal_.warn()) << "Invalid DB node " << hash; - return std::shared_ptr(); } + + return std::shared_ptr(); } // See if a sync filter has a node diff --git a/src/ripple/shamap/impl/ShardFamily.cpp b/src/ripple/shamap/impl/ShardFamily.cpp index eadfc42aa27..f22d4152e2b 100644 --- a/src/ripple/shamap/impl/ShardFamily.cpp +++ b/src/ripple/shamap/impl/ShardFamily.cpp @@ -22,6 +22,7 @@ #include #include #include +#include namespace ripple { @@ -152,8 +153,9 @@ ShardFamily::reset() } void -ShardFamily::missingNode(std::uint32_t seq) +ShardFamily::missingNodeAcquireBySeq(std::uint32_t seq, uint256 const& nodeHash) { + std::ignore = nodeHash; JLOG(j_.error()) << "Missing node in ledger sequence " << seq; std::unique_lock lock(maxSeqMutex_); diff --git a/src/test/shamap/common.h b/src/test/shamap/common.h index c4238b2a65f..d89acb988d7 100644 --- a/src/test/shamap/common.h +++ b/src/test/shamap/common.h @@ -105,13 +105,15 @@ class TestNodeFamily : public Family } void - missingNode(std::uint32_t refNum) override + missingNodeAcquireBySeq(std::uint32_t refNum, uint256 const& nodeHash) + override { Throw("missing node"); } void - missingNode(uint256 const& refHash, std::uint32_t refNum) override + missingNodeAcquireByHash(uint256 const& refHash, std::uint32_t refNum) + override { Throw("missing node"); } From 84cde3ce0b8dba9d39288e9dad37497593aef3ab Mon Sep 17 00:00:00 2001 From: drlongle Date: Wed, 15 Mar 2023 04:54:54 +0100 Subject: [PATCH 120/508] Use <=> operator for base_uint, Issue, and Book: (#4411) - Implement the `operator==` and the `operator<=>` (aka the spaceship operator) in `base_uint`, `Issue`, and `Book`. - C++20-compliant compilers automatically provide the remaining comparison operators (e.g. `operator<`, `operator<=`, ...). - Remove the function compare() because it is no longer needed. - Maintain the same semantics as the existing code. - Add some unit tests to gain further confidence. - Fix #2525. --- .../app/tx/impl/details/NFTokenUtils.cpp | 11 ++- src/ripple/basics/base_uint.h | 86 ++++++------------- src/ripple/protocol/Book.h | 28 +++--- src/ripple/protocol/Issue.h | 36 ++++---- src/ripple/protocol/impl/Book.cpp | 54 ------------ src/ripple/protocol/impl/Issue.cpp | 56 ------------ src/test/basics/base_uint_test.cpp | 83 ++++++++++++++++-- 7 files changed, 138 insertions(+), 216 deletions(-) diff --git a/src/ripple/app/tx/impl/details/NFTokenUtils.cpp b/src/ripple/app/tx/impl/details/NFTokenUtils.cpp index db2c3ae62f7..09ff8f13caa 100644 --- a/src/ripple/app/tx/impl/details/NFTokenUtils.cpp +++ b/src/ripple/app/tx/impl/details/NFTokenUtils.cpp @@ -146,19 +146,22 @@ getPageForToken( return nullptr; else { - // This would be an ideal place for the spaceship operator... - int const relation = compare(id & nft::pageMask, cmp); + auto const relation{(id & nft::pageMask) <=> cmp}; if (relation == 0) + { // If the passed in id belongs exactly on this (full) page // this account simply cannot store the NFT. return nullptr; + } - else if (relation > 0) + if (relation > 0) + { // We need to leave the entire contents of this page in // narr so carr stays empty. The new NFT will be // inserted in carr. This keeps the NFTs that must be // together all on their own page. splitIter = narr.end(); + } // If neither of those conditions apply then put all of // narr into carr and produce an empty narr where the new NFT @@ -228,7 +231,7 @@ compareTokens(uint256 const& a, uint256 const& b) // 96-bits are identical we still need a fully deterministic sort. 
// So we sort on the low 96-bits first. If those are equal we sort on // the whole thing. - if (auto const lowBitsCmp = compare(a & nft::pageMask, b & nft::pageMask); + if (auto const lowBitsCmp{(a & nft::pageMask) <=> (b & nft::pageMask)}; lowBitsCmp != 0) return lowBitsCmp < 0; diff --git a/src/ripple/basics/base_uint.h b/src/ripple/basics/base_uint.h index 8f277c3003c..8b15b082647 100644 --- a/src/ripple/basics/base_uint.h +++ b/src/ripple/basics/base_uint.h @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -549,103 +550,66 @@ using uint160 = base_uint<160>; using uint256 = base_uint<256>; template -inline int -compare(base_uint const& a, base_uint const& b) +[[nodiscard]] inline constexpr std::strong_ordering +operator<=>(base_uint const& lhs, base_uint const& rhs) { - auto ret = std::mismatch(a.cbegin(), a.cend(), b.cbegin()); - - if (ret.first == a.cend()) - return 0; + // This comparison might seem wrong on a casual inspection because it + // compares data internally stored as std::uint32_t byte-by-byte. But + // note that the underlying data is stored in big endian, even if the + // plaform is little endian. This makes the comparison correct. + // + // FIXME: use std::lexicographical_compare_three_way once support is + // added to MacOS. - // a > b - if (*ret.first > *ret.second) - return 1; + auto const ret = std::mismatch(lhs.cbegin(), lhs.cend(), rhs.cbegin()); - // a < b - return -1; -} - -template -inline bool -operator<(base_uint const& a, base_uint const& b) -{ - return compare(a, b) < 0; -} + // a == b + if (ret.first == lhs.cend()) + return std::strong_ordering::equivalent; -template -inline bool -operator<=(base_uint const& a, base_uint const& b) -{ - return compare(a, b) <= 0; + return (*ret.first > *ret.second) ? 
std::strong_ordering::greater + : std::strong_ordering::less; } -template -inline bool -operator>(base_uint const& a, base_uint const& b) +template +[[nodiscard]] inline constexpr bool +operator==(base_uint const& lhs, base_uint const& rhs) { - return compare(a, b) > 0; -} - -template -inline bool -operator>=(base_uint const& a, base_uint const& b) -{ - return compare(a, b) >= 0; -} - -template -inline bool -operator==(base_uint const& a, base_uint const& b) -{ - return compare(a, b) == 0; -} - -template -inline bool -operator!=(base_uint const& a, base_uint const& b) -{ - return compare(a, b) != 0; + return (lhs <=> rhs) == 0; } //------------------------------------------------------------------------------ template -inline bool +inline constexpr bool operator==(base_uint const& a, std::uint64_t b) { return a == base_uint(b); } -template -inline bool -operator!=(base_uint const& a, std::uint64_t b) -{ - return !(a == b); -} - //------------------------------------------------------------------------------ template -inline const base_uint +inline constexpr base_uint operator^(base_uint const& a, base_uint const& b) { return base_uint(a) ^= b; } template -inline const base_uint +inline constexpr base_uint operator&(base_uint const& a, base_uint const& b) { return base_uint(a) &= b; } template -inline const base_uint +inline constexpr base_uint operator|(base_uint const& a, base_uint const& b) { return base_uint(a) |= b; } template -inline const base_uint +inline constexpr base_uint operator+(base_uint const& a, base_uint const& b) { return base_uint(a) += b; diff --git a/src/ripple/protocol/Book.h b/src/ripple/protocol/Book.h index 1469b60dd1b..609989062c0 100644 --- a/src/ripple/protocol/Book.h +++ b/src/ripple/protocol/Book.h @@ -65,28 +65,24 @@ hash_append(Hasher& h, Book const& b) Book reversed(Book const& book); -/** Ordered comparison. */ -int -compare(Book const& lhs, Book const& rhs); - /** Equality comparison. */ /** @{ */ -bool -operator==(Book const& lhs, Book const& rhs); -bool -operator!=(Book const& lhs, Book const& rhs); +[[nodiscard]] inline constexpr bool +operator==(Book const& lhs, Book const& rhs) +{ + return (lhs.in == rhs.in) && (lhs.out == rhs.out); +} /** @} */ /** Strict weak ordering. */ /** @{ */ -bool -operator<(Book const& lhs, Book const& rhs); -bool -operator>(Book const& lhs, Book const& rhs); -bool -operator>=(Book const& lhs, Book const& rhs); -bool -operator<=(Book const& lhs, Book const& rhs); +[[nodiscard]] inline constexpr std::weak_ordering +operator<=>(Book const& lhs, Book const& rhs) +{ + if (auto const c{lhs.in <=> rhs.in}; c != 0) + return c; + return lhs.out <=> rhs.out; +} /** @} */ } // namespace ripple diff --git a/src/ripple/protocol/Issue.h b/src/ripple/protocol/Issue.h index 11c45c0136c..be7733677a1 100644 --- a/src/ripple/protocol/Issue.h +++ b/src/ripple/protocol/Issue.h @@ -63,31 +63,29 @@ hash_append(Hasher& h, Issue const& r) hash_append(h, r.currency, r.account); } -/** Ordered comparison. - The assets are ordered first by currency and then by account, - if the currency is not XRP. -*/ -int -compare(Issue const& lhs, Issue const& rhs); - /** Equality comparison. */ /** @{ */ -bool -operator==(Issue const& lhs, Issue const& rhs); -bool -operator!=(Issue const& lhs, Issue const& rhs); +[[nodiscard]] inline constexpr bool +operator==(Issue const& lhs, Issue const& rhs) +{ + return (lhs.currency == rhs.currency) && + (isXRP(lhs.currency) || lhs.account == rhs.account); +} /** @} */ /** Strict weak ordering. 
*/ /** @{ */ -bool -operator<(Issue const& lhs, Issue const& rhs); -bool -operator>(Issue const& lhs, Issue const& rhs); -bool -operator>=(Issue const& lhs, Issue const& rhs); -bool -operator<=(Issue const& lhs, Issue const& rhs); +[[nodiscard]] inline constexpr std::weak_ordering +operator<=>(Issue const& lhs, Issue const& rhs) +{ + if (auto const c{lhs.currency <=> rhs.currency}; c != 0) + return c; + + if (isXRP(lhs.currency)) + return std::weak_ordering::equivalent; + + return (lhs.account <=> rhs.account); +} /** @} */ //------------------------------------------------------------------------------ diff --git a/src/ripple/protocol/impl/Book.cpp b/src/ripple/protocol/impl/Book.cpp index 323985e6114..3ad22675d1b 100644 --- a/src/ripple/protocol/impl/Book.cpp +++ b/src/ripple/protocol/impl/Book.cpp @@ -47,58 +47,4 @@ reversed(Book const& book) return Book(book.out, book.in); } -/** Ordered comparison. */ -int -compare(Book const& lhs, Book const& rhs) -{ - int const diff(compare(lhs.in, rhs.in)); - if (diff != 0) - return diff; - return compare(lhs.out, rhs.out); -} - -/** Equality comparison. */ -/** @{ */ -bool -operator==(Book const& lhs, Book const& rhs) -{ - return (lhs.in == rhs.in) && (lhs.out == rhs.out); -} - -bool -operator!=(Book const& lhs, Book const& rhs) -{ - return (lhs.in != rhs.in) || (lhs.out != rhs.out); -} -/** @} */ - -/** Strict weak ordering. */ -/** @{ */ -bool -operator<(Book const& lhs, Book const& rhs) -{ - int const diff(compare(lhs.in, rhs.in)); - if (diff != 0) - return diff < 0; - return lhs.out < rhs.out; -} - -bool -operator>(Book const& lhs, Book const& rhs) -{ - return rhs < lhs; -} - -bool -operator>=(Book const& lhs, Book const& rhs) -{ - return !(lhs < rhs); -} - -bool -operator<=(Book const& lhs, Book const& rhs) -{ - return !(rhs < lhs); -} - } // namespace ripple diff --git a/src/ripple/protocol/impl/Issue.cpp b/src/ripple/protocol/impl/Issue.cpp index 24a8c764efb..e727cb4cade 100644 --- a/src/ripple/protocol/impl/Issue.cpp +++ b/src/ripple/protocol/impl/Issue.cpp @@ -43,60 +43,4 @@ operator<<(std::ostream& os, Issue const& x) return os; } -/** Ordered comparison. - The assets are ordered first by currency and then by account, - if the currency is not XRP. -*/ -int -compare(Issue const& lhs, Issue const& rhs) -{ - int diff = compare(lhs.currency, rhs.currency); - if (diff != 0) - return diff; - if (isXRP(lhs.currency)) - return 0; - return compare(lhs.account, rhs.account); -} - -/** Equality comparison. */ -/** @{ */ -bool -operator==(Issue const& lhs, Issue const& rhs) -{ - return compare(lhs, rhs) == 0; -} - -bool -operator!=(Issue const& lhs, Issue const& rhs) -{ - return !(lhs == rhs); -} -/** @} */ - -/** Strict weak ordering. 
*/ -/** @{ */ -bool -operator<(Issue const& lhs, Issue const& rhs) -{ - return compare(lhs, rhs) < 0; -} - -bool -operator>(Issue const& lhs, Issue const& rhs) -{ - return rhs < lhs; -} - -bool -operator>=(Issue const& lhs, Issue const& rhs) -{ - return !(lhs < rhs); -} - -bool -operator<=(Issue const& lhs, Issue const& rhs) -{ - return !(rhs < lhs); -} - } // namespace ripple diff --git a/src/test/basics/base_uint_test.cpp b/src/test/basics/base_uint_test.cpp index c1ba7302ae8..9b1f7696dd5 100644 --- a/src/test/basics/base_uint_test.cpp +++ b/src/test/basics/base_uint_test.cpp @@ -57,8 +57,76 @@ struct nonhash struct base_uint_test : beast::unit_test::suite { using test96 = base_uint<96>; - static_assert(std::is_copy_constructible::value, ""); - static_assert(std::is_copy_assignable::value, ""); + static_assert(std::is_copy_constructible::value); + static_assert(std::is_copy_assignable::value); + + void + testComparisons() + { + { + static constexpr std:: + array, 6> + test_args{ + {{"0000000000000000", "0000000000000001"}, + {"0000000000000000", "ffffffffffffffff"}, + {"1234567812345678", "2345678923456789"}, + {"8000000000000000", "8000000000000001"}, + {"aaaaaaaaaaaaaaa9", "aaaaaaaaaaaaaaaa"}, + {"fffffffffffffffe", "ffffffffffffffff"}}}; + + for (auto const& arg : test_args) + { + ripple::base_uint<64> const u{arg.first}, v{arg.second}; + BEAST_EXPECT(u < v); + BEAST_EXPECT(u <= v); + BEAST_EXPECT(u != v); + BEAST_EXPECT(!(u == v)); + BEAST_EXPECT(!(u > v)); + BEAST_EXPECT(!(u >= v)); + BEAST_EXPECT(!(v < u)); + BEAST_EXPECT(!(v <= u)); + BEAST_EXPECT(v != u); + BEAST_EXPECT(!(v == u)); + BEAST_EXPECT(v > u); + BEAST_EXPECT(v >= u); + BEAST_EXPECT(u == u); + BEAST_EXPECT(v == v); + } + } + + { + static constexpr std::array< + std::pair, + 6> + test_args{{ + {"000000000000000000000000", "000000000000000000000001"}, + {"000000000000000000000000", "ffffffffffffffffffffffff"}, + {"0123456789ab0123456789ab", "123456789abc123456789abc"}, + {"555555555555555555555555", "55555555555a555555555555"}, + {"aaaaaaaaaaaaaaa9aaaaaaaa", "aaaaaaaaaaaaaaaaaaaaaaaa"}, + {"fffffffffffffffffffffffe", "ffffffffffffffffffffffff"}, + }}; + + for (auto const& arg : test_args) + { + ripple::base_uint<96> const u{arg.first}, v{arg.second}; + BEAST_EXPECT(u < v); + BEAST_EXPECT(u <= v); + BEAST_EXPECT(u != v); + BEAST_EXPECT(!(u == v)); + BEAST_EXPECT(!(u > v)); + BEAST_EXPECT(!(u >= v)); + BEAST_EXPECT(!(v < u)); + BEAST_EXPECT(!(v <= u)); + BEAST_EXPECT(v != u); + BEAST_EXPECT(!(v == u)); + BEAST_EXPECT(v > u); + BEAST_EXPECT(v >= u); + BEAST_EXPECT(u == u); + BEAST_EXPECT(v == v); + } + } + } void run() override @@ -66,9 +134,12 @@ struct base_uint_test : beast::unit_test::suite testcase("base_uint: general purpose tests"); static_assert( - !std::is_constructible>::value, ""); + !std::is_constructible>::value); static_assert( - !std::is_assignable>::value, ""); + !std::is_assignable>::value); + + testComparisons(); + // used to verify set insertion (hashing required) std::unordered_set> uset; @@ -112,8 +183,8 @@ struct base_uint_test : beast::unit_test::suite BEAST_EXPECT(d == --t); } - BEAST_EXPECT(compare(u, v) < 0); - BEAST_EXPECT(compare(v, u) > 0); + BEAST_EXPECT(u < v); + BEAST_EXPECT(v > u); v = u; BEAST_EXPECT(v == u); From cb08f2b6ec425c4686c8c5c7fc4a3209e91166f2 Mon Sep 17 00:00:00 2001 From: RichardAH Date: Wed, 15 Mar 2023 05:06:30 +0100 Subject: [PATCH 121/508] Allow port numbers be be specified with a colon: (#4328) Port numbers can now be specified using either a colon or a space. 
Examples: 1.2.3.4:51235 1.2.3.4 51235 - In the configuration file, an annoying "gotcha" for node operators is accidentally specifying IP:PORT combinations using a colon. The code previously expected a space, not a colon. It also does not provide good feedback when this operator error is made. - This change simply allows this mistake (using a colon) to be fixed automatically, preserving the intention of the operator. - Add unit tests, which test the functionality when specifying IP:PORT in the configuration file. - The RPCCall test regime is not specific enough to test this functionality, it has been tested by hand. - Ensure IPv6 addresses are not confused for ip:port --------- Co-authored-by: Elliot Lee --- src/ripple/core/impl/Config.cpp | 23 +++++++++ src/ripple/net/impl/RPCCall.cpp | 22 ++++++-- src/ripple/rpc/handlers/Connect.cpp | 8 +-- src/test/core/Config_test.cpp | 79 +++++++++++++++++++++++++++++ 4 files changed, 125 insertions(+), 7 deletions(-) diff --git a/src/ripple/core/impl/Config.cpp b/src/ripple/core/impl/Config.cpp index c2cfb14d21d..12374972866 100644 --- a/src/ripple/core/impl/Config.cpp +++ b/src/ripple/core/impl/Config.cpp @@ -37,6 +37,7 @@ #include #include #include +#include #include #if BOOST_OS_WINDOWS @@ -468,6 +469,28 @@ Config::loadFromString(std::string const& fileContents) if (auto s = getIniFileSection(secConfig, SECTION_SNTP)) SNTP_SERVERS = *s; + // if the user has specified ip:port then replace : with a space. + { + auto replaceColons = [](std::vector& strVec) { + const static std::regex e(":([0-9]+)$"); + for (auto& line : strVec) + { + // skip anything that might be an ipv6 address + if (std::count(line.begin(), line.end(), ':') != 1) + continue; + + std::string result = std::regex_replace(line, e, " $1"); + // sanity check the result of the replace, should be same length + // as input + if (result.size() == line.size()) + line = result; + } + }; + + replaceColons(IPS_FIXED); + replaceColons(IPS); + } + { std::string dbPath; if (getSingleSection(secConfig, "database_path", dbPath, j_)) diff --git a/src/ripple/net/impl/RPCCall.cpp b/src/ripple/net/impl/RPCCall.cpp index b475afe9dfb..b5a167f76b0 100644 --- a/src/ripple/net/impl/RPCCall.cpp +++ b/src/ripple/net/impl/RPCCall.cpp @@ -482,17 +482,31 @@ class RPCParser return jvRequest; } - // connect [port] + // connect [port] Json::Value parseConnect(Json::Value const& jvParams) { Json::Value jvRequest(Json::objectValue); - - jvRequest[jss::ip] = jvParams[0u].asString(); - + std::string ip = jvParams[0u].asString(); if (jvParams.size() == 2) + { + jvRequest[jss::ip] = ip; jvRequest[jss::port] = jvParams[1u].asUInt(); + return jvRequest; + } + + // handle case where there is one argument of the form ip:port + if (std::count(ip.begin(), ip.end(), ':') == 1) + { + std::size_t colon = ip.find_last_of(":"); + jvRequest[jss::ip] = std::string{ip, 0, colon}; + jvRequest[jss::port] = + Json::Value{std::string{ip, colon + 1}}.asUInt(); + return jvRequest; + } + // default case, no port + jvRequest[jss::ip] = ip; return jvRequest; } diff --git a/src/ripple/rpc/handlers/Connect.cpp b/src/ripple/rpc/handlers/Connect.cpp index 532e04087aa..ed366f64b2b 100644 --- a/src/ripple/rpc/handlers/Connect.cpp +++ b/src/ripple/rpc/handlers/Connect.cpp @@ -59,13 +59,15 @@ doConnect(RPC::JsonContext& context) else iPort = DEFAULT_PEER_PORT; - auto ip = - beast::IP::Endpoint::from_string(context.params[jss::ip].asString()); + auto const ip_str = context.params[jss::ip].asString(); + auto ip = beast::IP::Endpoint::from_string(ip_str); 
if (!is_unspecified(ip)) context.app.overlay().connect(ip.at_port(iPort)); - return RPC::makeObjectValue("connecting"); + return RPC::makeObjectValue( + "attempting connection to IP:" + ip_str + + " port: " + std::to_string(iPort)); } } // namespace ripple diff --git a/src/test/core/Config_test.cpp b/src/test/core/Config_test.cpp index da29fafaca2..b455762dea4 100644 --- a/src/test/core/Config_test.cpp +++ b/src/test/core/Config_test.cpp @@ -856,6 +856,84 @@ r.ripple.com 51235 cfg.section(SECTION_IPS_FIXED).values().size() == 2); } + void + testColons() + { + Config cfg; + /* NOTE: this string includes some explicit + * space chars in order to verify proper trimming */ + std::string toLoad(R"( +[port_rpc])" + "\x20" + R"( +# comment + # indented comment +)" + "\x20\x20" + R"( +[ips])" + "\x20" + R"( +r.ripple.com:51235 + + [ips_fixed])" + "\x20\x20" + R"( + # COMMENT + s1.ripple.com:51235 + s2.ripple.com 51235 + anotherserversansport + anotherserverwithport:12 + 1.1.1.1:1 + 1.1.1.1 1 + 12.34.12.123:12345 + 12.34.12.123 12345 + :: + 2001:db8:: + ::1 + ::1:12345 + [::1]:12345 + 2001:db8:3333:4444:5555:6666:7777:8888:12345 + [2001:db8:3333:4444:5555:6666:7777:8888]:1 + + +)"); + cfg.loadFromString(toLoad); + BEAST_EXPECT( + cfg.exists("port_rpc") && cfg.section("port_rpc").lines().empty() && + cfg.section("port_rpc").values().empty()); + BEAST_EXPECT( + cfg.exists(SECTION_IPS) && + cfg.section(SECTION_IPS).lines().size() == 1 && + cfg.section(SECTION_IPS).values().size() == 1); + BEAST_EXPECT( + cfg.exists(SECTION_IPS_FIXED) && + cfg.section(SECTION_IPS_FIXED).lines().size() == 15 && + cfg.section(SECTION_IPS_FIXED).values().size() == 15); + BEAST_EXPECT(cfg.IPS[0] == "r.ripple.com 51235"); + + BEAST_EXPECT(cfg.IPS_FIXED[0] == "s1.ripple.com 51235"); + BEAST_EXPECT(cfg.IPS_FIXED[1] == "s2.ripple.com 51235"); + BEAST_EXPECT(cfg.IPS_FIXED[2] == "anotherserversansport"); + BEAST_EXPECT(cfg.IPS_FIXED[3] == "anotherserverwithport 12"); + BEAST_EXPECT(cfg.IPS_FIXED[4] == "1.1.1.1 1"); + BEAST_EXPECT(cfg.IPS_FIXED[5] == "1.1.1.1 1"); + BEAST_EXPECT(cfg.IPS_FIXED[6] == "12.34.12.123 12345"); + BEAST_EXPECT(cfg.IPS_FIXED[7] == "12.34.12.123 12345"); + + // all ipv6 should be ignored by colon replacer, howsoever formated + BEAST_EXPECT(cfg.IPS_FIXED[8] == "::"); + BEAST_EXPECT(cfg.IPS_FIXED[9] == "2001:db8::"); + BEAST_EXPECT(cfg.IPS_FIXED[10] == "::1"); + BEAST_EXPECT(cfg.IPS_FIXED[11] == "::1:12345"); + BEAST_EXPECT(cfg.IPS_FIXED[12] == "[::1]:12345"); + BEAST_EXPECT( + cfg.IPS_FIXED[13] == + "2001:db8:3333:4444:5555:6666:7777:8888:12345"); + BEAST_EXPECT( + cfg.IPS_FIXED[14] == "[2001:db8:3333:4444:5555:6666:7777:8888]:1"); + } + void testComments() { @@ -1147,6 +1225,7 @@ r.ripple.com 51235 testSetup(true); testPort(); testWhitespace(); + testColons(); testComments(); testGetters(); testAmendment(); From 9309b573648c8f0c3f0e556d17385ed3fcf69c5f Mon Sep 17 00:00:00 2001 From: Chenna Keshava B S <21219765+ckeshava@users.noreply.github.com> Date: Tue, 14 Mar 2023 21:10:56 -0700 Subject: [PATCH 122/508] Rectify the import paths of boost/iterator: (#4293) - MSVC 19.x reported a warning about import paths in boost for function_output_iterator class (boost::function_output_iterator). - Eliminate that warning by updating the import paths, as suggested by the compiler warnings. 
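For illustration only (not part of this change): the sketch below shows the non-deprecated include path together with a typical use of `boost::make_function_output_iterator`. The sample program is a hypothetical minimal example, not code from the rippled sources; the actual patch, shown in the diff that follows, only swaps the include line.

```cpp
// Minimal sketch, not taken from the rippled sources. The header now lives
// under boost/iterator/; the old boost/function_output_iterator.hpp path is
// deprecated in recent Boost releases and forwards to this one, which is
// what produced the MSVC warning mentioned above.
#include <boost/iterator/function_output_iterator.hpp>

#include <algorithm>
#include <iostream>
#include <vector>

int main()
{
    std::vector<int> values{1, 2, 3};

    // Every element written through the iterator is handed to the callable.
    std::copy(
        values.begin(),
        values.end(),
        boost::make_function_output_iterator(
            [](int v) { std::cout << v << '\n'; }));
}
```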
--- src/ripple/overlay/impl/ProtocolVersion.cpp | 2 +- src/test/csf/Tx.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ripple/overlay/impl/ProtocolVersion.cpp b/src/ripple/overlay/impl/ProtocolVersion.cpp index 9a549b56309..fbd48474420 100644 --- a/src/ripple/overlay/impl/ProtocolVersion.cpp +++ b/src/ripple/overlay/impl/ProtocolVersion.cpp @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/test/csf/Tx.h b/src/test/csf/Tx.h index e65897ffa89..5ccd910b80d 100644 --- a/src/test/csf/Tx.h +++ b/src/test/csf/Tx.h @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include From 0f1ffff068542d025b5590616839559726a52f14 Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Tue, 14 Mar 2023 21:21:50 -0700 Subject: [PATCH 123/508] Set version to 1.10.1-b1 --- src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index 03e83dd36c9..666e0750574 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.10.0" +char const* const versionString = "1.10.1-b1" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 10555faa928bc02400c22f1856ad1846b5d52f1a Mon Sep 17 00:00:00 2001 From: RichardAH Date: Thu, 16 Mar 2023 18:25:40 +0100 Subject: [PATCH 124/508] fix(gateway_balances): handle overflow exception: (#4355) * Prevent internal error by catching overflow exception in `gateway_balances`. * Treat `gateway_balances` obligations overflow as max (largest valid) `STAmount`. * Note that very large sums of STAmount are approximations regardless. --------- Co-authored-by: Scott Schurr --- src/ripple/rpc/handlers/GatewayBalances.cpp | 18 ++++++- src/test/rpc/GatewayBalances_test.cpp | 56 +++++++++++++++++++++ 2 files changed, 73 insertions(+), 1 deletion(-) diff --git a/src/ripple/rpc/handlers/GatewayBalances.cpp b/src/ripple/rpc/handlers/GatewayBalances.cpp index d0770f31edf..3a422c6e965 100644 --- a/src/ripple/rpc/handlers/GatewayBalances.cpp +++ b/src/ripple/rpc/handlers/GatewayBalances.cpp @@ -184,7 +184,23 @@ doGatewayBalances(RPC::JsonContext& context) bal = -rs->getBalance(); } else - bal -= rs->getBalance(); + { + try + { + bal -= rs->getBalance(); + } + catch (std::runtime_error const&) + { + // Presumably the exception was caused by overflow. + // On overflow return the largest valid STAmount. + // Very large sums of STAmount are approximations + // anyway. 
+ bal = STAmount( + bal.issue(), + STAmount::cMaxValue, + STAmount::cMaxOffset); + } + } } }); } diff --git a/src/test/rpc/GatewayBalances_test.cpp b/src/test/rpc/GatewayBalances_test.cpp index 6b5dcdb8a0f..c14ec0f043c 100644 --- a/src/test/rpc/GatewayBalances_test.cpp +++ b/src/test/rpc/GatewayBalances_test.cpp @@ -148,6 +148,60 @@ class GatewayBalances_test : public beast::unit_test::suite } } + void + testGWBOverflow() + { + using namespace std::chrono_literals; + using namespace jtx; + Env env(*this); + + // Gateway account and assets + Account const alice{"alice"}; + env.fund(XRP(10000), alice); + env.close(); + auto USD = alice["USD"]; + + // The largest valid STAmount of USD: + STAmount const maxUSD( + USD.issue(), STAmount::cMaxValue, STAmount::cMaxOffset); + + // Create a hotwallet + Account const hw{"hw"}; + env.fund(XRP(10000), hw); + env(trust(hw, maxUSD)); + env.close(); + env(pay(alice, hw, maxUSD)); + + // Create some clients + Account const bob{"bob"}; + env.fund(XRP(10000), bob); + env(trust(bob, maxUSD)); + env.close(); + env(pay(alice, bob, maxUSD)); + + Account const charley{"charley"}; + env.fund(XRP(10000), charley); + env(trust(charley, maxUSD)); + env.close(); + env(pay(alice, charley, maxUSD)); + + env.close(); + + auto wsc = makeWSClient(env.app().config()); + + Json::Value query; + query[jss::account] = alice.human(); + query[jss::hotwallet] = hw.human(); + + // Note that the sum of bob's and charley's USD balances exceeds + // the amount that can be represented in an STAmount. Nevertheless + // we get a valid "obligations" that shows the maximum valid + // STAmount. + auto jv = wsc->invoke("gateway_balances", query); + expect(jv[jss::status] == "success"); + expect(jv[jss::result][jss::obligations]["USD"] == maxUSD.getText()); + } + void run() override { @@ -155,6 +209,8 @@ class GatewayBalances_test : public beast::unit_test::suite auto const sa = supported_amendments(); testGWB(sa - featureFlowCross); testGWB(sa); + + testGWBOverflow(); } }; From 1c9df69b33e2cc3601cdf3a3796f60d5fda51c76 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Thu, 16 Mar 2023 10:32:22 -0700 Subject: [PATCH 125/508] fix(ValidatorSite): handle rare null pointer dereference in timeout: (#4420) In rare circumstances, both `onRequestTimeout` and the response handler (`onSiteFetch` or `onTextFetch`) can get queued and processed. In all observed cases, the response handler processes a network error. `onRequestTimeout` usually runs first, but on rare occasions, the response handler runs first, which leaves `activeResource` empty. --- src/ripple/app/misc/impl/ValidatorSite.cpp | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/ripple/app/misc/impl/ValidatorSite.cpp b/src/ripple/app/misc/impl/ValidatorSite.cpp index 2dfecb1f235..a73d348ee04 100644 --- a/src/ripple/app/misc/impl/ValidatorSite.cpp +++ b/src/ripple/app/misc/impl/ValidatorSite.cpp @@ -316,8 +316,19 @@ ValidatorSite::onRequestTimeout(std::size_t siteIdx, error_code const& ec) { std::lock_guard lock_site{sites_mutex_}; - JLOG(j_.warn()) << "Request for " << sites_[siteIdx].activeResource->uri - << " took too long"; + // In some circumstances, both this function and the response + // handler (onSiteFetch or onTextFetch) can get queued and + // processed. In all observed cases, the response handler + // processes a network error. Usually, this function runs first, + // but on extremely rare occasions, the response handler can run + // first, which will leave activeResource empty. 
+ auto const& site = sites_[siteIdx]; + if (site.activeResource) + JLOG(j_.warn()) << "Request for " << site.activeResource->uri + << " took too long"; + else + JLOG(j_.error()) << "Request took too long, but a response has " + "already been processed"; } std::lock_guard lock_state{state_mutex_}; From 150d4a47e441f3941667cffdc269e6c22b76205e Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Thu, 16 Mar 2023 15:00:07 -0700 Subject: [PATCH 126/508] refactor: optimize NodeStore object conversion: (#4353) When writing objects to the NodeStore, we need to convert them from the in-memory format to the binary format used by the node store. The conversion is handled by the `EncodedBlob` class, which is only instantiated on the stack. Coupled with the fact that most objects are under 1024 bytes in size, this presents an opportunity to elide a memory allocation in a critical path. This commit also simplifies the interface of `EncodedBlob` and eliminates a subtle corner case that could result in dangling pointers. These changes are not expected to cause a significant reduction in memory usage. The change avoids the use of a `std::shared_ptr` when unnecessary and tries to use stack-based memory allocation instead of the heap whenever possible. This is a net gain both in terms of memory usage (lower fragmentation) and performance (less work to do at runtime). --- Builds/CMake/RippledCore.cmake | 1 - .../nodestore/backend/CassandraFactory.cpp | 8 +- src/ripple/nodestore/backend/NuDBFactory.cpp | 3 +- .../nodestore/backend/RocksDBFactory.cpp | 4 +- src/ripple/nodestore/impl/EncodedBlob.cpp | 42 ------- src/ripple/nodestore/impl/EncodedBlob.h | 103 +++++++++++++++--- src/test/nodestore/Basics_test.cpp | 3 +- 7 files changed, 92 insertions(+), 72 deletions(-) delete mode 100644 src/ripple/nodestore/impl/EncodedBlob.cpp diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index dca4720e381..c1d1e21d4d2 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -532,7 +532,6 @@ target_sources (rippled PRIVATE src/ripple/nodestore/impl/DeterministicShard.cpp src/ripple/nodestore/impl/DecodedBlob.cpp src/ripple/nodestore/impl/DummyScheduler.cpp - src/ripple/nodestore/impl/EncodedBlob.cpp src/ripple/nodestore/impl/ManagerImp.cpp src/ripple/nodestore/impl/NodeObject.cpp src/ripple/nodestore/impl/Shard.cpp diff --git a/src/ripple/nodestore/backend/CassandraFactory.cpp b/src/ripple/nodestore/backend/CassandraFactory.cpp index c8d0c139c44..d13cd71827b 100644 --- a/src/ripple/nodestore/backend/CassandraFactory.cpp +++ b/src/ripple/nodestore/backend/CassandraFactory.cpp @@ -670,7 +670,7 @@ class CassandraBackend : public Backend // confirmed persisted. Otherwise, it can become deleted // prematurely if other copies are removed from caches. std::shared_ptr no; - NodeStore::EncodedBlob e; + std::optional e; std::pair compressed; std::chrono::steady_clock::time_point begin; // The data is stored in this buffer. 
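To make the allocation-elision strategy described above concrete, here is a minimal, self-contained sketch. The class name `StackFirstBlob`, the members, and the 1024-byte threshold are illustrative assumptions rather than the actual rippled API; the real `EncodedBlob` (which also prepends the 9-byte header and falls back to a raw `new[]` rather than a `std::vector`) appears in the `EncodedBlob.h` hunk below.

```cpp
#include <array>
#include <cstdint>
#include <cstring>
#include <vector>

// Illustrative sketch of the "stack first, heap only when necessary" idea:
// payloads that fit in the fixed-size member array need no dynamic
// allocation, so the common case (small objects) stays on the stack.
class StackFirstBlob
{
    std::array<std::uint8_t, 1024> small_;  // covers the common case
    std::vector<std::uint8_t> large_;       // heap fallback for big payloads
    std::uint8_t* data_ = nullptr;
    std::size_t size_ = 0;

public:
    StackFirstBlob(std::uint8_t const* src, std::size_t n) : size_(n)
    {
        if (n <= small_.size())
            data_ = small_.data();
        else
        {
            large_.resize(n);
            data_ = large_.data();
        }

        if (n != 0)
            std::memcpy(data_, src, n);
    }

    std::uint8_t const*
    data() const noexcept
    {
        return data_;
    }

    std::size_t
    size() const noexcept
    {
        return size_;
    }
};
```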
The void* in the above member @@ -686,10 +686,10 @@ class CassandraBackend : public Backend std::atomic& retries) : backend(f), no(nobj), totalWriteRetries(retries) { - e.prepare(no); + e.emplace(no); compressed = - NodeStore::nodeobject_compress(e.getData(), e.getSize(), bf); + NodeStore::nodeobject_compress(e->getData(), e->getSize(), bf); } }; @@ -722,7 +722,7 @@ class CassandraBackend : public Backend CassError rc = cass_statement_bind_bytes( statement, 0, - static_cast(data.e.getKey()), + static_cast(data.e->getKey()), keyBytes_); if (rc != CASS_OK) { diff --git a/src/ripple/nodestore/backend/NuDBFactory.cpp b/src/ripple/nodestore/backend/NuDBFactory.cpp index 2b20b574a2c..30b848e82af 100644 --- a/src/ripple/nodestore/backend/NuDBFactory.cpp +++ b/src/ripple/nodestore/backend/NuDBFactory.cpp @@ -250,8 +250,7 @@ class NuDBBackend : public Backend void do_insert(std::shared_ptr const& no) { - EncodedBlob e; - e.prepare(no); + EncodedBlob e(no); nudb::error_code ec; nudb::detail::buffer bf; auto const result = nodeobject_compress(e.getData(), e.getSize(), bf); diff --git a/src/ripple/nodestore/backend/RocksDBFactory.cpp b/src/ripple/nodestore/backend/RocksDBFactory.cpp index 1a9e529e103..b34560dba89 100644 --- a/src/ripple/nodestore/backend/RocksDBFactory.cpp +++ b/src/ripple/nodestore/backend/RocksDBFactory.cpp @@ -352,11 +352,9 @@ class RocksDBBackend : public Backend, public BatchWriter::Callback assert(m_db); rocksdb::WriteBatch wb; - EncodedBlob encoded; - for (auto const& e : batch) { - encoded.prepare(e); + EncodedBlob encoded(e); wb.Put( rocksdb::Slice( diff --git a/src/ripple/nodestore/impl/EncodedBlob.cpp b/src/ripple/nodestore/impl/EncodedBlob.cpp deleted file mode 100644 index 4ec15b10209..00000000000 --- a/src/ripple/nodestore/impl/EncodedBlob.cpp +++ /dev/null @@ -1,42 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include -#include - -namespace ripple { -namespace NodeStore { - -void -EncodedBlob::prepare(std::shared_ptr const& object) -{ - m_key = object->getHash().begin(); - - auto ret = m_data.alloc(object->getData().size() + 9); - - // the first 8 bytes are unused - std::memset(ret, 0, 8); - - ret[8] = static_cast(object->getType()); - - std::memcpy(ret + 9, object->getData().data(), object->getData().size()); -} - -} // namespace NodeStore -} // namespace ripple diff --git a/src/ripple/nodestore/impl/EncodedBlob.h b/src/ripple/nodestore/impl/EncodedBlob.h index 2094b52d338..77f8fbf3c6c 100644 --- a/src/ripple/nodestore/impl/EncodedBlob.h +++ b/src/ripple/nodestore/impl/EncodedBlob.h @@ -22,42 +22,109 @@ #include #include -#include +#include +#include +#include +#include +#include namespace ripple { namespace NodeStore { -/** Utility for producing flattened node objects. - @note This defines the database format of a NodeObject! -*/ -// VFALCO TODO Make allocator aware and use short_alloc -struct EncodedBlob +/** Convert a NodeObject from in-memory to database format. + + The (suboptimal) database format consists of: + + - 8 prefix bytes which will typically be 0, but don't assume that's the + case; earlier versions of the code would use these bytes to store the + ledger index either once or twice. + - A single byte denoting the type of the object. + - The payload. + + @note This class is typically instantiated on the stack, so the size of + the object does not matter as much as it normally would since the + allocation is, effectively, free. + + We leverage that fact to preallocate enough memory to handle most + payloads as part of this object, eliminating the need for dynamic + allocation. As of this writing ~94% of objects require fewer than + 1024 payload bytes. + */ + +class EncodedBlob { + /** The 32-byte key of the serialized object. */ + std::array key_; + + /** A pre-allocated buffer for the serialized object. + + The buffer is large enough for the 9 byte prefix and at least + 1024 more bytes. The precise size is calculated automatically + at compile time so as to avoid wasting space on padding bytes. + */ + std::array< + std::uint8_t, + boost::alignment::align_up(9 + 1024, alignof(std::uint32_t))> + payload_; + + /** The size of the serialized data. */ + std::uint32_t size_; + + /** A pointer to the serialized data. + + This may point to the pre-allocated buffer (if it is sufficiently + large) or to a dynamically allocated buffer. + */ + std::uint8_t* const ptr_; + public: - void - prepare(std::shared_ptr const& object); + explicit EncodedBlob(std::shared_ptr const& obj) + : size_([&obj]() { + assert(obj); + + if (!obj) + throw std::runtime_error( + "EncodedBlob: unseated std::shared_ptr used."); + + return obj->getData().size() + 9; + }()) + , ptr_( + (size_ <= payload_.size()) ? 
payload_.data() + : new std::uint8_t[size_]) + { + std::fill_n(ptr_, 8, std::uint8_t{0}); + ptr_[8] = static_cast(obj->getType()); + std::copy_n(obj->getData().data(), obj->getData().size(), ptr_ + 9); + std::copy_n(obj->getHash().data(), obj->getHash().size(), key_.data()); + } - void const* + ~EncodedBlob() + { + assert( + ((ptr_ == payload_.data()) && (size_ <= payload_.size())) || + ((ptr_ != payload_.data()) && (size_ > payload_.size()))); + + if (ptr_ != payload_.data()) + delete[] ptr_; + } + + [[nodiscard]] void const* getKey() const noexcept { - return m_key; + return static_cast(key_.data()); } - std::size_t + [[nodiscard]] std::size_t getSize() const noexcept { - return m_data.size(); + return size_; } - void const* + [[nodiscard]] void const* getData() const noexcept { - return reinterpret_cast(m_data.data()); + return static_cast(ptr_); } - -private: - void const* m_key; - Buffer m_data; }; } // namespace NodeStore diff --git a/src/test/nodestore/Basics_test.cpp b/src/test/nodestore/Basics_test.cpp index e9911980b7f..92f2ae15aaf 100644 --- a/src/test/nodestore/Basics_test.cpp +++ b/src/test/nodestore/Basics_test.cpp @@ -56,10 +56,9 @@ class NodeStoreBasic_test : public TestBase auto batch = createPredictableBatch(numObjectsToTest, seedValue); - EncodedBlob encoded; for (int i = 0; i < batch.size(); ++i) { - encoded.prepare(batch[i]); + EncodedBlob encoded(batch[i]); DecodedBlob decoded( encoded.getKey(), encoded.getData(), encoded.getSize()); From 9b2d563decfad222a5a7e6d47dd05f935b95a07f Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Mon, 20 Mar 2023 10:22:15 -0700 Subject: [PATCH 127/508] fix: support RPC markers for any ledger object: (#4361) There were situations where `marker`s returned by `account_lines` did not work on subsequent requests, returning "Invalid Parameters". This was caused by the optimization implemented in "Enforce account RPC limits by account objects traversed": https://github.com/XRPLF/rippled/pull/4040/commits/e28989638d56d31cc9225da220a5bbe5bb1a046e?diff=unified&w=1 Previously, the ledger traversal would find up to `limit` account lines, and if there were more, the marker would be derived from the key of the next account line. After the change, ledger traversal would _consider_ up to `limit` account objects of any kind found in the account's directory structure. If there were more, the marker would be derived from the key of the next object, regardless of type. With this optimization, it is expected that `account_lines` may return fewer than `limit` account lines - even 0 - along with a marker indicating that there are may be more available. The problem is that this optimization did not update the `RPC::isOwnedByAccount` helper function to handle those other object types. Additionally, XLS-20 added `ltNFTOKEN_OFFER` ledger objects to objects that have been added to the account's directory structure, but did not update `RPC::isOwnedByAccount` to be able to handle those objects. The `marker` provided in the example for #4354 includes the key for an `ltNFTOKEN_OFFER`. When that `marker` is used on subsequent calls, it is not recognized as valid, and so the request fails. * Add unit test that walks all the object types and verifies that all of their indexes can work as a marker. 
* Fix #4340 * Fix #4354 --- src/ripple/rpc/handlers/AccountChannels.cpp | 2 +- src/ripple/rpc/handlers/AccountLines.cpp | 2 +- src/ripple/rpc/handlers/AccountOffers.cpp | 2 +- src/ripple/rpc/impl/RPCHelpers.cpp | 19 +- src/ripple/rpc/impl/RPCHelpers.h | 2 +- src/test/rpc/AccountLinesRPC_test.cpp | 238 +++++++++++++++++++- 6 files changed, 257 insertions(+), 8 deletions(-) diff --git a/src/ripple/rpc/handlers/AccountChannels.cpp b/src/ripple/rpc/handlers/AccountChannels.cpp index e5059d3ffc5..9e5c9ca2c46 100644 --- a/src/ripple/rpc/handlers/AccountChannels.cpp +++ b/src/ripple/rpc/handlers/AccountChannels.cpp @@ -151,7 +151,7 @@ doAccountChannels(RPC::JsonContext& context) if (!sle) return rpcError(rpcINVALID_PARAMS); - if (!RPC::isOwnedByAccount(*ledger, sle, accountID)) + if (!RPC::isRelatedToAccount(*ledger, sle, accountID)) return rpcError(rpcINVALID_PARAMS); } diff --git a/src/ripple/rpc/handlers/AccountLines.cpp b/src/ripple/rpc/handlers/AccountLines.cpp index 364d40673fa..adba2acaa72 100644 --- a/src/ripple/rpc/handlers/AccountLines.cpp +++ b/src/ripple/rpc/handlers/AccountLines.cpp @@ -177,7 +177,7 @@ doAccountLines(RPC::JsonContext& context) if (!sle) return rpcError(rpcINVALID_PARAMS); - if (!RPC::isOwnedByAccount(*ledger, sle, accountID)) + if (!RPC::isRelatedToAccount(*ledger, sle, accountID)) return rpcError(rpcINVALID_PARAMS); } diff --git a/src/ripple/rpc/handlers/AccountOffers.cpp b/src/ripple/rpc/handlers/AccountOffers.cpp index e957fe8a8e0..409d071fb02 100644 --- a/src/ripple/rpc/handlers/AccountOffers.cpp +++ b/src/ripple/rpc/handlers/AccountOffers.cpp @@ -128,7 +128,7 @@ doAccountOffers(RPC::JsonContext& context) if (!sle) return rpcError(rpcINVALID_PARAMS); - if (!RPC::isOwnedByAccount(*ledger, sle, accountID)) + if (!RPC::isRelatedToAccount(*ledger, sle, accountID)) return rpcError(rpcINVALID_PARAMS); } diff --git a/src/ripple/rpc/impl/RPCHelpers.cpp b/src/ripple/rpc/impl/RPCHelpers.cpp index 3d1bfe6375b..ad84d7b12f5 100644 --- a/src/ripple/rpc/impl/RPCHelpers.cpp +++ b/src/ripple/rpc/impl/RPCHelpers.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -109,7 +110,7 @@ getStartHint(std::shared_ptr const& sle, AccountID const& accountID) } bool -isOwnedByAccount( +isRelatedToAccount( ReadView const& ledger, std::shared_ptr const& sle, AccountID const& accountID) @@ -121,13 +122,27 @@ isOwnedByAccount( } else if (sle->isFieldPresent(sfAccount)) { - return sle->getAccountID(sfAccount) == accountID; + // If there's an sfAccount present, also test the sfDestination, if + // present. This will match objects such as Escrows (ltESCROW), Payment + // Channels (ltPAYCHAN), and Checks (ltCHECK) because those are added to + // the Destination account's directory. It intentionally EXCLUDES + // NFToken Offers (ltNFTOKEN_OFFER). NFToken Offers are NOT added to the + // Destination account's directory. + return sle->getAccountID(sfAccount) == accountID || + (sle->isFieldPresent(sfDestination) && + sle->getAccountID(sfDestination) == accountID); } else if (sle->getType() == ltSIGNER_LIST) { Keylet const accountSignerList = keylet::signers(accountID); return sle->key() == accountSignerList.key; } + else if (sle->getType() == ltNFTOKEN_OFFER) + { + // Do not check the sfDestination field. NFToken Offers are NOT added to + // the Destination account's directory. 
+ return sle->getAccountID(sfOwner) == accountID; + } return false; } diff --git a/src/ripple/rpc/impl/RPCHelpers.h b/src/ripple/rpc/impl/RPCHelpers.h index 2aa62f3474a..12f27641ddf 100644 --- a/src/ripple/rpc/impl/RPCHelpers.h +++ b/src/ripple/rpc/impl/RPCHelpers.h @@ -86,7 +86,7 @@ getStartHint(std::shared_ptr const& sle, AccountID const& accountID); * @param account - The account being tested for SLE ownership. */ bool -isOwnedByAccount( +isRelatedToAccount( ReadView const& ledger, std::shared_ptr const& sle, AccountID const& accountID); diff --git a/src/test/rpc/AccountLinesRPC_test.cpp b/src/test/rpc/AccountLinesRPC_test.cpp index bdd376b3aae..cdc61922097 100644 --- a/src/test/rpc/AccountLinesRPC_test.cpp +++ b/src/test/rpc/AccountLinesRPC_test.cpp @@ -33,7 +33,7 @@ class AccountLinesRPC_test : public beast::unit_test::suite void testAccountLines() { - testcase("acccount_lines"); + testcase("account_lines"); using namespace test::jtx; Env env(*this); @@ -524,11 +524,244 @@ class AccountLinesRPC_test : public beast::unit_test::suite RPC::make_error(rpcINVALID_PARAMS)[jss::error_message]); } + void + testAccountLinesWalkMarkers() + { + testcase("Marker can point to any appropriate ledger entry type"); + using namespace test::jtx; + using namespace std::chrono_literals; + Env env(*this); + + // The goal of this test is observe account_lines RPC calls return an + // error message when the SLE pointed to by the marker is not owned by + // the Account being traversed. + // + // To start, we'll create an environment with some trust lines, offers + // and a signers list. + Account const alice{"alice"}; + Account const becky{"becky"}; + Account const gw1{"gw1"}; + env.fund(XRP(10000), alice, becky, gw1); + env.close(); + + // A couple of helper lambdas + auto escrow = [&env]( + Account const& account, + Account const& to, + STAmount const& amount) { + Json::Value jv; + jv[jss::TransactionType] = jss::EscrowCreate; + jv[jss::Flags] = tfUniversal; + jv[jss::Account] = account.human(); + jv[jss::Destination] = to.human(); + jv[jss::Amount] = amount.getJson(JsonOptions::none); + NetClock::time_point finish = env.now() + 1s; + jv[sfFinishAfter.jsonName] = finish.time_since_epoch().count(); + return jv; + }; + + auto payChan = [](Account const& account, + Account const& to, + STAmount const& amount, + NetClock::duration const& settleDelay, + PublicKey const& pk) { + Json::Value jv; + jv[jss::TransactionType] = jss::PaymentChannelCreate; + jv[jss::Flags] = tfUniversal; + jv[jss::Account] = account.human(); + jv[jss::Destination] = to.human(); + jv[jss::Amount] = amount.getJson(JsonOptions::none); + jv["SettleDelay"] = settleDelay.count(); + jv["PublicKey"] = strHex(pk.slice()); + return jv; + }; + + // Test all available object types. Not all of these objects will be + // included in the search, nor found by `account_objects`. If that ever + // changes for any reason, this test will help catch that. 
+ // + // SignerList, for alice + Account const bogie{"bogie"}; + env(signers(alice, 2, {{bogie, 3}})); + env.close(); + + // SignerList, includes alice + env(signers(becky, 2, {{alice, 3}})); + env.close(); + + // Trust lines + auto const EUR = gw1["EUR"]; + env(trust(alice, EUR(200))); + env(trust(becky, EUR(200))); + env.close(); + + // Escrow, in each direction + env(escrow(alice, becky, XRP(1000))); + env(escrow(becky, alice, XRP(1000))); + + // Pay channels, in each direction + env(payChan(alice, becky, XRP(1000), 100s, alice.pk())); + env(payChan(becky, alice, XRP(1000), 100s, becky.pk())); + + // Mint NFTs, for each account + uint256 const aliceNFtokenID = + token::getNextID(env, alice, 0, tfTransferable); + env(token::mint(alice, 0), txflags(tfTransferable)); + + uint256 const beckyNFtokenID = + token::getNextID(env, becky, 0, tfTransferable); + env(token::mint(becky, 0), txflags(tfTransferable)); + + // NFT Offers, for each other's NFTs + env(token::createOffer(alice, beckyNFtokenID, drops(1)), + token::owner(becky)); + env(token::createOffer(becky, aliceNFtokenID, drops(1)), + token::owner(alice)); + + env(token::createOffer(becky, beckyNFtokenID, drops(1)), + txflags(tfSellNFToken), + token::destination(alice)); + env(token::createOffer(alice, aliceNFtokenID, drops(1)), + txflags(tfSellNFToken), + token::destination(becky)); + + env(token::createOffer(gw1, beckyNFtokenID, drops(1)), + token::owner(becky), + token::destination(alice)); + env(token::createOffer(gw1, aliceNFtokenID, drops(1)), + token::owner(alice), + token::destination(becky)); + + env(token::createOffer(becky, beckyNFtokenID, drops(1)), + txflags(tfSellNFToken)); + env(token::createOffer(alice, aliceNFtokenID, drops(1)), + txflags(tfSellNFToken)); + + // Checks, in each direction + env(check::create(alice, becky, XRP(50))); + env(check::create(becky, alice, XRP(50))); + + // Deposit preauth, in each direction + env(deposit::auth(alice, becky)); + env(deposit::auth(becky, alice)); + + // Offers, one where alice is the owner, and one where alice is the + // issuer + auto const USDalice = alice["USD"]; + env(offer(alice, EUR(10), XRP(100))); + env(offer(becky, USDalice(10), XRP(100))); + + // Tickets + env(ticket::create(alice, 2)); + + // Add another trustline for good measure + auto const BTCbecky = becky["BTC"]; + env(trust(alice, BTCbecky(200))); + + env.close(); + + { + // Now make repeated calls to `account_lines` with a limit of 1. + // That should iterate all of alice's relevant objects, even though + // the list will be empty for most calls. 
+ auto getNextLine = [](Env& env, + Account const& alice, + std::optional const marker) { + Json::Value params(Json::objectValue); + params[jss::account] = alice.human(); + params[jss::limit] = 1; + if (marker) + params[jss::marker] = *marker; + + return env.rpc("json", "account_lines", to_string(params)); + }; + + auto aliceLines = getNextLine(env, alice, std::nullopt); + constexpr std::size_t expectedIterations = 16; + constexpr std::size_t expectedLines = 2; + std::size_t foundLines = 0; + + auto hasMarker = [](auto const& aliceLines) { + return aliceLines[jss::result].isMember(jss::marker); + }; + auto marker = [](auto const& aliceLines) { + return aliceLines[jss::result][jss::marker].asString(); + }; + auto checkLines = [](auto const& aliceLines) { + return aliceLines.isMember(jss::result) && + !aliceLines[jss::result].isMember(jss::error_message) && + aliceLines[jss::result].isMember(jss::lines) && + aliceLines[jss::result][jss::lines].isArray() && + aliceLines[jss::result][jss::lines].size() <= 1; + }; + + BEAST_EXPECT(hasMarker(aliceLines)); + BEAST_EXPECT(checkLines(aliceLines)); + BEAST_EXPECT(aliceLines[jss::result][jss::lines].size() == 0); + + int iterations = 1; + + while (hasMarker(aliceLines)) + { + // Iterate through the markers + aliceLines = getNextLine(env, alice, marker(aliceLines)); + BEAST_EXPECT(checkLines(aliceLines)); + foundLines += aliceLines[jss::result][jss::lines].size(); + ++iterations; + } + BEAST_EXPECT(expectedLines == foundLines); + + Json::Value const aliceObjects = env.rpc( + "json", + "account_objects", + R"({"account": ")" + alice.human() + + R"(", )" + R"("limit": 200})"); + BEAST_EXPECT(aliceObjects.isMember(jss::result)); + BEAST_EXPECT( + !aliceObjects[jss::result].isMember(jss::error_message)); + BEAST_EXPECT( + aliceObjects[jss::result].isMember(jss::account_objects)); + BEAST_EXPECT( + aliceObjects[jss::result][jss::account_objects].isArray()); + // account_objects does not currently return NFTPages. If + // that ever changes, without also changing account_lines, + // this test will need to be updated. + BEAST_EXPECT( + aliceObjects[jss::result][jss::account_objects].size() == + iterations); + // If ledger object association ever changes, for whatever + // reason, this test will need to be updated. + BEAST_EXPECTS( + iterations == expectedIterations, std::to_string(iterations)); + + // Get becky's objects just to confirm that they're symmetrical + Json::Value const beckyObjects = env.rpc( + "json", + "account_objects", + R"({"account": ")" + becky.human() + + R"(", )" + R"("limit": 200})"); + BEAST_EXPECT(beckyObjects.isMember(jss::result)); + BEAST_EXPECT( + !beckyObjects[jss::result].isMember(jss::error_message)); + BEAST_EXPECT( + beckyObjects[jss::result].isMember(jss::account_objects)); + BEAST_EXPECT( + beckyObjects[jss::result][jss::account_objects].isArray()); + // becky should have the same number of objects as alice, except the + // 2 tickets that only alice created. 
+ BEAST_EXPECT( + beckyObjects[jss::result][jss::account_objects].size() == + aliceObjects[jss::result][jss::account_objects].size() - 2); + } + } + // test API V2 void testAccountLines2() { - testcase("V2: acccount_lines"); + testcase("V2: account_lines"); using namespace test::jtx; Env env(*this); @@ -1234,6 +1467,7 @@ class AccountLinesRPC_test : public beast::unit_test::suite testAccountLines(); testAccountLinesMarker(); testAccountLineDelete(); + testAccountLinesWalkMarkers(); testAccountLines2(); testAccountLineDelete2(); } From 305c9a8d61b919f8be18ff5345ccd7b050c64ddd Mon Sep 17 00:00:00 2001 From: Shawn Xie <35279399+shawnxie999@users.noreply.github.com> Date: Mon, 20 Mar 2023 17:47:46 -0400 Subject: [PATCH 128/508] `fixNFTokenRemint`: prevent NFT re-mint: (#4406) Without the protocol amendment introduced by this commit, an NFT ID can be reminted in this manner: 1. Alice creates an account and mints an NFT. 2. Alice burns the NFT with an `NFTokenBurn` transaction. 3. Alice deletes her account with an `AccountDelete` transaction. 4. Alice re-creates her account. 5. Alice mints an NFT with an `NFTokenMint` transaction with params: `NFTokenTaxon` = 0, `Flags` = 9). This will mint a NFT with the same `NFTokenID` as the one minted in step 1. The params that construct the NFT ID will cause a collision in `NFTokenID` if their values are equal before and after the remint. With the `fixNFTokenRemint` amendment, there is a new sequence number construct which avoids this scenario: - A new `AccountRoot` field, `FirstNFTSequence`, stays constant over time. - This field is set to the current account sequence when the account issues their first NFT. - Otherwise, it is not set. - The sequence of a newly-minted NFT is computed by: `FirstNFTSequence + MintedNFTokens`. - `MintedNFTokens` is then incremented by 1 for each mint. Furthermore, there is a new account deletion restriction: - An account can only be deleted if `FirstNFTSequence + MintedNFTokens + 256` is less than the current ledger sequence. - 256 was chosen because it already exists in the current account deletion constraint. Without this restriction, an NFT may still be remintable. Example scenario: 1. Alice's account sequence is at 1. 2. Bob is Alice's authorized minter. 3. Bob mints 500 NFTs for Alice. The NFTs will have sequences 1-501, as NFT sequence is computed by `FirstNFTokenSequence + MintedNFTokens`). 4. Alice deletes her account at ledger 257 (as required by the existing `AccountDelete` amendment). 5. Alice re-creates her account at ledger 258. 6. Alice mints an NFT. `FirstNFTokenSequence` initializes to her account sequence (258), and `MintedNFTokens` initializes as 0. This newly-minted NFT would have a sequence number of 258, which is a duplicate of what she issued through authorized minting before she deleted her account. 
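
To make the two rules concrete, here is a small self-contained sketch (illustrative only: the struct `IssuerState` and the helpers `nextNFTokenSequence` and `mayDeleteIssuer` are hypothetical names invented for this example and are not rippled APIs; the actual logic lives in `NFTokenMint::doApply` and `DeleteAccount::preclaim` in the diff below):

    #include <cstdint>

    // Hypothetical stand-in for the two AccountRoot fields involved
    // (sfFirstNFTokenSequence and sfMintedNFTokens); not a rippled type.
    struct IssuerState
    {
        std::uint32_t firstNFTokenSequence = 0;  // set at the first mint, then fixed
        std::uint32_t mintedNFTokens = 0;        // incremented by 1 on every mint
    };

    // Sequence number given to the next NFT minted against this issuer.
    inline std::uint32_t
    nextNFTokenSequence(IssuerState const& issuer, bool fixNFTokenRemintEnabled)
    {
        if (!fixNFTokenRemintEnabled)
            return issuer.mintedNFTokens;  // pre-amendment: the counter alone
        return issuer.firstNFTokenSequence + issuer.mintedNFTokens;
    }

    // Deletion guard added by the amendment, per the rule described above: the
    // issuer may be deleted only once FirstNFTSequence + MintedNFTokens + 256
    // is behind the current ledger sequence.
    inline bool
    mayDeleteIssuer(IssuerState const& issuer, std::uint32_t currentLedgerSeq)
    {
        return issuer.firstNFTokenSequence + issuer.mintedNFTokens + 256 <
            currentLedgerSeq;
    }

Because a newly created account starts with its `Sequence` at the ledger sequence in which it is created, waiting out this window means a re-created issuer's `FirstNFTSequence` ends up strictly greater than any NFT sequence it (or its authorized minter) produced before the deletion, so none of the previously issued `NFTokenID`s can recur.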
--------- Signed-off-by: Shawn Xie --- src/ripple/app/tx/impl/DeleteAccount.cpp | 17 + src/ripple/app/tx/impl/NFTokenMint.cpp | 63 ++- src/ripple/protocol/Feature.h | 3 +- src/ripple/protocol/SField.h | 1 + src/ripple/protocol/impl/Feature.cpp | 1 + src/ripple/protocol/impl/LedgerFormats.cpp | 1 + src/ripple/protocol/impl/SField.cpp | 3 + src/test/app/NFTokenBurn_test.cpp | 18 +- src/test/app/NFTokenDir_test.cpp | 35 +- src/test/app/NFToken_test.cpp | 584 ++++++++++++++++++++- src/test/jtx/impl/token.cpp | 11 +- src/test/jtx/token.h | 1 + 12 files changed, 695 insertions(+), 43 deletions(-) diff --git a/src/ripple/app/tx/impl/DeleteAccount.cpp b/src/ripple/app/tx/impl/DeleteAccount.cpp index 3d9d83c0d35..62cc9e1fbbf 100644 --- a/src/ripple/app/tx/impl/DeleteAccount.cpp +++ b/src/ripple/app/tx/impl/DeleteAccount.cpp @@ -214,6 +214,23 @@ DeleteAccount::preclaim(PreclaimContext const& ctx) if ((*sleAccount)[sfSequence] + seqDelta > ctx.view.seq()) return tecTOO_SOON; + // When fixNFTokenRemint is enabled, we don't allow an account to be + // deleted if is within 256 of the + // current ledger. This is to prevent having duplicate NFTokenIDs after + // account re-creation. + // + // Without this restriction, duplicate NFTokenIDs can be reproduced when + // authorized minting is involved. Because when the minter mints a NFToken, + // the issuer's sequence does not change. So when the issuer re-creates + // their account and mints a NFToken, it is possible that the + // NFTokenSequence of this NFToken is the same as the one that the + // authorized minter minted in a previous ledger. + if (ctx.view.rules().enabled(fixNFTokenRemint) && + ((*sleAccount)[~sfFirstNFTokenSequence].value_or(0) + + (*sleAccount)[~sfMintedNFTokens].value_or(0) + seqDelta > + ctx.view.seq())) + return tecTOO_SOON; + // Verify that the account does not own any objects that would prevent // the account from being deleted. Keylet const ownerDirKeylet{keylet::ownerDir(account)}; diff --git a/src/ripple/app/tx/impl/NFTokenMint.cpp b/src/ripple/app/tx/impl/NFTokenMint.cpp index f4d3eb85676..c26fb1fb12a 100644 --- a/src/ripple/app/tx/impl/NFTokenMint.cpp +++ b/src/ripple/app/tx/impl/NFTokenMint.cpp @@ -160,15 +160,66 @@ NFTokenMint::doApply() // Should not happen. Checked in preclaim. return Unexpected(tecNO_ISSUER); - // Get the unique sequence number for this token: - std::uint32_t const tokenSeq = (*root)[~sfMintedNFTokens].value_or(0); + if (!ctx_.view().rules().enabled(fixNFTokenRemint)) { - std::uint32_t const nextTokenSeq = tokenSeq + 1; - if (nextTokenSeq < tokenSeq) - return Unexpected(tecMAX_SEQUENCE_REACHED); + // Get the unique sequence number for this token: + std::uint32_t const tokenSeq = + (*root)[~sfMintedNFTokens].value_or(0); + { + std::uint32_t const nextTokenSeq = tokenSeq + 1; + if (nextTokenSeq < tokenSeq) + return Unexpected(tecMAX_SEQUENCE_REACHED); + + (*root)[sfMintedNFTokens] = nextTokenSeq; + } + ctx_.view().update(root); + return tokenSeq; + } + + // With fixNFTokenRemint amendment enabled: + // + // If the issuer hasn't minted an NFToken before we must add a + // FirstNFTokenSequence field to the issuer's AccountRoot. The + // value of the FirstNFTokenSequence must equal the issuer's + // current account sequence. + // + // There are three situations: + // o If the first token is being minted by the issuer and + // * If the transaction consumes a Sequence number, then the + // Sequence has been pre-incremented by the time we get here in + // doApply. 
We must decrement the value in the Sequence field. + // * Otherwise the transaction uses a Ticket so the Sequence has + // not been pre-incremented. We use the Sequence value as is. + // o The first token is being minted by an authorized minter. In + // this case the issuer's Sequence field has been left untouched. + // We use the issuer's Sequence value as is. + if (!root->isFieldPresent(sfFirstNFTokenSequence)) + { + std::uint32_t const acctSeq = root->at(sfSequence); - (*root)[sfMintedNFTokens] = nextTokenSeq; + root->at(sfFirstNFTokenSequence) = + ctx_.tx.isFieldPresent(sfIssuer) || + ctx_.tx.getSeqProxy().isTicket() + ? acctSeq + : acctSeq - 1; } + + std::uint32_t const mintedNftCnt = + (*root)[~sfMintedNFTokens].value_or(0u); + + (*root)[sfMintedNFTokens] = mintedNftCnt + 1u; + if ((*root)[sfMintedNFTokens] == 0u) + return Unexpected(tecMAX_SEQUENCE_REACHED); + + // Get the unique sequence number of this token by + // sfFirstNFTokenSequence + sfMintedNFTokens + std::uint32_t const offset = (*root)[sfFirstNFTokenSequence]; + std::uint32_t const tokenSeq = offset + mintedNftCnt; + + // Check for more overflow cases + if (tokenSeq + 1u == 0u || tokenSeq < offset) + return Unexpected(tecMAX_SEQUENCE_REACHED); + ctx_.view().update(root); return tokenSeq; }(); diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index d53d992d242..62dc327d98d 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -74,7 +74,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. A LogicError on startup will verify this. -static constexpr std::size_t numFeatures = 57; +static constexpr std::size_t numFeatures = 58; /** Amendments that this server supports and the default voting behavior. Whether they are enabled depends on the Rules defined in the validated @@ -344,6 +344,7 @@ extern uint256 const featureDisallowIncoming; extern uint256 const featureXRPFees; extern uint256 const fixUniversalNumber; extern uint256 const fixNonFungibleTokensV1_2; +extern uint256 const fixNFTokenRemint; } // namespace ripple diff --git a/src/ripple/protocol/SField.h b/src/ripple/protocol/SField.h index 694eeef5cbb..5821a43d1c0 100644 --- a/src/ripple/protocol/SField.h +++ b/src/ripple/protocol/SField.h @@ -400,6 +400,7 @@ extern SF_UINT32 const sfMintedNFTokens; extern SF_UINT32 const sfBurnedNFTokens; extern SF_UINT32 const sfHookStateCount; extern SF_UINT32 const sfEmitGeneration; +extern SF_UINT32 const sfFirstNFTokenSequence; // 64-bit integers (common) extern SF_UINT64 const sfIndexNext; diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index 4fb79e4cc48..d15c3fc60ff 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -454,6 +454,7 @@ REGISTER_FEATURE(DisallowIncoming, Supported::yes, DefaultVote::no) REGISTER_FEATURE(XRPFees, Supported::yes, DefaultVote::no); REGISTER_FIX (fixUniversalNumber, Supported::yes, DefaultVote::no); REGISTER_FIX (fixNonFungibleTokensV1_2, Supported::yes, DefaultVote::no); +REGISTER_FIX (fixNFTokenRemint, Supported::yes, DefaultVote::no); // The following amendments have been active for at least two years. Their // pre-amendment code has been removed and the identifiers are deprecated. 
diff --git a/src/ripple/protocol/impl/LedgerFormats.cpp b/src/ripple/protocol/impl/LedgerFormats.cpp index a540a5d80c0..c60c8c59dbb 100644 --- a/src/ripple/protocol/impl/LedgerFormats.cpp +++ b/src/ripple/protocol/impl/LedgerFormats.cpp @@ -55,6 +55,7 @@ LedgerFormats::LedgerFormats() {sfNFTokenMinter, soeOPTIONAL}, {sfMintedNFTokens, soeDEFAULT}, {sfBurnedNFTokens, soeDEFAULT}, + {sfFirstNFTokenSequence, soeOPTIONAL}, }, commonFields); diff --git a/src/ripple/protocol/impl/SField.cpp b/src/ripple/protocol/impl/SField.cpp index f458c2dfe54..14c2bd5c3de 100644 --- a/src/ripple/protocol/impl/SField.cpp +++ b/src/ripple/protocol/impl/SField.cpp @@ -150,6 +150,9 @@ CONSTRUCT_TYPED_SFIELD(sfMintedNFTokens, "MintedNFTokens", UINT32, CONSTRUCT_TYPED_SFIELD(sfBurnedNFTokens, "BurnedNFTokens", UINT32, 44); CONSTRUCT_TYPED_SFIELD(sfHookStateCount, "HookStateCount", UINT32, 45); CONSTRUCT_TYPED_SFIELD(sfEmitGeneration, "EmitGeneration", UINT32, 46); +// Three field values of 47, 48 and 49 are reserved for +// LockCount(Hooks), VoteWeight(AMM), DiscountedFee(AMM) +CONSTRUCT_TYPED_SFIELD(sfFirstNFTokenSequence, "FirstNFTokenSequence", UINT32, 50); // 64-bit integers (common) CONSTRUCT_TYPED_SFIELD(sfIndexNext, "IndexNext", UINT64, 1); diff --git a/src/test/app/NFTokenBurn_test.cpp b/src/test/app/NFTokenBurn_test.cpp index 096fd5ce1e8..75c32385acf 100644 --- a/src/test/app/NFTokenBurn_test.cpp +++ b/src/test/app/NFTokenBurn_test.cpp @@ -380,8 +380,16 @@ class NFTokenBurn_test : public beast::unit_test::suite auto internalTaxon = [&env]( Account const& acct, std::uint32_t taxon) -> std::uint32_t { - std::uint32_t const tokenSeq = { - env.le(acct)->at(~sfMintedNFTokens).value_or(0)}; + std::uint32_t tokenSeq = + env.le(acct)->at(~sfMintedNFTokens).value_or(0); + + // If fixNFTokenRemint amendment is on, we must + // add FirstNFTokenSequence. + if (env.current()->rules().enabled(fixNFTokenRemint)) + tokenSeq += env.le(acct) + ->at(~sfFirstNFTokenSequence) + .value_or(env.seq(acct)); + return toUInt32( nft::cipheredTaxon(tokenSeq, nft::toTaxon(taxon))); }; @@ -786,8 +794,10 @@ class NFTokenBurn_test : public beast::unit_test::suite FeatureBitset const all{supported_amendments()}; FeatureBitset const fixNFTDir{fixNFTokenDirV1}; - testWithFeats(all - fixNonFungibleTokensV1_2 - fixNFTDir); - testWithFeats(all - fixNonFungibleTokensV1_2); + testWithFeats( + all - fixNonFungibleTokensV1_2 - fixNFTDir - fixNFTokenRemint); + testWithFeats(all - fixNonFungibleTokensV1_2 - fixNFTokenRemint); + testWithFeats(all - fixNFTokenRemint); testWithFeats(all); } }; diff --git a/src/test/app/NFTokenDir_test.cpp b/src/test/app/NFTokenDir_test.cpp index d50bd1584d6..e9addfa83f7 100644 --- a/src/test/app/NFTokenDir_test.cpp +++ b/src/test/app/NFTokenDir_test.cpp @@ -190,8 +190,14 @@ class NFTokenDir_test : public beast::unit_test::suite Account const& account = accounts.emplace_back( Account::base58Seed, std::string(seed)); env.fund(XRP(10000), account); - env.close(); + + // Do not close the ledger inside the loop. If + // fixNFTokenRemint is enabled and accounts are initialized + // at different ledgers, they will have different account + // sequences. That would cause the accounts to have + // different NFTokenID sequence numbers. } + env.close(); // All of the accounts create one NFT and and offer that NFT to // buyer. 
@@ -408,8 +414,14 @@ class NFTokenDir_test : public beast::unit_test::suite Account const& account = accounts.emplace_back( Account::base58Seed, std::string(seed)); env.fund(XRP(10000), account); - env.close(); + + // Do not close the ledger inside the loop. If + // fixNFTokenRemint is enabled and accounts are initialized + // at different ledgers, they will have different account + // sequences. That would cause the accounts to have + // different NFTokenID sequence numbers. } + env.close(); // All of the accounts create one NFT and and offer that NFT to // buyer. @@ -652,8 +664,14 @@ class NFTokenDir_test : public beast::unit_test::suite Account const& account = accounts.emplace_back(Account::base58Seed, std::string(seed)); env.fund(XRP(10000), account); - env.close(); + + // Do not close the ledger inside the loop. If + // fixNFTokenRemint is enabled and accounts are initialized + // at different ledgers, they will have different account + // sequences. That would cause the accounts to have + // different NFTokenID sequence numbers. } + env.close(); // All of the accounts create one NFT and and offer that NFT to buyer. std::vector nftIDs; @@ -827,8 +845,14 @@ class NFTokenDir_test : public beast::unit_test::suite Account const& account = accounts.emplace_back(Account::base58Seed, std::string(seed)); env.fund(XRP(10000), account); - env.close(); + + // Do not close the ledger inside the loop. If + // fixNFTokenRemint is enabled and accounts are initialized + // at different ledgers, they will have different account + // sequences. That would cause the accounts to have + // different NFTokenID sequence numbers. } + env.close(); // All of the accounts create seven consecutive NFTs and and offer // those NFTs to buyer. @@ -1078,7 +1102,8 @@ class NFTokenDir_test : public beast::unit_test::suite FeatureBitset const fixNFTDir{ fixNFTokenDirV1, featureNonFungibleTokensV1_1}; - testWithFeats(all - fixNFTDir); + testWithFeats(all - fixNFTDir - fixNFTokenRemint); + testWithFeats(all - fixNFTokenRemint); testWithFeats(all); } }; diff --git a/src/test/app/NFToken_test.cpp b/src/test/app/NFToken_test.cpp index 150622c731c..81abee34b07 100644 --- a/src/test/app/NFToken_test.cpp +++ b/src/test/app/NFToken_test.cpp @@ -215,8 +215,8 @@ class NFToken_test : public beast::unit_test::suite Account const minter{"minter"}; // Fund alice and minter enough to exist, but not enough to meet - // the reserve for creating their first NFT. Account reserve for unit - // tests is 200 XRP, not 20. + // the reserve for creating their first NFT. Account reserve for + // unit tests is 200 XRP, not 20. env.fund(XRP(200), alice, minter); env.close(); BEAST_EXPECT(env.balance(alice) == XRP(200)); @@ -224,7 +224,8 @@ class NFToken_test : public beast::unit_test::suite BEAST_EXPECT(ownerCount(env, alice) == 0); BEAST_EXPECT(ownerCount(env, minter) == 0); - // alice does not have enough XRP to cover the reserve for an NFT page. + // alice does not have enough XRP to cover the reserve for an NFT + // page. env(token::mint(alice, 0u), ter(tecINSUFFICIENT_RESERVE)); env.close(); BEAST_EXPECT(ownerCount(env, alice) == 0); @@ -260,7 +261,8 @@ class NFToken_test : public beast::unit_test::suite oneCheck("burned", burnedCount(env, alice), burned); }; - // alice still does not have enough XRP for the reserve of an NFT page. + // alice still does not have enough XRP for the reserve of an NFT + // page. 
env(token::mint(alice, 0u), ter(tecINSUFFICIENT_RESERVE)); env.close(); checkAliceOwnerMintedBurned(0, 0, 0, __LINE__); @@ -292,7 +294,8 @@ class NFToken_test : public beast::unit_test::suite env(pay(env.master, alice, XRP(50) + drops(329))); env.close(); - // alice still does not have enough XRP for the reserve of an NFT page. + // alice still does not have enough XRP for the reserve of an NFT + // page. env(token::mint(alice), ter(tecINSUFFICIENT_RESERVE)); env.close(); checkAliceOwnerMintedBurned(1, 32, 0, __LINE__); @@ -311,18 +314,20 @@ class NFToken_test : public beast::unit_test::suite while (seq < 33) { - env(token::burn(alice, token::getID(alice, 0, seq++))); + env(token::burn(alice, token::getID(env, alice, 0, seq++))); env.close(); checkAliceOwnerMintedBurned((33 - seq) ? 1 : 0, 33, seq, __LINE__); } // alice burns a non-existent NFT. - env(token::burn(alice, token::getID(alice, 197, 5)), ter(tecNO_ENTRY)); + env(token::burn(alice, token::getID(env, alice, 197, 5)), + ter(tecNO_ENTRY)); env.close(); checkAliceOwnerMintedBurned(0, 33, 33, __LINE__); - // That was fun! Now let's see what happens when we let someone else - // mint NFTs on alice's behalf. alice gives permission to minter. + // That was fun! Now let's see what happens when we let someone + // else mint NFTs on alice's behalf. alice gives permission to + // minter. env(token::setMinter(alice, minter)); env.close(); BEAST_EXPECT( @@ -373,9 +378,9 @@ class NFToken_test : public beast::unit_test::suite env.close(); checkMintersOwnerMintedBurned(0, 33, nftSeq, 0, 0, 0, __LINE__); - // minter still does not have enough XRP for the reserve of an NFT page. - // Just for grins (and code coverage), minter mints NFTs that include - // a URI. + // minter still does not have enough XRP for the reserve of an NFT + // page. Just for grins (and code coverage), minter mints NFTs that + // include a URI. env(token::mint(minter), token::issuer(alice), token::uri("uri"), @@ -400,7 +405,8 @@ class NFToken_test : public beast::unit_test::suite checkMintersOwnerMintedBurned(0, i + 34, nftSeq, 1, 0, 0, __LINE__); } - // Pay minter almost enough for the reserve of an additional NFT page. + // Pay minter almost enough for the reserve of an additional NFT + // page. env(pay(env.master, minter, XRP(50) + drops(319))); env.close(); @@ -425,19 +431,20 @@ class NFToken_test : public beast::unit_test::suite // minter burns the NFTs she created. while (nftSeq < 65) { - env(token::burn(minter, token::getID(alice, 0, nftSeq++))); + env(token::burn(minter, token::getID(env, alice, 0, nftSeq++))); env.close(); checkMintersOwnerMintedBurned( 0, 66, nftSeq, (65 - seq) ? 1 : 0, 0, 0, __LINE__); } - // minter has one more NFT to burn. Should take her owner count to 0. - env(token::burn(minter, token::getID(alice, 0, nftSeq++))); + // minter has one more NFT to burn. Should take her owner count to + // 0. + env(token::burn(minter, token::getID(env, alice, 0, nftSeq++))); env.close(); checkMintersOwnerMintedBurned(0, 66, nftSeq, 0, 0, 0, __LINE__); // minter burns a non-existent NFT. - env(token::burn(minter, token::getID(alice, 2009, 3)), + env(token::burn(minter, token::getID(env, alice, 2009, 3)), ter(tecNO_ENTRY)); env.close(); checkMintersOwnerMintedBurned(0, 66, nftSeq, 0, 0, 0, __LINE__); @@ -475,7 +482,7 @@ class NFToken_test : public beast::unit_test::suite // checks with this modify() call. If you call close() between // here and the end of the test all the effort will be lost. 
env.app().openLedger().modify( - [&alice](OpenView& view, beast::Journal j) { + [&alice, &env](OpenView& view, beast::Journal j) { // Get the account root we want to hijack. auto const sle = view.read(keylet::account(alice.id())); if (!sle) @@ -487,8 +494,23 @@ class NFToken_test : public beast::unit_test::suite if (replacement->getFieldU32(sfMintedNFTokens) != 1) return false; // Unexpected test conditions. - // Now replace sfMintedNFTokens with the largest valid value. - (*replacement)[sfMintedNFTokens] = 0xFFFF'FFFE; + if (env.current()->rules().enabled(fixNFTokenRemint)) + { + // If fixNFTokenRemint is enabled, sequence number is + // generated by sfFirstNFTokenSequence + sfMintedNFTokens. + // We can replace the two fields with any numbers as long as + // they add up to the largest valid number. In our case, + // sfFirstNFTokenSequence is set to the largest valid + // number, and sfMintedNFTokens is set to zero. + (*replacement)[sfFirstNFTokenSequence] = 0xFFFF'FFFE; + (*replacement)[sfMintedNFTokens] = 0x0000'0000; + } + else + { + // Now replace sfMintedNFTokens with the largest valid + // value. + (*replacement)[sfMintedNFTokens] = 0xFFFF'FFFE; + } view.rawReplace(replacement); return true; }); @@ -623,7 +645,8 @@ class NFToken_test : public beast::unit_test::suite // preclaim // Try to burn a token that doesn't exist. - env(token::burn(alice, token::getID(alice, 0, 1)), ter(tecNO_ENTRY)); + env(token::burn(alice, token::getID(env, alice, 0, 1)), + ter(tecNO_ENTRY)); env.close(); BEAST_EXPECT(ownerCount(env, buyer) == 0); @@ -769,14 +792,16 @@ class NFToken_test : public beast::unit_test::suite BEAST_EXPECT(ownerCount(env, buyer) == 0); // The nftID must be present in the ledger. - env(token::createOffer(buyer, token::getID(alice, 0, 1), XRP(1000)), + env(token::createOffer( + buyer, token::getID(env, alice, 0, 1), XRP(1000)), token::owner(alice), ter(tecNO_ENTRY)); env.close(); BEAST_EXPECT(ownerCount(env, buyer) == 0); // The nftID must be present in the ledger of a sell offer too. - env(token::createOffer(alice, token::getID(alice, 0, 1), XRP(1000)), + env(token::createOffer( + alice, token::getID(env, alice, 0, 1), XRP(1000)), txflags(tfSellNFToken), ter(tecNO_ENTRY)); env.close(); @@ -2554,6 +2579,7 @@ class NFToken_test : public beast::unit_test::suite }; uint256 const nftAliceID = token::getID( + env, alice, taxon, rand_int(), @@ -2562,6 +2588,7 @@ class NFToken_test : public beast::unit_test::suite check(taxon, nftAliceID); uint256 const nftBeckyID = token::getID( + env, becky, taxon, rand_int(), @@ -6038,6 +6065,506 @@ class NFToken_test : public beast::unit_test::suite } } + void + testFixNFTokenRemint(FeatureBitset features) + { + using namespace test::jtx; + + testcase("fixNFTokenRemint"); + + // Returns the current ledger sequence + auto openLedgerSeq = [](Env& env) { return env.current()->seq(); }; + + // Close the ledger until the ledger sequence is large enough to delete + // the account (no longer within ) + // This is enforced by the featureDeletableAccounts amendment + auto incLgrSeqForAcctDel = [&](Env& env, Account const& acct) { + int const delta = [&]() -> int { + if (env.seq(acct) + 255 > openLedgerSeq(env)) + return env.seq(acct) - openLedgerSeq(env) + 255; + return 0; + }(); + BEAST_EXPECT(delta >= 0); + for (int i = 0; i < delta; ++i) + env.close(); + BEAST_EXPECT(openLedgerSeq(env) == env.seq(acct) + 255); + }; + + // Close the ledger until the ledger sequence is no longer + // within . + // This is enforced by the fixNFTokenRemint amendment. 
+ auto incLgrSeqForFixNftRemint = [&](Env& env, Account const& acct) { + int delta = 0; + auto const deletableLgrSeq = + (*env.le(acct))[~sfFirstNFTokenSequence].value_or(0) + + (*env.le(acct))[sfMintedNFTokens] + 255; + + if (deletableLgrSeq > openLedgerSeq(env)) + delta = deletableLgrSeq - openLedgerSeq(env); + + BEAST_EXPECT(delta >= 0); + for (int i = 0; i < delta; ++i) + env.close(); + BEAST_EXPECT(openLedgerSeq(env) == deletableLgrSeq); + }; + + // We check if NFTokenIDs can be duplicated by + // re-creation of an account + { + Env env{*this, features}; + Account const alice("alice"); + Account const becky("becky"); + + env.fund(XRP(10000), alice, becky); + env.close(); + + // alice mint and burn a NFT + uint256 const prevNFTokenID = token::getNextID(env, alice, 0u); + env(token::mint(alice)); + env.close(); + env(token::burn(alice, prevNFTokenID)); + env.close(); + + // alice has minted 1 NFToken + BEAST_EXPECT((*env.le(alice))[sfMintedNFTokens] == 1); + + // Close enough ledgers to delete alice's account + incLgrSeqForAcctDel(env, alice); + + // alice's account is deleted + Keylet const aliceAcctKey{keylet::account(alice.id())}; + auto const acctDelFee{drops(env.current()->fees().increment)}; + env(acctdelete(alice, becky), fee(acctDelFee)); + env.close(); + + // alice's account root is gone from the most recently + // closed ledger and the current ledger. + BEAST_EXPECT(!env.closed()->exists(aliceAcctKey)); + BEAST_EXPECT(!env.current()->exists(aliceAcctKey)); + + // Fund alice to re-create her account + env.fund(XRP(10000), alice); + env.close(); + + // alice's account now exists and has minted 0 NFTokens + BEAST_EXPECT(env.closed()->exists(aliceAcctKey)); + BEAST_EXPECT(env.current()->exists(aliceAcctKey)); + BEAST_EXPECT((*env.le(alice))[sfMintedNFTokens] == 0); + + // alice mints a NFT with same params as prevNFTokenID + uint256 const remintNFTokenID = token::getNextID(env, alice, 0u); + env(token::mint(alice)); + env.close(); + + // burn the NFT to make sure alice owns remintNFTokenID + env(token::burn(alice, remintNFTokenID)); + env.close(); + + if (features[fixNFTokenRemint]) + // Check that two NFTs don't have the same ID + BEAST_EXPECT(remintNFTokenID != prevNFTokenID); + else + // Check that two NFTs have the same ID + BEAST_EXPECT(remintNFTokenID == prevNFTokenID); + } + + // Test if the issuer account can be deleted after an authorized + // minter mints and burns a batch of NFTokens. + { + Env env{*this, features}; + Account const alice("alice"); + Account const becky("becky"); + Account const minter{"minter"}; + + env.fund(XRP(10000), alice, becky, minter); + env.close(); + + // alice sets minter as her authorized minter + env(token::setMinter(alice, minter)); + env.close(); + + // minter mints 500 NFTs for alice + std::vector nftIDs; + nftIDs.reserve(500); + for (int i = 0; i < 500; i++) + { + uint256 const nftokenID = token::getNextID(env, alice, 0u); + nftIDs.push_back(nftokenID); + env(token::mint(minter), token::issuer(alice)); + } + env.close(); + + // minter burns 500 NFTs + for (auto const nftokenID : nftIDs) + { + env(token::burn(minter, nftokenID)); + } + env.close(); + + // Increment ledger sequence to the number that is + // enforced by the featureDeletableAccounts amendment + incLgrSeqForAcctDel(env, alice); + + // Verify that alice's account root is present. 
+ Keylet const aliceAcctKey{keylet::account(alice.id())}; + BEAST_EXPECT(env.closed()->exists(aliceAcctKey)); + BEAST_EXPECT(env.current()->exists(aliceAcctKey)); + + auto const acctDelFee{drops(env.current()->fees().increment)}; + + if (!features[fixNFTokenRemint]) + { + // alice's account can be successfully deleted. + env(acctdelete(alice, becky), fee(acctDelFee)); + env.close(); + BEAST_EXPECT(!env.current()->exists(aliceAcctKey)); + + // Fund alice to re-create her account + env.fund(XRP(10000), alice); + env.close(); + + // alice's account now exists and has minted 0 NFTokens + BEAST_EXPECT(env.closed()->exists(aliceAcctKey)); + BEAST_EXPECT(env.current()->exists(aliceAcctKey)); + BEAST_EXPECT((*env.le(alice))[sfMintedNFTokens] == 0); + + // alice mints a NFT with same params as the first one before + // the account delete. + uint256 const remintNFTokenID = + token::getNextID(env, alice, 0u); + env(token::mint(alice)); + env.close(); + + // burn the NFT to make sure alice owns remintNFTokenID + env(token::burn(alice, remintNFTokenID)); + env.close(); + + // The new NFT minted has the same ID as one of the NFTs + // authorized minter minted for alice + BEAST_EXPECT( + std::find(nftIDs.begin(), nftIDs.end(), remintNFTokenID) != + nftIDs.end()); + } + else if (features[fixNFTokenRemint]) + { + // alice tries to delete her account, but is unsuccessful. + // Due to authorized minting, alice's account sequence does not + // advance while minter mints NFTokens for her. + // The new account deletion retriction enabled by this amendment will enforce + // alice to wait for more ledgers to close before she can + // delete her account, to prevent duplicate NFTokenIDs + env(acctdelete(alice, becky), + fee(acctDelFee), + ter(tecTOO_SOON)); + env.close(); + + // alice's account is still present + BEAST_EXPECT(env.current()->exists(aliceAcctKey)); + + // Close more ledgers until it is no longer within + // + // to be able to delete alice's account + incLgrSeqForFixNftRemint(env, alice); + + // alice's account is deleted + env(acctdelete(alice, becky), fee(acctDelFee)); + env.close(); + + // alice's account root is gone from the most recently + // closed ledger and the current ledger. + BEAST_EXPECT(!env.closed()->exists(aliceAcctKey)); + BEAST_EXPECT(!env.current()->exists(aliceAcctKey)); + + // Fund alice to re-create her account + env.fund(XRP(10000), alice); + env.close(); + + // alice's account now exists and has minted 0 NFTokens + BEAST_EXPECT(env.closed()->exists(aliceAcctKey)); + BEAST_EXPECT(env.current()->exists(aliceAcctKey)); + BEAST_EXPECT((*env.le(alice))[sfMintedNFTokens] == 0); + + // alice mints a NFT with same params as the first one before + // the account delete. + uint256 const remintNFTokenID = + token::getNextID(env, alice, 0u); + env(token::mint(alice)); + env.close(); + + // burn the NFT to make sure alice owns remintNFTokenID + env(token::burn(alice, remintNFTokenID)); + env.close(); + + // The new NFT minted will not have the same ID + // as any of the NFTs authorized minter minted + BEAST_EXPECT( + std::find(nftIDs.begin(), nftIDs.end(), remintNFTokenID) == + nftIDs.end()); + } + } + + // When an account mints and burns a batch of NFTokens using tickets, + // see if the the account can be deleted. + { + Env env{*this, features}; + + Account const alice{"alice"}; + Account const becky{"becky"}; + env.fund(XRP(10000), alice, becky); + env.close(); + + // alice grab enough tickets for all of the following + // transactions. 
Note that once the tickets are acquired alice's + // account sequence number should not advance. + std::uint32_t aliceTicketSeq{env.seq(alice) + 1}; + env(ticket::create(alice, 100)); + env.close(); + + BEAST_EXPECT(ticketCount(env, alice) == 100); + BEAST_EXPECT(ownerCount(env, alice) == 100); + + // alice mints 50 NFTs using tickets + std::vector nftIDs; + nftIDs.reserve(50); + for (int i = 0; i < 50; i++) + { + nftIDs.push_back(token::getNextID(env, alice, 0u)); + env(token::mint(alice, 0u), ticket::use(aliceTicketSeq++)); + env.close(); + } + + // alice burns 50 NFTs using tickets + for (auto const nftokenID : nftIDs) + { + env(token::burn(alice, nftokenID), + ticket::use(aliceTicketSeq++)); + } + env.close(); + + BEAST_EXPECT(ticketCount(env, alice) == 0); + + // Increment ledger sequence to the number that is + // enforced by the featureDeletableAccounts amendment + incLgrSeqForAcctDel(env, alice); + + // Verify that alice's account root is present. + Keylet const aliceAcctKey{keylet::account(alice.id())}; + BEAST_EXPECT(env.closed()->exists(aliceAcctKey)); + BEAST_EXPECT(env.current()->exists(aliceAcctKey)); + + auto const acctDelFee{drops(env.current()->fees().increment)}; + + if (!features[fixNFTokenRemint]) + { + // alice tries to delete her account, and is successful. + env(acctdelete(alice, becky), fee(acctDelFee)); + env.close(); + + // alice's account root is gone from the most recently + // closed ledger and the current ledger. + BEAST_EXPECT(!env.closed()->exists(aliceAcctKey)); + BEAST_EXPECT(!env.current()->exists(aliceAcctKey)); + + // Fund alice to re-create her account + env.fund(XRP(10000), alice); + env.close(); + + // alice's account now exists and has minted 0 NFTokens + BEAST_EXPECT(env.closed()->exists(aliceAcctKey)); + BEAST_EXPECT(env.current()->exists(aliceAcctKey)); + BEAST_EXPECT((*env.le(alice))[sfMintedNFTokens] == 0); + + // alice mints a NFT with same params as the first one before + // the account delete. + uint256 const remintNFTokenID = + token::getNextID(env, alice, 0u); + env(token::mint(alice)); + env.close(); + + // burn the NFT to make sure alice owns remintNFTokenID + env(token::burn(alice, remintNFTokenID)); + env.close(); + + // The new NFT minted will have the same ID + // as one of NFTs minted using tickets + BEAST_EXPECT( + std::find(nftIDs.begin(), nftIDs.end(), remintNFTokenID) != + nftIDs.end()); + } + else if (features[fixNFTokenRemint]) + { + // alice tries to delete her account, but is unsuccessful. + // Due to authorized minting, alice's account sequence does not + // advance while minter mints NFTokens for her using tickets. + // The new account deletion retriction enabled by this amendment will enforce + // alice to wait for more ledgers to close before she can + // delete her account, to prevent duplicate NFTokenIDs + env(acctdelete(alice, becky), + fee(acctDelFee), + ter(tecTOO_SOON)); + env.close(); + + // alice's account is still present + BEAST_EXPECT(env.current()->exists(aliceAcctKey)); + + // Close more ledgers until it is no longer within + // + // to be able to delete alice's account + incLgrSeqForFixNftRemint(env, alice); + + // alice's account is deleted + env(acctdelete(alice, becky), fee(acctDelFee)); + env.close(); + + // alice's account root is gone from the most recently + // closed ledger and the current ledger. 
+ BEAST_EXPECT(!env.closed()->exists(aliceAcctKey)); + BEAST_EXPECT(!env.current()->exists(aliceAcctKey)); + + // Fund alice to re-create her account + env.fund(XRP(10000), alice); + env.close(); + + // alice's account now exists and has minted 0 NFTokens + BEAST_EXPECT(env.closed()->exists(aliceAcctKey)); + BEAST_EXPECT(env.current()->exists(aliceAcctKey)); + BEAST_EXPECT((*env.le(alice))[sfMintedNFTokens] == 0); + + // alice mints a NFT with same params as the first one before + // the account delete. + uint256 const remintNFTokenID = + token::getNextID(env, alice, 0u); + env(token::mint(alice)); + env.close(); + + // burn the NFT to make sure alice owns remintNFTokenID + env(token::burn(alice, remintNFTokenID)); + env.close(); + + // The new NFT minted will not have the same ID + // as any of the NFTs authorized minter minted using tickets + BEAST_EXPECT( + std::find(nftIDs.begin(), nftIDs.end(), remintNFTokenID) == + nftIDs.end()); + } + } + // If fixNFTokenRemint is enabled, + // when an authorized minter mints and burns a batch of NFTokens using + // tickets, issuer's account needs to wait a longer time before it can + // deleted. + // After the issuer's account is re-created and mints a NFT, it should + // not have the same NFTokenID as the ones authorized minter minted. + if (features[fixNFTokenRemint]) + { + Env env{*this, features}; + Account const alice("alice"); + Account const becky("becky"); + Account const minter{"minter"}; + + env.fund(XRP(10000), alice, becky, minter); + env.close(); + + // alice sets minter as her authorized minter + env(token::setMinter(alice, minter)); + env.close(); + + // minter creates 100 tickets + std::uint32_t minterTicketSeq{env.seq(minter) + 1}; + env(ticket::create(minter, 100)); + env.close(); + + BEAST_EXPECT(ticketCount(env, minter) == 100); + BEAST_EXPECT(ownerCount(env, minter) == 100); + + // minter mints 50 NFTs for alice using tickets + std::vector nftIDs; + nftIDs.reserve(50); + for (int i = 0; i < 50; i++) + { + uint256 const nftokenID = token::getNextID(env, alice, 0u); + nftIDs.push_back(nftokenID); + env(token::mint(minter), + token::issuer(alice), + ticket::use(minterTicketSeq++)); + } + env.close(); + + // minter burns 50 NFTs using tickets + for (auto const nftokenID : nftIDs) + { + env(token::burn(minter, nftokenID), + ticket::use(minterTicketSeq++)); + } + env.close(); + + BEAST_EXPECT(ticketCount(env, minter) == 0); + + // Increment ledger sequence to the number that is + // enforced by the featureDeletableAccounts amendment + incLgrSeqForAcctDel(env, alice); + + // Verify that alice's account root is present. + Keylet const aliceAcctKey{keylet::account(alice.id())}; + BEAST_EXPECT(env.closed()->exists(aliceAcctKey)); + BEAST_EXPECT(env.current()->exists(aliceAcctKey)); + + // alice tries to delete her account, but is unsuccessful. + // Due to authorized minting, alice's account sequence does not + // advance while minter mints NFTokens for her using tickets. 
+ // The new account deletion retriction enabled by this amendment will enforce + // alice to wait for more ledgers to close before she can delete her + // account, to prevent duplicate NFTokenIDs + auto const acctDelFee{drops(env.current()->fees().increment)}; + env(acctdelete(alice, becky), fee(acctDelFee), ter(tecTOO_SOON)); + env.close(); + + // alice's account is still present + BEAST_EXPECT(env.current()->exists(aliceAcctKey)); + + // Close more ledgers until it is no longer within + // + // to be able to delete alice's account + incLgrSeqForFixNftRemint(env, alice); + + // alice's account is deleted + env(acctdelete(alice, becky), fee(acctDelFee)); + env.close(); + + // alice's account root is gone from the most recently + // closed ledger and the current ledger. + BEAST_EXPECT(!env.closed()->exists(aliceAcctKey)); + BEAST_EXPECT(!env.current()->exists(aliceAcctKey)); + + // Fund alice to re-create her account + env.fund(XRP(10000), alice); + env.close(); + + // alice's account now exists and has minted 0 NFTokens + BEAST_EXPECT(env.closed()->exists(aliceAcctKey)); + BEAST_EXPECT(env.current()->exists(aliceAcctKey)); + BEAST_EXPECT((*env.le(alice))[sfMintedNFTokens] == 0); + + // The new NFT minted will not have the same ID + // as any of the NFTs authorized minter minted using tickets + uint256 const remintNFTokenID = token::getNextID(env, alice, 0u); + env(token::mint(alice)); + env.close(); + + // burn the NFT to make sure alice owns remintNFTokenID + env(token::burn(alice, remintNFTokenID)); + env.close(); + + // The new NFT minted will not have the same ID + // as one of NFTs authorized minter minted using tickets + BEAST_EXPECT( + std::find(nftIDs.begin(), nftIDs.end(), remintNFTokenID) == + nftIDs.end()); + } + } + void testWithFeats(FeatureBitset features) { @@ -6069,6 +6596,7 @@ class NFToken_test : public beast::unit_test::suite testFixNFTokenNegOffer(features); testIOUWithTransferFee(features); testBrokeredSaleToSelf(features); + testFixNFTokenRemint(features); } public: @@ -6079,9 +6607,13 @@ class NFToken_test : public beast::unit_test::suite FeatureBitset const all{supported_amendments()}; FeatureBitset const fixNFTDir{fixNFTokenDirV1}; - testWithFeats(all - fixNFTDir - fixNonFungibleTokensV1_2); - testWithFeats(all - disallowIncoming - fixNonFungibleTokensV1_2); - testWithFeats(all - fixNonFungibleTokensV1_2); + testWithFeats( + all - fixNFTDir - fixNonFungibleTokensV1_2 - fixNFTokenRemint); + testWithFeats( + all - disallowIncoming - fixNonFungibleTokensV1_2 - + fixNFTokenRemint); + testWithFeats(all - fixNonFungibleTokensV1_2 - fixNFTokenRemint); + testWithFeats(all - fixNFTokenRemint); testWithFeats(all); } }; diff --git a/src/test/jtx/impl/token.cpp b/src/test/jtx/impl/token.cpp index cfbcfe11c98..6c5cae4147a 100644 --- a/src/test/jtx/impl/token.cpp +++ b/src/test/jtx/impl/token.cpp @@ -68,17 +68,26 @@ getNextID( // Get the nftSeq from the account root of the issuer. std::uint32_t const nftSeq = { env.le(issuer)->at(~sfMintedNFTokens).value_or(0)}; - return getID(issuer, nfTokenTaxon, nftSeq, flags, xferFee); + return token::getID(env, issuer, nfTokenTaxon, nftSeq, flags, xferFee); } uint256 getID( + jtx::Env const& env, jtx::Account const& issuer, std::uint32_t nfTokenTaxon, std::uint32_t nftSeq, std::uint16_t flags, std::uint16_t xferFee) { + if (env.current()->rules().enabled(fixNFTokenRemint)) + { + // If fixNFTokenRemint is enabled, we must add issuer's + // FirstNFTokenSequence to offset the starting NFT sequence number. 
+ nftSeq += env.le(issuer) + ->at(~sfFirstNFTokenSequence) + .value_or(env.seq(issuer)); + } return ripple::NFTokenMint::createNFTokenID( flags, xferFee, issuer, nft::toTaxon(nfTokenTaxon), nftSeq); } diff --git a/src/test/jtx/token.h b/src/test/jtx/token.h index 44f89087b85..150ddfab0ea 100644 --- a/src/test/jtx/token.h +++ b/src/test/jtx/token.h @@ -95,6 +95,7 @@ getNextID( /** Get the NFTokenID for a particular nftSequence. */ uint256 getID( + jtx::Env const& env, jtx::Account const& account, std::uint32_t tokenTaxon, std::uint32_t nftSeq, From 9fcb28acad120902791e59007f1746f98733eeb7 Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Tue, 21 Mar 2023 08:01:47 -0700 Subject: [PATCH 129/508] docs: update protocol README (#4457) --- src/ripple/protocol/README.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/ripple/protocol/README.md b/src/ripple/protocol/README.md index 9a0f4952a38..9864b700ef2 100644 --- a/src/ripple/protocol/README.md +++ b/src/ripple/protocol/README.md @@ -1,14 +1,13 @@ # protocol Classes and functions for handling data and -values associated with the Ripple protocol. +values associated with the XRP Ledger protocol. ## Serialized Objects -In ripple objects transmitted over the network must be +Objects transmitted over the network must be serialized into a canonical format. The prefix "ST" refers -to classes that deal with the serialized format of ripple -objects. +to classes that deal with the serialized format. The term "Tx" or "tx" is an abbreviation for "Transaction", a commonly occurring object type. @@ -38,3 +37,8 @@ it exists and a second time to get/set its value. The source of this "type magic" is in [SField.h](./SField.h#L296-L302). + +### Related Resources + +- [ripple-binary-codec SField enums](https://github.com/XRPLF/xrpl.js/tree/main/packages/ripple-binary-codec/src/enums) +- [SFCode Registry Tables](https://github.com/XRPLF/sFieldRegistry) From da18c86cbfea1d8fe6940035f9103e15890d47ce Mon Sep 17 00:00:00 2001 From: Michael Legleux Date: Tue, 21 Mar 2023 02:18:01 -0700 Subject: [PATCH 130/508] Build packages with Ubuntu 18.04 Restores Ubuntu 18.04 packages Update docker images to use Conan --- Builds/CMake/RippledCompiler.cmake | 5 +- Builds/CMake/RippledRelease.cmake | 10 +- Builds/containers/gitlab-ci/pkgbuild.yml | 77 ++++++++- .../gitlab-ci/push_to_artifactory.sh | 2 +- .../containers/packaging/dpkg/build_dpkg.sh | 2 - Builds/containers/packaging/dpkg/debian/rules | 6 +- Builds/containers/ubuntu-builder/Dockerfile | 17 +- .../containers/ubuntu-builder/ubuntu_setup.sh | 154 +++++++----------- 8 files changed, 153 insertions(+), 120 deletions(-) diff --git a/Builds/CMake/RippledCompiler.cmake b/Builds/CMake/RippledCompiler.cmake index 9a96d647f45..2fbb6004333 100644 --- a/Builds/CMake/RippledCompiler.cmake +++ b/Builds/CMake/RippledCompiler.cmake @@ -124,7 +124,10 @@ else () # * static option set and # * NOT APPLE (AppleClang does not support static libc/c++) and # * NOT san (sanitizers typically don't work with static libc/c++) - $<$,$>,$>>:-static-libstdc++>) + $<$,$>,$>>: + -static-libstdc++ + -static-libgcc + >) endif () if (use_gold AND is_gcc) diff --git a/Builds/CMake/RippledRelease.cmake b/Builds/CMake/RippledRelease.cmake index 8e4dcfcd6d0..1c60ea80d7b 100644 --- a/Builds/CMake/RippledRelease.cmake +++ b/Builds/CMake/RippledRelease.cmake @@ -64,16 +64,16 @@ if (is_root_project) #[===================================================================[ dpkg 
#]===================================================================] - # currently use ubuntu 16.04 as a base b/c it has one of + # currently use ubuntu 18.04 as a base b/c it has one of # the lower versions of libc among ubuntu and debian releases. # we could change this in the future and build with some other deb # based system. add_custom_target (dpkg_container docker build --pull - --build-arg DIST_TAG=20.04 + --build-arg DIST_TAG=18.04 --build-arg GIT_COMMIT=${commit_hash} - -t rippleci/rippled-dpkg-builder:${container_label} + -t rippled-dpkg-builder:${container_label} $<$:--cache-from=${dpkg_cache_from}> -f ubuntu-builder/Dockerfile . WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/Builds/containers @@ -119,7 +119,7 @@ if (is_root_project) docker run -v ${CMAKE_CURRENT_SOURCE_DIR}:/opt/rippled_bld/pkg/rippled -v ${CMAKE_CURRENT_BINARY_DIR}/packages:/opt/rippled_bld/pkg/out - -t rippleci/rippled-dpkg-builder:${container_label} + -t rippled-dpkg-builder:${container_label} /bin/bash -c "cp -fpu rippled/Builds/containers/packaging/dpkg/build_dpkg.sh . && ./build_dpkg.sh" VERBATIM USES_TERMINAL @@ -159,7 +159,7 @@ if (is_root_project) add_custom_target (ci_container docker build --pull - --build-arg DIST_TAG=20.04 + --build-arg DIST_TAG=18.04 --build-arg GIT_COMMIT=${commit_hash} --build-arg CI_USE=true -t rippled-ci-builder:${container_label} diff --git a/Builds/containers/gitlab-ci/pkgbuild.yml b/Builds/containers/gitlab-ci/pkgbuild.yml index 8a602c84cdc..f35186acb8e 100644 --- a/Builds/containers/gitlab-ci/pkgbuild.yml +++ b/Builds/containers/gitlab-ci/pkgbuild.yml @@ -12,10 +12,10 @@ variables: # cmake build (container targets) and tagged/pushed so they # can be used here RPM_CONTAINER_TAG: "2023-02-13" - RPM_CONTAINER_NAME: "rippleci/rippled-rpm-builder" + RPM_CONTAINER_NAME: "rippled-rpm-builder" RPM_CONTAINER_FULLNAME: "${RPM_CONTAINER_NAME}:${RPM_CONTAINER_TAG}" - DPKG_CONTAINER_TAG: "2023-02-13" - DPKG_CONTAINER_NAME: "rippleci/rippled-dpkg-builder" + DPKG_CONTAINER_TAG: "2023-03-20" + DPKG_CONTAINER_NAME: "rippled-dpkg-builder" DPKG_CONTAINER_FULLNAME: "${DPKG_CONTAINER_NAME}:${DPKG_CONTAINER_TAG}" ARTIFACTORY_HOST: "artifactory.ops.ripple.com" ARTIFACTORY_HUB: "${ARTIFACTORY_HOST}:6555" @@ -190,7 +190,7 @@ rocky_8_smoketest: dependencies: - rpm_build image: - name: rockylinux/rockylinux:8 + name: artifactory.ops.ripple.com/rockylinux/rockylinux:8 <<: *run_local_smoketest fedora_37_smoketest: @@ -209,6 +209,14 @@ fedora_38_smoketest: name: artifactory.ops.ripple.com/fedora:38 <<: *run_local_smoketest +ubuntu_18_smoketest: + stage: smoketest + dependencies: + - dpkg_build + image: + name: artifactory.ops.ripple.com/ubuntu:18.04 + <<: *run_local_smoketest + ubuntu_20_smoketest: stage: smoketest dependencies: @@ -225,6 +233,14 @@ ubuntu_22_smoketest: name: artifactory.ops.ripple.com/ubuntu:22.04 <<: *run_local_smoketest +debian_10_smoketest: + stage: smoketest + dependencies: + - dpkg_build + image: + name: artifactory.ops.ripple.com/debian:10 + <<: *run_local_smoketest + debian_11_smoketest: stage: smoketest dependencies: @@ -329,12 +345,13 @@ centos_7_verify_repo_test: <<: *only_primary <<: *run_repo_smoketest + rocky_8_verify_repo_test: stage: verify_from_test variables: RPM_REPO: "rippled-rpm-test-mirror" image: - name: rockylinux/rockylinux:8 + name: artifactory.ops.ripple.com/rockylinux/rockylinux:8 dependencies: - rpm_sign <<: *only_primary @@ -362,6 +379,18 @@ fedora_38_verify_repo_test: <<: *only_primary <<: *run_repo_smoketest +ubuntu_18_verify_repo_test: + stage: 
verify_from_test + variables: + DISTRO: "bionic" + DEB_REPO: "rippled-deb-test-mirror" + image: + name: artifactory.ops.ripple.com/ubuntu:18.04 + dependencies: + - dpkg_sign + <<: *only_primary + <<: *run_repo_smoketest + ubuntu_20_verify_repo_test: stage: verify_from_test variables: @@ -386,6 +415,18 @@ ubuntu_22_verify_repo_test: <<: *only_primary <<: *run_repo_smoketest +debian_10_verify_repo_test: + stage: verify_from_test + variables: + DISTRO: "buster" + DEB_REPO: "rippled-deb-test-mirror" + image: + name: artifactory.ops.ripple.com/debian:10 + dependencies: + - dpkg_sign + <<: *only_primary + <<: *run_repo_smoketest + debian_11_verify_repo_test: stage: verify_from_test variables: @@ -468,7 +509,7 @@ rocky_8_verify_repo_prod: variables: RPM_REPO: "rippled-rpm" image: - name: rockylinux/rockylinux:8 + name: artifactory.ops.ripple.com/rockylinux/rockylinux:8 dependencies: - rpm_sign <<: *only_primary @@ -496,6 +537,18 @@ fedora_38_verify_repo_prod: <<: *only_primary <<: *run_repo_smoketest +ubuntu_18_verify_repo_prod: + stage: verify_from_prod + variables: + DISTRO: "bionic" + DEB_REPO: "rippled-deb" + image: + name: artifactory.ops.ripple.com/ubuntu:18.04 + dependencies: + - dpkg_sign + <<: *only_primary + <<: *run_repo_smoketest + ubuntu_20_verify_repo_prod: stage: verify_from_prod variables: @@ -520,6 +573,18 @@ ubuntu_22_verify_repo_prod: <<: *only_primary <<: *run_repo_smoketest +debian_10_verify_repo_prod: + stage: verify_from_prod + variables: + DISTRO: "buster" + DEB_REPO: "rippled-deb" + image: + name: artifactory.ops.ripple.com/debian:10 + dependencies: + - dpkg_sign + <<: *only_primary + <<: *run_repo_smoketest + debian_11_verify_repo_prod: stage: verify_from_prod variables: diff --git a/Builds/containers/gitlab-ci/push_to_artifactory.sh b/Builds/containers/gitlab-ci/push_to_artifactory.sh index 255f4247ae0..97a9e906ad6 100644 --- a/Builds/containers/gitlab-ci/push_to_artifactory.sh +++ b/Builds/containers/gitlab-ci/push_to_artifactory.sh @@ -20,7 +20,7 @@ RIPPLED_REPORTING_DBG_PKG=$(ls rippled-reporting-dbgsym_*.*deb) # TODO - where to upload src tgz? RIPPLED_SRC=$(ls rippled_*.orig.tar.gz) DEB_MATRIX=";deb.component=${COMPONENT};deb.architecture=amd64" -for dist in bullseye focal jammy; do +for dist in buster bullseye bionic focal jammy; do DEB_MATRIX="${DEB_MATRIX};deb.distribution=${dist}" done echo "{ \"debs\": {" > "${TOPDIR}/files.info" diff --git a/Builds/containers/packaging/dpkg/build_dpkg.sh b/Builds/containers/packaging/dpkg/build_dpkg.sh index fb67ea2ee10..3cfafe9e342 100755 --- a/Builds/containers/packaging/dpkg/build_dpkg.sh +++ b/Builds/containers/packaging/dpkg/build_dpkg.sh @@ -45,8 +45,6 @@ CHANGELOG # PATH must be preserved for our more modern cmake in /opt/local # TODO : consider allowing lintian to run in future ? export DH_BUILD_DDEBS=1 -export CC=gcc-11 -export CXX=g++-11 debuild --no-lintian --preserve-envvar PATH --preserve-env -us -uc rc=$?; if [[ $rc != 0 ]]; then error "error building dpkg" diff --git a/Builds/containers/packaging/dpkg/debian/rules b/Builds/containers/packaging/dpkg/debian/rules index 83af329cd6d..d62517d34b6 100755 --- a/Builds/containers/packaging/dpkg/debian/rules +++ b/Builds/containers/packaging/dpkg/debian/rules @@ -23,6 +23,8 @@ override_dh_auto_configure: conan install . 
\ --install-folder bld/rippled \ --build missing \ + --build boost \ + --build sqlite3 \ --settings build_type=Release cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \ @@ -38,8 +40,10 @@ override_dh_auto_configure: conan install . \ --install-folder bld/rippled-reporting \ --build missing \ + --build boost \ + --build sqlite3 \ + --build libuv \ --settings build_type=Release \ - --settings compiler.cppstd=17 \ --options reporting=True cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \ diff --git a/Builds/containers/ubuntu-builder/Dockerfile b/Builds/containers/ubuntu-builder/Dockerfile index 478d9ad7590..23723967fc2 100644 --- a/Builds/containers/ubuntu-builder/Dockerfile +++ b/Builds/containers/ubuntu-builder/Dockerfile @@ -1,22 +1,13 @@ -ARG DIST_TAG=20.04 +ARG DIST_TAG=18.04 FROM ubuntu:$DIST_TAG ARG GIT_COMMIT=unknown ARG CI_USE=false LABEL git-commit=$GIT_COMMIT -# install/setup prerequisites: -COPY ubuntu-builder/ubuntu_setup.sh /tmp/ -COPY shared/install_cmake.sh /tmp/ -RUN chmod +x /tmp/ubuntu_setup.sh && \ - chmod +x /tmp/install_cmake.sh -RUN /tmp/ubuntu_setup.sh +WORKDIR /root +COPY ubuntu-builder/ubuntu_setup.sh . +RUN ./ubuntu_setup.sh && rm ubuntu_setup.sh -RUN /tmp/install_cmake.sh 3.16.3 /opt/local/cmake-3.16 -RUN ln -s /opt/local/cmake-3.16 /opt/local/cmake -ENV PATH="/opt/local/cmake/bin:$PATH" - -# prep files for package building -RUN update-alternatives --set gcc /usr/bin/gcc-11 RUN mkdir -m 777 -p /opt/rippled_bld/pkg/ WORKDIR /opt/rippled_bld/pkg diff --git a/Builds/containers/ubuntu-builder/ubuntu_setup.sh b/Builds/containers/ubuntu-builder/ubuntu_setup.sh index 379ca61e2b1..cd8db75153d 100755 --- a/Builds/containers/ubuntu-builder/ubuntu_setup.sh +++ b/Builds/containers/ubuntu-builder/ubuntu_setup.sh @@ -1,104 +1,76 @@ #!/usr/bin/env bash -set -ex -source /etc/os-release +set -o errexit +set -o nounset +set -o xtrace -if [[ ${VERSION_ID} =~ ^20\. || ${VERSION_ID} =~ ^22\. ]] ; then - echo "setup for ${PRETTY_NAME}" -else - echo "${VERSION} not supported" - exit 1 -fi +# Parameters -export DEBIAN_FRONTEND="noninteractive" -echo "Acquire::Retries 3;" > /etc/apt/apt.conf.d/80-retries -echo "Acquire::http::Pipeline-Depth 0;" >> /etc/apt/apt.conf.d/80-retries -echo "Acquire::http::No-Cache true;" >> /etc/apt/apt.conf.d/80-retries -echo "Acquire::BrokenProxy true;" >> /etc/apt/apt.conf.d/80-retries -apt-get update -o Acquire::CompressionTypes::Order::=gz +gcc_version=${GCC_VERSION:-10} +cmake_version=${CMAKE_VERSION:-3.25.1} +conan_version=${CONAN_VERSION:-1.59} -apt-get -y update -apt-get -y install apt-utils -apt-get -y install software-properties-common wget curl ca-certificates -apt-get -y install python3-pip -apt-get -y upgrade -add-apt-repository -y ppa:ubuntu-toolchain-r/test -apt-get -y clean -apt-get -y update +apt update +# Iteratively build the list of packages to install so that we can interleave +# the lines with comments explaining their inclusion. 
+dependencies='' +# - to identify the Ubuntu version +dependencies+=' lsb-release' +# - for add-apt-repository +dependencies+=' software-properties-common' +# - to download CMake +dependencies+=' curl' +# - to build CMake +dependencies+=' libssl-dev' +# - Python headers for Boost.Python +dependencies+=' python3-dev' +# - to install Conan +dependencies+=' python3-pip' +# - to download rippled +dependencies+=' git' +# - CMake generators (but not CMake itself) +dependencies+=' make ninja-build' +apt install --yes ${dependencies} -apt-get -y --fix-missing install \ - make cmake ninja-build autoconf automake libtool pkg-config libtool \ - openssl libssl-dev \ - liblzma-dev libbz2-dev zlib1g-dev \ - libjemalloc-dev \ - gdb gdbserver \ - libstdc++6 \ - flex bison parallel \ - libicu-dev texinfo \ - java-common javacc \ - dpkg-dev debhelper devscripts fakeroot \ - debmake git-buildpackage dh-make gitpkg debsums gnupg \ - dh-buildinfo dh-make \ - apt-transport-https +add-apt-repository --yes ppa:ubuntu-toolchain-r/test +apt install --yes gcc-${gcc_version} g++-${gcc_version} \ + debhelper debmake debsums gnupg dh-buildinfo dh-make dh-systemd cmake \ + ninja-build zlib1g-dev make cmake ninja-build autoconf automake \ + pkg-config apt-transport-https -if [[ ${VERSION_ID} =~ ^20\. ]] ; then -apt-get install -y \ - dh-systemd -fi - -apt-get -y install gcc-11 g++-11 +# Give us nice unversioned aliases for gcc and company. update-alternatives --install \ - /usr/bin/gcc gcc /usr/bin/gcc-11 20 \ - --slave /usr/bin/g++ g++ /usr/bin/g++-11 \ - --slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-11 \ - --slave /usr/bin/gcc-nm gcc-nm /usr/bin/gcc-nm-11 \ - --slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-11 \ - --slave /usr/bin/gcov gcov /usr/bin/gcov-11 \ - --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-dump-11 \ - --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-tool-11 + /usr/bin/gcc gcc /usr/bin/gcc-${gcc_version} 100 \ + --slave /usr/bin/g++ g++ /usr/bin/g++-${gcc_version} \ + --slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-${gcc_version} \ + --slave /usr/bin/gcc-nm gcc-nm /usr/bin/gcc-nm-${gcc_version} \ + --slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-${gcc_version} \ + --slave /usr/bin/gcov gcov /usr/bin/gcov-${gcc_version} \ + --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-dump-${gcc_version} \ + --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-tool-${gcc_version} update-alternatives --auto gcc -update-alternatives --install /usr/bin/cpp cpp /usr/bin/cpp-11 20 -update-alternatives --auto cpp - -wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - - -if [[ ${VERSION_ID} =~ ^20\. ]] ; then - cat << EOF > /etc/apt/sources.list.d/llvm.list -deb http://apt.llvm.org/focal/ llvm-toolchain-focal main -deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal main -deb http://apt.llvm.org/focal/ llvm-toolchain-focal-13 main -deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-13 main -deb http://apt.llvm.org/focal/ llvm-toolchain-focal-14 main -deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-14 main -EOF - apt-get -y install binutils clang-12 -fi - +# Download and unpack CMake. +cmake_slug="cmake-${cmake_version}" +curl --location --remote-name \ + "https://github.com/Kitware/CMake/releases/download/v${cmake_version}/${cmake_slug}.tar.gz" +tar xzf ${cmake_slug}.tar.gz +rm ${cmake_slug}.tar.gz -apt-get -y update -if [[ ${VERSION_ID} =~ ^20\. 
]] ; then -for v in 12 14; do - apt-get -y install \ - clang-$v libclang-common-$v-dev libclang-$v-dev libllvm$v llvm-$v \ - llvm-$v-dev llvm-$v-runtime clang-format-$v python3-clang-$v \ - lld-$v libfuzzer-$v-dev libc++-$v-dev python-is-python3 - update-alternatives --install \ - /usr/bin/clang clang /usr/bin/clang-$v 40 \ - --slave /usr/bin/clang++ clang++ /usr/bin/clang++-$v \ - --slave /usr/bin/llvm-profdata llvm-profdata /usr/bin/llvm-profdata-$v \ - --slave /usr/bin/asan-symbolize asan-symbolize /usr/bin/asan_symbolize-$v \ - --slave /usr/bin/llvm-symbolizer llvm-symbolizer /usr/bin/llvm-symbolizer-$v \ - --slave /usr/bin/clang-format clang-format /usr/bin/clang-format-$v \ - --slave /usr/bin/llvm-ar llvm-ar /usr/bin/llvm-ar-$v \ - --slave /usr/bin/llvm-cov llvm-cov /usr/bin/llvm-cov-$v \ - --slave /usr/bin/llvm-nm llvm-nm /usr/bin/llvm-nm-$v - done -fi +# Build and install CMake. +cd ${cmake_slug} +./bootstrap --parallel=$(nproc) +make --jobs $(nproc) +make install +cd .. +rm --recursive --force ${cmake_slug} -pip install "conan<2" && \ - conan profile new default --detect && \ - conan profile update settings.compiler.cppstd=20 default && \ - conan profile update settings.compiler.libcxx=libstdc++11 default +# Install Conan. +pip3 install conan==${conan_version} -apt-get -y autoremove +conan profile new --detect gcc +conan profile update settings.compiler=gcc gcc +conan profile update settings.compiler.version=${gcc_version} gcc +conan profile update settings.compiler.libcxx=libstdc++11 gcc +conan profile update env.CC=/usr/bin/gcc gcc +conan profile update env.CXX=/usr/bin/g++ gcc From 383f1b6ab3e8509f4a473e8992c76feb29fe1c55 Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Tue, 21 Mar 2023 11:14:20 -0700 Subject: [PATCH 131/508] Set version to 1.10.1-rc1 --- src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index 03e83dd36c9..e4f901584dc 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.10.0" +char const* const versionString = "1.10.1-rc1" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 4f506599f62d530affeb5a3b47257c15b1c49160 Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Wed, 22 Mar 2023 09:27:56 -0700 Subject: [PATCH 132/508] Set version to 1.10.1 * Add release notes --- RELEASENOTES.md | 45 ++++++++++++++++++++++++++ src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/RELEASENOTES.md b/RELEASENOTES.md index e814019b411..08ceb5b3dd4 100644 --- a/RELEASENOTES.md +++ b/RELEASENOTES.md @@ -7,6 +7,51 @@ This document contains the release notes for `rippled`, the reference server imp Have new ideas? Need help with setting up your node? [Please open an issue here](https://github.com/xrplf/rippled/issues/new/choose). + +# Introducing XRP Ledger version 1.10.1 + +Version 1.10.1 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release restores packages for Ubuntu 18.04. + +Compared to version 1.10.0, the only C++ code change fixes an edge case in Reporting Mode. 
+ +If you are already running version 1.10.0, then upgrading to version 1.10.1 is generally not required. + +[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) + + + +## Install / Upgrade + +On supported platforms, see the [instructions on installing or updating `rippled`](https://xrpl.org/install-rippled.html). + +## Changelog + +- [`da18c86cbf`](https://github.com/ripple/rippled/commit/da18c86cbfea1d8fe6940035f9103e15890d47ce) Build packages with Ubuntu 18.04 +- [`f7b3ddd87b`](https://github.com/ripple/rippled/commit/f7b3ddd87b8ef093a06ab1420bea57ed1e77643a) Reporting Mode: Do not attempt to acquire missing data from peer network (#4458) + +### GitHub + +The public source code repository for `rippled` is hosted on GitHub at . + +We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. + +### Credits + +The following people contributed directly to this release: + +- John Freeman +- Mark Travis +- Michael Legleux + +Bug Bounties and Responsible Disclosures: +We welcome reviews of the rippled code and urge researchers to +responsibly disclose any issues they may find. + +To report a bug, please send a detailed report to: + + bugs@xrpl.org + + # Introducing XRP Ledger version 1.10.0 Version 1.10.0 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release introduces six new amendments, detailed below, and cleans up code to improve performance. diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index e4f901584dc..b1c1bab7fb5 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.10.1-rc1" +char const* const versionString = "1.10.1" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 7745c72b2cff3794ae7f0a14e2c99802652c322b Mon Sep 17 00:00:00 2001 From: John Freeman Date: Wed, 22 Mar 2023 14:02:42 -0500 Subject: [PATCH 133/508] docs: update build instructions: (#4381) * Remove obsolete build instructions. * By using Conan, builders can choose which dependencies specifically to build and link as shared objects. * Refactor the build instructions based on the plan in #4433. --- .gitignore | 3 +- BUILD.md | 164 ++--------- Builds/VisualStudio2017/README.md | 1 - .../CMakeSettings-example.json | 45 --- Builds/VisualStudio2019/README.md | 263 ------------------ Builds/linux/README.md | 1 - Builds/macos/README.md | 1 - docs/build/conan.md | 114 ++++++++ docs/build/environment.md | 83 ++++++ docs/build/install.md | 13 + 10 files changed, 235 insertions(+), 453 deletions(-) delete mode 100644 Builds/VisualStudio2017/README.md delete mode 100644 Builds/VisualStudio2019/CMakeSettings-example.json delete mode 100644 Builds/VisualStudio2019/README.md delete mode 100644 Builds/linux/README.md delete mode 100644 Builds/macos/README.md create mode 100644 docs/build/conan.md create mode 100644 docs/build/environment.md create mode 100644 docs/build/install.md diff --git a/.gitignore b/.gitignore index 352d68e735e..5e4414e8e62 100644 --- a/.gitignore +++ b/.gitignore @@ -21,7 +21,6 @@ bin/project-cache.jam # Ignore object files. 
*.o -build .nih_c tags TAGS @@ -65,7 +64,7 @@ docs/html_doc # Xcode user-specific project settings # Xcode .DS_Store -*/build/* +/build/ *.pbxuser !default.pbxuser *.mode1v3 diff --git a/BUILD.md b/BUILD.md index a1376bdc5df..3a7283407d9 100644 --- a/BUILD.md +++ b/BUILD.md @@ -1,122 +1,3 @@ -## A crash course in CMake and Conan - -To better understand how to use Conan, -we should first understand _why_ we use Conan, -and to understand that, -we need to understand how we use CMake. - - -### CMake - -Technically, you don't need CMake to build this project. -You could manually compile every translation unit into an object file, -using the right compiler options, -and then manually link all those objects together, -using the right linker options. -However, that is very tedious and error-prone, -which is why we lean on tools like CMake. - -We have written CMake configuration files -([`CMakeLists.txt`](./CMakeLists.txt) and friends) -for this project so that CMake can be used to correctly compile and link -all of the translation units in it. -Or rather, CMake will generate files for a separate build system -(e.g. Make, Ninja, Visual Studio, Xcode, etc.) -that compile and link all of the translation units. -Even then, CMake has parameters, some of which are platform-specific. -In CMake's parlance, parameters are specially-named **variables** like -[`CMAKE_BUILD_TYPE`][build_type] or -[`CMAKE_MSVC_RUNTIME_LIBRARY`][runtime]. -Parameters include: - -- what build system to generate files for -- where to find the compiler and linker -- where to find dependencies, e.g. libraries and headers -- how to link dependencies, e.g. any special compiler or linker flags that - need to be used with them, including preprocessor definitions -- how to compile translation units, e.g. with optimizations, debug symbols, - position-independent code, etc. -- on Windows, which runtime library to link with - -For some of these parameters, like the build system and compiler, -CMake goes through a complicated search process to choose default values. -For others, like the dependencies, -_we_ had written in the CMake configuration files of this project -our own complicated process to choose defaults. -For most developers, things "just worked"... until they didn't, and then -you were left trying to debug one of these complicated processes, instead of -choosing and manually passing the parameter values yourself. - -You can pass every parameter to CMake on the command line, -but writing out these parameters every time we want to configure CMake is -a pain. -Most humans prefer to put them into a configuration file, once, that -CMake can read every time it is configured. -For CMake, that file is a [toolchain file][toolchain]. - - -### Conan - -These next few paragraphs on Conan are going to read much like the ones above -for CMake. - -Technically, you don't need Conan to build this project. -You could manually download, configure, build, and install all of the -dependencies yourself, and then pass all of the parameters necessary for -CMake to link to those dependencies. -To guarantee ABI compatibility, you must be sure to use the same set of -compiler and linker options for all dependencies _and_ this project. -However, that is very tedious and error-prone, which is why we lean on tools -like Conan. 
- -We have written a Conan configuration file ([`conanfile.py`](./conanfile.py)) -so that Conan can be used to correctly download, configure, build, and install -all of the dependencies for this project, -using a single set of compiler and linker options for all of them. -It generates files that contain almost all of the parameters that CMake -expects. -Those files include: - -- A single toolchain file. -- For every dependency, a CMake [package configuration file][pcf], - [package version file][pvf], and for every build type, a package - targets file. - Together, these files implement version checking and define `IMPORTED` - targets for the dependencies. - -The toolchain file itself amends the search path -([`CMAKE_PREFIX_PATH`][prefix_path]) so that [`find_package()`][find_package] -will [discover][search] the generated package configuration files. - -**Nearly all we must do to properly configure CMake is pass the toolchain -file.** -What CMake parameters are left out? -You'll still need to pick a build system generator, -and if you choose a single-configuration generator, -you'll need to pass the `CMAKE_BUILD_TYPE`, -which should match the `build_type` setting you gave to Conan. - -Even then, Conan has parameters, some of which are platform-specific. -In Conan's parlance, parameters are either settings or options. -**Settings** are shared by all packages, e.g. the build type. -**Options** are specific to a given package, e.g. whether to build and link -OpenSSL as a shared library. - -For settings, Conan goes through a complicated search process to choose -defaults. -For options, each package recipe defines its own defaults. - -You can pass every parameter to Conan on the command line, -but it is more convenient to put them in a [profile][profile]. -**All we must do to properly configure Conan is edit and pass the profile.** -By default, Conan will use the profile named "default". -You can let Conan create the default profile with this command: - -``` -conan profile new default --detect -``` - - ## Branches For a stable release, choose the `master` branch or one of the [tagged @@ -170,26 +51,29 @@ Until then, we advise Windows developers to use Visual Studio 2019. ## Prerequisites -To build this package, you will need Python (>= 3.7), -[Conan][] (>= 1.55), and [CMake][] (>= 3.16). - > **Warning** -> The commands in this document are not meant to be blindly copied and pasted. -> This document is written for multiple audiences, -> meaning that your particular circumstances may require some commands and not -> others. -> You should never run any commands without understanding what they do -> and why you are running them. +> These instructions assume you have a C++ development environment ready +> with Git, Python, Conan, CMake, and a C++ compiler. +> For help setting one up on Linux, macOS, or Windows, +> please see [our guide](./docs/build/environment.md). > -> These instructions assume a basic familiarity with Conan and CMake. +> These instructions further assume a basic familiarity with Conan and CMake. > If you are unfamiliar with Conan, -> then please read the [crash course](#a-crash-course-in-cmake-and-conan) -> at the beginning of this document, +> then please read our [crash course](./docs/build/conan.md) > or the official [Getting Started][3] walkthrough. +To build this package, you will need Python (>= 3.7), +[Conan][] (>= 1.55, < 2), and [CMake][] (>= 3.16). 
+ [Conan]: https://conan.io/downloads.html [CMake]: https://cmake.org/download/ +You'll need at least one Conan profile: + +``` +conan profile new default --detect +``` + You'll need to compile in the C++20 dialect: ``` @@ -235,15 +119,21 @@ conan profile update env.CC= default conan profile update env.CXX= default ``` +Export our [Conan recipe for Snappy](./external/snappy). +It does not explicitly link the C++ standard library, +which allows you to statically link it with GCC, if you want. + +``` +conan export external/snappy snappy/1.1.9@ +``` ## How to build and test Let's start with a couple of examples of common workflows. The first is for a single-configuration generator (e.g. Unix Makefiles) on -Linux or MacOS: +Linux or macOS: ``` -conan export external/snappy snappy/1.1.9@ mkdir .build cd .build conan install .. --output-folder . --build missing --settings build_type=Release @@ -256,7 +146,6 @@ The second is for a multi-configuration generator (e.g. Visual Studio) on Windows: ``` -conan export external/snappy snappy/1.1.9@ mkdir .build cd .build conan install .. --output-folder . --build missing --settings build_type=Release --settings compiler.runtime=MT @@ -270,11 +159,6 @@ cmake --build . --config Debug Now to explain the individual steps in each example: -1. Export our [Conan recipe for Snappy](./external/snappy). - - It does not explicitly link the C++ standard library, - which allows us to statically link it. - 1. Create a build directory (and move into it). You can choose any name you want. @@ -327,7 +211,7 @@ Now to explain the individual steps in each example: For a single-configuration generator, it will build whatever configuration you passed for `CMAKE_BUILD_TYPE`. -5. Test rippled. +1. Test rippled. The exact location of rippled in your build directory depends on your choice of CMake generator. diff --git a/Builds/VisualStudio2017/README.md b/Builds/VisualStudio2017/README.md deleted file mode 100644 index fce8e824a05..00000000000 --- a/Builds/VisualStudio2017/README.md +++ /dev/null @@ -1 +0,0 @@ -[Build instructions are currently located in `BUILD.md`](../../BUILD.md) diff --git a/Builds/VisualStudio2019/CMakeSettings-example.json b/Builds/VisualStudio2019/CMakeSettings-example.json deleted file mode 100644 index b90bfce6bb1..00000000000 --- a/Builds/VisualStudio2019/CMakeSettings-example.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - // See https://go.microsoft.com//fwlink//?linkid=834763 for more information about this file. 
- "configurations": [ - { - "name": "x64-Debug", - "generator": "Visual Studio 16 2019", - "configurationType": "Debug", - "inheritEnvironments": [ "msvc_x64_x64" ], - "buildRoot": "${thisFileDir}\\build\\${name}", - "cmakeCommandArgs": "", - "buildCommandArgs": "-v:minimal", - "ctestCommandArgs": "", - "variables": [ - { - "name": "BOOST_ROOT", - "value": "C:\\lib\\boost" - }, - { - "name": "OPENSSL_ROOT", - "value": "C:\\lib\\OpenSSL-Win64" - } - ] - }, - { - "name": "x64-Release", - "generator": "Visual Studio 16 2019", - "configurationType": "Release", - "inheritEnvironments": [ "msvc_x64_x64" ], - "buildRoot": "${thisFileDir}\\build\\${name}", - "cmakeCommandArgs": "", - "buildCommandArgs": "-v:minimal", - "ctestCommandArgs": "", - "variables": [ - { - "name": "BOOST_ROOT", - "value": "C:\\lib\\boost" - }, - { - "name": "OPENSSL_ROOT", - "value": "C:\\lib\\OpenSSL-Win64" - } - ] - } - ] -} diff --git a/Builds/VisualStudio2019/README.md b/Builds/VisualStudio2019/README.md deleted file mode 100644 index e369eac6775..00000000000 --- a/Builds/VisualStudio2019/README.md +++ /dev/null @@ -1,263 +0,0 @@ -# Visual Studio 2019 Build Instructions - -## Important - -We do not recommend Windows for rippled production use at this time. Currently, -the Ubuntu platform has received the highest level of quality assurance, -testing, and support. Additionally, 32-bit Windows versions are not supported. - -## Prerequisites - -To clone the source code repository, create branches for inspection or -modification, build rippled under Visual Studio, and run the unit tests you will -need these software components - -| Component | Minimum Recommended Version | -|-----------|-----------------------| -| [Visual Studio 2019](README.md#install-visual-studio-2019)| 15.5.4 | -| [Git for Windows](README.md#install-git-for-windows)| 2.16.1 | -| [OpenSSL Library](README.md#install-openssl) | 1.1.1L | -| [Boost library](README.md#build-boost) | 1.70.0 | -| [CMake for Windows](README.md#optional-install-cmake-for-windows)* | 3.12 | - -\* Only needed if not using the integrated CMake in VS 2019 and prefer generating dedicated project/solution files. - -## Install Software - -### Install Visual Studio 2019 - -If not already installed on your system, download your choice of installer from -the [Visual Studio 2019 -Download](https://www.visualstudio.com/downloads/download-visual-studio-vs) -page, run the installer, and follow the directions. **You may need to choose the -`Desktop development with C++` workload to install all necessary C++ features.** - -Any version of Visual Studio 2019 may be used to build rippled. The **Visual -Studio 2019 Community** edition is available free of charge (see [the product -page](https://www.visualstudio.com/products/visual-studio-community-vs) for -licensing details), while paid editions may be used for an initial free-trial -period. - -### Install Git for Windows - -Git is a distributed revision control system. The Windows version also provides -the bash shell and many Windows versions of Unix commands. While there are other -varieties of Git (such as TortoiseGit, which has a native Windows interface and -integrates with the Explorer shell), we recommend installing [Git for -Windows](https://git-scm.com/) since it provides a Unix-like command line -environment useful for running shell scripts. Use of the bash shell under -Windows is mandatory for running the unit tests. 
- -### Install OpenSSL - -[Download the latest version of -OpenSSL.](http://slproweb.com/products/Win32OpenSSL.html) There will -several `Win64` bit variants available, you want the non-light -`v1.1` line. As of this writing, you **should** select - -* Win64 OpenSSL v1.1.1q - -and should **not** select - -* Anything with "Win32" in the name -* Anything with "light" in the name -* Anything with "EXPERIMENTAL" in the name -* Anything in the 3.0 line - rippled won't currently build with this version. - -Run the installer, and choose an appropriate location for your OpenSSL -installation. In this guide we use `C:\lib\OpenSSL-Win64` as the destination -location. - -You may be informed on running the installer that "Visual C++ 2008 -Redistributables" must first be installed first. If so, download it from the -[same page](http://slproweb.com/products/Win32OpenSSL.html), again making sure -to get the correct 32-/64-bit variant. - -* NOTE: Since rippled links statically to OpenSSL, it does not matter where the - OpenSSL .DLL files are placed, or what version they are. rippled does not use - or require any external .DLL files to run other than the standard operating - system ones. - -### Build Boost - -Boost 1.70 or later is required. - -[Download boost](http://www.boost.org/users/download/) and unpack it -to `c:\lib`. As of this writing, the most recent version of boost is 1.80.0, -which will unpack into a directory named `boost_1_80_0`. We recommended either -renaming this directory to `boost`, or creating a junction link `mklink /J boost -boost_1_80_0`, so that you can more easily switch between versions. - -Next, open **Developer Command Prompt** and type the following commands - -```powershell -cd C:\lib\boost -bootstrap -``` - -The rippled application is linked statically to the standard runtimes and -external dependencies on Windows, to ensure that the behavior of the executable -is not affected by changes in outside files. Therefore, it is necessary to build -the required boost static libraries using this command: - -```powershell -b2 -j --toolset=msvc-14.2 address-model=64 architecture=x86 link=static threading=multi runtime-link=shared,static stage -``` - -where you should replace `` with the number of parallel -invocations to use build, e.g. `bjam -j8 ...` would use up to 8 concurrent build -shell commands for the build. - -Building the boost libraries may take considerable time. When the build process -is completed, take note of both the reported compiler include paths and linker -library paths as they will be required later. - -### (Optional) Install CMake for Windows - -[CMake](http://cmake.org) is a cross platform build system generator. Visual -Studio 2019 includes an integrated version of CMake that avoids having to -manually run CMake, but it is undergoing continuous improvement. Users that -prefer to use standard Visual Studio project and solution files need to install -a dedicated version of CMake to generate them. The latest version can be found -at the [CMake download site](https://cmake.org/download/). It is recommended you -select the install option to add CMake to your path. - -## Clone the rippled repository - -If you are familiar with cloning github repositories, just follow your normal -process and clone `git@github.com:ripple/rippled.git`. Otherwise follow this -section for instructions. - -1. If you don't have a github account, sign up for one at - [github.com](https://github.com/). -2. Make sure you have Github ssh keys. 
For help see - [generating-ssh-keys](https://help.github.com/articles/generating-ssh-keys). - -Open the "Git Bash" shell that was installed with "Git for Windows" in the step -above. Navigate to the directory where you want to clone rippled (git bash uses -`/c` for windows's `C:` and forward slash where windows uses backslash, so -`C:\Users\joe\projs` would be `/c/Users/joe/projs` in git bash). Now clone the -repository and optionally switch to the *master* branch. Type the following at -the bash prompt: - -```powershell -git clone git@github.com:XRPLF/rippled.git -cd rippled -``` -If you receive an error about not having the "correct access rights" make sure -you have Github ssh keys, as described above. - -For a stable release, choose the `master` branch or one of the tagged releases -listed on [rippled's GitHub page](https://github.com/ripple/rippled/releases). - -``` -git checkout master -``` - -To test the latest release candidate, choose the `release` branch. - -``` -git checkout release -``` - -If you are doing development work and want the latest set of beta features, -you can consider using the `develop` branch instead. - -``` -git checkout develop -``` - -# Build using Visual Studio integrated CMake - -In Visual Studio 2017, Microsoft added [integrated IDE support for -cmake](https://blogs.msdn.microsoft.com/vcblog/2016/10/05/cmake-support-in-visual-studio/). -To begin, simply: - -1. Launch Visual Studio and choose **File | Open | Folder**, navigating to the - cloned rippled folder. -2. Right-click on `CMakeLists.txt` in the **Solution Explorer - Folder View** to - generate a `CMakeSettings.json` file. A sample settings file is provided - [here](/Builds/VisualStudio2019/CMakeSettings-example.json). Customize the - settings for `BOOST_ROOT`, `OPENSSL_ROOT` to match the install paths if they - differ from those in the file. -4. Select either the `x64-Release` or `x64-Debug` configuration from the - **Project Settings** drop-down. This should invoke the built-in CMake project - generator. If not, you can right-click on the `CMakeLists.txt` file and - choose **Configure rippled**. -5. Select the `rippled.exe` - option in the **Select Startup Item** drop-down. This will be the target - built when you press F7. Alternatively, you can choose a target to build from - the top-level **CMake | Build** menu. Note that at this time, there are other - targets listed that come from third party visual studio files embedded in the - rippled repo, e.g. `datagen.vcxproj`. Please ignore them. - -For details on configuring debugging sessions or further customization of CMake, -please refer to the [CMake tools for VS -documentation](https://docs.microsoft.com/en-us/cpp/ide/cmake-tools-for-visual-cpp). - -If using the provided `CMakeSettings.json` file, the executable will be in -``` -.\build\x64-Release\Release\rippled.exe -``` -or -``` -.\build\x64-Debug\Debug\rippled.exe -``` -These paths are relative to your cloned git repository. - -# Build using stand-alone CMake - -This requires having installed [CMake for -Windows](README.md#optional-install-cmake-for-windows). We do not recommend -mixing this method with the integrated CMake method for the same repository -clone. Assuming you included the cmake executable folder in your path, -execute the following commands within your `rippled` cloned repository: - -``` -mkdir build\cmake -cd build\cmake -cmake ..\.. 
-G"Visual Studio 16 2019" -Ax64 -DBOOST_ROOT="C:\lib\boost" -DOPENSSL_ROOT="C:\lib\OpenSSL-Win64" -DCMAKE_GENERATOR_TOOLSET=host=x64 -``` -Now launch Visual Studio 2019 and select **File | Open | Project/Solution**. -Navigate to the `build\cmake` folder created above and select the `rippled.sln` -file. You can then choose whether to build the `Debug` or `Release` solution -configuration. - -The executable will be in -``` -.\build\cmake\Release\rippled.exe -``` -or -``` -.\build\cmake\Debug\rippled.exe -``` -These paths are relative to your cloned git repository. - -# Unity/No-Unity Builds - -The rippled build system defaults to using -[unity source files](http://onqtam.com/programming/2018-07-07-unity-builds/) -to improve build times. In some cases it might be desirable to disable the -unity build and compile individual translation units. Here is how you can -switch to a "no-unity" build configuration: - -## Visual Studio Integrated CMake - -Edit your `CmakeSettings.json` (described above) by adding `-Dunity=OFF` -to the `cmakeCommandArgs` entry for each build configuration. - -## Standalone CMake Builds - -When running cmake to generate the Visual Studio project files, add -`-Dunity=OFF` to the command line options passed to cmake. - -**Note:** you will need to re-run the cmake configuration step anytime you -want to switch between unity/no-unity builds. - -# Unit Test (Recommended) - -`rippled` builds a set of unit tests into the server executable. To run these -unit tests after building, pass the `--unittest` option to the compiled -`rippled` executable. The executable will exit with summary info after running -the unit tests. - diff --git a/Builds/linux/README.md b/Builds/linux/README.md deleted file mode 100644 index fce8e824a05..00000000000 --- a/Builds/linux/README.md +++ /dev/null @@ -1 +0,0 @@ -[Build instructions are currently located in `BUILD.md`](../../BUILD.md) diff --git a/Builds/macos/README.md b/Builds/macos/README.md deleted file mode 100644 index fce8e824a05..00000000000 --- a/Builds/macos/README.md +++ /dev/null @@ -1 +0,0 @@ -[Build instructions are currently located in `BUILD.md`](../../BUILD.md) diff --git a/docs/build/conan.md b/docs/build/conan.md new file mode 100644 index 00000000000..01fb7150d38 --- /dev/null +++ b/docs/build/conan.md @@ -0,0 +1,114 @@ +## A crash course in CMake and Conan + +To better understand how to use Conan, +we should first understand _why_ we use Conan, +and to understand that, +we need to understand how we use CMake. + + +### CMake + +Technically, you don't need CMake to build this project. +You could manually compile every translation unit into an object file, +using the right compiler options, +and then manually link all those objects together, +using the right linker options. +However, that is very tedious and error-prone, +which is why we lean on tools like CMake. + +We have written CMake configuration files +([`CMakeLists.txt`](./CMakeLists.txt) and friends) +for this project so that CMake can be used to correctly compile and link +all of the translation units in it. +Or rather, CMake will generate files for a separate build system +(e.g. Make, Ninja, Visual Studio, Xcode, etc.) +that compile and link all of the translation units. +Even then, CMake has parameters, some of which are platform-specific. +In CMake's parlance, parameters are specially-named **variables** like +[`CMAKE_BUILD_TYPE`][build_type] or +[`CMAKE_MSVC_RUNTIME_LIBRARY`][runtime]. 
+Parameters include: + +- what build system to generate files for +- where to find the compiler and linker +- where to find dependencies, e.g. libraries and headers +- how to link dependencies, e.g. any special compiler or linker flags that + need to be used with them, including preprocessor definitions +- how to compile translation units, e.g. with optimizations, debug symbols, + position-independent code, etc. +- on Windows, which runtime library to link with + +For some of these parameters, like the build system and compiler, +CMake goes through a complicated search process to choose default values. +For others, like the dependencies, +_we_ had written in the CMake configuration files of this project +our own complicated process to choose defaults. +For most developers, things "just worked"... until they didn't, and then +you were left trying to debug one of these complicated processes, instead of +choosing and manually passing the parameter values yourself. + +You can pass every parameter to CMake on the command line, +but writing out these parameters every time we want to configure CMake is +a pain. +Most humans prefer to put them into a configuration file, once, that +CMake can read every time it is configured. +For CMake, that file is a [toolchain file][toolchain]. + + +### Conan + +These next few paragraphs on Conan are going to read much like the ones above +for CMake. + +Technically, you don't need Conan to build this project. +You could manually download, configure, build, and install all of the +dependencies yourself, and then pass all of the parameters necessary for +CMake to link to those dependencies. +To guarantee ABI compatibility, you must be sure to use the same set of +compiler and linker options for all dependencies _and_ this project. +However, that is very tedious and error-prone, which is why we lean on tools +like Conan. + +We have written a Conan configuration file ([`conanfile.py`](./conanfile.py)) +so that Conan can be used to correctly download, configure, build, and install +all of the dependencies for this project, +using a single set of compiler and linker options for all of them. +It generates files that contain almost all of the parameters that CMake +expects. +Those files include: + +- A single toolchain file. +- For every dependency, a CMake [package configuration file][pcf], + [package version file][pvf], and for every build type, a package + targets file. + Together, these files implement version checking and define `IMPORTED` + targets for the dependencies. + +The toolchain file itself amends the search path +([`CMAKE_PREFIX_PATH`][prefix_path]) so that [`find_package()`][find_package] +will [discover][search] the generated package configuration files. + +**Nearly all we must do to properly configure CMake is pass the toolchain +file.** +What CMake parameters are left out? +You'll still need to pick a build system generator, +and if you choose a single-configuration generator, +you'll need to pass the `CMAKE_BUILD_TYPE`, +which should match the `build_type` setting you gave to Conan. + +Even then, Conan has parameters, some of which are platform-specific. +In Conan's parlance, parameters are either settings or options. +**Settings** are shared by all packages, e.g. the build type. +**Options** are specific to a given package, e.g. whether to build and link +OpenSSL as a shared library. + +For settings, Conan goes through a complicated search process to choose +defaults. +For options, each package recipe defines its own defaults. 
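+As a purely illustrative example (the option shown is the OpenSSL one
+mentioned above; the exact parameters you need depend on your platform and
+are covered in the build instructions), a build type is a setting that
+applies to every package, while an option names the single package it
+configures:
+
+```
+conan install .. --settings build_type=Release --options openssl:shared=True
+```
+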
+ +You can pass every parameter to Conan on the command line, +but it is more convenient to put them in a configuration file, once, that +Conan can read every time it is configured. +For Conan, that file is a [profile][profile]. +**All we must do to properly configure Conan is edit and pass the profile.** +By default, Conan will use the profile named "default". diff --git a/docs/build/environment.md b/docs/build/environment.md new file mode 100644 index 00000000000..8e6ba45aacc --- /dev/null +++ b/docs/build/environment.md @@ -0,0 +1,83 @@ +Our [build instructions][BUILD.md] assume you have a C++ development +environment complete with Git, Python, Conan, CMake, and a C++ compiler. +This document exists to help readers set one up on any of the Big Three +platforms: Linux, macOS, or Windows. + +[BUILD.md]: ../../BUILD.md + + +## Linux + +Package ecosystems vary across Linux distributions, +so there is no one set of instructions that will work for every Linux user. +These instructions are written for Ubuntu 22.04. +They are largely copied from the [script][1] used to configure our Docker +container for continuous integration. +That script handles many more responsibilities. +These instructions are just the bare minimum to build one configuration of +rippled. +You can check that codebase for other Linux distributions and versions. +If you cannot find yours there, +then we hope that these instructions can at least guide you in the right +direction. + +``` +apt update +apt install --yes curl git libssl-dev python3.10-dev python3-pip make g++-11 + +curl --location --remote-name \ + "https://github.com/Kitware/CMake/releases/download/v3.25.1/cmake-3.25.1.tar.gz" +tar -xzf cmake-3.25.1.tar.gz +rm cmake-3.25.1.tar.gz +cd cmake-3.25.1 +./bootstrap --parallel=$(nproc) +make --jobs $(nproc) +make install +cd .. + +pip3 install 'conan<2' +``` + +[1]: https://github.com/thejohnfreeman/rippled-docker/blob/master/ubuntu-22.04/install.sh + + +## macOS + +Open a Terminal and enter the below command to bring up a dialog to install +the command line developer tools. +Once it is finished, this command should return a version greater than the +minimum required (see [BUILD.md][]). + +``` +clang --version +``` + +The command line developer tools should include Git too: + +``` +git --version +``` + +Install [Homebrew][], +use it to install [pyenv][], +use it to install Python, +and use it to install Conan: + +[Homebrew]: https://brew.sh/ +[pyenv]: https://github.com/pyenv/pyenv + +``` +/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" +brew update +brew install pyenv +pyenv install 3.10-dev +pyenv global 3.10-dev +eval "$(pyenv init -)" +pip install 'conan<2' +``` + +Install CMake with Homebrew too: + +``` +brew install cmake +``` diff --git a/docs/build/install.md b/docs/build/install.md new file mode 100644 index 00000000000..1bdc456b6d8 --- /dev/null +++ b/docs/build/install.md @@ -0,0 +1,13 @@ +## From source + +From a source build, you can install rippled and libxrpl using CMake's +`--install` mode: + +``` +cmake --install . --prefix /opt/local +``` + +The default [prefix][1] is typically `/usr/local` on Linux and macOS and +`C:/Program Files/rippled` on Windows. 
+ +[1]: https://cmake.org/cmake/help/latest/variable/CMAKE_INSTALL_PREFIX.html From d7725837f5517069984833d3bf7aca559a0d38c9 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Wed, 22 Mar 2023 19:21:03 -0500 Subject: [PATCH 134/508] build: add interface library libxrpl: (#4449) Make it easy for projects to depend on libxrpl by adding an `ALIAS` target named `xrpl::libxrpl` for projects to link. The name was chosen because: * The current library target is named `xrpl_core`. There is no other "non-core" library target against which we need to distinguish the "core" library. We only export one library target, and it should just be named after the project to keep things simple and predictable. * Underscores in target or library names are generally discouraged. * Every target exported in CMake should be prefixed with the project name. By adding an `ALIAS` target, existing consumers who use the `xrpl_core` target will not be affected. * In the future, there can be a migration plan to make `xrpl_core` the `ALIAS` target (and `libxrpl` the "real" target, which will affect the filename of the compiled binary), and eventually remove it entirely. Also: * Fix the Conan recipe so that consumers using Conan import a target named `xrpl::libxrpl`. This way, every consumer can use the same instructions. * Document the two easiest methods to depend on libxrpl. Both have been tested. * See #4443. --- Builds/CMake/RippledCore.cmake | 4 ++ conanfile.py | 13 +++-- docs/build/depend.md | 98 ++++++++++++++++++++++++++++++++++ 3 files changed, 111 insertions(+), 4 deletions(-) create mode 100644 docs/build/depend.md diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index c1d1e21d4d2..c34b40392fa 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -13,6 +13,10 @@ if (unity) set_target_properties(xrpl_core PROPERTIES UNITY_BUILD ON) endif () +add_library(libxrpl INTERFACE) +target_link_libraries(libxrpl INTERFACE xrpl_core) +add_library(xrpl::libxrpl ALIAS libxrpl) + #[===============================[ beast/legacy FILES: diff --git a/conanfile.py b/conanfile.py index 51e8d7d553e..6e616b3c9f4 100644 --- a/conanfile.py +++ b/conanfile.py @@ -1,4 +1,4 @@ -from conans import ConanFile +from conan import ConanFile from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout import re @@ -108,7 +108,9 @@ def requirements(self): if self.options.rocksdb: self.requires('rocksdb/6.27.3') - exports_sources = 'CMakeLists.txt', 'Builds/CMake/*', 'src/*', 'cfg/*' + exports_sources = ( + 'CMakeLists.txt', 'Builds/*', 'bin/getRippledInfo', 'src/*', 'cfg/*' + ) def layout(self): cmake_layout(self) @@ -142,8 +144,11 @@ def package(self): cmake.install() def package_info(self): - self.cpp_info.libs = [ + libxrpl = self.cpp_info.components['libxrpl'] + libxrpl.libs = [ 'libxrpl_core.a', - 'libed25519-donna.a', + 'libed25519.a', 'libsecp256k1.a', ] + libxrpl.includedirs = ['include'] + libxrpl.requires = ['boost::boost'] diff --git a/docs/build/depend.md b/docs/build/depend.md new file mode 100644 index 00000000000..42fd41a26e8 --- /dev/null +++ b/docs/build/depend.md @@ -0,0 +1,98 @@ +We recommend two different methods to depend on libxrpl in your own [CMake][] +project. +Both methods add a CMake library target named `xrpl::libxrpl`. + + +## Conan requirement + +The first method adds libxrpl as a [Conan][] requirement. +With this method, there is no need for a Git [submodule][]. +It is good for when you just need a dependency on libxrpl as-is. 
+ +``` +# This conanfile.txt is just an example. +[requires] +xrpl/1.10.0 + +[generators] +CMakeDeps +CMakeToolchain +``` + +``` +# If you want to depend on a version of libxrpl that is not in ConanCenter, +# then you can export the recipe from the rippled project. +conan export +``` + +```cmake +# Find and link the library in your CMake project. +find_package(xrpl) +target_link_libraries( PUBLIC xrpl::libxrpl) +``` + +``` +# Download, build, and connect dependencies with Conan. +mkdir .build +cd .build +mkdir -p build/generators +conan install \ + --install-folder build/generators \ + --build missing \ + --settings build_type=Release \ + .. +cmake \ + -DCMAKE_TOOLCHAIN_FILE=build/generators/conan_toolchain.cmake \ + -DCMAKE_BUILD_TYPE=Release \ + .. +cmake --build . --parallel +``` + + +## CMake subdirectory + +The second method adds the [rippled][] project as a CMake +[subdirectory][add_subdirectory]. +This method works well when you keep the rippled project as a Git +[submodule][]. +It's good for when you want to make changes to libxrpl as part of your own +project. +Be careful, though. +Your project will inherit all of the same CMake options, +so watch out for name collisions. +We still recommend using [Conan][] to download, build, and connect dependencies. + +``` +# Add the project as a Git submodule. +mkdir submodules +git submodule add https://github.com/XRPLF/rippled.git submodules/rippled +``` + +```cmake +# Add and link the library in your CMake project. +add_subdirectory(submodules/rippled) +target_link_libraries( PUBLIC xrpl::libxrpl) +``` + +``` +# Download, build, and connect dependencies with Conan. +mkdir .build +cd .build +conan install \ + --output-folder . \ + --build missing \ + --settings build_type=Release \ + ../submodules/rippled +cmake \ + -DCMAKE_TOOLCHAIN_FILE=build/generators/conan_toolchain.cmake \ + -DCMAKE_BUILD_TYPE=Release \ + .. +cmake --build . --parallel +``` + + +[add_subdirectory]: https://cmake.org/cmake/help/latest/command/add_subdirectory.html +[submodule]: https://git-scm.com/book/en/v2/Git-Tools-Submodules +[rippled]: https://github.com/ripple/rippled +[Conan]: https://docs.conan.io/ +[CMake]: https://cmake.org/cmake/help/latest/ From dffcdea12b564734f789a637b70cd2e347862d67 Mon Sep 17 00:00:00 2001 From: Scott Schurr Date: Thu, 23 Mar 2023 17:32:17 -0700 Subject: [PATCH 135/508] fix: `Expected` to return a value: (#4401) Fix a case where `ripple::Expected` returned a json array, not a value. The problem was that `Expected` invoked the wrong constructor for the expected type, which resulted in a constructor that took multiple arguments being interpreted as an array. A proposed fix was provided by @godexsoft, which involved a minor adjustment to three constructors that replaces the use of curly braces with parentheses. This makes `Expected` usable for [Clio](https://github.com/XRPLF/clio). A unit test is also included to ensure that the issue doesn't occur again in the future. 
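
For readers unfamiliar with the pitfall being fixed, the difference between
brace and parenthesis initialization can be reproduced with a tiny standalone
program. This is only an illustration of the general C++ rule (it uses
`std::vector`, not `ripple::Expected` or any Json type): braces prefer an
`std::initializer_list` constructor when one exists, so forwarding several
arguments through `T{...}` can silently build an "array" instead of calling
the intended constructor.

```
#include <cassert>
#include <vector>

int main()
{
    // Braces select the initializer_list constructor: a vector holding {3, 1}.
    std::vector<int> braces{3, 1};
    // Parentheses select the (count, value) constructor: three elements of 1.
    std::vector<int> parens(3, 1);

    assert(braces.size() == 2);
    assert(parens.size() == 3);
    return 0;
}
```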
--- src/ripple/basics/Expected.h | 6 +++--- src/test/basics/Expected_test.cpp | 13 +++++++++++++ 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/src/ripple/basics/Expected.h b/src/ripple/basics/Expected.h index 8dc368eefd7..bb699579b17 100644 --- a/src/ripple/basics/Expected.h +++ b/src/ripple/basics/Expected.h @@ -137,14 +137,14 @@ class [[nodiscard]] Expected public: template requires std::convertible_to constexpr Expected(U && r) - : Base(T{std::forward(r)}) + : Base(T(std::forward(r))) { } template requires std::convertible_to && (!std::is_reference_v)constexpr Expected(Unexpected e) - : Base(E{std::move(e.value())}) + : Base(E(std::move(e.value()))) { } @@ -220,7 +220,7 @@ class [[nodiscard]] Expected template requires std::convertible_to && (!std::is_reference_v)constexpr Expected(Unexpected e) - : Base(E{std::move(e.value())}) + : Base(E(std::move(e.value()))) { } diff --git a/src/test/basics/Expected_test.cpp b/src/test/basics/Expected_test.cpp index 1f16e724de6..b89b9f6d309 100644 --- a/src/test/basics/Expected_test.cpp +++ b/src/test/basics/Expected_test.cpp @@ -20,6 +20,9 @@ #include #include #include +#if BOOST_VERSION >= 107500 +#include // Not part of boost before version 1.75 +#endif // BOOST_VERSION #include #include @@ -203,6 +206,16 @@ struct Expected_test : beast::unit_test::suite std::string const s(std::move(expected.error())); BEAST_EXPECT(s == "Not what is expected!"); } + // Test a case that previously unintentionally returned an array. +#if BOOST_VERSION >= 107500 + { + auto expected = []() -> Expected { + return boost::json::object{{"oops", "me array now"}}; + }(); + BEAST_EXPECT(expected); + BEAST_EXPECT(!expected.value().is_array()); + } +#endif // BOOST_VERSION } }; From 7aad6e51278a90fdb743b45d6b5df94a03d90d31 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Thu, 23 Mar 2023 22:28:53 -0700 Subject: [PATCH 136/508] feat: mark 4 amendments as obsolete: (#4291) Add the ability to mark amendments as obsolete. There are some known amendments that should not be voted for because they are broken (or similar reasons). This commit marks four amendments as obsolete: 1. `CryptoConditionsSuite` 2. `NonFungibleTokensV1` 3. `fixNFTokenDirV1` 4. `fixNFTokenNegOffer` When an amendment is `Obsolete`, voting for the amendment is prevented. A determined operator can still vote for the amendment by changing the source, and doing so does not break any protocol rules. The "feature" command now does not modify the vote for obsolete amendments. Before this change, there were two options for an amendment's `DefaultVote` behavior: yes and no. After this change, there are three options for an amendment's `VoteBehavior`: DefaultYes, DefaultNo, and Obsolete. To be clear, if an obsolete amendment were to (somehow) be activated by consensus, the server still has the code to process transactions according to that amendment, and would not be amendment blocked. It would function the same as if it had been voting "no" on the amendment. Resolves #4014. Incorporates review feedback from @scottschurr. 
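
The voting rule described above can be summarized with a short sketch. The
types below are simplified stand-ins with a hypothetical `setVote` helper
(the real logic lives in `AmendmentTableImpl::veto`/`unVeto` in the diff that
follows); the sketch only demonstrates that an `Obsolete` amendment's vote is
pinned, while default-yes and default-no amendments remain operator-controlled.

```
#include <cassert>

enum class VoteBehavior { Obsolete, DefaultNo, DefaultYes };
enum class Vote { up, down, obsolete };

struct Amendment
{
    VoteBehavior behavior;
    Vote vote;

    // Attempt an operator veto/unveto; return whether the vote changed.
    bool setVote(Vote requested)
    {
        if (behavior == VoteBehavior::Obsolete)
            return false;  // an obsolete amendment's vote can never be changed
        if (vote == requested)
            return false;
        vote = requested;
        return true;
    }
};

int main()
{
    Amendment normal{VoteBehavior::DefaultYes, Vote::up};
    Amendment obsolete{VoteBehavior::Obsolete, Vote::obsolete};

    assert(normal.setVote(Vote::down));   // ordinary amendments can be vetoed
    assert(!obsolete.setVote(Vote::up));  // obsolete ones cannot
    return 0;
}
```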
--- src/ripple/app/misc/AmendmentTable.h | 4 +- src/ripple/app/misc/impl/AmendmentTable.cpp | 41 ++++- src/ripple/app/rdb/Wallet.h | 2 +- src/ripple/protocol/Feature.h | 14 +- src/ripple/protocol/impl/Feature.cpp | 133 +++++++------- src/test/app/AmendmentTable_test.cpp | 170 +++++++++++++----- src/test/rpc/Feature_test.cpp | 184 ++++++++++++++++---- 7 files changed, 385 insertions(+), 163 deletions(-) diff --git a/src/ripple/app/misc/AmendmentTable.h b/src/ripple/app/misc/AmendmentTable.h index 14558b7da51..0a5f6a011ad 100644 --- a/src/ripple/app/misc/AmendmentTable.h +++ b/src/ripple/app/misc/AmendmentTable.h @@ -40,14 +40,14 @@ class AmendmentTable struct FeatureInfo { FeatureInfo() = delete; - FeatureInfo(std::string const& n, uint256 const& f, DefaultVote v) + FeatureInfo(std::string const& n, uint256 const& f, VoteBehavior v) : name(n), feature(f), vote(v) { } std::string const name; uint256 const feature; - DefaultVote const vote; + VoteBehavior const vote; }; virtual ~AmendmentTable() = default; diff --git a/src/ripple/app/misc/impl/AmendmentTable.cpp b/src/ripple/app/misc/impl/AmendmentTable.cpp index 93113af800e..6f9ea86fa6c 100644 --- a/src/ripple/app/misc/impl/AmendmentTable.cpp +++ b/src/ripple/app/misc/impl/AmendmentTable.cpp @@ -333,19 +333,31 @@ AmendmentTableImpl::AmendmentTableImpl( }(); // Parse supported amendments - for (auto const& [name, amendment, defaultVote] : supported) + for (auto const& [name, amendment, votebehavior] : supported) { AmendmentState& s = add(amendment, lock); s.name = name; s.supported = true; - s.vote = defaultVote == DefaultVote::yes ? AmendmentVote::up - : AmendmentVote::down; + switch (votebehavior) + { + case VoteBehavior::DefaultYes: + s.vote = AmendmentVote::up; + break; + + case VoteBehavior::DefaultNo: + s.vote = AmendmentVote::down; + break; + + case VoteBehavior::Obsolete: + s.vote = AmendmentVote::obsolete; + break; + } JLOG(j_.debug()) << "Amendment " << amendment << " (" << s.name << ") is supported and will be " << (s.vote == AmendmentVote::up ? 
"up" : "down") - << " voted if not enabled on the ledger."; + << " voted by default if not enabled on the ledger."; } hash_set detect_conflict; @@ -420,7 +432,9 @@ AmendmentTableImpl::AmendmentTableImpl( << amend_hash << "} is downvoted."; if (!amendment_name->empty()) s->name = *amendment_name; - s->vote = *vote; + // An obsolete amendment's vote can never be changed + if (s->vote != AmendmentVote::obsolete) + s->vote = *vote; } } else // up-vote @@ -431,7 +445,9 @@ AmendmentTableImpl::AmendmentTableImpl( << amend_hash << "} is upvoted."; if (!amendment_name->empty()) s.name = *amendment_name; - s.vote = *vote; + // An obsolete amendment's vote can never be changed + if (s.vote != AmendmentVote::obsolete) + s.vote = *vote; } }); } @@ -489,6 +505,7 @@ AmendmentTableImpl::persistVote( std::string const& name, AmendmentVote vote) const { + assert(vote != AmendmentVote::obsolete); auto db = db_.checkoutDb(); voteAmendment(*db, amendment, name, vote); } @@ -499,7 +516,7 @@ AmendmentTableImpl::veto(uint256 const& amendment) std::lock_guard lock(mutex_); AmendmentState& s = add(amendment, lock); - if (s.vote == AmendmentVote::down) + if (s.vote != AmendmentVote::up) return false; s.vote = AmendmentVote::down; persistVote(amendment, s.name, s.vote); @@ -512,7 +529,7 @@ AmendmentTableImpl::unVeto(uint256 const& amendment) std::lock_guard lock(mutex_); AmendmentState* const s = get(amendment, lock); - if (!s || s->vote == AmendmentVote::up) + if (!s || s->vote != AmendmentVote::down) return false; s->vote = AmendmentVote::up; persistVote(amendment, s->name, s->vote); @@ -734,7 +751,13 @@ AmendmentTableImpl::injectJson( v[jss::name] = fs.name; v[jss::supported] = fs.supported; - v[jss::vetoed] = fs.vote == AmendmentVote::down; + if (!fs.enabled) + { + if (fs.vote == AmendmentVote::obsolete) + v[jss::vetoed] = "Obsolete"; + else + v[jss::vetoed] = fs.vote == AmendmentVote::down; + } v[jss::enabled] = fs.enabled; if (!fs.enabled && lastVote_) diff --git a/src/ripple/app/rdb/Wallet.h b/src/ripple/app/rdb/Wallet.h index 2769e459acc..e9846714ece 100644 --- a/src/ripple/app/rdb/Wallet.h +++ b/src/ripple/app/rdb/Wallet.h @@ -144,7 +144,7 @@ createFeatureVotes(soci::session& session); // For historical reasons the up-vote and down-vote integer representations // are unintuitive. -enum class AmendmentVote : int { up = 0, down = 1 }; +enum class AmendmentVote : int { obsolete = -1, up = 0, down = 1 }; /** * @brief readAmendments Reads all amendments from the FeatureVotes table. diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index 62dc327d98d..e4b0e3d4acd 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -36,17 +36,17 @@ * for the feature at the bottom * 2) Add a uint256 definition for the feature to the corresponding source * file (Feature.cpp). Use `registerFeature` to create the feature with - * the feature's name, `Supported::no`, and `DefaultVote::no`. This + * the feature's name, `Supported::no`, and `VoteBehavior::DefaultNo`. This * should be the only place the feature's name appears in code as a string. * 3) Use the uint256 as the parameter to `view.rules.enabled()` to * control flow into new code that this feature limits. * 4) If the feature development is COMPLETE, and the feature is ready to be * SUPPORTED, change the `registerFeature` parameter to Supported::yes. * 5) When the feature is ready to be ENABLED, change the `registerFeature` - * parameter to `DefaultVote::yes`. + * parameter to `VoteBehavior::DefaultYes`. 
* In general, any newly supported amendments (`Supported::yes`) should have - * a `DefaultVote::no` for at least one full release cycle. High priority - * bug fixes can be an exception to this rule of thumb. + * a `VoteBehavior::DefaultNo` for at least one full release cycle. High + * priority bug fixes can be an exception to this rule of thumb. * * When a feature has been enabled for several years, the conditional code * may be removed, and the feature "retired". To retire a feature: @@ -55,7 +55,7 @@ * section at the end of the file. * 3) CHANGE the name of the variable to start with "retired". * 4) CHANGE the parameters of the `registerFeature` call to `Supported::yes` - * and `DefaultVote::no`. + * and `VoteBehavior::DefaultNo`. * The feature must remain registered and supported indefinitely because it * still exists in the ledger, but there is no need to vote for it because * there's nothing to vote for. If it is removed completely from the code, any @@ -66,7 +66,7 @@ namespace ripple { -enum class DefaultVote : bool { no = false, yes }; +enum class VoteBehavior : int { Obsolete = -1, DefaultNo = 0, DefaultYes }; namespace detail { @@ -79,7 +79,7 @@ static constexpr std::size_t numFeatures = 58; /** Amendments that this server supports and the default voting behavior. Whether they are enabled depends on the Rules defined in the validated ledger */ -std::map const& +std::map const& supportedAmendments(); /** Amendments that this server won't vote for by default. diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index d15c3fc60ff..6b8e7719d00 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -67,9 +67,10 @@ enum class Supported : bool { no = false, yes }; // updated. // // Generally, amendments which introduce new features should be set as -// "DefaultVote::no" whereas in rare cases, amendments that fix critical -// bugs should be set as "DefaultVote::yes", if off-chain consensus is -// reached amongst reviewers, validator operators, and other participants. +// "VoteBehavior::DefaultNo" whereas in rare cases, amendments that fix +// critical bugs should be set as "VoteBehavior::DefaultYes", if off-chain +// consensus is reached amongst reviewers, validator operators, and other +// participants. class FeatureCollections { @@ -115,7 +116,7 @@ class FeatureCollections // name, index, and uint256 feature identifier boost::multi_index::multi_index_container features; - std::map supported; + std::map supported; std::size_t upVotes = 0; std::size_t downVotes = 0; mutable std::atomic readOnly = false; @@ -163,7 +164,7 @@ class FeatureCollections registerFeature( std::string const& name, Supported support, - DefaultVote vote); + VoteBehavior vote); /** Tell FeatureCollections when registration is complete. */ bool @@ -181,7 +182,7 @@ class FeatureCollections /** Amendments that this server supports. Whether they are enabled depends on the Rules defined in the validated ledger */ - std::map const& + std::map const& supportedAmendments() const { return supported; @@ -230,11 +231,11 @@ uint256 FeatureCollections::registerFeature( std::string const& name, Supported support, - DefaultVote vote) + VoteBehavior vote) { check(!readOnly, "Attempting to register a feature after startup."); check( - support == Supported::yes || vote == DefaultVote::no, + support == Supported::yes || vote == VoteBehavior::DefaultNo, "Invalid feature parameters. 
Must be supported to be up-voted."); Feature const* i = getByName(name); if (!i) @@ -254,7 +255,7 @@ FeatureCollections::registerFeature( { supported.emplace(name, vote); - if (vote == DefaultVote::yes) + if (vote == VoteBehavior::DefaultYes) ++upVotes; else ++downVotes; @@ -315,7 +316,7 @@ static FeatureCollections featureCollections; /** Amendments that this server supports. Whether they are enabled depends on the Rules defined in the validated ledger */ -std::map const& +std::map const& detail::supportedAmendments() { return featureCollections.supportedAmendments(); @@ -344,7 +345,7 @@ getRegisteredFeature(std::string const& name) } uint256 -registerFeature(std::string const& name, Supported support, DefaultVote vote) +registerFeature(std::string const& name, Supported support, VoteBehavior vote) { return featureCollections.registerFeature(name, support, vote); } @@ -354,7 +355,7 @@ registerFeature(std::string const& name, Supported support, DefaultVote vote) uint256 retireFeature(std::string const& name) { - return registerFeature(name, Supported::yes, DefaultVote::no); + return registerFeature(name, Supported::yes, VoteBehavior::Obsolete); } /** Tell FeatureCollections when registration is complete. */ @@ -390,9 +391,9 @@ Takes the name of a feature, whether it's supported, and the default vote. Will register the feature, and create a variable whose name is "feature" plus the feature name. */ -#define REGISTER_FEATURE(fName, supported, defaultvote) \ - uint256 const feature##fName = \ - registerFeature(#fName, supported, defaultvote) +#define REGISTER_FEATURE(fName, supported, votebehavior) \ + uint256 const feature##fName = \ + registerFeature(#fName, supported, votebehavior) #pragma push_macro("REGISTER_FIX") #undef REGISTER_FIX @@ -402,59 +403,71 @@ Takes the name of a feature, whether it's supported, and the default vote. Will register the feature, and create a variable whose name is the unmodified feature name. 
*/ -#define REGISTER_FIX(fName, supported, defaultvote) \ - uint256 const fName = registerFeature(#fName, supported, defaultvote) +#define REGISTER_FIX(fName, supported, votebehavior) \ + uint256 const fName = registerFeature(#fName, supported, votebehavior) // clang-format off // All known amendments must be registered either here or below with the // "retired" amendments -REGISTER_FEATURE(OwnerPaysFee, Supported::no, DefaultVote::no); -REGISTER_FEATURE(Flow, Supported::yes, DefaultVote::yes); -REGISTER_FEATURE(FlowCross, Supported::yes, DefaultVote::yes); -REGISTER_FEATURE(CryptoConditionsSuite, Supported::yes, DefaultVote::no); -REGISTER_FIX (fix1513, Supported::yes, DefaultVote::yes); -REGISTER_FEATURE(DepositAuth, Supported::yes, DefaultVote::yes); -REGISTER_FEATURE(Checks, Supported::yes, DefaultVote::yes); -REGISTER_FIX (fix1571, Supported::yes, DefaultVote::yes); -REGISTER_FIX (fix1543, Supported::yes, DefaultVote::yes); -REGISTER_FIX (fix1623, Supported::yes, DefaultVote::yes); -REGISTER_FEATURE(DepositPreauth, Supported::yes, DefaultVote::yes); +REGISTER_FEATURE(OwnerPaysFee, Supported::no, VoteBehavior::DefaultNo); +REGISTER_FEATURE(Flow, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FEATURE(FlowCross, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FIX (fix1513, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FEATURE(DepositAuth, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FEATURE(Checks, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FIX (fix1571, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FIX (fix1543, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FIX (fix1623, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FEATURE(DepositPreauth, Supported::yes, VoteBehavior::DefaultYes); // Use liquidity from strands that consume max offers, but mark as dry -REGISTER_FIX (fix1515, Supported::yes, DefaultVote::yes); -REGISTER_FIX (fix1578, Supported::yes, DefaultVote::yes); -REGISTER_FEATURE(MultiSignReserve, Supported::yes, DefaultVote::yes); -REGISTER_FIX (fixTakerDryOfferRemoval, Supported::yes, DefaultVote::yes); -REGISTER_FIX (fixMasterKeyAsRegularKey, Supported::yes, DefaultVote::yes); -REGISTER_FIX (fixCheckThreading, Supported::yes, DefaultVote::yes); -REGISTER_FIX (fixPayChanRecipientOwnerDir, Supported::yes, DefaultVote::yes); -REGISTER_FEATURE(DeletableAccounts, Supported::yes, DefaultVote::yes); +REGISTER_FIX (fix1515, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FIX (fix1578, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FEATURE(MultiSignReserve, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FIX (fixTakerDryOfferRemoval, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FIX (fixMasterKeyAsRegularKey, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FIX (fixCheckThreading, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FIX (fixPayChanRecipientOwnerDir, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FEATURE(DeletableAccounts, Supported::yes, VoteBehavior::DefaultYes); // fixQualityUpperBound should be activated before FlowCross -REGISTER_FIX (fixQualityUpperBound, Supported::yes, DefaultVote::yes); -REGISTER_FEATURE(RequireFullyCanonicalSig, Supported::yes, DefaultVote::yes); +REGISTER_FIX (fixQualityUpperBound, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FEATURE(RequireFullyCanonicalSig, Supported::yes, VoteBehavior::DefaultYes); // fix1781: XRPEndpointSteps should be included in the circular payment check -REGISTER_FIX (fix1781, Supported::yes, DefaultVote::yes); 
-REGISTER_FEATURE(HardenedValidations, Supported::yes, DefaultVote::yes); -REGISTER_FIX (fixAmendmentMajorityCalc, Supported::yes, DefaultVote::yes); -REGISTER_FEATURE(NegativeUNL, Supported::yes, DefaultVote::yes); -REGISTER_FEATURE(TicketBatch, Supported::yes, DefaultVote::yes); -REGISTER_FEATURE(FlowSortStrands, Supported::yes, DefaultVote::yes); -REGISTER_FIX (fixSTAmountCanonicalize, Supported::yes, DefaultVote::yes); -REGISTER_FIX (fixRmSmallIncreasedQOffers, Supported::yes, DefaultVote::yes); -REGISTER_FEATURE(CheckCashMakesTrustLine, Supported::yes, DefaultVote::no); -REGISTER_FEATURE(NonFungibleTokensV1, Supported::yes, DefaultVote::no); -REGISTER_FEATURE(ExpandedSignerList, Supported::yes, DefaultVote::no); -REGISTER_FIX (fixNFTokenDirV1, Supported::yes, DefaultVote::no); -REGISTER_FIX (fixNFTokenNegOffer, Supported::yes, DefaultVote::no); -REGISTER_FEATURE(NonFungibleTokensV1_1, Supported::yes, DefaultVote::no); -REGISTER_FIX (fixTrustLinesToSelf, Supported::yes, DefaultVote::no); -REGISTER_FIX (fixRemoveNFTokenAutoTrustLine, Supported::yes, DefaultVote::yes); -REGISTER_FEATURE(ImmediateOfferKilled, Supported::yes, DefaultVote::no); -REGISTER_FEATURE(DisallowIncoming, Supported::yes, DefaultVote::no); -REGISTER_FEATURE(XRPFees, Supported::yes, DefaultVote::no); -REGISTER_FIX (fixUniversalNumber, Supported::yes, DefaultVote::no); -REGISTER_FIX (fixNonFungibleTokensV1_2, Supported::yes, DefaultVote::no); -REGISTER_FIX (fixNFTokenRemint, Supported::yes, DefaultVote::no); +REGISTER_FIX (fix1781, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FEATURE(HardenedValidations, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FIX (fixAmendmentMajorityCalc, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FEATURE(NegativeUNL, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FEATURE(TicketBatch, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FEATURE(FlowSortStrands, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FIX (fixSTAmountCanonicalize, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FIX (fixRmSmallIncreasedQOffers, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FEATURE(CheckCashMakesTrustLine, Supported::yes, VoteBehavior::DefaultNo); +REGISTER_FEATURE(ExpandedSignerList, Supported::yes, VoteBehavior::DefaultNo); +REGISTER_FEATURE(NonFungibleTokensV1_1, Supported::yes, VoteBehavior::DefaultNo); +REGISTER_FIX (fixTrustLinesToSelf, Supported::yes, VoteBehavior::DefaultNo); +REGISTER_FIX (fixRemoveNFTokenAutoTrustLine, Supported::yes, VoteBehavior::DefaultYes); +REGISTER_FEATURE(ImmediateOfferKilled, Supported::yes, VoteBehavior::DefaultNo); +REGISTER_FEATURE(DisallowIncoming, Supported::yes, VoteBehavior::DefaultNo); +REGISTER_FEATURE(XRPFees, Supported::yes, VoteBehavior::DefaultNo); +REGISTER_FIX (fixUniversalNumber, Supported::yes, VoteBehavior::DefaultNo); +REGISTER_FIX (fixNonFungibleTokensV1_2, Supported::yes, VoteBehavior::DefaultNo); +REGISTER_FIX (fixNFTokenRemint, Supported::yes, VoteBehavior::DefaultNo); + +// The following amendments are obsolete, but must remain supported +// because they could potentially get enabled. +// +// Obsolete features are (usually) not in the ledger, and may have code +// controlled by the feature. They need to be supported because at some +// time in the past, the feature was supported and votable, but never +// passed. So the feature needs to be supported in case it is ever +// enabled (added to the ledger). 
+// +// If a feature remains obsolete for long enough that no clients are able +// to vote for it, the feature can be removed (entirely?) from the code. +REGISTER_FEATURE(CryptoConditionsSuite, Supported::yes, VoteBehavior::Obsolete); +REGISTER_FEATURE(NonFungibleTokensV1, Supported::yes, VoteBehavior::Obsolete); +REGISTER_FIX (fixNFTokenDirV1, Supported::yes, VoteBehavior::Obsolete); +REGISTER_FIX (fixNFTokenNegOffer, Supported::yes, VoteBehavior::Obsolete); // The following amendments have been active for at least two years. Their // pre-amendment code has been removed and the identifiers are deprecated. diff --git a/src/test/app/AmendmentTable_test.cpp b/src/test/app/AmendmentTable_test.cpp index 99922e863a4..4284190a18a 100644 --- a/src/test/app/AmendmentTable_test.cpp +++ b/src/test/app/AmendmentTable_test.cpp @@ -87,45 +87,105 @@ class AmendmentTable_test final : public beast::unit_test::suite } static std::vector - makeDefaultYes(std::vector const& amendments) + makeFeatureInfo( + std::vector const& amendments, + VoteBehavior voteBehavior) { std::vector result; result.reserve(amendments.size()); for (auto const& a : amendments) { - result.emplace_back(a, amendmentId(a), DefaultVote::yes); + result.emplace_back(a, amendmentId(a), voteBehavior); } return result; } + static std::vector + makeDefaultYes(std::vector const& amendments) + { + return makeFeatureInfo(amendments, VoteBehavior::DefaultYes); + } + static std::vector makeDefaultYes(uint256 const amendment) { std::vector result{ - {to_string(amendment), amendment, DefaultVote::yes}}; + {to_string(amendment), amendment, VoteBehavior::DefaultYes}}; return result; } + static std::vector + makeDefaultNo(std::vector const& amendments) + { + return makeFeatureInfo(amendments, VoteBehavior::DefaultNo); + } + + static std::vector + makeObsolete(std::vector const& amendments) + { + return makeFeatureInfo(amendments, VoteBehavior::Obsolete); + } + + template + static size_t + totalsize(std::vector const& src, Args const&... args) + { + if constexpr (sizeof...(args) > 0) + return src.size() + totalsize(args...); + return src.size(); + } + + template + static void + combine_arg( + std::vector& dest, + std::vector const& src, + Args const&... args) + { + assert(dest.capacity() >= dest.size() + src.size()); + std::copy(src.begin(), src.end(), std::back_inserter(dest)); + if constexpr (sizeof...(args) > 0) + combine_arg(dest, args...); + } + + template + static std::vector + combine( + // Pass "left" by value. The values will need to be copied one way or + // another, so just reuse it. + std::vector left, + std::vector const& right, + Args const&... args) + { + left.reserve(totalsize(left, right, args...)); + + combine_arg(left, right, args...); + + return left; + } + // All useful amendments are supported amendments. // Enabled amendments are typically a subset of supported amendments. // Vetoed amendments should be supported but not enabled. // Unsupported amendments may be added to the AmendmentTable. 
- std::vector const supportedYes_{ - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", - "l", "m", "n", "o", "p", "q", "r", "s", "t", "u"}; + std::vector const + yes_{"g", "i", "k", "m", "o", "q", "r", "s", "t", "u"}; std::vector const enabled_{"b", "d", "f", "h", "j", "l", "n", "p"}; std::vector const vetoed_{"a", "c", "e"}; + std::vector const obsolete_{"0", "1", "2"}; + std::vector const allSupported_{ + combine(yes_, enabled_, vetoed_, obsolete_)}; std::vector const unsupported_{"v", "w", "x"}; std::vector const unsupportedMajority_{"y", "z"}; - Section const emptySection; - std::vector const emptyYes; + Section const emptySection_; + std::vector const emptyYes_; - test::SuiteJournal journal; + test::SuiteJournal journal_; public: - AmendmentTable_test() : journal("AmendmentTable_test", *this) + AmendmentTable_test() : journal_("AmendmentTable_test", *this) { } @@ -138,7 +198,7 @@ class AmendmentTable_test final : public beast::unit_test::suite Section const& vetoed) { return make_AmendmentTable( - app, majorityTime, supported, enabled, vetoed, journal); + app, majorityTime, supported, enabled, vetoed, journal_); } std::unique_ptr @@ -155,10 +215,20 @@ class AmendmentTable_test final : public beast::unit_test::suite std::unique_ptr makeTable(test::jtx::Env& env, std::chrono::seconds majorityTime) { + static std::vector const supported = + combine( + makeDefaultYes(yes_), + // Use non-intuitive default votes for "enabled_" and "vetoed_" + // so that when the tests later explicitly enable or veto them, + // we can be certain that they are not simply going by their + // default vote setting. + makeDefaultNo(enabled_), + makeDefaultYes(vetoed_), + makeObsolete(obsolete_)); return makeTable( env.app(), majorityTime, - makeDefaultYes(supportedYes_), + supported, makeSection(enabled_), makeSection(vetoed_)); } @@ -170,17 +240,22 @@ class AmendmentTable_test final : public beast::unit_test::suite test::jtx::Env env{*this, makeConfig()}; auto table = makeTable(env, weeks(1)); - for (auto const& a : supportedYes_) - { + for (auto const& a : allSupported_) + BEAST_EXPECT(table->isSupported(amendmentId(a))); + + for (auto const& a : yes_) BEAST_EXPECT(table->isSupported(amendmentId(a))); - } for (auto const& a : enabled_) + BEAST_EXPECT(table->isSupported(amendmentId(a))); + + for (auto const& a : vetoed_) { BEAST_EXPECT(table->isSupported(amendmentId(a))); + BEAST_EXPECT(!table->isEnabled(amendmentId(a))); } - for (auto const& a : vetoed_) + for (auto const& a : obsolete_) { BEAST_EXPECT(table->isSupported(amendmentId(a))); BEAST_EXPECT(!table->isEnabled(amendmentId(a))); @@ -195,13 +270,14 @@ class AmendmentTable_test final : public beast::unit_test::suite test::jtx::Env env{*this, makeConfig()}; auto table = makeTable(env, weeks(1)); - for (auto const& a : supportedYes_) + for (auto const& a : yes_) BEAST_EXPECT(table->find(a) == amendmentId(a)); for (auto const& a : enabled_) BEAST_EXPECT(table->find(a) == amendmentId(a)); - for (auto const& a : vetoed_) BEAST_EXPECT(table->find(a) == amendmentId(a)); + for (auto const& a : obsolete_) + BEAST_EXPECT(table->find(a) == amendmentId(a)); for (auto const& a : unsupported_) BEAST_EXPECT(!table->find(a)); for (auto const& a : unsupportedMajority_) @@ -228,7 +304,7 @@ class AmendmentTable_test final : public beast::unit_test::suite void testBadConfig() { - auto const yesVotes = makeDefaultYes(supportedYes_); + auto const yesVotes = makeDefaultYes(yes_); auto const section = makeSection(vetoed_); auto const id = 
to_string(amendmentId(enabled_[0])); @@ -241,7 +317,7 @@ class AmendmentTable_test final : public beast::unit_test::suite try { test::jtx::Env env{*this, makeConfig()}; - if (makeTable(env, weeks(2), yesVotes, test, emptySection)) + if (makeTable(env, weeks(2), yesVotes, test, emptySection_)) fail("Accepted only amendment ID"); } catch (std::exception const& e) @@ -258,7 +334,7 @@ class AmendmentTable_test final : public beast::unit_test::suite try { test::jtx::Env env{*this, makeConfig()}; - if (makeTable(env, weeks(2), yesVotes, test, emptySection)) + if (makeTable(env, weeks(2), yesVotes, test, emptySection_)) fail("Accepted extra arguments"); } catch (std::exception const& e) @@ -279,7 +355,7 @@ class AmendmentTable_test final : public beast::unit_test::suite try { test::jtx::Env env{*this, makeConfig()}; - if (makeTable(env, weeks(2), yesVotes, test, emptySection)) + if (makeTable(env, weeks(2), yesVotes, test, emptySection_)) fail("Accepted short amendment ID"); } catch (std::exception const& e) @@ -299,7 +375,7 @@ class AmendmentTable_test final : public beast::unit_test::suite try { test::jtx::Env env{*this, makeConfig()}; - if (makeTable(env, weeks(2), yesVotes, test, emptySection)) + if (makeTable(env, weeks(2), yesVotes, test, emptySection_)) fail("Accepted long amendment ID"); } catch (std::exception const& e) @@ -320,7 +396,7 @@ class AmendmentTable_test final : public beast::unit_test::suite try { test::jtx::Env env{*this, makeConfig()}; - if (makeTable(env, weeks(2), yesVotes, test, emptySection)) + if (makeTable(env, weeks(2), yesVotes, test, emptySection_)) fail("Accepted non-hex amendment ID"); } catch (std::exception const& e) @@ -339,7 +415,7 @@ class AmendmentTable_test final : public beast::unit_test::suite test::jtx::Env env{*this, makeConfig()}; std::unique_ptr table = makeTable(env, weeks(2)); - // Note which entries are enabled + // Note which entries are enabled (convert the amendment names to IDs) std::set allEnabled; for (auto const& a : enabled_) allEnabled.insert(amendmentId(a)); @@ -351,7 +427,7 @@ class AmendmentTable_test final : public beast::unit_test::suite BEAST_EXPECT(!table->hasUnsupportedEnabled()); // Verify all enables are enabled and nothing else. - for (std::string const& a : supportedYes_) + for (std::string const& a : yes_) { uint256 const supportedID = amendmentId(a); bool const enabled = table->isEnabled(supportedID); @@ -375,7 +451,7 @@ class AmendmentTable_test final : public beast::unit_test::suite // Unveto an amendment that is already not vetoed. Shouldn't // hurt anything, but the values returned by getDesired() // shouldn't change. - BEAST_EXPECT(!table->unVeto(amendmentId(supportedYes_[1]))); + BEAST_EXPECT(!table->unVeto(amendmentId(yes_[1]))); BEAST_EXPECT(desired == table->getDesired()); } @@ -391,7 +467,7 @@ class AmendmentTable_test final : public beast::unit_test::suite } // Veto all supported amendments. Now desired should be empty. 
- for (std::string const& a : supportedYes_) + for (std::string const& a : allSupported_) { table->veto(amendmentId(a)); } @@ -533,7 +609,7 @@ class AmendmentTable_test final : public beast::unit_test::suite test::jtx::Env env{*this}; auto table = - makeTable(env, weeks(2), emptyYes, emptySection, emptySection); + makeTable(env, weeks(2), emptyYes_, emptySection_, emptySection_); std::vector> votes; std::vector ourVotes; @@ -594,7 +670,11 @@ class AmendmentTable_test final : public beast::unit_test::suite test::jtx::Env env{*this}; auto table = makeTable( - env, weeks(2), emptyYes, emptySection, makeSection(testAmendment)); + env, + weeks(2), + emptyYes_, + emptySection_, + makeSection(testAmendment)); auto const validators = makeValidators(10); @@ -653,11 +733,7 @@ class AmendmentTable_test final : public beast::unit_test::suite test::jtx::Env env{*this}; auto table = makeTable( - env, - weeks(2), - makeDefaultYes(supportedYes_), - emptySection, - emptySection); + env, weeks(2), makeDefaultYes(yes_), emptySection_, emptySection_); auto const validators = makeValidators(10); std::vector> votes; @@ -675,13 +751,13 @@ class AmendmentTable_test final : public beast::unit_test::suite ourVotes, enabled, majority); - BEAST_EXPECT(ourVotes.size() == supportedYes_.size()); + BEAST_EXPECT(ourVotes.size() == yes_.size()); BEAST_EXPECT(enabled.empty()); - for (auto const& i : supportedYes_) + for (auto const& i : yes_) BEAST_EXPECT(majority.find(amendmentId(i)) == majority.end()); // Now, everyone votes for this feature - for (auto const& i : supportedYes_) + for (auto const& i : yes_) votes.emplace_back(amendmentId(i), validators.size()); // Week 2: We should recognize a majority @@ -694,10 +770,10 @@ class AmendmentTable_test final : public beast::unit_test::suite ourVotes, enabled, majority); - BEAST_EXPECT(ourVotes.size() == supportedYes_.size()); + BEAST_EXPECT(ourVotes.size() == yes_.size()); BEAST_EXPECT(enabled.empty()); - for (auto const& i : supportedYes_) + for (auto const& i : yes_) BEAST_EXPECT(majority[amendmentId(i)] == weekTime(weeks{2})); // Week 5: We should enable the amendment @@ -710,7 +786,7 @@ class AmendmentTable_test final : public beast::unit_test::suite ourVotes, enabled, majority); - BEAST_EXPECT(enabled.size() == supportedYes_.size()); + BEAST_EXPECT(enabled.size() == yes_.size()); // Week 6: We should remove it from our votes and from having a majority doRound( @@ -722,9 +798,9 @@ class AmendmentTable_test final : public beast::unit_test::suite ourVotes, enabled, majority); - BEAST_EXPECT(enabled.size() == supportedYes_.size()); + BEAST_EXPECT(enabled.size() == yes_.size()); BEAST_EXPECT(ourVotes.empty()); - for (auto const& i : supportedYes_) + for (auto const& i : yes_) BEAST_EXPECT(majority.find(amendmentId(i)) == majority.end()); } @@ -740,8 +816,8 @@ class AmendmentTable_test final : public beast::unit_test::suite env, weeks(2), makeDefaultYes(testAmendment), - emptySection, - emptySection); + emptySection_, + emptySection_); auto const validators = makeValidators(16); @@ -811,8 +887,8 @@ class AmendmentTable_test final : public beast::unit_test::suite env, weeks(8), makeDefaultYes(testAmendment), - emptySection, - emptySection); + emptySection_, + emptySection_); std::set enabled; majorityAmendments_t majority; diff --git a/src/test/rpc/Feature_test.cpp b/src/test/rpc/Feature_test.cpp index 1ba4d865d44..dcd95c8a968 100644 --- a/src/test/rpc/Feature_test.cpp +++ b/src/test/rpc/Feature_test.cpp @@ -31,25 +31,33 @@ class Feature_test : public beast::unit_test::suite { 
testcase("internals"); - std::map const& supported = + std::map const& supported = ripple::detail::supportedAmendments(); BEAST_EXPECT( supported.size() == ripple::detail::numDownVotedAmendments() + ripple::detail::numUpVotedAmendments()); - std::size_t up = 0, down = 0; - for (std::pair const& amendment : + std::size_t up = 0, down = 0, obsolete = 0; + for (std::pair const& amendment : supported) { - if (amendment.second == DefaultVote::no) - ++down; - else + switch (amendment.second) { - if (BEAST_EXPECT(amendment.second == DefaultVote::yes)) + case VoteBehavior::DefaultYes: ++up; + break; + case VoteBehavior::DefaultNo: + ++down; + break; + case VoteBehavior::Obsolete: + ++obsolete; + break; + default: + fail("Unknown VoteBehavior", __FILE__, __LINE__); } } - BEAST_EXPECT(down == ripple::detail::numDownVotedAmendments()); + BEAST_EXPECT( + down + obsolete == ripple::detail::numDownVotedAmendments()); BEAST_EXPECT(up == ripple::detail::numUpVotedAmendments()); } @@ -105,7 +113,7 @@ class Feature_test : public beast::unit_test::suite using namespace test::jtx; Env env{*this}; - std::map const& votes = + std::map const& votes = ripple::detail::supportedAmendments(); auto jrr = env.rpc("feature")[jss::result]; @@ -118,15 +126,26 @@ class Feature_test : public beast::unit_test::suite // default config - so all should be disabled, and // supported. Some may be vetoed. bool expectVeto = - !(votes.at(feature[jss::name].asString()) == DefaultVote::yes); + (votes.at(feature[jss::name].asString()) == + VoteBehavior::DefaultNo); + bool expectObsolete = + (votes.at(feature[jss::name].asString()) == + VoteBehavior::Obsolete); BEAST_EXPECTS( - !feature[jss::enabled].asBool(), + feature.isMember(jss::enabled) && + !feature[jss::enabled].asBool(), feature[jss::name].asString() + " enabled"); BEAST_EXPECTS( - feature[jss::vetoed].asBool() == expectVeto, + feature.isMember(jss::vetoed) && + feature[jss::vetoed].isBool() == !expectObsolete && + (!feature[jss::vetoed].isBool() || + feature[jss::vetoed].asBool() == expectVeto) && + (feature[jss::vetoed].isBool() || + feature[jss::vetoed].asString() == "Obsolete"), feature[jss::name].asString() + " vetoed"); BEAST_EXPECTS( - feature[jss::supported].asBool(), + feature.isMember(jss::supported) && + feature[jss::supported].asBool(), feature[jss::name].asString() + " supported"); } } @@ -150,7 +169,9 @@ class Feature_test : public beast::unit_test::suite BEAST_EXPECTS(feature[jss::name] == "MultiSignReserve", "name"); BEAST_EXPECTS(!feature[jss::enabled].asBool(), "enabled"); - BEAST_EXPECTS(!feature[jss::vetoed].asBool(), "vetoed"); + BEAST_EXPECTS( + feature[jss::vetoed].isBool() && !feature[jss::vetoed].asBool(), + "vetoed"); BEAST_EXPECTS(feature[jss::supported].asBool(), "supported"); // feature names are case-sensitive - expect error here @@ -200,7 +221,7 @@ class Feature_test : public beast::unit_test::suite Env env{ *this, FeatureBitset(featureDepositAuth, featureDepositPreauth)}; - std::map const& votes = + std::map const& votes = ripple::detail::supportedAmendments(); auto jrr = env.rpc("feature")[jss::result]; @@ -218,15 +239,31 @@ class Feature_test : public beast::unit_test::suite bool expectSupported = env.app().getAmendmentTable().isSupported(id); bool expectVeto = - !(votes.at((*it)[jss::name].asString()) == DefaultVote::yes); + (votes.at((*it)[jss::name].asString()) == + VoteBehavior::DefaultNo); + bool expectObsolete = + (votes.at((*it)[jss::name].asString()) == + VoteBehavior::Obsolete); BEAST_EXPECTS( - (*it)[jss::enabled].asBool() == 
expectEnabled, + (*it).isMember(jss::enabled) && + (*it)[jss::enabled].asBool() == expectEnabled, (*it)[jss::name].asString() + " enabled"); + if (expectEnabled) + BEAST_EXPECTS( + !(*it).isMember(jss::vetoed), + (*it)[jss::name].asString() + " vetoed"); + else + BEAST_EXPECTS( + (*it).isMember(jss::vetoed) && + (*it)[jss::vetoed].isBool() == !expectObsolete && + (!(*it)[jss::vetoed].isBool() || + (*it)[jss::vetoed].asBool() == expectVeto) && + ((*it)[jss::vetoed].isBool() || + (*it)[jss::vetoed].asString() == "Obsolete"), + (*it)[jss::name].asString() + " vetoed"); BEAST_EXPECTS( - (*it)[jss::vetoed].asBool() == expectVeto, - (*it)[jss::name].asString() + " vetoed"); - BEAST_EXPECTS( - (*it)[jss::supported].asBool() == expectSupported, + (*it).isMember(jss::supported) && + (*it)[jss::supported].asBool() == expectSupported, (*it)[jss::name].asString() + " supported"); } } @@ -282,7 +319,7 @@ class Feature_test : public beast::unit_test::suite // There should be at least 5 amendments. Don't do exact comparison // to avoid maintenance as more amendments are added in the future. BEAST_EXPECT(majorities.size() >= 5); - std::map const& votes = + std::map const& votes = ripple::detail::supportedAmendments(); jrr = env.rpc("feature")[jss::result]; @@ -293,13 +330,22 @@ class Feature_test : public beast::unit_test::suite if (!BEAST_EXPECT(feature.isMember(jss::name))) return; bool expectVeto = - !(votes.at(feature[jss::name].asString()) == DefaultVote::yes); + (votes.at(feature[jss::name].asString()) == + VoteBehavior::DefaultNo); + bool expectObsolete = + (votes.at(feature[jss::name].asString()) == + VoteBehavior::Obsolete); BEAST_EXPECTS( - expectVeto ^ feature.isMember(jss::majority), + (expectVeto || expectObsolete) ^ + feature.isMember(jss::majority), feature[jss::name].asString() + " majority"); BEAST_EXPECTS( feature.isMember(jss::vetoed) && - feature[jss::vetoed].asBool() == expectVeto, + feature[jss::vetoed].isBool() == !expectObsolete && + (!feature[jss::vetoed].isBool() || + feature[jss::vetoed].asBool() == expectVeto) && + (feature[jss::vetoed].isBool() || + feature[jss::vetoed].asString() == "Obsolete"), feature[jss::name].asString() + " vetoed"); BEAST_EXPECTS( feature.isMember(jss::count), @@ -310,11 +356,13 @@ class Feature_test : public beast::unit_test::suite BEAST_EXPECTS( feature.isMember(jss::validations), feature[jss::name].asString() + " validations"); - BEAST_EXPECT(feature[jss::count] == (expectVeto ? 0 : 1)); + BEAST_EXPECT( + feature[jss::count] == + ((expectVeto || expectObsolete) ? 
0 : 1)); BEAST_EXPECT(feature[jss::threshold] == 1); BEAST_EXPECT(feature[jss::validations] == 1); BEAST_EXPECTS( - expectVeto || feature[jss::majority] == 2540, + expectVeto || expectObsolete || feature[jss::majority] == 2540, "Majority: " + feature[jss::majority].asString()); } } @@ -326,39 +374,100 @@ class Feature_test : public beast::unit_test::suite using namespace test::jtx; Env env{*this, FeatureBitset(featureMultiSignReserve)}; + constexpr const char* featureName = "MultiSignReserve"; - auto jrr = env.rpc("feature", "MultiSignReserve")[jss::result]; + auto jrr = env.rpc("feature", featureName)[jss::result]; if (!BEAST_EXPECTS(jrr[jss::status] == jss::success, "status")) return; jrr.removeMember(jss::status); if (!BEAST_EXPECT(jrr.size() == 1)) return; auto feature = *(jrr.begin()); - BEAST_EXPECTS(feature[jss::name] == "MultiSignReserve", "name"); - BEAST_EXPECTS(!feature[jss::vetoed].asBool(), "vetoed"); + BEAST_EXPECTS(feature[jss::name] == featureName, "name"); + BEAST_EXPECTS( + feature[jss::vetoed].isBool() && !feature[jss::vetoed].asBool(), + "vetoed"); - jrr = env.rpc("feature", "MultiSignReserve", "reject")[jss::result]; + jrr = env.rpc("feature", featureName, "reject")[jss::result]; if (!BEAST_EXPECTS(jrr[jss::status] == jss::success, "status")) return; jrr.removeMember(jss::status); if (!BEAST_EXPECT(jrr.size() == 1)) return; feature = *(jrr.begin()); - BEAST_EXPECTS(feature[jss::name] == "MultiSignReserve", "name"); - BEAST_EXPECTS(feature[jss::vetoed].asBool(), "vetoed"); + BEAST_EXPECTS(feature[jss::name] == featureName, "name"); + BEAST_EXPECTS( + feature[jss::vetoed].isBool() && feature[jss::vetoed].asBool(), + "vetoed"); - jrr = env.rpc("feature", "MultiSignReserve", "accept")[jss::result]; + jrr = env.rpc("feature", featureName, "accept")[jss::result]; if (!BEAST_EXPECTS(jrr[jss::status] == jss::success, "status")) return; jrr.removeMember(jss::status); if (!BEAST_EXPECT(jrr.size() == 1)) return; feature = *(jrr.begin()); - BEAST_EXPECTS(feature[jss::name] == "MultiSignReserve", "name"); - BEAST_EXPECTS(!feature[jss::vetoed].asBool(), "vetoed"); + BEAST_EXPECTS(feature[jss::name] == featureName, "name"); + BEAST_EXPECTS( + feature[jss::vetoed].isBool() && !feature[jss::vetoed].asBool(), + "vetoed"); + + // anything other than accept or reject is an error + jrr = env.rpc("feature", featureName, "maybe"); + BEAST_EXPECT(jrr[jss::error] == "invalidParams"); + BEAST_EXPECT(jrr[jss::error_message] == "Invalid parameters."); + } + + void + testObsolete() + { + testcase("Obsolete"); + + using namespace test::jtx; + Env env{*this}; + constexpr const char* featureName = "NonFungibleTokensV1"; + + auto jrr = env.rpc("feature", featureName)[jss::result]; + if (!BEAST_EXPECTS(jrr[jss::status] == jss::success, "status")) + return; + jrr.removeMember(jss::status); + if (!BEAST_EXPECT(jrr.size() == 1)) + return; + auto feature = *(jrr.begin()); + BEAST_EXPECTS(feature[jss::name] == featureName, "name"); + BEAST_EXPECTS( + feature[jss::vetoed].isString() && + feature[jss::vetoed].asString() == "Obsolete", + "vetoed"); + + jrr = env.rpc("feature", featureName, "reject")[jss::result]; + if (!BEAST_EXPECTS(jrr[jss::status] == jss::success, "status")) + return; + jrr.removeMember(jss::status); + if (!BEAST_EXPECT(jrr.size() == 1)) + return; + feature = *(jrr.begin()); + BEAST_EXPECTS(feature[jss::name] == featureName, "name"); + BEAST_EXPECTS( + feature[jss::vetoed].isString() && + feature[jss::vetoed].asString() == "Obsolete", + "vetoed"); + + jrr = env.rpc("feature", featureName, 
"accept")[jss::result]; + if (!BEAST_EXPECTS(jrr[jss::status] == jss::success, "status")) + return; + jrr.removeMember(jss::status); + if (!BEAST_EXPECT(jrr.size() == 1)) + return; + feature = *(jrr.begin()); + BEAST_EXPECTS(feature[jss::name] == featureName, "name"); + BEAST_EXPECTS( + feature[jss::vetoed].isString() && + feature[jss::vetoed].asString() == "Obsolete", + "vetoed"); // anything other than accept or reject is an error - jrr = env.rpc("feature", "MultiSignReserve", "maybe"); + jrr = env.rpc("feature", featureName, "maybe"); BEAST_EXPECT(jrr[jss::error] == "invalidParams"); BEAST_EXPECT(jrr[jss::error_message] == "Invalid parameters."); } @@ -376,6 +485,7 @@ class Feature_test : public beast::unit_test::suite testSomeEnabled(); testWithMajorities(); testVeto(); + testObsolete(); } }; From 66627b26cfae8e1c902546f0778dd9b013aedc5a Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Tue, 28 Mar 2023 09:03:25 -0700 Subject: [PATCH 137/508] Refactor fee initialization and configuration: (#4319) * Create the FeeSettings object in genesis ledger. * Initialize with default values from the config. Removes the need to pass a Config down into the Ledger initialization functions, including setup(). * Drop the undocumented fee config settings in favor of the [voting] section. * Fix #3734. * If you previously used fee_account_reserve and/or fee_owner_reserve, you should change to using the [voting] section instead. Example: ``` [voting] account_reserve=10000000 owner_reserve=2000000 ``` * Because old Mainnet ledgers (prior to 562177 - yes, I looked it up) don't have FeeSettings, some of the other ctors will default them to the config values before setup() tries to load the object. * Update default Config fee values to match Mainnet. * Fix unit tests: * Updated fees: Some tests are converted to use computed values of fee object, but the default Env config was also updated to fix the rest. * Unit tests that check the structure of the ledger have updated hashes and counts. 
--- cfg/rippled-example.cfg | 4 +- cfg/rippled-reporting.cfg | 4 +- src/ripple/app/ledger/Ledger.cpp | 68 +++++++++++---- src/ripple/app/ledger/Ledger.h | 10 ++- src/ripple/app/ledger/impl/BuildLedger.cpp | 6 +- src/ripple/app/ledger/impl/InboundLedger.cpp | 15 +++- src/ripple/app/main/Application.cpp | 17 +++- src/ripple/app/misc/FeeVote.h | 24 +---- src/ripple/app/misc/FeeVoteImpl.cpp | 28 +----- src/ripple/app/reporting/ReportingETL.cpp | 5 +- src/ripple/core/Config.h | 27 +++++- src/ripple/core/ConfigSections.h | 2 - src/ripple/core/impl/Config.cpp | 32 +++++-- src/ripple/ledger/ReadView.h | 3 + src/ripple/ledger/impl/ReadView.cpp | 6 ++ src/ripple/nodestore/impl/Shard.cpp | 5 +- src/ripple/protocol/Rules.h | 8 ++ src/ripple/protocol/SystemParameters.h | 4 + src/ripple/protocol/impl/Rules.cpp | 13 +++ src/ripple/rpc/impl/TransactionSign.cpp | 2 +- src/test/app/FeeVote_test.cpp | 2 +- src/test/app/LedgerHistory_test.cpp | 3 +- src/test/app/LedgerLoad_test.cpp | 4 +- src/test/app/LoadFeeTrack_test.cpp | 6 +- src/test/app/NFToken_test.cpp | 13 +-- src/test/app/Offer_test.cpp | 9 +- src/test/app/RCLValidations_test.cpp | 3 +- src/test/app/TxQ_test.cpp | 24 ++--- src/test/jtx/impl/envconfig.cpp | 11 ++- src/test/rpc/LedgerClosed_test.cpp | 4 +- src/test/rpc/LedgerData_test.cpp | 2 +- src/test/rpc/LedgerRPC_test.cpp | 30 +++---- src/test/rpc/LedgerRequestRPC_test.cpp | 92 ++++++++------------ src/test/rpc/ReportingETL_test.cpp | 2 +- 34 files changed, 289 insertions(+), 199 deletions(-) diff --git a/cfg/rippled-example.cfg b/cfg/rippled-example.cfg index 02ef02e920f..0a669313066 100644 --- a/cfg/rippled-example.cfg +++ b/cfg/rippled-example.cfg @@ -1441,7 +1441,7 @@ # default. Don't change this without understanding the consequences. # # Example: -# account_reserve = 20000000 # 20 XRP +# account_reserve = 10000000 # 10 XRP # # owner_reserve = # @@ -1453,7 +1453,7 @@ # default. Don't change this without understanding the consequences. # # Example: -# owner_reserve = 5000000 # 5 XRP +# owner_reserve = 2000000 # 2 XRP # #------------------------------------------------------------------------------- # diff --git a/cfg/rippled-reporting.cfg b/cfg/rippled-reporting.cfg index 7e69d76f4f5..dbafdd497fa 100644 --- a/cfg/rippled-reporting.cfg +++ b/cfg/rippled-reporting.cfg @@ -1401,7 +1401,7 @@ # default. Don't change this without understanding the consequences. # # Example: -# account_reserve = 20000000 # 20 XRP +# account_reserve = 10000000 # 10 XRP # # owner_reserve = # @@ -1413,7 +1413,7 @@ # default. Don't change this without understanding the consequences. # # Example: -# owner_reserve = 5000000 # 5 XRP +# owner_reserve = 2000000 # 2 XRP # #------------------------------------------------------------------------------- # diff --git a/src/ripple/app/ledger/Ledger.cpp b/src/ripple/app/ledger/Ledger.cpp index 7552f755c6e..4de01cc909e 100644 --- a/src/ripple/app/ledger/Ledger.cpp +++ b/src/ripple/app/ledger/Ledger.cpp @@ -209,8 +209,34 @@ Ledger::Ledger( rawInsert(sle); } + { + auto sle = std::make_shared(keylet::fees()); + // Whether featureXRPFees is supported will depend on startup options. 
+ if (std::find(amendments.begin(), amendments.end(), featureXRPFees) != + amendments.end()) + { + sle->at(sfBaseFeeDrops) = config.FEES.reference_fee; + sle->at(sfReserveBaseDrops) = config.FEES.account_reserve; + sle->at(sfReserveIncrementDrops) = config.FEES.owner_reserve; + } + else + { + if (auto const f = + config.FEES.reference_fee.dropsAs()) + sle->at(sfBaseFee) = *f; + if (auto const f = + config.FEES.account_reserve.dropsAs()) + sle->at(sfReserveBase) = *f; + if (auto const f = + config.FEES.owner_reserve.dropsAs()) + sle->at(sfReserveIncrement) = *f; + sle->at(sfReferenceFeeUnits) = Config::FEE_UNITS_DEPRECATED; + } + rawInsert(sle); + } + stateMap_->flushDirty(hotACCOUNT_NODE); - setImmutable(config); + setImmutable(); } Ledger::Ledger( @@ -259,7 +285,8 @@ Ledger::Ledger( txMap_->setImmutable(); stateMap_->setImmutable(); - if (!setup(config)) + defaultFees(config); + if (!setup()) loaded = false; if (!loaded) @@ -329,11 +356,12 @@ Ledger::Ledger( info_.seq = ledgerSeq; info_.closeTime = closeTime; info_.closeTimeResolution = ledgerDefaultTimeResolution; - setup(config); + defaultFees(config); + setup(); } void -Ledger::setImmutable(Config const& config, bool rehash) +Ledger::setImmutable(bool rehash) { // Force update, since this is the only // place the hash transitions to valid @@ -349,15 +377,14 @@ Ledger::setImmutable(Config const& config, bool rehash) mImmutable = true; txMap_->setImmutable(); stateMap_->setImmutable(); - setup(config); + setup(); } void Ledger::setAccepted( NetClock::time_point closeTime, NetClock::duration closeResolution, - bool correctCloseTime, - Config const& config) + bool correctCloseTime) { // Used when we witnessed the consensus. assert(!open()); @@ -365,7 +392,7 @@ Ledger::setAccepted( info_.closeTime = closeTime; info_.closeTimeResolution = closeResolution; info_.closeFlags = correctCloseTime ? 
0 : sLCF_NoConsensusTime; - setImmutable(config); + setImmutable(); } bool @@ -587,13 +614,13 @@ Ledger::rawTxInsertWithHash( } bool -Ledger::setup(Config const& config) +Ledger::setup() { bool ret = true; try { - rules_ = makeRulesGivenLedger(*this, config.features); + rules_ = makeRulesGivenLedger(*this, rules_); } catch (SHAMapMissingNode const&) { @@ -604,10 +631,6 @@ Ledger::setup(Config const& config) Rethrow(); } - fees_.base = config.FEE_DEFAULT; - fees_.reserve = config.FEE_ACCOUNT_RESERVE; - fees_.increment = config.FEE_OWNER_RESERVE; - try { if (auto const sle = read(keylet::fees())) @@ -667,6 +690,18 @@ Ledger::setup(Config const& config) return ret; } +void +Ledger::defaultFees(Config const& config) +{ + assert(fees_.base == 0 && fees_.reserve == 0 && fees_.increment == 0); + if (fees_.base == 0) + fees_.base = config.FEES.reference_fee; + if (fees_.reserve == 0) + fees_.reserve = config.FEES.account_reserve; + if (fees_.increment == 0) + fees_.increment = config.FEES.owner_reserve; +} + std::shared_ptr Ledger::peek(Keylet const& k) const { @@ -1071,7 +1106,10 @@ finishLoadByIndexOrHash( if (!ledger) return; - ledger->setImmutable(config); + assert( + ledger->info().seq < XRP_LEDGER_EARLIEST_FEES || + ledger->read(keylet::fees())); + ledger->setImmutable(); JLOG(j.trace()) << "Loaded ledger: " << to_string(ledger->info().hash); diff --git a/src/ripple/app/ledger/Ledger.h b/src/ripple/app/ledger/Ledger.h index caf68b3eac8..0b0830df9d9 100644 --- a/src/ripple/app/ledger/Ledger.h +++ b/src/ripple/app/ledger/Ledger.h @@ -266,11 +266,10 @@ class Ledger final : public std::enable_shared_from_this, setAccepted( NetClock::time_point closeTime, NetClock::duration closeResolution, - bool correctCloseTime, - Config const& config); + bool correctCloseTime); void - setImmutable(Config const& config, bool rehash = true); + setImmutable(bool rehash = true); bool isImmutable() const @@ -395,7 +394,10 @@ class Ledger final : public std::enable_shared_from_this, class txs_iter_impl; bool - setup(Config const& config); + setup(); + + void + defaultFees(Config const& config); bool mImmutable; diff --git a/src/ripple/app/ledger/impl/BuildLedger.cpp b/src/ripple/app/ledger/impl/BuildLedger.cpp index f70b754ab7c..87960d16eac 100644 --- a/src/ripple/app/ledger/impl/BuildLedger.cpp +++ b/src/ripple/app/ledger/impl/BuildLedger.cpp @@ -75,8 +75,10 @@ buildLedgerImpl( built->unshare(); // Accept ledger - built->setAccepted( - closeTime, closeResolution, closeTimeCorrect, app.config()); + assert( + built->info().seq < XRP_LEDGER_EARLIEST_FEES || + built->read(keylet::fees())); + built->setAccepted(closeTime, closeResolution, closeTimeCorrect); return built; } diff --git a/src/ripple/app/ledger/impl/InboundLedger.cpp b/src/ripple/app/ledger/impl/InboundLedger.cpp index 3ecba97b199..af3ba8a7a9b 100644 --- a/src/ripple/app/ledger/impl/InboundLedger.cpp +++ b/src/ripple/app/ledger/impl/InboundLedger.cpp @@ -155,7 +155,10 @@ InboundLedger::init(ScopedLockType& collectionLock) JLOG(journal_.debug()) << "Acquiring ledger we already have in " << " local store. 
" << hash_; - mLedger->setImmutable(app_.config()); + assert( + mLedger->info().seq < XRP_LEDGER_EARLIEST_FEES || + mLedger->read(keylet::fees())); + mLedger->setImmutable(); if (mReason == Reason::HISTORY || mReason == Reason::SHARD) return; @@ -416,7 +419,10 @@ InboundLedger::tryDB(NodeStore::Database& srcDB) { JLOG(journal_.debug()) << "Had everything locally"; complete_ = true; - mLedger->setImmutable(app_.config()); + assert( + mLedger->info().seq < XRP_LEDGER_EARLIEST_FEES || + mLedger->read(keylet::fees())); + mLedger->setImmutable(); } } @@ -513,7 +519,10 @@ InboundLedger::done() if (complete_ && !failed_ && mLedger) { - mLedger->setImmutable(app_.config()); + assert( + mLedger->info().seq < XRP_LEDGER_EARLIEST_FEES || + mLedger->read(keylet::fees())); + mLedger->setImmutable(); switch (mReason) { case Reason::SHARD: diff --git a/src/ripple/app/main/Application.cpp b/src/ripple/app/main/Application.cpp index dce11bc38f0..16781ac09d4 100644 --- a/src/ripple/app/main/Application.cpp +++ b/src/ripple/app/main/Application.cpp @@ -1699,7 +1699,7 @@ ApplicationImp::fdRequired() const void ApplicationImp::startGenesisLedger() { - std::vector initialAmendments = + std::vector const initialAmendments = (config_->START_UP == Config::FRESH) ? m_amendmentTable->getDesired() : std::vector{}; @@ -1710,7 +1710,10 @@ ApplicationImp::startGenesisLedger() auto const next = std::make_shared(*genesis, timeKeeper().closeTime()); next->updateSkipList(); - next->setImmutable(*config_); + assert( + next->info().seq < XRP_LEDGER_EARLIEST_FEES || + next->read(keylet::fees())); + next->setImmutable(); openLedger_.emplace(next, cachedSLEs_, logs_->journal("OpenLedger")); m_ledgerMaster->storeLedger(next); m_ledgerMaster->switchLCL(next); @@ -1728,7 +1731,10 @@ ApplicationImp::getLastFullLedger() if (!ledger) return ledger; - ledger->setImmutable(*config_); + assert( + ledger->info().seq < XRP_LEDGER_EARLIEST_FEES || + ledger->read(keylet::fees())); + ledger->setImmutable(); if (getLedgerMaster().haveLedger(seq)) ledger->setValidated(); @@ -1879,8 +1885,11 @@ ApplicationImp::loadLedgerFromFile(std::string const& name) loadLedger->stateMap().flushDirty(hotACCOUNT_NODE); + assert( + loadLedger->info().seq < XRP_LEDGER_EARLIEST_FEES || + loadLedger->read(keylet::fees())); loadLedger->setAccepted( - closeTime, closeTimeResolution, !closeTimeEstimated, *config_); + closeTime, closeTimeResolution, !closeTimeEstimated); return loadLedger; } diff --git a/src/ripple/app/misc/FeeVote.h b/src/ripple/app/misc/FeeVote.h index 4fff64f7de3..a90f82efb35 100644 --- a/src/ripple/app/misc/FeeVote.h +++ b/src/ripple/app/misc/FeeVote.h @@ -32,23 +32,6 @@ namespace ripple { class FeeVote { public: - /** Fee schedule to vote for. - During voting ledgers, the FeeVote logic will try to move towards - these values when injecting fee-setting transactions. - A default-constructed Setup contains recommended values. - */ - struct Setup - { - /** The cost of a reference transaction in drops. */ - XRPAmount reference_fee{10}; - - /** The account reserve requirement in drops. */ - XRPAmount account_reserve{10 * DROPS_PER_XRP}; - - /** The per-owned item reserve requirement in drops. */ - XRPAmount owner_reserve{2 * DROPS_PER_XRP}; - }; - virtual ~FeeVote() = default; /** Add local fee preference to validation. @@ -74,16 +57,13 @@ class FeeVote std::shared_ptr const& initialPosition) = 0; }; -/** Build FeeVote::Setup from a config section. 
*/ -FeeVote::Setup -setup_FeeVote(Section const& section); - +struct FeeSetup; /** Create an instance of the FeeVote logic. @param setup The fee schedule to vote for. @param journal Where to log. */ std::unique_ptr -make_FeeVote(FeeVote::Setup const& setup, beast::Journal journal); +make_FeeVote(FeeSetup const& setup, beast::Journal journal); } // namespace ripple diff --git a/src/ripple/app/misc/FeeVoteImpl.cpp b/src/ripple/app/misc/FeeVoteImpl.cpp index 73d98fbd58a..048f5a3fc61 100644 --- a/src/ripple/app/misc/FeeVoteImpl.cpp +++ b/src/ripple/app/misc/FeeVoteImpl.cpp @@ -93,11 +93,11 @@ VotableValue::getVotes() const -> std::pair class FeeVoteImpl : public FeeVote { private: - Setup target_; + FeeSetup target_; beast::Journal const journal_; public: - FeeVoteImpl(Setup const& setup, beast::Journal journal); + FeeVoteImpl(FeeSetup const& setup, beast::Journal journal); void doValidation(Fees const& lastFees, Rules const& rules, STValidation& val) @@ -112,7 +112,7 @@ class FeeVoteImpl : public FeeVote //-------------------------------------------------------------------------- -FeeVoteImpl::FeeVoteImpl(Setup const& setup, beast::Journal journal) +FeeVoteImpl::FeeVoteImpl(FeeSetup const& setup, beast::Journal journal) : target_(setup), journal_(journal) { } @@ -335,28 +335,8 @@ FeeVoteImpl::doVoting( //------------------------------------------------------------------------------ -FeeVote::Setup -setup_FeeVote(Section const& section) -{ - FeeVote::Setup setup; - { - std::uint64_t temp; - if (set(temp, "reference_fee", section) && - temp <= std::numeric_limits::max()) - setup.reference_fee = temp; - } - { - std::uint32_t temp; - if (set(temp, "account_reserve", section)) - setup.account_reserve = temp; - if (set(temp, "owner_reserve", section)) - setup.owner_reserve = temp; - } - return setup; -} - std::unique_ptr -make_FeeVote(FeeVote::Setup const& setup, beast::Journal journal) +make_FeeVote(FeeSetup const& setup, beast::Journal journal) { return std::make_unique(setup, journal); } diff --git a/src/ripple/app/reporting/ReportingETL.cpp b/src/ripple/app/reporting/ReportingETL.cpp index 7e15d242a72..d8d6af36881 100644 --- a/src/ripple/app/reporting/ReportingETL.cpp +++ b/src/ripple/app/reporting/ReportingETL.cpp @@ -189,7 +189,10 @@ ReportingETL::flushLedger(std::shared_ptr& ledger) auto& txHash = ledger->info().txHash; auto& ledgerHash = ledger->info().hash; - ledger->setImmutable(app_.config(), false); + assert( + ledger->info().seq < XRP_LEDGER_EARLIEST_FEES || + ledger->read(keylet::fees())); + ledger->setImmutable(false); auto start = std::chrono::system_clock::now(); auto numFlushed = ledger->stateMap().flushDirty(hotACCOUNT_NODE); diff --git a/src/ripple/core/Config.h b/src/ripple/core/Config.h index 1e91f49263b..e805faad989 100644 --- a/src/ripple/core/Config.h +++ b/src/ripple/core/Config.h @@ -61,6 +61,26 @@ enum class SizedItem : std::size_t { accountIdCacheSize, }; +/** Fee schedule for startup / standalone, and to vote for. +During voting ledgers, the FeeVote logic will try to move towards +these values when injecting fee-setting transactions. +A default-constructed Setup contains recommended values. +*/ +struct FeeSetup +{ + /** The cost of a reference transaction in drops. */ + XRPAmount reference_fee{10}; + + /** The account reserve requirement in drops. */ + XRPAmount account_reserve{10 * DROPS_PER_XRP}; + + /** The per-owned item reserve requirement in drops. 
*/ + XRPAmount owner_reserve{2 * DROPS_PER_XRP}; + + /* (Remember to update the example cfg files when changing any of these + * values.) */ +}; + // This entire derived class is deprecated. // For new config information use the style implied // in the base class. For existing config information @@ -185,9 +205,7 @@ class Config : public BasicConfig std::optional VALIDATION_QUORUM; // validations to consider ledger authoritative - XRPAmount FEE_DEFAULT{10}; - XRPAmount FEE_ACCOUNT_RESERVE{200 * DROPS_PER_XRP}; - XRPAmount FEE_OWNER_RESERVE{50 * DROPS_PER_XRP}; + FeeSetup FEES; // Node storage configuration std::uint32_t LEDGER_HISTORY = 256; @@ -366,6 +384,9 @@ class Config : public BasicConfig const; }; +FeeSetup +setup_FeeVote(Section const& section); + } // namespace ripple #endif diff --git a/src/ripple/core/ConfigSections.h b/src/ripple/core/ConfigSections.h index ba0f209c0e9..6fd59680d8b 100644 --- a/src/ripple/core/ConfigSections.h +++ b/src/ripple/core/ConfigSections.h @@ -54,8 +54,6 @@ struct ConfigSection #define SECTION_DEBUG_LOGFILE "debug_logfile" #define SECTION_ELB_SUPPORT "elb_support" #define SECTION_FEE_DEFAULT "fee_default" -#define SECTION_FEE_ACCOUNT_RESERVE "fee_account_reserve" -#define SECTION_FEE_OWNER_RESERVE "fee_owner_reserve" #define SECTION_FETCH_DEPTH "fetch_depth" #define SECTION_HISTORICAL_SHARD_PATHS "historical_shard_paths" #define SECTION_INSIGHT "insight" diff --git a/src/ripple/core/impl/Config.cpp b/src/ripple/core/impl/Config.cpp index 12374972866..4e25552cb8b 100644 --- a/src/ripple/core/impl/Config.cpp +++ b/src/ripple/core/impl/Config.cpp @@ -611,14 +611,12 @@ Config::loadFromString(std::string const& fileContents) if (getSingleSection(secConfig, SECTION_NETWORK_QUORUM, strTemp, j_)) NETWORK_QUORUM = beast::lexicalCastThrow(strTemp); - if (getSingleSection(secConfig, SECTION_FEE_ACCOUNT_RESERVE, strTemp, j_)) - FEE_ACCOUNT_RESERVE = beast::lexicalCastThrow(strTemp); - - if (getSingleSection(secConfig, SECTION_FEE_OWNER_RESERVE, strTemp, j_)) - FEE_OWNER_RESERVE = beast::lexicalCastThrow(strTemp); - + FEES = setup_FeeVote(section("voting")); + /* [fee_default] is documented in the example config files as useful for + * things like offline transaction signing. Until that's completely + * deprecated, allow it to override the [voting] section. 
*/ if (getSingleSection(secConfig, SECTION_FEE_DEFAULT, strTemp, j_)) - FEE_DEFAULT = beast::lexicalCastThrow(strTemp); + FEES.reference_fee = beast::lexicalCastThrow(strTemp); if (getSingleSection(secConfig, SECTION_LEDGER_HISTORY, strTemp, j_)) { @@ -1017,4 +1015,24 @@ Config::getValueFor(SizedItem item, std::optional node) const return sizedItems.at(index).second.at(node.value_or(NODE_SIZE)); } +FeeSetup +setup_FeeVote(Section const& section) +{ + FeeSetup setup; + { + std::uint64_t temp; + if (set(temp, "reference_fee", section) && + temp <= std::numeric_limits::max()) + setup.reference_fee = temp; + } + { + std::uint32_t temp; + if (set(temp, "account_reserve", section)) + setup.account_reserve = temp; + if (set(temp, "owner_reserve", section)) + setup.owner_reserve = temp; + } + return setup; +} + } // namespace ripple diff --git a/src/ripple/ledger/ReadView.h b/src/ripple/ledger/ReadView.h index fb9e37c7458..e019d602f07 100644 --- a/src/ripple/ledger/ReadView.h +++ b/src/ripple/ledger/ReadView.h @@ -356,6 +356,9 @@ getCloseAgree(LedgerInfo const& info) void addRaw(LedgerInfo const&, Serializer&, bool includeHash = false); +Rules +makeRulesGivenLedger(DigestAwareReadView const& ledger, Rules const& current); + Rules makeRulesGivenLedger( DigestAwareReadView const& ledger, diff --git a/src/ripple/ledger/impl/ReadView.cpp b/src/ripple/ledger/impl/ReadView.cpp index 57af008b47c..1ce21777297 100644 --- a/src/ripple/ledger/impl/ReadView.cpp +++ b/src/ripple/ledger/impl/ReadView.cpp @@ -65,6 +65,12 @@ ReadView::txs_type::end() const -> iterator return iterator(view_, view_->txsEnd()); } +Rules +makeRulesGivenLedger(DigestAwareReadView const& ledger, Rules const& current) +{ + return makeRulesGivenLedger(ledger, current.presets()); +} + Rules makeRulesGivenLedger( DigestAwareReadView const& ledger, diff --git a/src/ripple/nodestore/impl/Shard.cpp b/src/ripple/nodestore/impl/Shard.cpp index 030fbf4aa12..14bfe487303 100644 --- a/src/ripple/nodestore/impl/Shard.cpp +++ b/src/ripple/nodestore/impl/Shard.cpp @@ -688,7 +688,10 @@ Shard::finalize(bool writeSQLite, std::optional const& referenceHash) ledger->stateMap().setLedgerSeq(ledgerSeq); ledger->txMap().setLedgerSeq(ledgerSeq); - ledger->setImmutable(config); + assert( + ledger->info().seq < XRP_LEDGER_EARLIEST_FEES || + ledger->read(keylet::fees())); + ledger->setImmutable(); if (!ledger->stateMap().fetchRoot( SHAMapHash{ledger->info().accountHash}, nullptr)) { diff --git a/src/ripple/protocol/Rules.h b/src/ripple/protocol/Rules.h index d8190e86a71..52d01c6f589 100644 --- a/src/ripple/protocol/Rules.h +++ b/src/ripple/protocol/Rules.h @@ -56,6 +56,11 @@ class Rules private: // Allow a friend function to construct Rules. + friend Rules + makeRulesGivenLedger( + DigestAwareReadView const& ledger, + Rules const& current); + friend Rules makeRulesGivenLedger( DigestAwareReadView const& ledger, @@ -66,6 +71,9 @@ class Rules std::optional const& digest, STVector256 const& amendments); + std::unordered_set> const& + presets() const; + public: /** Returns `true` if a feature is enabled. */ bool diff --git a/src/ripple/protocol/SystemParameters.h b/src/ripple/protocol/SystemParameters.h index db0c15dcad7..bc2f7136ff9 100644 --- a/src/ripple/protocol/SystemParameters.h +++ b/src/ripple/protocol/SystemParameters.h @@ -68,6 +68,10 @@ systemCurrencyCode() /** The XRP ledger network's earliest allowed sequence */ static constexpr std::uint32_t XRP_LEDGER_EARLIEST_SEQ{32570u}; +/** The XRP Ledger mainnet's earliest ledger with a FeeSettings object. 
Only + * used in asserts and tests. */ +static constexpr std::uint32_t XRP_LEDGER_EARLIEST_FEES{562177u}; + /** The number of ledgers in a shard */ static constexpr std::uint32_t DEFAULT_LEDGERS_PER_SHARD{16384u}; diff --git a/src/ripple/protocol/impl/Rules.cpp b/src/ripple/protocol/impl/Rules.cpp index 35a09b85658..c8f4720bd6c 100644 --- a/src/ripple/protocol/impl/Rules.cpp +++ b/src/ripple/protocol/impl/Rules.cpp @@ -45,6 +45,12 @@ class Rules::Impl set_.insert(amendments.begin(), amendments.end()); } + std::unordered_set> const& + presets() const + { + return presets_; + } + bool enabled(uint256 const& feature) const { @@ -60,6 +66,7 @@ class Rules::Impl return true; if (!digest_ || !other.digest_) return false; + assert(presets_ == other.presets_); return *digest_ == *other.digest_; } }; @@ -77,6 +84,12 @@ Rules::Rules( { } +std::unordered_set> const& +Rules::presets() const +{ + return impl_->presets(); +} + bool Rules::enabled(uint256 const& feature) const { diff --git a/src/ripple/rpc/impl/TransactionSign.cpp b/src/ripple/rpc/impl/TransactionSign.cpp index 4cf372e6b63..c903c26f8e3 100644 --- a/src/ripple/rpc/impl/TransactionSign.cpp +++ b/src/ripple/rpc/impl/TransactionSign.cpp @@ -720,7 +720,7 @@ checkFee( } } - XRPAmount const feeDefault = config.FEE_DEFAULT; + XRPAmount const feeDefault = config.FEES.reference_fee; auto ledger = app.openLedger().current(); // Administrative and identified endpoints are exempt from local fees. diff --git a/src/test/app/FeeVote_test.cpp b/src/test/app/FeeVote_test.cpp index 90dd8fa3dfc..ad38aefb20a 100644 --- a/src/test/app/FeeVote_test.cpp +++ b/src/test/app/FeeVote_test.cpp @@ -29,7 +29,7 @@ class FeeVote_test : public beast::unit_test::suite void testSetup() { - FeeVote::Setup const defaultSetup; + FeeSetup const defaultSetup; { // defaults Section config; diff --git a/src/test/app/LedgerHistory_test.cpp b/src/test/app/LedgerHistory_test.cpp index ba4faa9da05..880cbea5980 100644 --- a/src/test/app/LedgerHistory_test.cpp +++ b/src/test/app/LedgerHistory_test.cpp @@ -81,8 +81,7 @@ class LedgerHistory_test : public beast::unit_test::suite res->setAccepted( res->info().closeTime, res->info().closeTimeResolution, - true /* close time correct*/, - env.app().config()); + true /* close time correct*/); lh.insert(res, false); return res; } diff --git a/src/test/app/LedgerLoad_test.cpp b/src/test/app/LedgerLoad_test.cpp index d78d25ea05a..2685014e474 100644 --- a/src/test/app/LedgerLoad_test.cpp +++ b/src/test/app/LedgerLoad_test.cpp @@ -82,7 +82,7 @@ class LedgerLoad_test : public beast::unit_test::suite retval.ledger = env.rpc("ledger", "current", "full")[jss::result]; BEAST_EXPECT( - retval.ledger[jss::ledger][jss::accountState].size() == 101); + retval.ledger[jss::ledger][jss::accountState].size() == 102); retval.hashes = [&] { for (auto const& it : retval.ledger[jss::ledger][jss::accountState]) @@ -193,7 +193,7 @@ class LedgerLoad_test : public beast::unit_test::suite nullptr, beast::severities::kDisabled); auto jrb = env.rpc("ledger", "current", "full")[jss::result]; - BEAST_EXPECT(jrb[jss::ledger][jss::accountState].size() == 97); + BEAST_EXPECT(jrb[jss::ledger][jss::accountState].size() == 98); BEAST_EXPECT( jrb[jss::ledger][jss::accountState].size() <= sd.ledger[jss::ledger][jss::accountState].size()); diff --git a/src/test/app/LoadFeeTrack_test.cpp b/src/test/app/LoadFeeTrack_test.cpp index cc0b1c19529..f8e73cebd16 100644 --- a/src/test/app/LoadFeeTrack_test.cpp +++ b/src/test/app/LoadFeeTrack_test.cpp @@ -35,7 +35,7 @@ class LoadFeeTrack_test : 
public beast::unit_test::suite { Fees const fees = [&]() { Fees f; - f.base = d.FEE_DEFAULT; + f.base = d.FEES.reference_fee; f.reserve = 200 * DROPS_PER_XRP; f.increment = 50 * DROPS_PER_XRP; return f; @@ -52,7 +52,7 @@ class LoadFeeTrack_test : public beast::unit_test::suite { Fees const fees = [&]() { Fees f; - f.base = d.FEE_DEFAULT * 10; + f.base = d.FEES.reference_fee * 10; f.reserve = 200 * DROPS_PER_XRP; f.increment = 50 * DROPS_PER_XRP; return f; @@ -69,7 +69,7 @@ class LoadFeeTrack_test : public beast::unit_test::suite { Fees const fees = [&]() { Fees f; - f.base = d.FEE_DEFAULT; + f.base = d.FEES.reference_fee; f.reserve = 200 * DROPS_PER_XRP; f.increment = 50 * DROPS_PER_XRP; return f; diff --git a/src/test/app/NFToken_test.cpp b/src/test/app/NFToken_test.cpp index 81abee34b07..d050579b1e7 100644 --- a/src/test/app/NFToken_test.cpp +++ b/src/test/app/NFToken_test.cpp @@ -215,12 +215,13 @@ class NFToken_test : public beast::unit_test::suite Account const minter{"minter"}; // Fund alice and minter enough to exist, but not enough to meet - // the reserve for creating their first NFT. Account reserve for - // unit tests is 200 XRP, not 20. - env.fund(XRP(200), alice, minter); + // the reserve for creating their first NFT. + auto const acctReserve = env.current()->fees().accountReserve(0); + auto const incReserve = env.current()->fees().increment; + env.fund(acctReserve, alice, minter); env.close(); - BEAST_EXPECT(env.balance(alice) == XRP(200)); - BEAST_EXPECT(env.balance(minter) == XRP(200)); + BEAST_EXPECT(env.balance(alice) == acctReserve); + BEAST_EXPECT(env.balance(minter) == acctReserve); BEAST_EXPECT(ownerCount(env, alice) == 0); BEAST_EXPECT(ownerCount(env, minter) == 0); @@ -233,7 +234,7 @@ class NFToken_test : public beast::unit_test::suite BEAST_EXPECT(burnedCount(env, alice) == 0); // Pay alice almost enough to make the reserve for an NFT page. 
- env(pay(env.master, alice, XRP(50) + drops(9))); + env(pay(env.master, alice, incReserve + drops(9))); env.close(); // A lambda that checks alice's ownerCount, mintedCount, and diff --git a/src/test/app/Offer_test.cpp b/src/test/app/Offer_test.cpp index 1162612b733..0d1a4326440 100644 --- a/src/test/app/Offer_test.cpp +++ b/src/test/app/Offer_test.cpp @@ -2079,7 +2079,8 @@ class Offer_test : public beast::unit_test::suite BEAST_EXPECT(jrr[jss::node][sfBalance.fieldName][jss::value] == "100"); jrr = ledgerEntryRoot(env, alice); BEAST_EXPECT( - jrr[jss::node][sfBalance.fieldName] == XRP(350).value().getText()); + jrr[jss::node][sfBalance.fieldName] == + STAmount(env.current()->fees().accountReserve(3)).getText()); jrr = ledgerEntryState(env, bob, gw1, "USD"); BEAST_EXPECT(jrr[jss::node][sfBalance.fieldName][jss::value] == "-400"); @@ -2176,7 +2177,8 @@ class Offer_test : public beast::unit_test::suite BEAST_EXPECT(jrr[jss::node][sfBalance.fieldName][jss::value] == "-100"); jrr = ledgerEntryRoot(env, alice); BEAST_EXPECT( - jrr[jss::node][sfBalance.fieldName] == XRP(250).value().getText()); + jrr[jss::node][sfBalance.fieldName] == + STAmount(env.current()->fees().accountReserve(1)).getText()); jrr = ledgerEntryState(env, bob, gw, "USD"); BEAST_EXPECT(jrr[jss::node][sfBalance.fieldName][jss::value] == "-400"); @@ -2219,7 +2221,8 @@ class Offer_test : public beast::unit_test::suite BEAST_EXPECT(jrr[jss::node][sfBalance.fieldName][jss::value] == "-200"); jrr = ledgerEntryRoot(env, alice); BEAST_EXPECT( - jrr[jss::node][sfBalance.fieldName] == XRP(250).value().getText()); + jrr[jss::node][sfBalance.fieldName] == + STAmount(env.current()->fees().accountReserve(1)).getText()); jrr = ledgerEntryState(env, bob, gw, "USD"); BEAST_EXPECT(jrr[jss::node][sfBalance.fieldName][jss::value] == "-300"); diff --git a/src/test/app/RCLValidations_test.cpp b/src/test/app/RCLValidations_test.cpp index 14a54a1492f..0380795a8ae 100644 --- a/src/test/app/RCLValidations_test.cpp +++ b/src/test/app/RCLValidations_test.cpp @@ -106,9 +106,10 @@ class RCLValidations_test : public beast::unit_test::suite *prev, env.app().timeKeeper().closeTime()); // Force a different hash on the first iteration next->updateSkipList(); + BEAST_EXPECT(next->read(keylet::fees())); if (forceHash) { - next->setImmutable(config); + next->setImmutable(); forceHash = false; } diff --git a/src/test/app/TxQ_test.cpp b/src/test/app/TxQ_test.cpp index e25c9f60de1..8bf359e101c 100644 --- a/src/test/app/TxQ_test.cpp +++ b/src/test/app/TxQ_test.cpp @@ -1448,7 +1448,7 @@ class TxQ1_test : public beast::unit_test::suite // These tests may change if TxQ ordering is changed using namespace std::string_literals; BEAST_EXPECTS( - aliceSeq + 1 == env.seq(alice), + aliceSeq == env.seq(alice), "alice: "s + std::to_string(aliceSeq) + ", " + std::to_string(env.seq(alice))); BEAST_EXPECTS( @@ -1460,7 +1460,7 @@ class TxQ1_test : public beast::unit_test::suite "charlie: "s + std::to_string(charlieSeq) + ", " + std::to_string(env.seq(charlie))); BEAST_EXPECTS( - dariaSeq == env.seq(daria), + dariaSeq + 1 == env.seq(daria), "daria: "s + std::to_string(dariaSeq) + ", " + std::to_string(env.seq(daria))); BEAST_EXPECTS( @@ -1472,24 +1472,24 @@ class TxQ1_test : public beast::unit_test::suite "fred: "s + std::to_string(fredSeq) + ", " + std::to_string(env.seq(fred))); BEAST_EXPECTS( - gwenSeq + 1 == env.seq(gwen), + gwenSeq == env.seq(gwen), "gwen: "s + std::to_string(gwenSeq) + ", " + std::to_string(env.seq(gwen))); BEAST_EXPECTS( - hankSeq == env.seq(hank), + hankSeq 
+ 1 == env.seq(hank), "hank: "s + std::to_string(hankSeq) + ", " + std::to_string(env.seq(hank))); // Which sequences get incremented may change if TxQ ordering is // changed - ++aliceSeq; + //++aliceSeq; ++bobSeq; ++(++charlieSeq); - // ++dariaSeq; + ++dariaSeq; ++elmoSeq; // ++fredSeq; - ++gwenSeq; - // ++hankSeq; + //++gwenSeq; + ++hankSeq; auto getTxsQueued = [&]() { auto const txs = env.app().getTxQ().getTxs(); @@ -2939,12 +2939,12 @@ class TxQ1_test : public beast::unit_test::suite // Verify that nothing can be added now that the gap is filled. env(noop(alice), seq(aliceSeq + 20), ter(telCAN_NOT_QUEUE_FULL)); - // Close ledger 6. That removes 6 of alice's transactions, - // but alice adds one more transaction at seq(aliceSeq + 20) so - // we only see a reduction by 5. + // Close ledger 6. That removes some of alice's transactions, + // but alice adds some more transaction(s) so expectedCount + // may not reduce to 8. env.close(); checkMetrics(__LINE__, env, 9, 50, 6, 5, 256); - BEAST_EXPECT(env.seq(alice) == aliceSeq + 15); + BEAST_EXPECT(env.seq(alice) == aliceSeq + 13); // Close ledger 7. That should remove 7 more of alice's transactions. env.close(); diff --git a/src/test/jtx/impl/envconfig.cpp b/src/test/jtx/impl/envconfig.cpp index f34a444fa5f..77c43f5e40a 100644 --- a/src/test/jtx/impl/envconfig.cpp +++ b/src/test/jtx/impl/envconfig.cpp @@ -17,9 +17,11 @@ */ //============================================================================== +#include + #include #include -#include +#include namespace ripple { namespace test { @@ -40,6 +42,13 @@ setupConfigForUnitTests(Config& cfg) std::string port_rpc = std::to_string(port_base + 1); std::string port_ws = std::to_string(port_base + 2); + using namespace jtx; + // Default fees to old values, so tests don't have to worry about changes in + // Config.h + cfg.FEES.reference_fee = 10; + cfg.FEES.account_reserve = XRP(200).value().xrp().drops(); + cfg.FEES.owner_reserve = XRP(50).value().xrp().drops(); + cfg.overwrite(ConfigSection::nodeDatabase(), "type", "memory"); cfg.overwrite(ConfigSection::nodeDatabase(), "path", "main"); cfg.deprecatedClearSection(ConfigSection::importNodeDatabase()); diff --git a/src/test/rpc/LedgerClosed_test.cpp b/src/test/rpc/LedgerClosed_test.cpp index c5073db328e..2f81031f85c 100644 --- a/src/test/rpc/LedgerClosed_test.cpp +++ b/src/test/rpc/LedgerClosed_test.cpp @@ -37,7 +37,7 @@ class LedgerClosed_test : public beast::unit_test::suite auto lc_result = env.rpc("ledger_closed")[jss::result]; BEAST_EXPECT( lc_result[jss::ledger_hash] == - "A15F7FBE0B06286915D971BF9802C9431CD7DE40E2AC7D07C409EDB1C0715C60"); + "CCC3B3E88CCAC17F1BE6B4A648A55999411F19E3FE55EB721960EB0DF28EDDA5"); BEAST_EXPECT(lc_result[jss::ledger_index] == 2); env.close(); @@ -52,7 +52,7 @@ class LedgerClosed_test : public beast::unit_test::suite lc_result = env.rpc("ledger_closed")[jss::result]; BEAST_EXPECT( lc_result[jss::ledger_hash] == - "2E81FC6EC0DD943197E0C7E3FBE9AE307F2775F2F7485BB37307984C3C0F2340"); + "E86DE7F3D7A4D9CE17EF7C8BA08A8F4D8F643B9552F0D895A31CDA78F541DE4E"); BEAST_EXPECT(lc_result[jss::ledger_index] == 3); } diff --git a/src/test/rpc/LedgerData_test.cpp b/src/test/rpc/LedgerData_test.cpp index 1c55e907062..ab520181c05 100644 --- a/src/test/rpc/LedgerData_test.cpp +++ b/src/test/rpc/LedgerData_test.cpp @@ -123,7 +123,7 @@ class LedgerData_test : public beast::unit_test::suite jrr[jss::ledger_current_index].isIntegral() && jrr[jss::ledger_current_index].asInt() > 0); BEAST_EXPECT(!jrr.isMember(jss::marker)); - 
BEAST_EXPECT(checkArraySize(jrr[jss::state], num_accounts + 3)); + BEAST_EXPECT(checkArraySize(jrr[jss::state], num_accounts + 4)); } void diff --git a/src/test/rpc/LedgerRPC_test.cpp b/src/test/rpc/LedgerRPC_test.cpp index 5494a81da63..6644e15e959 100644 --- a/src/test/rpc/LedgerRPC_test.cpp +++ b/src/test/rpc/LedgerRPC_test.cpp @@ -237,7 +237,7 @@ class LedgerRPC_test : public beast::unit_test::suite env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr[jss::ledger].isMember(jss::accountState)); BEAST_EXPECT(jrr[jss::ledger][jss::accountState].isArray()); - BEAST_EXPECT(jrr[jss::ledger][jss::accountState].size() == 2u); + BEAST_EXPECT(jrr[jss::ledger][jss::accountState].size() == 3u); } void @@ -276,7 +276,7 @@ class LedgerRPC_test : public beast::unit_test::suite env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr[jss::ledger].isMember(jss::accountState)); BEAST_EXPECT(jrr[jss::ledger][jss::accountState].isArray()); - BEAST_EXPECT(jrr[jss::ledger][jss::accountState].size() == 2u); + BEAST_EXPECT(jrr[jss::ledger][jss::accountState].size() == 3u); } void @@ -1315,11 +1315,12 @@ class LedgerRPC_test : public beast::unit_test::suite } { + std::string const hash3{ + "E86DE7F3D7A4D9CE17EF7C8BA08A8F4D" + "8F643B9552F0D895A31CDA78F541DE4E"}; // access via the ledger_hash field Json::Value jvParams; - jvParams[jss::ledger_hash] = - "2E81FC6EC0DD943197E0C7E3FBE9AE30" - "7F2775F2F7485BB37307984C3C0F2340"; + jvParams[jss::ledger_hash] = hash3; auto jrr = env.rpc( "json", "ledger", @@ -1328,11 +1329,8 @@ class LedgerRPC_test : public beast::unit_test::suite BEAST_EXPECT(jrr.isMember(jss::ledger_hash)); BEAST_EXPECT(jrr[jss::ledger][jss::ledger_index] == "3"); - // extra leading hex chars in hash will be ignored - jvParams[jss::ledger_hash] = - "DEADBEEF" - "2E81FC6EC0DD943197E0C7E3FBE9AE30" - "7F2775F2F7485BB37307984C3C0F2340"; + // extra leading hex chars in hash are not allowed + jvParams[jss::ledger_hash] = "DEADBEEF" + hash3; jrr = env.rpc( "json", "ledger", @@ -1535,7 +1533,7 @@ class LedgerRPC_test : public beast::unit_test::suite env.close(); jrr = env.rpc("json", "ledger", to_string(jv))[jss::result]; - const std::string txid1 = [&]() { + const std::string txid0 = [&]() { auto const& parentHash = env.current()->info().parentHash; if (BEAST_EXPECT(jrr[jss::queue_data].size() == 2)) { @@ -1579,22 +1577,22 @@ class LedgerRPC_test : public beast::unit_test::suite if (BEAST_EXPECT(jrr[jss::queue_data].size() == 2)) { auto const& parentHash = env.current()->info().parentHash; - auto const txid0 = [&]() { - auto const& txj = jrr[jss::queue_data][0u]; + auto const txid1 = [&]() { + auto const& txj = jrr[jss::queue_data][1u]; BEAST_EXPECT(txj[jss::account] == alice.human()); BEAST_EXPECT(txj[jss::fee_level] == "256"); BEAST_EXPECT(txj["preflight_result"] == "tesSUCCESS"); BEAST_EXPECT(txj.isMember(jss::tx)); return txj[jss::tx].asString(); }(); - auto const& txj = jrr[jss::queue_data][1u]; + auto const& txj = jrr[jss::queue_data][0u]; BEAST_EXPECT(txj[jss::account] == alice.human()); BEAST_EXPECT(txj[jss::fee_level] == "256"); BEAST_EXPECT(txj["preflight_result"] == "tesSUCCESS"); BEAST_EXPECT(txj["retries_remaining"] == 9); BEAST_EXPECT(txj["last_result"] == "terPRE_SEQ"); BEAST_EXPECT(txj.isMember(jss::tx)); - BEAST_EXPECT(txj[jss::tx] == txid1); + BEAST_EXPECT(txj[jss::tx] == txid0); uint256 tx0, tx1; BEAST_EXPECT(tx0.parseHex(txid0)); BEAST_EXPECT(tx1.parseHex(txid1)); @@ -1647,7 +1645,7 @@ class LedgerRPC_test : public beast::unit_test::suite 
BEAST_EXPECT(txj["retries_remaining"] == 1); BEAST_EXPECT(txj["last_result"] == "terPRE_SEQ"); BEAST_EXPECT(txj.isMember(jss::tx)); - BEAST_EXPECT(txj[jss::tx] != txid1); + BEAST_EXPECT(txj[jss::tx] != txid0); return txj[jss::tx].asString(); } return std::string{}; diff --git a/src/test/rpc/LedgerRequestRPC_test.cpp b/src/test/rpc/LedgerRequestRPC_test.cpp index de2ddeff8e4..14d06e9234b 100644 --- a/src/test/rpc/LedgerRequestRPC_test.cpp +++ b/src/test/rpc/LedgerRequestRPC_test.cpp @@ -30,6 +30,14 @@ namespace RPC { class LedgerRequestRPC_test : public beast::unit_test::suite { + static constexpr char const* hash1 = + "3020EB9E7BE24EF7D7A060CB051583EC117384636D1781AFB5B87F3E348DA489"; + static constexpr char const* accounthash1 = + "BD8A3D72CA73DDE887AD63666EC2BAD07875CBA997A102579B5B95ECDFFEAED8"; + + static constexpr char const* zerohash = + "0000000000000000000000000000000000000000000000000000000000000000"; + public: void testLedgerRequest() @@ -181,87 +189,69 @@ class LedgerRequestRPC_test : public beast::unit_test::suite BEAST_EXPECT( result[jss::ledger][jss::total_coins] == "100000000000000000"); BEAST_EXPECT(result[jss::ledger][jss::closed] == true); - BEAST_EXPECT( - result[jss::ledger][jss::ledger_hash] == - "E9BB323980D202EC7E51BAB2AA8E35353F9C7BDAB59BF17378EADD4D0486EF9F"); - BEAST_EXPECT( - result[jss::ledger][jss::parent_hash] == - "0000000000000000000000000000000000000000000000000000000000000000"); - BEAST_EXPECT( - result[jss::ledger][jss::account_hash] == - "A21ED30C04C88046FC61DB9DC19375EEDBD365FD8C17286F27127DF804E9CAA6"); - BEAST_EXPECT( - result[jss::ledger][jss::transaction_hash] == - "0000000000000000000000000000000000000000000000000000000000000000"); + BEAST_EXPECT(result[jss::ledger][jss::ledger_hash] == hash1); + BEAST_EXPECT(result[jss::ledger][jss::parent_hash] == zerohash); + BEAST_EXPECT(result[jss::ledger][jss::account_hash] == accounthash1); + BEAST_EXPECT(result[jss::ledger][jss::transaction_hash] == zerohash); result = env.rpc("ledger_request", "2")[jss::result]; + constexpr char const* hash2 = + "CCC3B3E88CCAC17F1BE6B4A648A55999411F19E3FE55EB721960EB0DF28EDDA5"; BEAST_EXPECT(result[jss::ledger][jss::ledger_index] == "2"); BEAST_EXPECT( result[jss::ledger][jss::total_coins] == "100000000000000000"); BEAST_EXPECT(result[jss::ledger][jss::closed] == true); - BEAST_EXPECT( - result[jss::ledger][jss::ledger_hash] == - "A15F7FBE0B06286915D971BF9802C9431CD7DE40E2AC7D07C409EDB1C0715C60"); - BEAST_EXPECT( - result[jss::ledger][jss::parent_hash] == - "E9BB323980D202EC7E51BAB2AA8E35353F9C7BDAB59BF17378EADD4D0486EF9F"); + BEAST_EXPECT(result[jss::ledger][jss::ledger_hash] == hash2); + BEAST_EXPECT(result[jss::ledger][jss::parent_hash] == hash1); BEAST_EXPECT( result[jss::ledger][jss::account_hash] == - "CB07F3CA0398BE969A5B88F874629D4DBB6E103DE7C6DB8037281A89E51AA8C6"); - BEAST_EXPECT( - result[jss::ledger][jss::transaction_hash] == - "0000000000000000000000000000000000000000000000000000000000000000"); + "3C834285F7F464FBE99AFEB84D354A968EB2CAA24523FF26797A973D906A3D29"); + BEAST_EXPECT(result[jss::ledger][jss::transaction_hash] == zerohash); result = env.rpc("ledger_request", "3")[jss::result]; + constexpr char const* hash3 = + "8D631B20BC989AF568FBA97375290544B0703A5ADC1CF9E9053580461690C9EE"; BEAST_EXPECT(result[jss::ledger][jss::ledger_index] == "3"); BEAST_EXPECT( result[jss::ledger][jss::total_coins] == "99999999999999980"); BEAST_EXPECT(result[jss::ledger][jss::closed] == true); - BEAST_EXPECT( - result[jss::ledger][jss::ledger_hash] == - 
"9BCA8AE5FD41D223D82E1B8288961D693EB1B2EFA10F51827A641AD4B12111D7"); - BEAST_EXPECT( - result[jss::ledger][jss::parent_hash] == - "A15F7FBE0B06286915D971BF9802C9431CD7DE40E2AC7D07C409EDB1C0715C60"); + BEAST_EXPECT(result[jss::ledger][jss::ledger_hash] == hash3); + BEAST_EXPECT(result[jss::ledger][jss::parent_hash] == hash2); BEAST_EXPECT( result[jss::ledger][jss::account_hash] == - "5B793533909906D15CE27D1A423732D113160AB166188D89A2DFD8737CBDCBD5"); + "BC9EF2A16BFF80BCFABA6FA84688D858D33BD0FA0435CAA9DF6DA4105A39A29E"); BEAST_EXPECT( result[jss::ledger][jss::transaction_hash] == "0213EC486C058B3942FBE3DAC6839949A5C5B02B8B4244C8998EFDF04DBD8222"); result = env.rpc("ledger_request", "4")[jss::result]; + constexpr char const* hash4 = + "1A8E7098B23597E73094DADA58C9D62F3AB93A12C6F7666D56CA85A6CFDE530F"; BEAST_EXPECT(result[jss::ledger][jss::ledger_index] == "4"); BEAST_EXPECT( result[jss::ledger][jss::total_coins] == "99999999999999960"); BEAST_EXPECT(result[jss::ledger][jss::closed] == true); - BEAST_EXPECT( - result[jss::ledger][jss::ledger_hash] == - "433D1E42F2735F926BF594E4F3DFC70AE3E74F51464156ED83A33D0FF121D136"); - BEAST_EXPECT( - result[jss::ledger][jss::parent_hash] == - "9BCA8AE5FD41D223D82E1B8288961D693EB1B2EFA10F51827A641AD4B12111D7"); + BEAST_EXPECT(result[jss::ledger][jss::ledger_hash] == hash4); + BEAST_EXPECT(result[jss::ledger][jss::parent_hash] == hash3); BEAST_EXPECT( result[jss::ledger][jss::account_hash] == - "39C91E2227ACECD057AFDC64AE8FEFF5A0E07CF26ED29D1AECC55B0385F3EFDE"); + "C690188F123C91355ADA8BDF4AC5B5C927076D3590C215096868A5255264C6DD"); BEAST_EXPECT( result[jss::ledger][jss::transaction_hash] == "3CBDB8F42E04333E1642166BFB93AC9A7E1C6C067092CD5D881D6F3AB3D67E76"); result = env.rpc("ledger_request", "5")[jss::result]; + constexpr char const* hash5 = + "C6A222D71AE65D7B4F240009EAD5DEB20D7EEDE5A4064F28BBDBFEEB6FBE48E5"; BEAST_EXPECT(result[jss::ledger][jss::ledger_index] == "5"); BEAST_EXPECT( result[jss::ledger][jss::total_coins] == "99999999999999940"); BEAST_EXPECT(result[jss::ledger][jss::closed] == true); - BEAST_EXPECT( - result[jss::ledger][jss::ledger_hash] == - "9ED4D0C397810980904AF3FC08583D23B09C3C7CCF835D2A4768145A8BAC1175"); - BEAST_EXPECT( - result[jss::ledger][jss::parent_hash] == - "433D1E42F2735F926BF594E4F3DFC70AE3E74F51464156ED83A33D0FF121D136"); + BEAST_EXPECT(result[jss::ledger][jss::ledger_hash] == hash5); + BEAST_EXPECT(result[jss::ledger][jss::parent_hash] == hash4); BEAST_EXPECT( result[jss::ledger][jss::account_hash] == - "8F047B6A0D2083DF4F69C17F7CC9AE997B0D59020A43D9799A31D22F55837147"); + "EA81CD9D36740736F00CB747E0D0E32D3C10B695823D961F0FB9A1CE7133DD4D"); BEAST_EXPECT( result[jss::ledger][jss::transaction_hash] == "C3D086CD6BDB9E97AD1D513B2C049EF2840BD21D0B3E22D84EBBB89B6D2EF59D"); @@ -340,18 +330,10 @@ class LedgerRequestRPC_test : public beast::unit_test::suite BEAST_EXPECT( result[jss::ledger][jss::total_coins] == "100000000000000000"); BEAST_EXPECT(result[jss::ledger][jss::closed] == true); - BEAST_EXPECT( - result[jss::ledger][jss::ledger_hash] == - "E9BB323980D202EC7E51BAB2AA8E35353F9C7BDAB59BF17378EADD4D0486EF9F"); - BEAST_EXPECT( - result[jss::ledger][jss::parent_hash] == - "0000000000000000000000000000000000000000000000000000000000000000"); - BEAST_EXPECT( - result[jss::ledger][jss::account_hash] == - "A21ED30C04C88046FC61DB9DC19375EEDBD365FD8C17286F27127DF804E9CAA6"); - BEAST_EXPECT( - result[jss::ledger][jss::transaction_hash] == - "0000000000000000000000000000000000000000000000000000000000000000"); + 
BEAST_EXPECT(result[jss::ledger][jss::ledger_hash] == hash1); + BEAST_EXPECT(result[jss::ledger][jss::parent_hash] == zerohash); + BEAST_EXPECT(result[jss::ledger][jss::account_hash] == accounthash1); + BEAST_EXPECT(result[jss::ledger][jss::transaction_hash] == zerohash); } void diff --git a/src/test/rpc/ReportingETL_test.cpp b/src/test/rpc/ReportingETL_test.cpp index d8e6fc684fd..77284dd776d 100644 --- a/src/test/rpc/ReportingETL_test.cpp +++ b/src/test/rpc/ReportingETL_test.cpp @@ -532,7 +532,7 @@ class ReportingETL_test : public beast::unit_test::suite BEAST_EXPECT(status.ok()); BEAST_EXPECT( - reply.ledger_objects().objects_size() == num_accounts + 3); + reply.ledger_objects().objects_size() == num_accounts + 4); BEAST_EXPECT(reply.marker().size() == 0); auto ledger = env.closed(); size_t idx = 0; From 79e621d96c526d53417da420c35626a25bd91700 Mon Sep 17 00:00:00 2001 From: Alloy Networks <45832257+alloynetworks@users.noreply.github.com> Date: Tue, 28 Mar 2023 22:04:06 +0300 Subject: [PATCH 138/508] Update README.md (#4463) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b2d0ff42a3d..2b6aa5512f1 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powered by a network of peer-to-peer nodes. The XRP Ledger uses a novel Byzantine Fault Tolerant consensus algorithm to settle and record transactions in a secure distributed database without a central operator. ## XRP -[XRP](https://xrpl.org/xrp.html) is a public, counterparty-free asset native to the XRP Ledger, and is designed to bridge the many different currencies in use worldwide. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP. Its creators gifted 80 billion XRP to a company, now called [Ripple](https://ripple.com/), to develop the XRP Ledger and its ecosystem. Ripple uses XRP to help build the Internet of Value, ushering in a world in which money moves as fast and efficiently as information does today. +[XRP](https://xrpl.org/xrp.html) is a public, counterparty-free asset native to the XRP Ledger, and is designed to bridge the many different currencies in use worldwide. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP. ## rippled The server software that powers the XRP Ledger is called `rippled` and is available in this repository under the permissive [ISC open-source license](LICENSE.md). The `rippled` server software is written primarily in C++ and runs on a variety of platforms. The `rippled` server software can run in several modes depending on its [configuration](https://xrpl.org/rippled-server-modes.html). From 46167d1c46b3e4fcf512ff619e0b7ecea57f44cb Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Tue, 28 Mar 2023 15:55:53 -0700 Subject: [PATCH 139/508] Add link to BUILD.md: (#4450) In the release notes (current and historical), there is a link to the `Builds` directory. By creating `Builds/README.md` with a link to `BUILD.md`, it is easier to find the build instructions. 
--- Builds/README.md | 1 + 1 file changed, 1 insertion(+) create mode 100644 Builds/README.md diff --git a/Builds/README.md b/Builds/README.md new file mode 100644 index 00000000000..e99ba8c9a49 --- /dev/null +++ b/Builds/README.md @@ -0,0 +1 @@ +[Please see the BUILD instructions here](../BUILD.md) From 135b63dbe0a692ba6cf08987ce67a7f11962511a Mon Sep 17 00:00:00 2001 From: Brandon Wilson Date: Thu, 30 Mar 2023 01:01:41 -0500 Subject: [PATCH 140/508] Update example [validator_list_sites] (#4448) --- cfg/validators-example.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/cfg/validators-example.txt b/cfg/validators-example.txt index 9ad1e9b5016..8f7c04729e0 100644 --- a/cfg/validators-example.txt +++ b/cfg/validators-example.txt @@ -26,7 +26,6 @@ # # Examples: # https://vl.ripple.com -# https://vl.coil.com # https://vl.xrplf.org # http://127.0.0.1:8000 # file:///etc/opt/ripple/vl.txt From 8bfdbcbab5ac193a71ea10712d47a91ef1aaa8eb Mon Sep 17 00:00:00 2001 From: drlongle Date: Thu, 30 Mar 2023 19:13:30 +0200 Subject: [PATCH 141/508] Add logging for exceptions: (#4400) Log exception messages at several locations. Previously, these were locations where an exception was caught, but the exception message was not logged. Logging the exception messages can be useful for analysis or debugging. The additional logging could have a small negative performance impact. Fix #3213. --- src/ripple/app/consensus/RCLConsensus.cpp | 12 ++++++----- src/ripple/app/ledger/ConsensusTransSetSF.cpp | 6 ++++-- src/ripple/app/ledger/Ledger.cpp | 11 ++++++++-- src/ripple/app/ledger/Ledger.h | 1 + src/ripple/app/ledger/impl/BuildLedger.cpp | 5 +++-- src/ripple/app/ledger/impl/LedgerMaster.cpp | 20 +++++++++++-------- src/ripple/app/ledger/impl/LedgerToJson.cpp | 7 ++++++- src/ripple/app/ledger/impl/OpenLedger.cpp | 4 ++-- src/ripple/app/misc/Manifest.h | 18 +++++++++++------ src/ripple/app/misc/NetworkOPs.cpp | 5 +++-- src/ripple/app/misc/impl/Manifest.cpp | 12 +++++++---- src/ripple/app/misc/impl/ValidatorSite.cpp | 13 ++++++++---- src/ripple/app/tx/impl/apply.cpp | 4 ++-- src/ripple/basics/impl/make_SSLContext.cpp | 7 +++++-- src/ripple/overlay/impl/PeerImp.cpp | 16 +++++++++------ src/ripple/rpc/handlers/PayChanClaim.cpp | 7 ++++--- src/ripple/shamap/impl/SHAMap.cpp | 3 ++- 17 files changed, 99 insertions(+), 52 deletions(-) diff --git a/src/ripple/app/consensus/RCLConsensus.cpp b/src/ripple/app/consensus/RCLConsensus.cpp index 7ba059881c0..4c80694cbbe 100644 --- a/src/ripple/app/consensus/RCLConsensus.cpp +++ b/src/ripple/app/consensus/RCLConsensus.cpp @@ -498,10 +498,11 @@ RCLConsensus::Adaptor::doAccept( std::make_shared(SerialIter{item.slice()})); JLOG(j_.debug()) << " Tx: " << item.key(); } - catch (std::exception const&) + catch (std::exception const& ex) { failed.insert(item.key()); - JLOG(j_.warn()) << " Tx: " << item.key() << " throws!"; + JLOG(j_.warn()) + << " Tx: " << item.key() << " throws: " << ex.what(); } } @@ -615,10 +616,11 @@ RCLConsensus::Adaptor::doAccept( anyDisputes = true; } - catch (std::exception const&) + catch (std::exception const& ex) { - JLOG(j_.debug()) - << "Failed to apply transaction we voted NO on"; + JLOG(j_.debug()) << "Failed to apply transaction we voted " + "NO on. 
Exception: " + << ex.what(); } } } diff --git a/src/ripple/app/ledger/ConsensusTransSetSF.cpp b/src/ripple/app/ledger/ConsensusTransSetSF.cpp index 997a2aee147..476c757515f 100644 --- a/src/ripple/app/ledger/ConsensusTransSetSF.cpp +++ b/src/ripple/app/ledger/ConsensusTransSetSF.cpp @@ -66,9 +66,11 @@ ConsensusTransSetSF::gotNode( pap->getOPs().submitTransaction(stx); }); } - catch (std::exception const&) + catch (std::exception const& ex) { - JLOG(j_.warn()) << "Fetched invalid transaction in proposed set"; + JLOG(j_.warn()) + << "Fetched invalid transaction in proposed set. Exception: " + << ex.what(); } } } diff --git a/src/ripple/app/ledger/Ledger.cpp b/src/ripple/app/ledger/Ledger.cpp index 4de01cc909e..d96bf5939e6 100644 --- a/src/ripple/app/ledger/Ledger.cpp +++ b/src/ripple/app/ledger/Ledger.cpp @@ -186,6 +186,7 @@ Ledger::Ledger( , txMap_(std::make_shared(SHAMapType::TRANSACTION, family)) , stateMap_(std::make_shared(SHAMapType::STATE, family)) , rules_{config.features} + , j_(beast::Journal(beast::Journal::getNullSink())) { info_.seq = 1; info_.drops = INITIAL_XRP; @@ -255,6 +256,7 @@ Ledger::Ledger( std::make_shared(SHAMapType::STATE, info.accountHash, family)) , rules_(config.features) , info_(info) + , j_(j) { loaded = true; @@ -306,6 +308,7 @@ Ledger::Ledger(Ledger const& prevLedger, NetClock::time_point closeTime) , stateMap_(prevLedger.stateMap_->snapShot(true)) , fees_(prevLedger.fees_) , rules_(prevLedger.rules_) + , j_(beast::Journal(beast::Journal::getNullSink())) { info_.seq = prevLedger.info_.seq + 1; info_.parentCloseTime = prevLedger.info_.closeTime; @@ -339,6 +342,7 @@ Ledger::Ledger(LedgerInfo const& info, Config const& config, Family& family) std::make_shared(SHAMapType::STATE, info.accountHash, family)) , rules_{config.features} , info_(info) + , j_(beast::Journal(beast::Journal::getNullSink())) { info_.hash = calculateLedgerHash(info_); } @@ -352,6 +356,7 @@ Ledger::Ledger( , txMap_(std::make_shared(SHAMapType::TRANSACTION, family)) , stateMap_(std::make_shared(SHAMapType::STATE, family)) , rules_{config.features} + , j_(beast::Journal(beast::Journal::getNullSink())) { info_.seq = ledgerSeq; info_.closeTime = closeTime; @@ -626,8 +631,9 @@ Ledger::setup() { ret = false; } - catch (std::exception const&) + catch (std::exception const& ex) { + JLOG(j_.error()) << "Exception in " << __func__ << ": " << ex.what(); Rethrow(); } @@ -682,8 +688,9 @@ Ledger::setup() { ret = false; } - catch (std::exception const&) + catch (std::exception const& ex) { + JLOG(j_.error()) << "Exception in " << __func__ << ": " << ex.what(); Rethrow(); } diff --git a/src/ripple/app/ledger/Ledger.h b/src/ripple/app/ledger/Ledger.h index 0b0830df9d9..84e65ecffc7 100644 --- a/src/ripple/app/ledger/Ledger.h +++ b/src/ripple/app/ledger/Ledger.h @@ -410,6 +410,7 @@ class Ledger final : public std::enable_shared_from_this, Fees fees_; Rules rules_; LedgerInfo info_; + beast::Journal j_; }; /** A ledger wrapped in a CachedView. 
*/ diff --git a/src/ripple/app/ledger/impl/BuildLedger.cpp b/src/ripple/app/ledger/impl/BuildLedger.cpp index 87960d16eac..363c110cd18 100644 --- a/src/ripple/app/ledger/impl/BuildLedger.cpp +++ b/src/ripple/app/ledger/impl/BuildLedger.cpp @@ -143,9 +143,10 @@ applyTransactions( ++it; } } - catch (std::exception const&) + catch (std::exception const& ex) { - JLOG(j.warn()) << "Transaction " << txid << " throws"; + JLOG(j.warn()) + << "Transaction " << txid << " throws: " << ex.what(); failed.insert(txid); it = txns.erase(it); } diff --git a/src/ripple/app/ledger/impl/LedgerMaster.cpp b/src/ripple/app/ledger/impl/LedgerMaster.cpp index 7476698ee3a..7ae7476948b 100644 --- a/src/ripple/app/ledger/impl/LedgerMaster.cpp +++ b/src/ripple/app/ledger/impl/LedgerMaster.cpp @@ -866,10 +866,11 @@ LedgerMaster::fixMismatch(ReadView const& ledger) { hash = hashOfSeq(ledger, lSeq, m_journal); } - catch (std::exception const&) + catch (std::exception const& ex) { JLOG(m_journal.warn()) - << "fixMismatch encounters partial ledger"; + << "fixMismatch encounters partial ledger. Exception: " + << ex.what(); clearLedger(lSeq); return; } @@ -1407,10 +1408,11 @@ LedgerMaster::findNewLedgersToPublish( JLOG(m_journal.trace()) << "ready to publish " << ret.size() << " ledgers."; } - catch (std::exception const&) + catch (std::exception const& ex) { JLOG(m_journal.error()) - << "Exception while trying to find ledgers to publish."; + << "Exception while trying to find ledgers to publish: " + << ex.what(); } if (app_.config().LEDGER_REPLAY) @@ -2009,9 +2011,10 @@ LedgerMaster::fetchForHistory( } } } - catch (std::exception const&) + catch (std::exception const& ex) { - JLOG(m_journal.warn()) << "Threw while prefetching"; + JLOG(m_journal.warn()) + << "Threw while prefetching: " << ex.what(); } } } @@ -2346,9 +2349,10 @@ LedgerMaster::makeFetchPack( peer->send(msg); } - catch (std::exception const&) + catch (std::exception const& ex) { - JLOG(m_journal.warn()) << "Exception building fetch pach"; + JLOG(m_journal.warn()) + << "Exception building fetch pach. Exception: " << ex.what(); } } diff --git a/src/ripple/app/ledger/impl/LedgerToJson.cpp b/src/ripple/app/ledger/impl/LedgerToJson.cpp index f4fba08d732..e32303c492d 100644 --- a/src/ripple/app/ledger/impl/LedgerToJson.cpp +++ b/src/ripple/app/ledger/impl/LedgerToJson.cpp @@ -192,9 +192,14 @@ fillJsonTx(Object& json, LedgerFill const& fill) appendAll(fill.ledger.txs); } } - catch (std::exception const&) + catch (std::exception const& ex) { // Nothing the user can do about this. 
+ if (fill.context) + { + JLOG(fill.context->j.error()) + << "Exception in " << __func__ << ": " << ex.what(); + } } } diff --git a/src/ripple/app/ledger/impl/OpenLedger.cpp b/src/ripple/app/ledger/impl/OpenLedger.cpp index 5bb6e53ace6..7eef84fc554 100644 --- a/src/ripple/app/ledger/impl/OpenLedger.cpp +++ b/src/ripple/app/ledger/impl/OpenLedger.cpp @@ -206,9 +206,9 @@ debugTostr(SHAMap const& set) auto const tx = std::make_shared(sit); ss << debugTxstr(tx) << ", "; } - catch (std::exception const&) + catch (std::exception const& ex) { - ss << "THRO, "; + ss << "THROW:" << ex.what() << ", "; } } return ss.str(); diff --git a/src/ripple/app/misc/Manifest.h b/src/ripple/app/misc/Manifest.h index a1658428cb7..88452e5e0cc 100644 --- a/src/ripple/app/misc/Manifest.h +++ b/src/ripple/app/misc/Manifest.h @@ -138,12 +138,14 @@ to_string(Manifest const& m); */ /** @{ */ std::optional -deserializeManifest(Slice s); +deserializeManifest(Slice s, beast::Journal journal); inline std::optional -deserializeManifest(std::string const& s) +deserializeManifest( + std::string const& s, + beast::Journal journal = beast::Journal(beast::Journal::getNullSink())) { - return deserializeManifest(makeSlice(s)); + return deserializeManifest(makeSlice(s), journal); } template < @@ -151,9 +153,11 @@ template < class = std::enable_if_t< std::is_same::value || std::is_same::value>> std::optional -deserializeManifest(std::vector const& v) +deserializeManifest( + std::vector const& v, + beast::Journal journal = beast::Journal(beast::Journal::getNullSink())) { - return deserializeManifest(makeSlice(v)); + return deserializeManifest(makeSlice(v), journal); } /** @} */ @@ -180,7 +184,9 @@ struct ValidatorToken }; std::optional -loadValidatorToken(std::vector const& blob); +loadValidatorToken( + std::vector const& blob, + beast::Journal journal = beast::Journal(beast::Journal::getNullSink())); enum class ManifestDisposition { /// Manifest is valid diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index d6816b1d420..6be11c7dd6c 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -1162,9 +1162,10 @@ NetworkOPsImp::submitTransaction(std::shared_ptr const& iTrans) return; } } - catch (std::exception const&) + catch (std::exception const& ex) { - JLOG(m_journal.warn()) << "Exception checking transaction" << txid; + JLOG(m_journal.warn()) + << "Exception checking transaction " << txid << ": " << ex.what(); return; } diff --git a/src/ripple/app/misc/impl/Manifest.cpp b/src/ripple/app/misc/impl/Manifest.cpp index 931b632159c..60b52133053 100644 --- a/src/ripple/app/misc/impl/Manifest.cpp +++ b/src/ripple/app/misc/impl/Manifest.cpp @@ -50,7 +50,7 @@ to_string(Manifest const& m) } std::optional -deserializeManifest(Slice s) +deserializeManifest(Slice s, beast::Journal journal) { if (s.empty()) return std::nullopt; @@ -148,8 +148,10 @@ deserializeManifest(Slice s) return m; } - catch (std::exception const&) + catch (std::exception const& ex) { + JLOG(journal.error()) + << "Exception in " << __func__ << ": " << ex.what(); return std::nullopt; } } @@ -239,7 +241,7 @@ Manifest::getMasterSignature() const } std::optional -loadValidatorToken(std::vector const& blob) +loadValidatorToken(std::vector const& blob, beast::Journal journal) { try { @@ -277,8 +279,10 @@ loadValidatorToken(std::vector const& blob) return std::nullopt; } - catch (std::exception const&) + catch (std::exception const& ex) { + JLOG(journal.error()) + << "Exception in " << __func__ << ": " << 
ex.what(); return std::nullopt; } } diff --git a/src/ripple/app/misc/impl/ValidatorSite.cpp b/src/ripple/app/misc/impl/ValidatorSite.cpp index a73d348ee04..013d7e96ffd 100644 --- a/src/ripple/app/misc/impl/ValidatorSite.cpp +++ b/src/ripple/app/misc/impl/ValidatorSite.cpp @@ -357,8 +357,9 @@ ValidatorSite::onTimer(std::size_t siteIdx, error_code const& ec) // the WorkSSL client ctor can throw if SSL init fails makeRequest(sites_[siteIdx].startingResource, siteIdx, lock); } - catch (std::exception&) + catch (std::exception const& ex) { + JLOG(j_.error()) << "Exception in " << __func__ << ": " << ex.what(); onSiteFetch( boost::system::error_code{-1, boost::system::generic_category()}, {}, @@ -526,7 +527,7 @@ ValidatorSite::processRedirect( throw std::runtime_error( "invalid scheme in redirect " + newLocation->pUrl.scheme); } - catch (std::exception&) + catch (std::exception const& ex) { JLOG(j_.error()) << "Invalid redirect location: " << res[field::location]; @@ -606,8 +607,10 @@ ValidatorSite::onSiteFetch( } } } - catch (std::exception& ex) + catch (std::exception const& ex) { + JLOG(j_.error()) + << "Exception in " << __func__ << ": " << ex.what(); onError(ex.what(), false); } } @@ -643,8 +646,10 @@ ValidatorSite::onTextFetch( parseJsonResponse(res, siteIdx, lock_sites); } - catch (std::exception& ex) + catch (std::exception const& ex) { + JLOG(j_.error()) + << "Exception in " << __func__ << ": " << ex.what(); sites_[siteIdx].lastRefreshStatus.emplace(Site::Status{ clock_type::now(), ListDisposition::invalid, ex.what()}); } diff --git a/src/ripple/app/tx/impl/apply.cpp b/src/ripple/app/tx/impl/apply.cpp index 5144e05fea3..c0704c5c3ae 100644 --- a/src/ripple/app/tx/impl/apply.cpp +++ b/src/ripple/app/tx/impl/apply.cpp @@ -159,9 +159,9 @@ applyTransaction( JLOG(j.debug()) << "Transaction retry: " << transHuman(result.first); return ApplyResult::Retry; } - catch (std::exception const&) + catch (std::exception const& ex) { - JLOG(j.warn()) << "Throws"; + JLOG(j.warn()) << "Throws: " << ex.what(); return ApplyResult::Fail; } } diff --git a/src/ripple/basics/impl/make_SSLContext.cpp b/src/ripple/basics/impl/make_SSLContext.cpp index 79a0e9009be..7d72d9e08b4 100644 --- a/src/ripple/basics/impl/make_SSLContext.cpp +++ b/src/ripple/basics/impl/make_SSLContext.cpp @@ -292,10 +292,13 @@ initAuthenticated( fclose(f); } - catch (std::exception const&) + catch (std::exception const& ex) { fclose(f); - LogicError("Reading the SSL chain file generated an exception."); + LogicError( + std::string( + "Reading the SSL chain file generated an exception: ") + + ex.what()); } } diff --git a/src/ripple/overlay/impl/PeerImp.cpp b/src/ripple/overlay/impl/PeerImp.cpp index 8e014a10f73..a07c457458c 100644 --- a/src/ripple/overlay/impl/PeerImp.cpp +++ b/src/ripple/overlay/impl/PeerImp.cpp @@ -1608,10 +1608,11 @@ PeerImp::handleTransaction( }); } } - catch (std::exception const&) + catch (std::exception const& ex) { JLOG(p_journal_.warn()) - << "Transaction invalid: " << strHex(m->rawtransaction()); + << "Transaction invalid: " << strHex(m->rawtransaction()) + << ". 
Exception: " << ex.what(); } } @@ -3110,8 +3111,10 @@ PeerImp::checkTransaction( app_.getOPs().processTransaction( tx, trusted, false, NetworkOPs::FailHard::no); } - catch (std::exception const&) + catch (std::exception const& ex) { + JLOG(p_journal_.warn()) + << "Exception in " << __func__ << ": " << ex.what(); app_.getHashRouter().setFlags(stx->getTransactionID(), SF_BAD); charge(Resource::feeBadData); } @@ -3195,9 +3198,10 @@ PeerImp::checkValidation( } } } - catch (std::exception const&) + catch (std::exception const& ex) { - JLOG(p_journal_.trace()) << "Exception processing validation"; + JLOG(p_journal_.trace()) + << "Exception processing validation: " << ex.what(); charge(Resource::feeInvalidRequest); } } @@ -3566,7 +3570,7 @@ PeerImp::processLedgerRequest(std::shared_ptr const& m) << "processLedgerRequest: getNodeFat returns false"; } } - catch (std::exception& e) + catch (std::exception const& e) { std::string info; switch (itype) diff --git a/src/ripple/rpc/handlers/PayChanClaim.cpp b/src/ripple/rpc/handlers/PayChanClaim.cpp index c5b1202fab5..6353124cb39 100644 --- a/src/ripple/rpc/handlers/PayChanClaim.cpp +++ b/src/ripple/rpc/handlers/PayChanClaim.cpp @@ -80,10 +80,11 @@ doChannelAuthorize(RPC::JsonContext& context) auto const buf = sign(pk, sk, msg.slice()); result[jss::signature] = strHex(buf); } - catch (std::exception&) + catch (std::exception const& ex) { - result = - RPC::make_error(rpcINTERNAL, "Exception occurred during signing."); + result = RPC::make_error( + rpcINTERNAL, + "Exception occurred during signing: " + std::string(ex.what())); } return result; } diff --git a/src/ripple/shamap/impl/SHAMap.cpp b/src/ripple/shamap/impl/SHAMap.cpp index fa42c8e8f82..ce031003c23 100644 --- a/src/ripple/shamap/impl/SHAMap.cpp +++ b/src/ripple/shamap/impl/SHAMap.cpp @@ -193,13 +193,14 @@ SHAMap::finishFetch( canonicalize(hash, node); return node; } + catch (SHAMapMissingNode const& e) { JLOG(journal_.warn()) << "Missing node: " << hash << " : " << e.what(); } catch (std::runtime_error const& e) { - JLOG(journal_.warn()) << e.what(); + JLOG(journal_.warn()) << __func__ << " : " << e.what(); } catch (...) { From 5ebcaf0a6c98f42ecf0dd5ecacd8e64091affbf8 Mon Sep 17 00:00:00 2001 From: drlongle Date: Thu, 30 Mar 2023 20:46:18 +0200 Subject: [PATCH 142/508] Add account flags to account_info response: (#4459) Previously, the object `account_data` in the `account_info` response contained a single field `Flags` that contains flags of an account. API consumers must perform bitwise operations on this field to retrieve the account flags. This change adds a new object, `account_flags`, at the top level of the `account_info` response `result`. The object contains relevant flags of the account. This makes it easier to write simple code to check a flag's value. The flags included may depend on the amendments that are enabled. Fix #2457. 
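For illustration only (not part of this change), here is a minimal sketch of how test or client code might read the new object. It reuses the same `env.rpc("json", "account_info", ...)` call that the updated unit tests use; the helper name `requiresDestinationTag` is hypothetical, and the snippet assumes the jtx test framework headers used elsewhere in this patch.

    #include <test/jtx.h>
    #include <ripple/protocol/jss.h>

    using namespace ripple;
    using namespace ripple::test;

    // Illustrative helper, not part of this commit.
    static bool
    requiresDestinationTag(jtx::Env& env, jtx::Account const& acct)
    {
        auto const info = env.rpc(
            "json",
            "account_info",
            R"({"account" : ")" + acct.human() + R"("})");

        // The new top-level object sits alongside account_data in the result.
        Json::Value const& flags = info[jss::result][jss::account_flags];

        // Each member is a plain boolean, so no bitwise masking of `Flags` is needed.
        return flags["requireDestinationTag"].asBool();
    }

When the DisallowIncoming amendment is enabled, the four disallowIncoming* members are reported in the same object.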
--- src/ripple/protocol/jss.h | 1 + src/ripple/rpc/handlers/AccountInfo.cpp | 33 ++++++++ src/test/rpc/AccountInfo_test.cpp | 101 +++++++++++++++++++++++- 3 files changed, 134 insertions(+), 1 deletion(-) diff --git a/src/ripple/protocol/jss.h b/src/ripple/protocol/jss.h index 1c5bf8463b0..3932349c82a 100644 --- a/src/ripple/protocol/jss.h +++ b/src/ripple/protocol/jss.h @@ -114,6 +114,7 @@ JSS(account); // in/out: many JSS(accountState); // out: LedgerToJson JSS(accountTreeHash); // out: ledger/Ledger.cpp JSS(account_data); // out: AccountInfo +JSS(account_flags); // out: AccountInfo JSS(account_hash); // out: LedgerToJson JSS(account_id); // out: WalletPropose JSS(account_nfts); // out: AccountNFTs diff --git a/src/ripple/rpc/handlers/AccountInfo.cpp b/src/ripple/rpc/handlers/AccountInfo.cpp index f08805761ee..ef130ef185f 100644 --- a/src/ripple/rpc/handlers/AccountInfo.cpp +++ b/src/ripple/rpc/handlers/AccountInfo.cpp @@ -77,6 +77,28 @@ doAccountInfo(RPC::JsonContext& context) if (jvAccepted) return jvAccepted; + static constexpr std:: + array, 9> + lsFlags{ + {{"defaultRipple", lsfDefaultRipple}, + {"depositAuth", lsfDepositAuth}, + {"disableMasterKey", lsfDisableMaster}, + {"disallowIncomingXRP", lsfDisallowXRP}, + {"globalFreeze", lsfGlobalFreeze}, + {"noFreeze", lsfNoFreeze}, + {"passwordSpent", lsfPasswordSpent}, + {"requireAuthorization", lsfRequireAuth}, + {"requireDestinationTag", lsfRequireDestTag}}}; + + static constexpr std:: + array, 4> + disallowIncomingFlags{ + {{"disallowIncomingNFTokenOffer", + lsfDisallowIncomingNFTokenOffer}, + {"disallowIncomingCheck", lsfDisallowIncomingCheck}, + {"disallowIncomingPayChan", lsfDisallowIncomingPayChan}, + {"disallowIncomingTrustline", lsfDisallowIncomingTrustline}}}; + auto const sleAccepted = ledger->read(keylet::account(accountID)); if (sleAccepted) { @@ -94,6 +116,17 @@ doAccountInfo(RPC::JsonContext& context) RPC::injectSLE(jvAccepted, *sleAccepted); result[jss::account_data] = jvAccepted; + Json::Value acctFlags{Json::objectValue}; + for (auto const& lsf : lsFlags) + acctFlags[lsf.first.data()] = sleAccepted->isFlag(lsf.second); + + if (ledger->rules().enabled(featureDisallowIncoming)) + { + for (auto const& lsf : disallowIncomingFlags) + acctFlags[lsf.first.data()] = sleAccepted->isFlag(lsf.second); + } + result[jss::account_flags] = std::move(acctFlags); + // Return SignerList(s) if that is requested. if (params.isMember(jss::signer_lists) && params[jss::signer_lists].asBool()) diff --git a/src/test/rpc/AccountInfo_test.cpp b/src/test/rpc/AccountInfo_test.cpp index 9772a0ffb70..0cda0632edc 100644 --- a/src/test/rpc/AccountInfo_test.cpp +++ b/src/test/rpc/AccountInfo_test.cpp @@ -46,7 +46,7 @@ class AccountInfo_test : public beast::unit_test::suite "Missing field 'account'."); } { - // account_info with a malformed account sting. + // account_info with a malformed account string. 
auto const info = env.rpc( "json", "account_info", @@ -491,6 +491,100 @@ class AccountInfo_test : public beast::unit_test::suite } } + void + testAccountFlags(FeatureBitset const& features) + { + using namespace jtx; + + Env env(*this, features); + Account const alice{"alice"}; + env.fund(XRP(1000), alice); + + auto getAccountFlag = [&env, &alice](std::string_view fName) { + auto const info = env.rpc( + "json", + "account_info", + R"({"account" : ")" + alice.human() + R"("})"); + + std::optional res; + if (info[jss::result][jss::status] == "success" && + info[jss::result][jss::account_flags].isMember(fName.data())) + res.emplace(info[jss::result][jss::account_flags][fName.data()] + .asBool()); + + return res; + }; + + static constexpr std:: + array, 7> + asFlags{ + {{"defaultRipple", asfDefaultRipple}, + {"depositAuth", asfDepositAuth}, + {"disallowIncomingXRP", asfDisallowXRP}, + {"globalFreeze", asfGlobalFreeze}, + {"noFreeze", asfNoFreeze}, + {"requireAuthorization", asfRequireAuth}, + {"requireDestinationTag", asfRequireDest}}}; + + for (auto& asf : asFlags) + { + // Clear a flag and check that account_info returns results + // as expected + env(fclear(alice, asf.second)); + env.close(); + auto const f1 = getAccountFlag(asf.first); + BEAST_EXPECT(f1.has_value()); + BEAST_EXPECT(!f1.value()); + + // Set a flag and check that account_info returns results + // as expected + env(fset(alice, asf.second)); + env.close(); + auto const f2 = getAccountFlag(asf.first); + BEAST_EXPECT(f2.has_value()); + BEAST_EXPECT(f2.value()); + } + + static constexpr std:: + array, 4> + disallowIncomingFlags{ + {{"disallowIncomingCheck", asfDisallowIncomingCheck}, + {"disallowIncomingNFTokenOffer", + asfDisallowIncomingNFTokenOffer}, + {"disallowIncomingPayChan", asfDisallowIncomingPayChan}, + {"disallowIncomingTrustline", + asfDisallowIncomingTrustline}}}; + + if (features[featureDisallowIncoming]) + { + for (auto& asf : disallowIncomingFlags) + { + // Clear a flag and check that account_info returns results + // as expected + env(fclear(alice, asf.second)); + env.close(); + auto const f1 = getAccountFlag(asf.first); + BEAST_EXPECT(f1.has_value()); + BEAST_EXPECT(!f1.value()); + + // Set a flag and check that account_info returns results + // as expected + env(fset(alice, asf.second)); + env.close(); + auto const f2 = getAccountFlag(asf.first); + BEAST_EXPECT(f2.has_value()); + BEAST_EXPECT(f2.value()); + } + } + else + { + for (auto& asf : disallowIncomingFlags) + { + BEAST_EXPECT(!getAccountFlag(asf.first)); + } + } + } + void run() override { @@ -498,6 +592,11 @@ class AccountInfo_test : public beast::unit_test::suite testSignerLists(); testSignerListsApiVersion2(); testSignerListsV2(); + + FeatureBitset const allFeatures{ + ripple::test::jtx::supported_amendments()}; + testAccountFlags(allFeatures); + testAccountFlags(allFeatures - featureDisallowIncoming); } }; From 2f3f6dcb03661ae75c6607b4925c3c2e05e0eb28 Mon Sep 17 00:00:00 2001 From: drlongle Date: Thu, 30 Mar 2023 20:59:10 +0200 Subject: [PATCH 143/508] Fix ledger_data to return an empty list: (#4398) Change `ledger_data` to return an empty list when all entries are filtered out. When the `type` field is specified for the `ledger_data` method, it is possible that no objects of the specified type are found. This can even occur if those objects exist, but not in the section that the server checked while serving your request. Previously, the `state` field of the response has the value `null`, instead of an empty array `[]`. 
By changing this to an empty array, the response is the same data type so that clients can handle it consistently. For example, in Python, `for entry in state` should now work correctly. It would raise an exception if `state` is `null` (or `None`). This could break client code that explicitly checks for null. However, this fix aligns the response with the documentation, where the `state` field is an array. Fix #4392. --- src/ripple/rpc/handlers/LedgerData.cpp | 4 +++ src/test/rpc/LedgerData_test.cpp | 37 +++++++++++++++++++------- 2 files changed, 32 insertions(+), 9 deletions(-) diff --git a/src/ripple/rpc/handlers/LedgerData.cpp b/src/ripple/rpc/handlers/LedgerData.cpp index 7392b505126..f5433945772 100644 --- a/src/ripple/rpc/handlers/LedgerData.cpp +++ b/src/ripple/rpc/handlers/LedgerData.cpp @@ -94,6 +94,10 @@ doLedgerData(RPC::JsonContext& context) return jvResult; } Json::Value& nodes = jvResult[jss::state]; + if (nodes.type() == Json::nullValue) + { + nodes = Json::Value(Json::arrayValue); + } auto e = lpLedger->sles.end(); for (auto i = lpLedger->sles.upper_bound(key); i != e; ++i) diff --git a/src/test/rpc/LedgerData_test.cpp b/src/test/rpc/LedgerData_test.cpp index ab520181c05..ae57d6dcf92 100644 --- a/src/test/rpc/LedgerData_test.cpp +++ b/src/test/rpc/LedgerData_test.cpp @@ -314,6 +314,34 @@ class LedgerData_test : public beast::unit_test::suite auto const USD = gw["USD"]; env.fund(XRP(100000), gw); + auto makeRequest = [&env](Json::StaticString const& type) { + Json::Value jvParams; + jvParams[jss::ledger_index] = "current"; + jvParams[jss::type] = type; + return env.rpc( + "json", + "ledger_data", + boost::lexical_cast(jvParams))[jss::result]; + }; + + // Assert that state is an empty array. + for (auto const& type : + {jss::amendments, + jss::check, + jss::directory, + jss::fee, + jss::offer, + jss::signer_list, + jss::state, + jss::ticket, + jss::escrow, + jss::payment_channel, + jss::deposit_preauth}) + { + auto const jrr = makeRequest(type); + BEAST_EXPECT(checkArraySize(jrr[jss::state], 0)); + } + int const num_accounts = 10; for (auto i = 0; i < num_accounts; i++) @@ -372,15 +400,6 @@ class LedgerData_test : public beast::unit_test::suite env.close(); // Now fetch each type - auto makeRequest = [&env](Json::StaticString t) { - Json::Value jvParams; - jvParams[jss::ledger_index] = "current"; - jvParams[jss::type] = t; - return env.rpc( - "json", - "ledger_data", - boost::lexical_cast(jvParams))[jss::result]; - }; { // jvParams[jss::type] = "account"; auto const jrr = makeRequest(jss::account); From e6f49040f5c405bc6b481f940a5f889e8a34e179 Mon Sep 17 00:00:00 2001 From: drlongle Date: Fri, 31 Mar 2023 20:18:42 +0200 Subject: [PATCH 144/508] Fix unit test app.LedgerData (#4484) --- src/test/rpc/LedgerData_test.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/src/test/rpc/LedgerData_test.cpp b/src/test/rpc/LedgerData_test.cpp index ae57d6dcf92..96e9d4395ad 100644 --- a/src/test/rpc/LedgerData_test.cpp +++ b/src/test/rpc/LedgerData_test.cpp @@ -329,7 +329,6 @@ class LedgerData_test : public beast::unit_test::suite {jss::amendments, jss::check, jss::directory, - jss::fee, jss::offer, jss::signer_list, jss::state, From f191c911d4d9f1d22727dcd60a912bc476efea49 Mon Sep 17 00:00:00 2001 From: RichardAH Date: Wed, 5 Apr 2023 22:58:55 +0200 Subject: [PATCH 145/508] Add NFTokenPages to account_objects RPC: (#4352) - Include NFTokenPages in account_objects to make it easier to understand an account's Owner Reserve and simplify app development. 
- Update related tests and documentation. - Fix #4347. For info about the Owner Reserve, see https://xrpl.org/reserves.html --------- Co-authored-by: Scott Schurr Co-authored-by: Ed Hennis --- src/ripple/rpc/handlers/AccountObjects.cpp | 1 + src/ripple/rpc/impl/RPCHelpers.cpp | 118 +++++++++-- src/ripple/rpc/impl/RPCHelpers.h | 2 +- src/test/rpc/AccountLinesRPC_test.cpp | 3 +- src/test/rpc/AccountObjects_test.cpp | 230 ++++++++++++++++++++- 5 files changed, 331 insertions(+), 23 deletions(-) diff --git a/src/ripple/rpc/handlers/AccountObjects.cpp b/src/ripple/rpc/handlers/AccountObjects.cpp index 6424c3afd3a..687b8717970 100644 --- a/src/ripple/rpc/handlers/AccountObjects.cpp +++ b/src/ripple/rpc/handlers/AccountObjects.cpp @@ -204,6 +204,7 @@ doAccountObjects(RPC::JsonContext& context) } static constexpr deletionBlockers[] = { {jss::check, ltCHECK}, {jss::escrow, ltESCROW}, + {jss::nft_page, ltNFTOKEN_PAGE}, {jss::payment_channel, ltPAYCHAN}, {jss::state, ltRIPPLE_STATE}}; diff --git a/src/ripple/rpc/impl/RPCHelpers.cpp b/src/ripple/rpc/impl/RPCHelpers.cpp index ad84d7b12f5..44eebdcaba7 100644 --- a/src/ripple/rpc/impl/RPCHelpers.cpp +++ b/src/ripple/rpc/impl/RPCHelpers.cpp @@ -28,13 +28,13 @@ #include #include #include +#include +#include #include #include #include #include -#include - namespace ripple { namespace RPC { @@ -153,10 +153,88 @@ getAccountObjects( AccountID const& account, std::optional> const& typeFilter, uint256 dirIndex, - uint256 const& entryIndex, + uint256 entryIndex, std::uint32_t const limit, Json::Value& jvResult) { + auto typeMatchesFilter = [](std::vector const& typeFilter, + LedgerEntryType ledgerType) { + auto it = std::find(typeFilter.begin(), typeFilter.end(), ledgerType); + return it != typeFilter.end(); + }; + + // if dirIndex != 0, then all NFTs have already been returned. only + // iterate NFT pages if the filter says so AND dirIndex == 0 + bool iterateNFTPages = + (!typeFilter.has_value() || + typeMatchesFilter(typeFilter.value(), ltNFTOKEN_PAGE)) && + dirIndex == beast::zero; + + Keylet const firstNFTPage = keylet::nftpage_min(account); + + // we need to check the marker to see if it is an NFTTokenPage index. + if (iterateNFTPages && entryIndex != beast::zero) + { + // if it is we will try to iterate the pages up to the limit + // and then change over to the owner directory + + if (firstNFTPage.key != (entryIndex & ~nft::pageMask)) + iterateNFTPages = false; + } + + auto& jvObjects = (jvResult[jss::account_objects] = Json::arrayValue); + + // this is a mutable version of limit, used to seemlessly switch + // to iterating directory entries when nftokenpages are exhausted + uint32_t mlimit = limit; + + // iterate NFTokenPages preferentially + if (iterateNFTPages) + { + Keylet const first = entryIndex == beast::zero + ? 
firstNFTPage + : Keylet{ltNFTOKEN_PAGE, entryIndex}; + + Keylet const last = keylet::nftpage_max(account); + + // current key + uint256 ck = ledger.succ(first.key, last.key.next()).value_or(last.key); + + // current page + auto cp = ledger.read(Keylet{ltNFTOKEN_PAGE, ck}); + + while (cp) + { + jvObjects.append(cp->getJson(JsonOptions::none)); + auto const npm = (*cp)[~sfNextPageMin]; + if (npm) + cp = ledger.read(Keylet(ltNFTOKEN_PAGE, *npm)); + else + cp = nullptr; + + if (--mlimit == 0) + { + if (cp) + { + jvResult[jss::limit] = limit; + jvResult[jss::marker] = std::string("0,") + to_string(ck); + return true; + } + } + + if (!npm) + break; + + ck = *npm; + } + + // if execution reaches here then we're about to transition + // to iterating the root directory (and the conventional + // behaviour of this RPC function.) Therefore we should + // zero entryIndex so as not to terribly confuse things. + entryIndex = beast::zero; + } + auto const root = keylet::ownerDir(account); auto found = false; @@ -168,10 +246,13 @@ getAccountObjects( auto dir = ledger.read({ltDIR_NODE, dirIndex}); if (!dir) - return false; + { + // it's possible the user had nftoken pages but no + // directory entries + return mlimit < limit; + } std::uint32_t i = 0; - auto& jvObjects = (jvResult[jss::account_objects] = Json::arrayValue); for (;;) { auto const& entries = dir->getFieldV256(sfIndexes); @@ -186,25 +267,27 @@ getAccountObjects( found = true; } + // it's possible that the returned NFTPages exactly filled the + // response. Check for that condition. + if (i == mlimit && mlimit < limit) + { + jvResult[jss::limit] = limit; + jvResult[jss::marker] = + to_string(dirIndex) + ',' + to_string(*iter); + return true; + } + for (; iter != entries.end(); ++iter) { auto const sleNode = ledger.read(keylet::child(*iter)); - auto typeMatchesFilter = - [](std::vector const& typeFilter, - LedgerEntryType ledgerType) { - auto it = std::find( - typeFilter.begin(), typeFilter.end(), ledgerType); - return it != typeFilter.end(); - }; - if (!typeFilter.has_value() || typeMatchesFilter(typeFilter.value(), sleNode->getType())) { jvObjects.append(sleNode->getJson(JsonOptions::none)); } - if (++i == limit) + if (++i == mlimit) { if (++iter != entries.end()) { @@ -227,7 +310,7 @@ getAccountObjects( if (!dir) return true; - if (i == limit) + if (i == mlimit) { auto const& e = dir->getFieldV256(sfIndexes); if (!e.empty()) @@ -898,7 +981,7 @@ chooseLedgerEntryType(Json::Value const& params) std::pair result{RPC::Status::OK, ltANY}; if (params.isMember(jss::type)) { - static constexpr std::array, 14> + static constexpr std::array, 15> types{ {{jss::account, ltACCOUNT_ROOT}, {jss::amendments, ltAMENDMENTS}, @@ -913,7 +996,8 @@ chooseLedgerEntryType(Json::Value const& params) {jss::signer_list, ltSIGNER_LIST}, {jss::state, ltRIPPLE_STATE}, {jss::ticket, ltTICKET}, - {jss::nft_offer, ltNFTOKEN_OFFER}}}; + {jss::nft_offer, ltNFTOKEN_OFFER}, + {jss::nft_page, ltNFTOKEN_PAGE}}}; auto const& p = params[jss::type]; if (!p.isString()) diff --git a/src/ripple/rpc/impl/RPCHelpers.h b/src/ripple/rpc/impl/RPCHelpers.h index 12f27641ddf..6184b357515 100644 --- a/src/ripple/rpc/impl/RPCHelpers.h +++ b/src/ripple/rpc/impl/RPCHelpers.h @@ -106,7 +106,7 @@ getAccountObjects( AccountID const& account, std::optional> const& typeFilter, uint256 dirIndex, - uint256 const& entryIndex, + uint256 entryIndex, std::uint32_t const limit, Json::Value& jvResult); diff --git a/src/test/rpc/AccountLinesRPC_test.cpp b/src/test/rpc/AccountLinesRPC_test.cpp index 
cdc61922097..1b099f7b7b4 100644 --- a/src/test/rpc/AccountLinesRPC_test.cpp +++ b/src/test/rpc/AccountLinesRPC_test.cpp @@ -679,6 +679,7 @@ class AccountLinesRPC_test : public beast::unit_test::suite auto aliceLines = getNextLine(env, alice, std::nullopt); constexpr std::size_t expectedIterations = 16; constexpr std::size_t expectedLines = 2; + constexpr std::size_t expectedNFTs = 1; std::size_t foundLines = 0; auto hasMarker = [](auto const& aliceLines) { @@ -729,7 +730,7 @@ class AccountLinesRPC_test : public beast::unit_test::suite // this test will need to be updated. BEAST_EXPECT( aliceObjects[jss::result][jss::account_objects].size() == - iterations); + iterations + expectedNFTs); // If ledger object association ever changes, for whatever // reason, this test will need to be updated. BEAST_EXPECTS( diff --git a/src/test/rpc/AccountObjects_test.cpp b/src/test/rpc/AccountObjects_test.cpp index 64e77b9305d..07fd5d6ddd7 100644 --- a/src/test/rpc/AccountObjects_test.cpp +++ b/src/test/rpc/AccountObjects_test.cpp @@ -294,7 +294,7 @@ class AccountObjects_test : public beast::unit_test::suite { Json::Value params; params[jss::account] = bob.human(); - params[jss::type] = "state"; + params[jss::type] = jss::state; auto resp = env.rpc("json", "account_objects", to_string(params)); BEAST_EXPECT(!resp.isMember(jss::marker)); @@ -321,14 +321,217 @@ class AccountObjects_test : public beast::unit_test::suite auto& aobj = aobjs[0U]; if (i < 3) BEAST_EXPECT(resp[jss::result][jss::limit] == 1); + else + BEAST_EXPECT(!resp[jss::result].isMember(jss::limit)); aobj.removeMember("PreviousTxnID"); aobj.removeMember("PreviousTxnLgrSeq"); BEAST_EXPECT(aobj == bobj[i]); - auto resume_marker = resp[jss::result][jss::marker]; - params[jss::marker] = resume_marker; + params[jss::marker] = resp[jss::result][jss::marker]; + } + } + } + + void + testUnsteppedThenSteppedWithNFTs() + { + // The preceding test case, unsteppedThenStepped(), found a bug in the + // support for NFToken Pages. So we're leaving that test alone when + // adding tests to exercise NFTokenPages. + testcase("unsteppedThenSteppedWithNFTs"); + + using namespace jtx; + Env env(*this); + + Account const gw1{"G1"}; + Account const gw2{"G2"}; + Account const bob{"bob"}; + + auto const USD1 = gw1["USD"]; + auto const USD2 = gw2["USD"]; + + env.fund(XRP(1000), gw1, gw2, bob); + env.close(); + + // Check behavior if there are no account objects. + { + // Unpaged + Json::Value params; + params[jss::account] = bob.human(); + auto resp = env.rpc("json", "account_objects", to_string(params)); + BEAST_EXPECT(!resp.isMember(jss::marker)); + BEAST_EXPECT(resp[jss::result][jss::account_objects].size() == 0); + + // Limit == 1 + params[jss::limit] = 1; + resp = env.rpc("json", "account_objects", to_string(params)); + BEAST_EXPECT(!resp.isMember(jss::marker)); + BEAST_EXPECT(resp[jss::result][jss::account_objects].size() == 0); + } + + // Check behavior if there are only NFTokens. + env(token::mint(bob, 0u), txflags(tfTransferable)); + env.close(); + + // test 'unstepped' + // i.e. 
request account objects without explicit limit/marker paging + Json::Value unpaged; + { + Json::Value params; + params[jss::account] = bob.human(); + auto resp = env.rpc("json", "account_objects", to_string(params)); + BEAST_EXPECT(!resp.isMember(jss::marker)); + + unpaged = resp[jss::result][jss::account_objects]; + BEAST_EXPECT(unpaged.size() == 1); + } + // test request with type parameter as filter, unstepped + { + Json::Value params; + params[jss::account] = bob.human(); + params[jss::type] = jss::nft_page; + auto resp = env.rpc("json", "account_objects", to_string(params)); + BEAST_EXPECT(!resp.isMember(jss::marker)); + Json::Value& aobjs = resp[jss::result][jss::account_objects]; + BEAST_EXPECT(aobjs.size() == 1); + BEAST_EXPECT( + aobjs[0u][sfLedgerEntryType.jsonName] == jss::NFTokenPage); + BEAST_EXPECT(aobjs[0u][sfNFTokens.jsonName].size() == 1); + } + // test stepped one-at-a-time with limit=1, resume from prev marker + { + Json::Value params; + params[jss::account] = bob.human(); + params[jss::limit] = 1; + + Json::Value resp = + env.rpc("json", "account_objects", to_string(params)); + Json::Value& aobjs = resp[jss::result][jss::account_objects]; + BEAST_EXPECT(aobjs.size() == 1); + auto& aobj = aobjs[0U]; + BEAST_EXPECT(!resp[jss::result].isMember(jss::limit)); + BEAST_EXPECT(!resp[jss::result].isMember(jss::marker)); + + BEAST_EXPECT(aobj == unpaged[0u]); + } + + // Add more objects in addition to the NFToken Page. + env.trust(USD1(1000), bob); + env.trust(USD2(1000), bob); + + env(pay(gw1, bob, USD1(1000))); + env(pay(gw2, bob, USD2(1000))); + + env(offer(bob, XRP(100), bob["USD"](1)), txflags(tfPassive)); + env(offer(bob, XRP(100), USD1(1)), txflags(tfPassive)); + env.close(); + + // test 'unstepped' + { + Json::Value params; + params[jss::account] = bob.human(); + auto resp = env.rpc("json", "account_objects", to_string(params)); + BEAST_EXPECT(!resp.isMember(jss::marker)); + + unpaged = resp[jss::result][jss::account_objects]; + BEAST_EXPECT(unpaged.size() == 5); + } + // test request with type parameter as filter, unstepped + { + Json::Value params; + params[jss::account] = bob.human(); + params[jss::type] = jss::nft_page; + auto resp = env.rpc("json", "account_objects", to_string(params)); + BEAST_EXPECT(!resp.isMember(jss::marker)); + Json::Value& aobjs = resp[jss::result][jss::account_objects]; + BEAST_EXPECT(aobjs.size() == 1); + BEAST_EXPECT( + aobjs[0u][sfLedgerEntryType.jsonName] == jss::NFTokenPage); + BEAST_EXPECT(aobjs[0u][sfNFTokens.jsonName].size() == 1); + } + // test stepped one-at-a-time with limit=1, resume from prev marker + { + Json::Value params; + params[jss::account] = bob.human(); + params[jss::limit] = 1; + for (int i = 0; i < 5; ++i) + { + Json::Value resp = + env.rpc("json", "account_objects", to_string(params)); + Json::Value& aobjs = resp[jss::result][jss::account_objects]; + BEAST_EXPECT(aobjs.size() == 1); + auto& aobj = aobjs[0U]; + if (i < 4) + { + BEAST_EXPECT(resp[jss::result][jss::limit] == 1); + BEAST_EXPECT(resp[jss::result].isMember(jss::marker)); + } + else + { + BEAST_EXPECT(!resp[jss::result].isMember(jss::limit)); + BEAST_EXPECT(!resp[jss::result].isMember(jss::marker)); + } + + BEAST_EXPECT(aobj == unpaged[i]); + + params[jss::marker] = resp[jss::result][jss::marker]; + } + } + + // Make sure things still work if there is more than 1 NFT Page. 
+ for (int i = 0; i < 32; ++i) + { + env(token::mint(bob, 0u), txflags(tfTransferable)); + env.close(); + } + // test 'unstepped' + { + Json::Value params; + params[jss::account] = bob.human(); + auto resp = env.rpc("json", "account_objects", to_string(params)); + BEAST_EXPECT(!resp.isMember(jss::marker)); + + unpaged = resp[jss::result][jss::account_objects]; + BEAST_EXPECT(unpaged.size() == 6); + } + // test request with type parameter as filter, unstepped + { + Json::Value params; + params[jss::account] = bob.human(); + params[jss::type] = jss::nft_page; + auto resp = env.rpc("json", "account_objects", to_string(params)); + BEAST_EXPECT(!resp.isMember(jss::marker)); + Json::Value& aobjs = resp[jss::result][jss::account_objects]; + BEAST_EXPECT(aobjs.size() == 2); + } + // test stepped one-at-a-time with limit=1, resume from prev marker + { + Json::Value params; + params[jss::account] = bob.human(); + params[jss::limit] = 1; + for (int i = 0; i < 6; ++i) + { + Json::Value resp = + env.rpc("json", "account_objects", to_string(params)); + Json::Value& aobjs = resp[jss::result][jss::account_objects]; + BEAST_EXPECT(aobjs.size() == 1); + auto& aobj = aobjs[0U]; + if (i < 5) + { + BEAST_EXPECT(resp[jss::result][jss::limit] == 1); + BEAST_EXPECT(resp[jss::result].isMember(jss::marker)); + } + else + { + BEAST_EXPECT(!resp[jss::result].isMember(jss::limit)); + BEAST_EXPECT(!resp[jss::result].isMember(jss::marker)); + } + + BEAST_EXPECT(aobj == unpaged[i]); + + params[jss::marker] = resp[jss::result][jss::marker]; } } } @@ -376,12 +579,29 @@ class AccountObjects_test : public beast::unit_test::suite BEAST_EXPECT(acct_objs_is_size(acct_objs(gw, jss::escrow), 0)); BEAST_EXPECT(acct_objs_is_size(acct_objs(gw, jss::fee), 0)); BEAST_EXPECT(acct_objs_is_size(acct_objs(gw, jss::hashes), 0)); + BEAST_EXPECT(acct_objs_is_size(acct_objs(gw, jss::nft_page), 0)); BEAST_EXPECT(acct_objs_is_size(acct_objs(gw, jss::offer), 0)); BEAST_EXPECT(acct_objs_is_size(acct_objs(gw, jss::payment_channel), 0)); BEAST_EXPECT(acct_objs_is_size(acct_objs(gw, jss::signer_list), 0)); BEAST_EXPECT(acct_objs_is_size(acct_objs(gw, jss::state), 0)); BEAST_EXPECT(acct_objs_is_size(acct_objs(gw, jss::ticket), 0)); + // gw mints an NFT so we can find it. + uint256 const nftID{token::getNextID(env, gw, 0u, tfTransferable)}; + env(token::mint(gw, 0u), txflags(tfTransferable)); + env.close(); + { + // Find the NFToken page and make sure it's the right one. + Json::Value const resp = acct_objs(gw, jss::nft_page); + BEAST_EXPECT(acct_objs_is_size(resp, 1)); + + auto const& nftPage = resp[jss::result][jss::account_objects][0u]; + BEAST_EXPECT(nftPage[sfNFTokens.jsonName].size() == 1); + BEAST_EXPECT( + nftPage[sfNFTokens.jsonName][0u][sfNFToken.jsonName] + [sfNFTokenID.jsonName] == to_string(nftID)); + } + // Set up a trust line so we can find it. env.trust(USD(1000), alice); env.close(); @@ -510,7 +730,7 @@ class AccountObjects_test : public beast::unit_test::suite auto const& ticket = resp[jss::result][jss::account_objects][0u]; BEAST_EXPECT(ticket[sfAccount.jsonName] == gw.human()); BEAST_EXPECT(ticket[sfLedgerEntryType.jsonName] == jss::Ticket); - BEAST_EXPECT(ticket[sfTicketSequence.jsonName].asUInt() == 12); + BEAST_EXPECT(ticket[sfTicketSequence.jsonName].asUInt() == 13); } { // See how "deletion_blockers_only" handles gw's directory. 
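The stepped tests above resume paging simply by feeding the returned `marker` back into the next request. As the `getAccountObjects` changes earlier in this patch show, that marker is a composite string: `"0,"` followed by the current NFT page key while NFT pages are still being walked, and `"<dirIndex>,<entryIndex>"` once iteration has moved on to the owner directory. A minimal, hypothetical client-side sketch of splitting such a marker (not part of this patch; real markers carry 256-bit hex keys, and `splitMarker` is an illustrative name only):

```
#include <iostream>
#include <optional>
#include <string>
#include <utility>

// Split an account_objects marker of the form "<first>,<second>".
// A first component of "0" means the server stopped while walking
// NFT pages; anything else is the owner directory page to resume at.
std::optional<std::pair<std::string, std::string>>
splitMarker(std::string const& marker)
{
    auto const comma = marker.find(',');
    if (comma == std::string::npos)
        return std::nullopt;
    return std::pair{marker.substr(0, comma), marker.substr(comma + 1)};
}

int
main()
{
    if (auto const parts = splitMarker("0,ABCDEF"))
        std::cout << "resume phase: " << parts->first
                  << ", resume key: " << parts->second << '\n';
}
```

In practice a client never needs to interpret the marker; echoing it back verbatim, as the tests above do, is sufficient.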
@@ -523,6 +743,7 @@ class AccountObjects_test : public beast::unit_test::suite std::vector v{ jss::Escrow.c_str(), jss::Check.c_str(), + jss::NFTokenPage.c_str(), jss::RippleState.c_str(), jss::PayChannel.c_str()}; std::sort(v.begin(), v.end()); @@ -583,6 +804,7 @@ class AccountObjects_test : public beast::unit_test::suite { testErrors(); testUnsteppedThenStepped(); + testUnsteppedThenSteppedWithNFTs(); testObjectTypes(); } }; From 9346842eed474793823468db010ce3c6a4f20859 Mon Sep 17 00:00:00 2001 From: ledhed2222 Date: Thu, 6 Apr 2023 14:33:20 -0400 Subject: [PATCH 146/508] Add jss fields used by Clio `nft_info`: (#4320) Add Clio-specific JSS constants to ensure a common vocabulary of keywords in Clio and this project. By providing visibility of the full API keyword namespace, it reduces the likelihood of developers introducing minor variations on names used by Clio, or unknowingly claiming a keyword that Clio has already claimed. This change moves this project slightly away from having only the code necessary for running the core server, but it is a step toward the goal of keeping this server's and Clio's APIs similar. The added JSS constants are annotated to indicate their relevance to Clio. Clio can be found here: https://github.com/XRPLF/clio Signed-off-by: ledhed2222 --- src/ripple/protocol/jss.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/ripple/protocol/jss.h b/src/ripple/protocol/jss.h index 3932349c82a..1cc06806569 100644 --- a/src/ripple/protocol/jss.h +++ b/src/ripple/protocol/jss.h @@ -309,6 +309,7 @@ JSS(invalid_API_version); // out: Many, when a request has an invalid // version JSS(io_latency_ms); // out: NetworkOPs JSS(ip); // in: Connect, out: OverlayImpl +JSS(is_burned); // out: nft_info (clio) JSS(issuer); // in: RipplePathFind, Subscribe, // Unsubscribe, BookOffers // out: STPathSet, STAmount @@ -413,6 +414,7 @@ JSS(nft_offer); // in: LedgerEntry JSS(nft_offer_index); // out nft_buy_offers, nft_sell_offers JSS(nft_page); // in: LedgerEntry JSS(nft_serial); // out: account_nfts +JSS(nft_taxon); // out: nft_info (clio) JSS(no_ripple); // out: AccountLines JSS(no_ripple_peer); // out: AccountLines JSS(node); // out: LedgerEntry @@ -585,6 +587,7 @@ JSS(transaction); // in: Tx JSS(transaction_hash); // out: RCLCxPeerPos, LedgerToJson JSS(transactions); // out: LedgerToJson, // in: AccountTx*, Unsubscribe +JSS(transfer_rate); // out: nft_info (clio) JSS(transitions); // out: NetworkOPs JSS(treenode_cache_size); // out: GetCounts JSS(treenode_track_size); // out: GetCounts From b7f588b789d132cc8dc0acff05b781bee452e827 Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Tue, 28 Jun 2022 19:03:05 -0700 Subject: [PATCH 147/508] Introduce support for a slabbed allocator: (#4218) When instantiating a large amount of fixed-sized objects on the heap the overhead that dynamic memory allocation APIs impose will quickly become significant. In some cases, allocating a large amount of memory at once and using a slabbing allocator to carve the large block into fixed-sized units that are used to service requests for memory out will help to reduce memory fragmentation significantly and, potentially, improve overall performance. This commit introduces a new `SlabAllocator<>` class that exposes an API that is _similar_ to the C++ concept of an `Allocator` but it is not meant to be a general-purpose allocator. 
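In rough terms, a caller constructs an allocator for one fixed-size type, asks it for a raw buffer, constructs the object in place, and later hands the buffer back. A minimal usage sketch against the header added below (the `Payload` type and the 1 MiB slab size are illustrative assumptions, not something this commit defines):

```
#include <ripple/basics/SlabAllocator.h>

#include <cstdint>
#include <new>

// Hypothetical 64-byte, 8-byte-aligned payload type (not part of this commit):
struct Payload
{
    std::uint64_t words[8];
};

void
example()
{
    // 0 extra bytes per item, 1 MiB slabs carved into Payload-sized chunks:
    ripple::SlabAllocator<Payload> alloc(0, 1024 * 1024);

    if (std::uint8_t* raw = alloc.allocate())
    {
        auto* p = new (raw) Payload{};  // construct in the slab buffer
        p->~Payload();                  // destroy before returning it
        alloc.deallocate(raw);          // back onto the slab's freelist
    }
}
```
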
It should not be used unless profiling and analysis of specific memory allocation patterns indicates that the additional complexity introduced will improve the performance of the system overall, and subsequent profiling proves it. A helper class, `SlabAllocatorSet<>` simplifies handling of variably sized objects that benefit from slab allocations. This commit incorporates improvements suggested by Greg Popovitch (@greg7mdp). Commit 1 of 3 in #4218. --- src/ripple/basics/SlabAllocator.h | 421 ++++++++++++++++++++++++++++++ 1 file changed, 421 insertions(+) create mode 100644 src/ripple/basics/SlabAllocator.h diff --git a/src/ripple/basics/SlabAllocator.h b/src/ripple/basics/SlabAllocator.h new file mode 100644 index 00000000000..c966af318b6 --- /dev/null +++ b/src/ripple/basics/SlabAllocator.h @@ -0,0 +1,421 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright 2022, Nikolaos D. Bougalis + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_BASICS_SLABALLOCATOR_H_INCLUDED +#define RIPPLE_BASICS_SLABALLOCATOR_H_INCLUDED + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#if BOOST_OS_LINUX +#include +#endif + +namespace ripple { + +template +class SlabAllocator +{ + static_assert( + sizeof(Type) >= sizeof(std::uint8_t*), + "SlabAllocator: the requested object must be larger than a pointer."); + + static_assert(alignof(Type) == 8 || alignof(Type) == 4); + + /** A block of memory that is owned by a slab allocator */ + struct SlabBlock + { + // A mutex to protect the freelist for this block: + std::mutex m_; + + // A linked list of appropriately sized free buffers: + std::uint8_t* l_ = nullptr; + + // The next memory block + SlabBlock* next_; + + // The underlying memory block: + std::uint8_t const* const p_ = nullptr; + + // The extent of the underlying memory block: + std::size_t const size_; + + SlabBlock( + SlabBlock* next, + std::uint8_t* data, + std::size_t size, + std::size_t item) + : next_(next), p_(data), size_(size) + { + // We don't need to grab the mutex here, since we're the only + // ones with access at this moment. + + while (data + item <= p_ + size_) + { + *reinterpret_cast(data) = l_; + l_ = data; + data += item; + } + } + + ~SlabBlock() + { + // Calling this destructor will release the allocated memory but + // will not properly destroy any objects that are constructed in + // the block itself. 
+ } + + SlabBlock(SlabBlock const& other) = delete; + SlabBlock& + operator=(SlabBlock const& other) = delete; + + SlabBlock(SlabBlock&& other) = delete; + SlabBlock& + operator=(SlabBlock&& other) = delete; + + /** Determines whether the given pointer belongs to this allocator */ + bool + own(std::uint8_t const* p) const noexcept + { + return (p >= p_) && (p < p_ + size_); + } + + std::uint8_t* + allocate() noexcept + { + std::uint8_t* ret; + + { + std::lock_guard l(m_); + + ret = l_; + + if (ret) + l_ = *reinterpret_cast(ret); + } + + return ret; + } + + /** Return an item to this allocator's freelist. + + @param ptr The pointer to the chunk of memory being deallocated. + + @note This is a dangerous, private interface; the item being + returned should belong to this allocator. Debug builds + will check and assert if this is not the case. Release + builds will not. + */ + void + deallocate(std::uint8_t* ptr) noexcept + { + assert(own(ptr)); + + std::lock_guard l(m_); + *reinterpret_cast(ptr) = l_; + l_ = ptr; + } + }; + +private: + // A linked list of slabs + std::atomic slabs_ = nullptr; + + // The alignment requirements of the item we're allocating: + std::size_t const itemAlignment_; + + // The size of an item, including the extra bytes requested and + // any padding needed for alignment purposes: + std::size_t const itemSize_; + + // The size of each individual slab: + std::size_t const slabSize_; + +public: + /** Constructs a slab allocator able to allocate objects of a fixed size + + @param count the number of items the slab allocator can allocate; note + that a count of 0 is valid and means that the allocator + is, effectively, disabled. This can be very useful in some + contexts (e.g. when mimimal memory usage is needed) and + allows for graceful failure. + */ + constexpr explicit SlabAllocator( + std::size_t extra, + std::size_t alloc = 0, + std::size_t align = 0) + : itemAlignment_(align ? align : alignof(Type)) + , itemSize_( + boost::alignment::align_up(sizeof(Type) + extra, itemAlignment_)) + , slabSize_(alloc) + { + assert((itemAlignment_ & (itemAlignment_ - 1)) == 0); + } + + SlabAllocator(SlabAllocator const& other) = delete; + SlabAllocator& + operator=(SlabAllocator const& other) = delete; + + SlabAllocator(SlabAllocator&& other) = delete; + SlabAllocator& + operator=(SlabAllocator&& other) = delete; + + ~SlabAllocator() + { + // FIXME: We can't destroy the memory blocks we've allocated, because + // we can't be sure that they are not being used. Cleaning the + // shutdown process up could make this possible. + } + + /** Returns the size of the memory block this allocator returns. */ + constexpr std::size_t + size() const noexcept + { + return itemSize_; + } + + /** Returns a suitably aligned pointer, if one is available. + + @return a pointer to a block of memory from the allocator, or + nullptr if the allocator can't satisfy this request. 
+ */ + std::uint8_t* + allocate() noexcept + { + auto slab = slabs_.load(); + + while (slab != nullptr) + { + if (auto ret = slab->allocate()) + return ret; + + slab = slab->next_; + } + + // No slab can satisfy our request, so we attempt to allocate a new + // one here: + std::size_t size = slabSize_; + + // We want to allocate the memory at a 2 MiB boundary, to make it + // possible to use hugepage mappings on Linux: + auto buf = + boost::alignment::aligned_alloc(megabytes(std::size_t(2)), size); + + // clang-format off + if (!buf) [[unlikely]] + return nullptr; + // clang-format on + +#if BOOST_OS_LINUX + // When allocating large blocks, attempt to leverage Linux's + // transparent hugepage support. It is unclear and difficult + // to accurately determine if doing this impacts performance + // enough to justify using platform-specific tricks. + if (size >= megabytes(std::size_t(4))) + madvise(buf, size, MADV_HUGEPAGE); +#endif + + // We need to carve out a bit of memory for the slab header + // and then align the rest appropriately: + auto slabData = reinterpret_cast( + reinterpret_cast(buf) + sizeof(SlabBlock)); + auto slabSize = size - sizeof(SlabBlock); + + // This operation is essentially guaranteed not to fail but + // let's be careful anyways. + if (!boost::alignment::align( + itemAlignment_, itemSize_, slabData, slabSize)) + { + boost::alignment::aligned_free(buf); + return nullptr; + } + + slab = new (buf) SlabBlock( + slabs_.load(), + reinterpret_cast(slabData), + slabSize, + itemSize_); + + // Link the new slab + while (!slabs_.compare_exchange_weak( + slab->next_, + slab, + std::memory_order_release, + std::memory_order_relaxed)) + { + ; // Nothing to do + } + + return slab->allocate(); + } + + /** Returns the memory block to the allocator. + + @param ptr A pointer to a memory block. + @param size If non-zero, a hint as to the size of the block. + @return true if this memory block belonged to the allocator and has + been released; false otherwise. + */ + bool + deallocate(std::uint8_t* ptr) noexcept + { + assert(ptr); + + for (auto slab = slabs_.load(); slab != nullptr; slab = slab->next_) + { + if (slab->own(ptr)) + { + slab->deallocate(ptr); + return true; + } + } + + return false; + } +}; + +/** A collection of slab allocators of various sizes for a given type. 
*/ +template +class SlabAllocatorSet +{ +private: + // The list of allocators that belong to this set + boost::container::static_vector, 64> allocators_; + + std::size_t maxSize_ = 0; + +public: + class SlabConfig + { + friend class SlabAllocatorSet; + + private: + std::size_t extra; + std::size_t alloc; + std::size_t align; + + public: + constexpr SlabConfig( + std::size_t extra_, + std::size_t alloc_ = 0, + std::size_t align_ = alignof(Type)) + : extra(extra_), alloc(alloc_), align(align_) + { + } + }; + + constexpr SlabAllocatorSet(std::vector cfg) + { + // Ensure that the specified allocators are sorted from smallest to + // largest by size: + std::sort( + std::begin(cfg), + std::end(cfg), + [](SlabConfig const& a, SlabConfig const& b) { + return a.extra < b.extra; + }); + + // We should never have two slabs of the same size + if (std::adjacent_find( + std::begin(cfg), + std::end(cfg), + [](SlabConfig const& a, SlabConfig const& b) { + return a.extra == b.extra; + }) != cfg.end()) + { + throw std::runtime_error( + "SlabAllocatorSet<" + beast::type_name() + + ">: duplicate slab size"); + } + + for (auto const& c : cfg) + { + auto& a = allocators_.emplace_back(c.extra, c.alloc, c.align); + + if (a.size() > maxSize_) + maxSize_ = a.size(); + } + } + + SlabAllocatorSet(SlabAllocatorSet const& other) = delete; + SlabAllocatorSet& + operator=(SlabAllocatorSet const& other) = delete; + + SlabAllocatorSet(SlabAllocatorSet&& other) = delete; + SlabAllocatorSet& + operator=(SlabAllocatorSet&& other) = delete; + + ~SlabAllocatorSet() + { + } + + /** Returns a suitably aligned pointer, if one is available. + + @param extra The number of extra bytes, above and beyond the size of + the object, that should be returned by the allocator. + + @return a pointer to a block of memory, or nullptr if the allocator + can't satisfy this request. + */ + std::uint8_t* + allocate(std::size_t extra) noexcept + { + if (auto const size = sizeof(Type) + extra; size <= maxSize_) + { + for (auto& a : allocators_) + { + if (a.size() >= size) + return a.allocate(); + } + } + + return nullptr; + } + + /** Returns the memory block to the allocator. + + @param ptr A pointer to a memory block. + + @return true if this memory block belonged to one of the allocators + in this set and has been released; false otherwise. + */ + bool + deallocate(std::uint8_t* ptr) noexcept + { + for (auto& a : allocators_) + { + if (a.deallocate(ptr)) + return true; + } + + return false; + } +}; + +} // namespace ripple + +#endif // RIPPLE_BASICS_SLABALLOCATOR_H_INCLUDED From c3acbce82dc5db8765cfc9dfeae36954cec0312f Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Fri, 31 Mar 2023 21:42:07 -0700 Subject: [PATCH 148/508] Optimize `SHAMapItem` and leverage new slab allocator: (#4218) The `SHAMapItem` class contains a variable-sized buffer that holds the serialized data associated with a particular item inside a `SHAMap`. Prior to this commit, the buffer for the serialized data was allocated separately. Coupled with the fact that most instances of `SHAMapItem` were wrapped around a `std::shared_ptr` meant that an instantiation might result in up to three separate memory allocations. This commit switches away from `std::shared_ptr` for `SHAMapItem` and uses `boost::intrusive_ptr` instead, allowing the reference count for an instance to live inside the instance itself. Coupled with using a slab-based allocator to optimize memory allocation for the most commonly sized buffers, the net result is significant memory savings. 
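The intrusive-counting pattern itself is small: the count is a member of the object, and two free functions that `boost::intrusive_ptr` finds through argument-dependent lookup adjust it. A stripped-down sketch with a hypothetical `Widget` (the real `SHAMapItem` below additionally starts its count at one inside `make_shamapitem` and returns its memory to a slab allocator on release):

```
#include <boost/intrusive_ptr.hpp>

#include <atomic>
#include <iostream>

class Widget
{
    mutable std::atomic<int> refcount_{0};

    friend void
    intrusive_ptr_add_ref(Widget const* w)
    {
        ++w->refcount_;
    }

    friend void
    intrusive_ptr_release(Widget const* w)
    {
        if (--w->refcount_ == 0)
            delete w;
    }

public:
    int
    uses() const
    {
        return refcount_.load();
    }
};

int
main()
{
    boost::intrusive_ptr<Widget> a(new Widget);  // count: 0 -> 1
    auto b = a;                                  // count: 1 -> 2
    std::cout << a->uses() << '\n';              // prints 2
}   // both pointers release here; the last one deletes the Widget
```
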
In testing, the reduction in memory usage hovers between 400MB and 650MB. Other scenarios might result in larger savings. In performance testing with NFTs, this commit reduces memory size by about 15% sustained over long duration. Commit 2 of 3 in #4218. --- src/ripple/app/consensus/RCLConsensus.cpp | 11 +- src/ripple/app/consensus/RCLCxTx.h | 11 +- src/ripple/app/ledger/Ledger.cpp | 20 ++- src/ripple/app/ledger/LedgerReplayer.h | 2 +- src/ripple/app/ledger/TransactionMaster.h | 2 +- .../ledger/impl/LedgerReplayMsgHandler.cpp | 26 ++-- src/ripple/app/ledger/impl/LedgerReplayer.cpp | 2 +- .../app/ledger/impl/SkipListAcquire.cpp | 2 +- src/ripple/app/ledger/impl/SkipListAcquire.h | 2 +- .../app/ledger/impl/TransactionMaster.cpp | 2 +- src/ripple/app/misc/AmendmentTable.h | 3 +- src/ripple/app/misc/FeeVoteImpl.cpp | 2 +- src/ripple/app/misc/NegativeUNLVote.cpp | 8 +- src/ripple/consensus/Consensus.h | 2 +- src/ripple/net/ShardDownloader.md | 2 +- src/ripple/nodestore/DeterministicShard.md | 3 +- src/ripple/nodestore/ShardSizeTuning.md | 2 +- src/ripple/nodestore/impl/DecodedBlob.cpp | 2 +- src/ripple/shamap/SHAMap.h | 30 ++-- .../shamap/SHAMapAccountStateLeafNode.h | 4 +- src/ripple/shamap/SHAMapItem.h | 146 ++++++++++++++++-- src/ripple/shamap/SHAMapLeafNode.h | 13 +- src/ripple/shamap/SHAMapTxLeafNode.h | 4 +- src/ripple/shamap/SHAMapTxPlusMetaLeafNode.h | 4 +- src/ripple/shamap/impl/SHAMap.cpp | 28 ++-- src/ripple/shamap/impl/SHAMapDelta.cpp | 38 ++--- src/ripple/shamap/impl/SHAMapLeafNode.cpp | 10 +- src/ripple/shamap/impl/SHAMapSync.cpp | 4 +- src/ripple/shamap/impl/SHAMapTreeNode.cpp | 8 +- src/test/app/LedgerReplay_test.cpp | 4 +- .../consensus/ByzantineFailureSim_test.cpp | 2 +- src/test/csf/Tx.h | 6 + src/test/csf/ledgers.h | 2 +- src/test/shamap/FetchPack_test.cpp | 9 +- src/test/shamap/SHAMapSync_test.cpp | 11 +- src/test/shamap/SHAMap_test.cpp | 44 +++--- 36 files changed, 293 insertions(+), 178 deletions(-) diff --git a/src/ripple/app/consensus/RCLConsensus.cpp b/src/ripple/app/consensus/RCLConsensus.cpp index 4c80694cbbe..e60c8cf37d3 100644 --- a/src/ripple/app/consensus/RCLConsensus.cpp +++ b/src/ripple/app/consensus/RCLConsensus.cpp @@ -181,7 +181,7 @@ RCLConsensus::Adaptor::share(RCLCxTx const& tx) if (app_.getHashRouter().shouldRelay(tx.id())) { JLOG(j_.debug()) << "Relaying disputed tx " << tx.id(); - auto const slice = tx.tx_.slice(); + auto const slice = tx.tx_->slice(); protocol::TMTransaction msg; msg.set_rawtransaction(slice.data(), slice.size()); msg.set_status(protocol::tsNEW); @@ -325,7 +325,7 @@ RCLConsensus::Adaptor::onClose( tx.first->add(s); initialSet->addItem( SHAMapNodeType::tnTRANSACTION_NM, - SHAMapItem(tx.first->getTransactionID(), s.slice())); + make_shamapitem(tx.first->getTransactionID(), s.slice())); } // Add pseudo-transactions to the set @@ -369,7 +369,8 @@ RCLConsensus::Adaptor::onClose( RCLCensorshipDetector::TxIDSeqVec proposed; initialSet->visitLeaves( - [&proposed, seq](std::shared_ptr const& item) { + [&proposed, + seq](boost::intrusive_ptr const& item) { proposed.emplace_back(item->key(), seq); }); @@ -529,7 +530,7 @@ RCLConsensus::Adaptor::doAccept( std::vector accepted; result.txns.map_->visitLeaves( - [&accepted](std::shared_ptr const& item) { + [&accepted](boost::intrusive_ptr const& item) { accepted.push_back(item->key()); }); @@ -604,7 +605,7 @@ RCLConsensus::Adaptor::doAccept( << "Test applying disputed transaction that did" << " not get in " << dispute.tx().id(); - SerialIter sit(dispute.tx().tx_.slice()); + SerialIter 
sit(dispute.tx().tx_->slice()); auto txn = std::make_shared(sit); // Disputed pseudo-transactions that were not accepted diff --git a/src/ripple/app/consensus/RCLCxTx.h b/src/ripple/app/consensus/RCLCxTx.h index f1c34238c26..c6abfdfee94 100644 --- a/src/ripple/app/consensus/RCLCxTx.h +++ b/src/ripple/app/consensus/RCLCxTx.h @@ -42,7 +42,7 @@ class RCLCxTx @param txn The transaction to wrap */ - RCLCxTx(SHAMapItem const& txn) : tx_{txn} + RCLCxTx(boost::intrusive_ptr txn) : tx_(std::move(txn)) { } @@ -50,11 +50,11 @@ class RCLCxTx ID const& id() const { - return tx_.key(); + return tx_->key(); } //! The SHAMapItem that represents the transaction. - SHAMapItem const tx_; + boost::intrusive_ptr tx_; }; /** Represents a set of transactions in RCLConsensus. @@ -90,8 +90,7 @@ class RCLTxSet bool insert(Tx const& t) { - return map_->addItem( - SHAMapNodeType::tnTRANSACTION_NM, SHAMapItem{t.tx_}); + return map_->addItem(SHAMapNodeType::tnTRANSACTION_NM, t.tx_); } /** Remove a transaction from the set. @@ -145,7 +144,7 @@ class RCLTxSet code uses the shared_ptr semantics to know whether the find was successful and properly creates a Tx as needed. */ - std::shared_ptr const& + boost::intrusive_ptr const& find(Tx::ID const& entry) const { return map_->peekItem(entry); diff --git a/src/ripple/app/ledger/Ledger.cpp b/src/ripple/app/ledger/Ledger.cpp index d96bf5939e6..fbe95e9c9d3 100644 --- a/src/ripple/app/ledger/Ledger.cpp +++ b/src/ripple/app/ledger/Ledger.cpp @@ -119,9 +119,8 @@ class Ledger::sles_iter_impl : public sles_type::iter_base sles_type::value_type dereference() const override { - auto const item = *iter_; - SerialIter sit(item.slice()); - return std::make_shared(sit, item.key()); + SerialIter sit(iter_->slice()); + return std::make_shared(sit, iter_->key()); } }; @@ -168,7 +167,7 @@ class Ledger::txs_iter_impl : public txs_type::iter_base txs_type::value_type dereference() const override { - auto const item = *iter_; + auto const& item = *iter_; if (metadata_) return deserializeTxPlusMeta(item); return {deserializeTx(item), nullptr}; @@ -404,8 +403,8 @@ bool Ledger::addSLE(SLE const& sle) { auto const s = sle.getSerializer(); - SHAMapItem item(sle.key(), s.slice()); - return stateMap_->addItem(SHAMapNodeType::tnACCOUNT_STATE, std::move(item)); + return stateMap_->addItem( + SHAMapNodeType::tnACCOUNT_STATE, make_shamapitem(sle.key(), s.slice())); } //------------------------------------------------------------------------------ @@ -565,7 +564,7 @@ Ledger::rawInsert(std::shared_ptr const& sle) sle->add(ss); if (!stateMap_->addGiveItem( SHAMapNodeType::tnACCOUNT_STATE, - std::make_shared(sle->key(), ss.slice()))) + make_shamapitem(sle->key(), ss.slice()))) LogicError("Ledger::rawInsert: key already exists"); } @@ -576,7 +575,7 @@ Ledger::rawReplace(std::shared_ptr const& sle) sle->add(ss); if (!stateMap_->updateGiveItem( SHAMapNodeType::tnACCOUNT_STATE, - std::make_shared(sle->key(), ss.slice()))) + make_shamapitem(sle->key(), ss.slice()))) LogicError("Ledger::rawReplace: key not found"); } @@ -593,8 +592,7 @@ Ledger::rawTxInsert( s.addVL(txn->peekData()); s.addVL(metaData->peekData()); if (!txMap().addGiveItem( - SHAMapNodeType::tnTRANSACTION_MD, - std::make_shared(key, s.slice()))) + SHAMapNodeType::tnTRANSACTION_MD, make_shamapitem(key, s.slice()))) LogicError("duplicate_tx: " + to_string(key)); } @@ -610,7 +608,7 @@ Ledger::rawTxInsertWithHash( Serializer s(txn->getDataLength() + metaData->getDataLength() + 16); s.addVL(txn->peekData()); s.addVL(metaData->peekData()); - auto item = 
std::make_shared(key, s.slice()); + auto item = make_shamapitem(key, s.slice()); auto hash = sha512Half(HashPrefix::txNode, item->slice(), item->key()); if (!txMap().addGiveItem(SHAMapNodeType::tnTRANSACTION_MD, std::move(item))) LogicError("duplicate_tx: " + to_string(key)); diff --git a/src/ripple/app/ledger/LedgerReplayer.h b/src/ripple/app/ledger/LedgerReplayer.h index e9e94548b74..6866250485d 100644 --- a/src/ripple/app/ledger/LedgerReplayer.h +++ b/src/ripple/app/ledger/LedgerReplayer.h @@ -105,7 +105,7 @@ class LedgerReplayer final void gotSkipList( LedgerInfo const& info, - std::shared_ptr const& data); + boost::intrusive_ptr const& data); /** * Process a ledger delta (extracted from a TMReplayDeltaResponse message) diff --git a/src/ripple/app/ledger/TransactionMaster.h b/src/ripple/app/ledger/TransactionMaster.h index a902fadf9d7..9bebac37d73 100644 --- a/src/ripple/app/ledger/TransactionMaster.h +++ b/src/ripple/app/ledger/TransactionMaster.h @@ -68,7 +68,7 @@ class TransactionMaster std::shared_ptr fetch( - std::shared_ptr const& item, + boost::intrusive_ptr const& item, SHAMapNodeType type, std::uint32_t uCommitLedger); diff --git a/src/ripple/app/ledger/impl/LedgerReplayMsgHandler.cpp b/src/ripple/app/ledger/impl/LedgerReplayMsgHandler.cpp index 780b466497c..57c2fd08344 100644 --- a/src/ripple/app/ledger/impl/LedgerReplayMsgHandler.cpp +++ b/src/ripple/app/ledger/impl/LedgerReplayMsgHandler.cpp @@ -163,15 +163,15 @@ LedgerReplayMsgHandler::processProofPathResponse( JLOG(journal_.debug()) << "Bad message: Cannot deserialize"; return false; } - auto item = static_cast(node.get())->peekItem(); - if (!item) + + if (auto item = static_cast(node.get())->peekItem()) { - JLOG(journal_.debug()) << "Bad message: Cannot get ShaMapItem"; - return false; + replayer_.gotSkipList(info, item); + return true; } - replayer_.gotSkipList(info, item); - return true; + JLOG(journal_.debug()) << "Bad message: Cannot get ShaMapItem"; + return false; } protocol::TMReplayDeltaResponse @@ -206,9 +206,10 @@ LedgerReplayMsgHandler::processReplayDeltaRequest( reply.set_ledgerheader(nData.getDataPtr(), nData.getLength()); // pack transactions auto const& txMap = ledger->txMap(); - txMap.visitLeaves([&](std::shared_ptr const& txNode) { - reply.add_transaction(txNode->data(), txNode->size()); - }); + txMap.visitLeaves( + [&](boost::intrusive_ptr const& txNode) { + reply.add_transaction(txNode->data(), txNode->size()); + }); JLOG(journal_.debug()) << "getReplayDelta for ledger " << ledgerHash << " txMap hash " << txMap.getHash().as_uint256(); @@ -264,10 +265,9 @@ LedgerReplayMsgHandler::processReplayDeltaResponse( STObject meta(metaSit, sfMetadata); orderedTxns.emplace(meta[sfTransactionIndex], std::move(tx)); - auto item = - std::make_shared(tid, shaMapItemData.slice()); - if (!item || - !txMap.addGiveItem(SHAMapNodeType::tnTRANSACTION_MD, item)) + if (!txMap.addGiveItem( + SHAMapNodeType::tnTRANSACTION_MD, + make_shamapitem(tid, shaMapItemData.slice()))) { JLOG(journal_.debug()) << "Bad message: Cannot deserialize"; return false; diff --git a/src/ripple/app/ledger/impl/LedgerReplayer.cpp b/src/ripple/app/ledger/impl/LedgerReplayer.cpp index c7aa5d9ca07..903f72dd117 100644 --- a/src/ripple/app/ledger/impl/LedgerReplayer.cpp +++ b/src/ripple/app/ledger/impl/LedgerReplayer.cpp @@ -172,7 +172,7 @@ LedgerReplayer::createDeltas(std::shared_ptr task) void LedgerReplayer::gotSkipList( LedgerInfo const& info, - std::shared_ptr const& item) + boost::intrusive_ptr const& item) { std::shared_ptr skipList = {}; { diff 
--git a/src/ripple/app/ledger/impl/SkipListAcquire.cpp b/src/ripple/app/ledger/impl/SkipListAcquire.cpp index 00340d24bc2..aa9b8564eb3 100644 --- a/src/ripple/app/ledger/impl/SkipListAcquire.cpp +++ b/src/ripple/app/ledger/impl/SkipListAcquire.cpp @@ -137,7 +137,7 @@ SkipListAcquire::pmDowncast() void SkipListAcquire::processData( std::uint32_t ledgerSeq, - std::shared_ptr const& item) + boost::intrusive_ptr const& item) { assert(ledgerSeq != 0 && item); ScopedLockType sl(mtx_); diff --git a/src/ripple/app/ledger/impl/SkipListAcquire.h b/src/ripple/app/ledger/impl/SkipListAcquire.h index 3901e108004..df24d68312c 100644 --- a/src/ripple/app/ledger/impl/SkipListAcquire.h +++ b/src/ripple/app/ledger/impl/SkipListAcquire.h @@ -96,7 +96,7 @@ class SkipListAcquire final void processData( std::uint32_t ledgerSeq, - std::shared_ptr const& item); + boost::intrusive_ptr const& item); /** * Add a callback that will be called when the skipList is ready or failed. diff --git a/src/ripple/app/ledger/impl/TransactionMaster.cpp b/src/ripple/app/ledger/impl/TransactionMaster.cpp index 861a6503e36..c4205887740 100644 --- a/src/ripple/app/ledger/impl/TransactionMaster.cpp +++ b/src/ripple/app/ledger/impl/TransactionMaster.cpp @@ -107,7 +107,7 @@ TransactionMaster::fetch( std::shared_ptr TransactionMaster::fetch( - std::shared_ptr const& item, + boost::intrusive_ptr const& item, SHAMapNodeType type, std::uint32_t uCommitLedger) { diff --git a/src/ripple/app/misc/AmendmentTable.h b/src/ripple/app/misc/AmendmentTable.h index 0a5f6a011ad..10396d8591f 100644 --- a/src/ripple/app/misc/AmendmentTable.h +++ b/src/ripple/app/misc/AmendmentTable.h @@ -172,8 +172,7 @@ class AmendmentTable initialPosition->addGiveItem( SHAMapNodeType::tnTRANSACTION_NM, - std::make_shared( - amendTx.getTransactionID(), s.slice())); + make_shamapitem(amendTx.getTransactionID(), s.slice())); } } }; diff --git a/src/ripple/app/misc/FeeVoteImpl.cpp b/src/ripple/app/misc/FeeVoteImpl.cpp index 048f5a3fc61..0d60dc6b78e 100644 --- a/src/ripple/app/misc/FeeVoteImpl.cpp +++ b/src/ripple/app/misc/FeeVoteImpl.cpp @@ -326,7 +326,7 @@ FeeVoteImpl::doVoting( if (!initialPosition->addGiveItem( SHAMapNodeType::tnTRANSACTION_NM, - std::make_shared(txID, s.slice()))) + make_shamapitem(txID, s.slice()))) { JLOG(journal_.warn()) << "Ledger already had fee change"; } diff --git a/src/ripple/app/misc/NegativeUNLVote.cpp b/src/ripple/app/misc/NegativeUNLVote.cpp index 5c133403b46..aa9db60c33d 100644 --- a/src/ripple/app/misc/NegativeUNLVote.cpp +++ b/src/ripple/app/misc/NegativeUNLVote.cpp @@ -20,6 +20,7 @@ #include #include #include +#include namespace ripple { @@ -115,12 +116,11 @@ NegativeUNLVote::addTx( obj.setFieldVL(sfUNLModifyValidator, vp.slice()); }); - uint256 txID = negUnlTx.getTransactionID(); Serializer s; negUnlTx.add(s); if (!initialSet->addGiveItem( SHAMapNodeType::tnTRANSACTION_NM, - std::make_shared(txID, s.slice()))) + make_shamapitem(negUnlTx.getTransactionID(), s.slice()))) { JLOG(j_.warn()) << "N-UNL: ledger seq=" << seq << ", add ttUNL_MODIFY tx failed"; @@ -128,8 +128,8 @@ NegativeUNLVote::addTx( else { JLOG(j_.debug()) << "N-UNL: ledger seq=" << seq - << ", add a ttUNL_MODIFY Tx with txID: " << txID - << ", the validator to " + << ", add a ttUNL_MODIFY Tx with txID: " + << negUnlTx.getTransactionID() << ", the validator to " << (modify == ToDisable ? 
"disable: " : "re-enable: ") << vp; } diff --git a/src/ripple/consensus/Consensus.h b/src/ripple/consensus/Consensus.h index 1d02e0f13b8..71ceed71431 100644 --- a/src/ripple/consensus/Consensus.h +++ b/src/ripple/consensus/Consensus.h @@ -1643,7 +1643,7 @@ Consensus::createDisputes(TxSet_t const& o) (inThisSet && result_->txns.find(txId) && !o.find(txId)) || (!inThisSet && !result_->txns.find(txId) && o.find(txId))); - Tx_t tx = inThisSet ? *result_->txns.find(txId) : *o.find(txId); + Tx_t tx = inThisSet ? result_->txns.find(txId) : o.find(txId); auto txID = tx.id(); if (result_->disputes.find(txID) != result_->disputes.end()) diff --git a/src/ripple/net/ShardDownloader.md b/src/ripple/net/ShardDownloader.md index 9d8a33ae40e..d961df61c65 100644 --- a/src/ripple/net/ShardDownloader.md +++ b/src/ripple/net/ShardDownloader.md @@ -195,7 +195,7 @@ three database entries upon completion. Since downloads execute serially by design, the entries in this table always correspond to the contents of a single file. -| Bytes | Size | Part | +| Bytes | size | Part | |:------:|:----------:|:----:| | 0x... | 2147483647 | 0 | | 0x... | 2147483647 | 1 | diff --git a/src/ripple/nodestore/DeterministicShard.md b/src/ripple/nodestore/DeterministicShard.md index aff733a4fa7..70d0584567b 100644 --- a/src/ripple/nodestore/DeterministicShard.md +++ b/src/ripple/nodestore/DeterministicShard.md @@ -22,7 +22,7 @@ uint64 Appnum Application defined constant uint16 KeySize Key size in bytes uint64 Salt A random seed uint64 Pepper The salt hashed -uint16 BlockSize Size of a file block in bytes +uint16 BlockSize size of a file block in bytes uint16 LoadFactor Target fraction in 65536ths uint8[56] Reserved Zeroes uint8[] Reserved Zero-pad to block size @@ -160,4 +160,3 @@ Iteration 0: RIPEMD160[nudb.dat] = FAE6AE84C15968B0419FDFC014931EA12A396C71 Iteration 1: RIPEMD160[nudb.key] = F96BF2722AB2EE009FFAE4A36AAFC4F220E21951 Iteration 1: RIPEMD160[nudb.dat] = FAE6AE84C15968B0419FDFC014931EA12A396C71 ``` - diff --git a/src/ripple/nodestore/ShardSizeTuning.md b/src/ripple/nodestore/ShardSizeTuning.md index 3368fb69a7c..bded73c43c5 100644 --- a/src/ripple/nodestore/ShardSizeTuning.md +++ b/src/ripple/nodestore/ShardSizeTuning.md @@ -1,4 +1,4 @@ -# Shard Size Tuning +# Shard size Tuning The purpose of this document is to compare the sizes of shards containing varying amounts of ledgers. 
diff --git a/src/ripple/nodestore/impl/DecodedBlob.cpp b/src/ripple/nodestore/impl/DecodedBlob.cpp index 0c5a5de20c6..debb907b73b 100644 --- a/src/ripple/nodestore/impl/DecodedBlob.cpp +++ b/src/ripple/nodestore/impl/DecodedBlob.cpp @@ -38,7 +38,7 @@ DecodedBlob::DecodedBlob(void const* key, void const* value, int valueBytes) m_success = false; m_key = key; - // VFALCO NOTE Ledger indexes should have started at 1 + // VFALCO NOTE Ledger indexes should hav e started at 1 m_objectType = hotUNKNOWN; m_objectData = nullptr; m_dataBytes = std::max(0, valueBytes - 9); diff --git a/src/ripple/shamap/SHAMap.h b/src/ripple/shamap/SHAMap.h index 2f0a677f972..2170cf020e9 100644 --- a/src/ripple/shamap/SHAMap.h +++ b/src/ripple/shamap/SHAMap.h @@ -120,8 +120,8 @@ class SHAMap static inline constexpr unsigned int leafDepth = 64; using DeltaItem = std::pair< - std::shared_ptr, - std::shared_ptr>; + boost::intrusive_ptr, + boost::intrusive_ptr>; using Delta = std::map; SHAMap(SHAMap const&) = delete; @@ -190,23 +190,27 @@ class SHAMap delItem(uint256 const& id); bool - addItem(SHAMapNodeType type, SHAMapItem&& i); + addItem(SHAMapNodeType type, boost::intrusive_ptr item); SHAMapHash getHash() const; // save a copy if you have a temporary anyway bool - updateGiveItem(SHAMapNodeType type, std::shared_ptr); + updateGiveItem( + SHAMapNodeType type, + boost::intrusive_ptr item); bool - addGiveItem(SHAMapNodeType type, std::shared_ptr item); + addGiveItem( + SHAMapNodeType type, + boost::intrusive_ptr item); // Save a copy if you need to extend the life // of the SHAMapItem beyond this SHAMap - std::shared_ptr const& + boost::intrusive_ptr const& peekItem(uint256 const& id) const; - std::shared_ptr const& + boost::intrusive_ptr const& peekItem(uint256 const& id, SHAMapHash& hash) const; // traverse functions @@ -253,8 +257,8 @@ class SHAMap */ void visitLeaves( - std::function const&)> const&) - const; + std::function< + void(boost::intrusive_ptr const&)> const&) const; // comparison/sync functions @@ -361,8 +365,8 @@ class SHAMap using SharedPtrNodeStack = std::stack, SHAMapNodeID>>; using DeltaRef = std::pair< - std::shared_ptr const&, - std::shared_ptr const&>; + boost::intrusive_ptr, + boost::intrusive_ptr>; // tree node cache operations std::shared_ptr @@ -475,7 +479,7 @@ class SHAMap descendNoStore(std::shared_ptr const&, int branch) const; /** If there is only one leaf below this node, get its contents */ - std::shared_ptr const& + boost::intrusive_ptr const& onlyBelow(SHAMapTreeNode*) const; bool @@ -490,7 +494,7 @@ class SHAMap bool walkBranch( SHAMapTreeNode* node, - std::shared_ptr const& otherMapItem, + boost::intrusive_ptr const& otherMapItem, bool isFirstMap, Delta& differences, int& maxCount) const; diff --git a/src/ripple/shamap/SHAMapAccountStateLeafNode.h b/src/ripple/shamap/SHAMapAccountStateLeafNode.h index 8daaf24953e..45f0c508078 100644 --- a/src/ripple/shamap/SHAMapAccountStateLeafNode.h +++ b/src/ripple/shamap/SHAMapAccountStateLeafNode.h @@ -36,7 +36,7 @@ class SHAMapAccountStateLeafNode final { public: SHAMapAccountStateLeafNode( - std::shared_ptr item, + boost::intrusive_ptr item, std::uint32_t cowid) : SHAMapLeafNode(std::move(item), cowid) { @@ -44,7 +44,7 @@ class SHAMapAccountStateLeafNode final } SHAMapAccountStateLeafNode( - std::shared_ptr item, + boost::intrusive_ptr item, std::uint32_t cowid, SHAMapHash const& hash) : SHAMapLeafNode(std::move(item), cowid, hash) diff --git a/src/ripple/shamap/SHAMapItem.h b/src/ripple/shamap/SHAMapItem.h index 24e6c08c2d5..160cc3cb49d 100644 
--- a/src/ripple/shamap/SHAMapItem.h +++ b/src/ripple/shamap/SHAMapItem.h @@ -20,52 +20,170 @@ #ifndef RIPPLE_SHAMAP_SHAMAPITEM_H_INCLUDED #define RIPPLE_SHAMAP_SHAMAPITEM_H_INCLUDED -#include +#include #include +#include #include #include +#include +#include namespace ripple { // an item stored in a SHAMap class SHAMapItem : public CountedObject { + // These are used to support boost::intrusive_ptr reference counting + // These functions are used internally by boost::intrusive_ptr to handle + // lifetime management. + friend void + intrusive_ptr_add_ref(SHAMapItem const* x); + + friend void + intrusive_ptr_release(SHAMapItem const* x); + + // This is the interface for creating new instances of this class. + friend boost::intrusive_ptr + make_shamapitem(uint256 const& tag, Slice data); + private: - uint256 tag_; - Buffer data_; + uint256 const tag_; -public: - SHAMapItem() = delete; + // We use std::uint32_t to minimize the size; there's no SHAMapItem whose + // size exceeds 4GB and there won't ever be (famous last words?), so this + // is safe. + std::uint32_t const size_; + + // This is the reference count used to support boost::intrusive_ptr + mutable std::atomic refcount_ = 1; - SHAMapItem(uint256 const& tag, Slice data) : tag_(tag), data_(data) + // Because of the unusual way in which SHAMapItem objects are constructed + // the only way to properly create one is to first allocate enough memory + // so we limit this constructor to codepaths that do this right and limit + // arbitrary construction. + SHAMapItem(uint256 const& tag, Slice data) + : tag_(tag), size_(static_cast(data.size())) { + std::memcpy( + reinterpret_cast(this) + sizeof(*this), + data.data(), + data.size()); } +public: + SHAMapItem() = delete; + + SHAMapItem(SHAMapItem const& other) = delete; + + SHAMapItem& + operator=(SHAMapItem const& other) = delete; + + SHAMapItem(SHAMapItem&& other) = delete; + + SHAMapItem& + operator=(SHAMapItem&&) = delete; + uint256 const& key() const { return tag_; } - Slice - slice() const - { - return static_cast(data_); - } - std::size_t size() const { - return data_.size(); + return size_; } void const* data() const { - return data_.data(); + return reinterpret_cast(this) + sizeof(*this); + } + + Slice + slice() const + { + return {data(), size()}; } }; +namespace detail { + +// clang-format off +// The slab cutoffs and the number of megabytes per allocation are customized +// based on the number of objects of each size we expect to need at any point +// in time and with an eye to minimize the number of slack bytes in a block. +inline SlabAllocatorSet slabber({ + { 128, megabytes(std::size_t(60)) }, + { 192, megabytes(std::size_t(46)) }, + { 272, megabytes(std::size_t(60)) }, + { 384, megabytes(std::size_t(56)) }, + { 564, megabytes(std::size_t(40)) }, + { 772, megabytes(std::size_t(46)) }, + { 1052, megabytes(std::size_t(60)) }, +}); +// clang-format on + +} // namespace detail + +inline void +intrusive_ptr_add_ref(SHAMapItem const* x) +{ + // This can only happen if someone releases the last reference to the + // item while we were trying to increment the refcount. + if (x->refcount_++ == 0) + LogicError("SHAMapItem: the reference count is 0!"); +} + +inline void +intrusive_ptr_release(SHAMapItem const* x) +{ + if (--x->refcount_ == 0) + { + auto p = reinterpret_cast(x); + + // The SHAMapItem constuctor isn't trivial (because the destructor + // for CountedObject isn't) so we can't avoid calling it here, but + // plan for a future where we might not need to. 
+ if constexpr (!std::is_trivially_destructible_v) + std::destroy_at(x); + + // If the slabber doens't claim this pointer, it was allocated + // manually, so we free it manually. + if (!detail::slabber.deallocate(const_cast(p))) + delete[] p; + } +} + +inline boost::intrusive_ptr +make_shamapitem(uint256 const& tag, Slice data) +{ + assert(data.size() <= megabytes(16)); + + std::uint8_t* raw = detail::slabber.allocate(data.size()); + + // If we can't grab memory from the slab allocators, we fall back to + // the standard library and try to grab a precisely-sized memory block: + if (raw == nullptr) + raw = new std::uint8_t[sizeof(SHAMapItem) + data.size()]; + + // We do not increment the reference count here on purpose: the + // constructor of SHAMapItem explicitly sets it to 1. We use the fact + // that the refcount can never be zero before incrementing as an + // invariant. + return {new (raw) SHAMapItem{tag, data}, false}; +} + +static_assert(alignof(SHAMapItem) != 40); +static_assert(alignof(SHAMapItem) == 8 || alignof(SHAMapItem) == 4); + +inline boost::intrusive_ptr +make_shamapitem(SHAMapItem const& other) +{ + return make_shamapitem(other.key(), other.slice()); +} + } // namespace ripple #endif diff --git a/src/ripple/shamap/SHAMapLeafNode.h b/src/ripple/shamap/SHAMapLeafNode.h index 776aca76db8..f24d7053cbe 100644 --- a/src/ripple/shamap/SHAMapLeafNode.h +++ b/src/ripple/shamap/SHAMapLeafNode.h @@ -32,11 +32,14 @@ namespace ripple { class SHAMapLeafNode : public SHAMapTreeNode { protected: - std::shared_ptr item_; + boost::intrusive_ptr item_; - SHAMapLeafNode(std::shared_ptr item, std::uint32_t cowid); SHAMapLeafNode( - std::shared_ptr item, + boost::intrusive_ptr item, + std::uint32_t cowid); + + SHAMapLeafNode( + boost::intrusive_ptr item, std::uint32_t cowid, SHAMapHash const& hash); @@ -61,7 +64,7 @@ class SHAMapLeafNode : public SHAMapTreeNode invariants(bool is_root = false) const final override; public: - std::shared_ptr const& + boost::intrusive_ptr const& peekItem() const; /** Set the item that this node points to and update the node's hash. @@ -71,7 +74,7 @@ class SHAMapLeafNode : public SHAMapTreeNode hash was unchanged); true otherwise. 
*/ bool - setItem(std::shared_ptr i); + setItem(boost::intrusive_ptr i); std::string getString(SHAMapNodeID const&) const final override; diff --git a/src/ripple/shamap/SHAMapTxLeafNode.h b/src/ripple/shamap/SHAMapTxLeafNode.h index d9a2e01e9a8..e794a1a8f32 100644 --- a/src/ripple/shamap/SHAMapTxLeafNode.h +++ b/src/ripple/shamap/SHAMapTxLeafNode.h @@ -35,7 +35,7 @@ class SHAMapTxLeafNode final : public SHAMapLeafNode, { public: SHAMapTxLeafNode( - std::shared_ptr item, + boost::intrusive_ptr item, std::uint32_t cowid) : SHAMapLeafNode(std::move(item), cowid) { @@ -43,7 +43,7 @@ class SHAMapTxLeafNode final : public SHAMapLeafNode, } SHAMapTxLeafNode( - std::shared_ptr item, + boost::intrusive_ptr item, std::uint32_t cowid, SHAMapHash const& hash) : SHAMapLeafNode(std::move(item), cowid, hash) diff --git a/src/ripple/shamap/SHAMapTxPlusMetaLeafNode.h b/src/ripple/shamap/SHAMapTxPlusMetaLeafNode.h index be2c2390393..ff32c64e09b 100644 --- a/src/ripple/shamap/SHAMapTxPlusMetaLeafNode.h +++ b/src/ripple/shamap/SHAMapTxPlusMetaLeafNode.h @@ -36,7 +36,7 @@ class SHAMapTxPlusMetaLeafNode final { public: SHAMapTxPlusMetaLeafNode( - std::shared_ptr item, + boost::intrusive_ptr item, std::uint32_t cowid) : SHAMapLeafNode(std::move(item), cowid) { @@ -44,7 +44,7 @@ class SHAMapTxPlusMetaLeafNode final } SHAMapTxPlusMetaLeafNode( - std::shared_ptr item, + boost::intrusive_ptr item, std::uint32_t cowid, SHAMapHash const& hash) : SHAMapLeafNode(std::move(item), cowid, hash) diff --git a/src/ripple/shamap/impl/SHAMap.cpp b/src/ripple/shamap/impl/SHAMap.cpp index ce031003c23..51a11680c93 100644 --- a/src/ripple/shamap/impl/SHAMap.cpp +++ b/src/ripple/shamap/impl/SHAMap.cpp @@ -30,7 +30,7 @@ namespace ripple { [[nodiscard]] std::shared_ptr makeTypedLeaf( SHAMapNodeType type, - std::shared_ptr item, + boost::intrusive_ptr item, std::uint32_t owner) { if (type == SHAMapNodeType::tnTRANSACTION_NM) @@ -512,9 +512,9 @@ SHAMap::firstBelow( return belowHelper(node, stack, branch, {init, cmp, incr}); } -static const std::shared_ptr no_item; +static const boost::intrusive_ptr no_item; -std::shared_ptr const& +boost::intrusive_ptr const& SHAMap::onlyBelow(SHAMapTreeNode* node) const { // If there is only one item below this node, return it @@ -593,7 +593,7 @@ SHAMap::peekNextItem(uint256 const& id, SharedPtrNodeStack& stack) const return nullptr; } -std::shared_ptr const& +boost::intrusive_ptr const& SHAMap::peekItem(uint256 const& id) const { SHAMapLeafNode* leaf = findKey(id); @@ -604,7 +604,7 @@ SHAMap::peekItem(uint256 const& id) const return leaf->peekItem(); } -std::shared_ptr const& +boost::intrusive_ptr const& SHAMap::peekItem(uint256 const& id, SHAMapHash& hash) const { SHAMapLeafNode* leaf = findKey(id); @@ -776,7 +776,9 @@ SHAMap::delItem(uint256 const& id) } bool -SHAMap::addGiveItem(SHAMapNodeType type, std::shared_ptr item) +SHAMap::addGiveItem( + SHAMapNodeType type, + boost::intrusive_ptr item) { assert(state_ != SHAMapState::Immutable); assert(type != SHAMapNodeType::tnINNER); @@ -813,7 +815,7 @@ SHAMap::addGiveItem(SHAMapNodeType type, std::shared_ptr item) // this is a leaf node that has to be made an inner node holding two // items auto leaf = std::static_pointer_cast(node); - std::shared_ptr otherItem = leaf->peekItem(); + auto otherItem = leaf->peekItem(); assert(otherItem && (tag != otherItem->key())); node = std::make_shared(node->cowid()); @@ -844,9 +846,11 @@ SHAMap::addGiveItem(SHAMapNodeType type, std::shared_ptr item) } bool -SHAMap::addItem(SHAMapNodeType type, SHAMapItem&& i) 
+SHAMap::addItem( + SHAMapNodeType type, + boost::intrusive_ptr item) { - return addGiveItem(type, std::make_shared(std::move(i))); + return addGiveItem(type, std::move(item)); } SHAMapHash @@ -864,7 +868,7 @@ SHAMap::getHash() const bool SHAMap::updateGiveItem( SHAMapNodeType type, - std::shared_ptr item) + boost::intrusive_ptr item) { // can't change the tag but can change the hash uint256 tag = item->key(); @@ -889,13 +893,13 @@ SHAMap::updateGiveItem( if (node->getType() != type) { - JLOG(journal_.fatal()) << "SHAMap::setItem: cross-type change!"; + JLOG(journal_.fatal()) << "SHAMap::updateGiveItem: cross-type change!"; return false; } node = unshareNode(std::move(node), nodeID); - if (node->setItem(std::move(item))) + if (node->setItem(item)) dirtyUp(stack, tag, node); return true; diff --git a/src/ripple/shamap/impl/SHAMapDelta.cpp b/src/ripple/shamap/impl/SHAMapDelta.cpp index 896678ef930..ab9e329eb30 100644 --- a/src/ripple/shamap/impl/SHAMapDelta.cpp +++ b/src/ripple/shamap/impl/SHAMapDelta.cpp @@ -37,7 +37,7 @@ namespace ripple { bool SHAMap::walkBranch( SHAMapTreeNode* node, - std::shared_ptr const& otherMapItem, + boost::intrusive_ptr const& otherMapItem, bool isFirstMap, Delta& differences, int& maxCount) const @@ -71,13 +71,11 @@ SHAMap::walkBranch( { // unmatched if (isFirstMap) - differences.insert(std::make_pair( - item->key(), - DeltaRef(item, std::shared_ptr()))); + differences.insert( + std::make_pair(item->key(), DeltaRef(item, nullptr))); else - differences.insert(std::make_pair( - item->key(), - DeltaRef(std::shared_ptr(), item))); + differences.insert( + std::make_pair(item->key(), DeltaRef(nullptr, item))); if (--maxCount <= 0) return false; @@ -110,12 +108,10 @@ SHAMap::walkBranch( // otherMapItem was unmatched, must add if (isFirstMap) // this is first map, so other item is from second differences.insert(std::make_pair( - otherMapItem->key(), - DeltaRef(std::shared_ptr(), otherMapItem))); + otherMapItem->key(), DeltaRef(nullptr, otherMapItem))); else differences.insert(std::make_pair( - otherMapItem->key(), - DeltaRef(otherMapItem, std::shared_ptr()))); + otherMapItem->key(), DeltaRef(otherMapItem, nullptr))); if (--maxCount <= 0) return false; @@ -173,17 +169,13 @@ SHAMap::compare(SHAMap const& otherMap, Delta& differences, int maxCount) const { differences.insert(std::make_pair( ours->peekItem()->key(), - DeltaRef( - ours->peekItem(), - std::shared_ptr()))); + DeltaRef(ours->peekItem(), nullptr))); if (--maxCount <= 0) return false; differences.insert(std::make_pair( other->peekItem()->key(), - DeltaRef( - std::shared_ptr(), - other->peekItem()))); + DeltaRef(nullptr, other->peekItem()))); if (--maxCount <= 0) return false; } @@ -216,11 +208,7 @@ SHAMap::compare(SHAMap const& otherMap, Delta& differences, int maxCount) const // We have a branch, the other tree does not SHAMapTreeNode* iNode = descendThrow(ours, i); if (!walkBranch( - iNode, - std::shared_ptr(), - true, - differences, - maxCount)) + iNode, nullptr, true, differences, maxCount)) return false; } else if (ours->isEmptyBranch(i)) @@ -228,11 +216,7 @@ SHAMap::compare(SHAMap const& otherMap, Delta& differences, int maxCount) const // The other tree has a branch, we do not SHAMapTreeNode* iNode = otherMap.descendThrow(other, i); if (!otherMap.walkBranch( - iNode, - std::shared_ptr(), - false, - differences, - maxCount)) + iNode, nullptr, false, differences, maxCount)) return false; } else // The two trees have different non-empty branches diff --git a/src/ripple/shamap/impl/SHAMapLeafNode.cpp 
b/src/ripple/shamap/impl/SHAMapLeafNode.cpp index 1f1f3c7ff38..8f634cfad88 100644 --- a/src/ripple/shamap/impl/SHAMapLeafNode.cpp +++ b/src/ripple/shamap/impl/SHAMapLeafNode.cpp @@ -24,7 +24,7 @@ namespace ripple { SHAMapLeafNode::SHAMapLeafNode( - std::shared_ptr item, + boost::intrusive_ptr item, std::uint32_t cowid) : SHAMapTreeNode(cowid), item_(std::move(item)) { @@ -32,7 +32,7 @@ SHAMapLeafNode::SHAMapLeafNode( } SHAMapLeafNode::SHAMapLeafNode( - std::shared_ptr item, + boost::intrusive_ptr item, std::uint32_t cowid, SHAMapHash const& hash) : SHAMapTreeNode(cowid, hash), item_(std::move(item)) @@ -40,17 +40,17 @@ SHAMapLeafNode::SHAMapLeafNode( assert(item_->size() >= 12); } -std::shared_ptr const& +boost::intrusive_ptr const& SHAMapLeafNode::peekItem() const { return item_; } bool -SHAMapLeafNode::setItem(std::shared_ptr i) +SHAMapLeafNode::setItem(boost::intrusive_ptr item) { assert(cowid_ != 0); - item_ = std::move(i); + item_ = std::move(item); auto const oldHash = hash_; diff --git a/src/ripple/shamap/impl/SHAMapSync.cpp b/src/ripple/shamap/impl/SHAMapSync.cpp index 1bada85133d..3f24047eb7a 100644 --- a/src/ripple/shamap/impl/SHAMapSync.cpp +++ b/src/ripple/shamap/impl/SHAMapSync.cpp @@ -25,8 +25,8 @@ namespace ripple { void SHAMap::visitLeaves( - std::function const& item)> const& - leafFunction) const + std::function const& + item)> const& leafFunction) const { visitNodes([&leafFunction](SHAMapTreeNode& node) { if (!node.isInner()) diff --git a/src/ripple/shamap/impl/SHAMapTreeNode.cpp b/src/ripple/shamap/impl/SHAMapTreeNode.cpp index 480a560a2ae..e7645a16a4e 100644 --- a/src/ripple/shamap/impl/SHAMapTreeNode.cpp +++ b/src/ripple/shamap/impl/SHAMapTreeNode.cpp @@ -42,8 +42,8 @@ SHAMapTreeNode::makeTransaction( SHAMapHash const& hash, bool hashValid) { - auto item = std::make_shared( - sha512Half(HashPrefix::transactionID, data), data); + auto item = + make_shamapitem(sha512Half(HashPrefix::transactionID, data), data); if (hashValid) return std::make_shared(std::move(item), 0, hash); @@ -71,7 +71,7 @@ SHAMapTreeNode::makeTransactionWithMeta( s.chop(tag.bytes); - auto item = std::make_shared(tag, s.slice()); + auto item = make_shamapitem(tag, s.slice()); if (hashValid) return std::make_shared( @@ -103,7 +103,7 @@ SHAMapTreeNode::makeAccountState( if (tag.isZero()) Throw("Invalid AS node"); - auto item = std::make_shared(tag, s.slice()); + auto item = make_shamapitem(tag, s.slice()); if (hashValid) return std::make_shared( diff --git a/src/test/app/LedgerReplay_test.cpp b/src/test/app/LedgerReplay_test.cpp index cff94ee045b..b535739353b 100644 --- a/src/test/app/LedgerReplay_test.cpp +++ b/src/test/app/LedgerReplay_test.cpp @@ -1292,8 +1292,8 @@ struct LedgerReplayer_test : public beast::unit_test::suite std::uint8_t payload[55] = { 0x6A, 0x09, 0xE6, 0x67, 0xF3, 0xBC, 0xC9, 0x08, 0xB2}; - auto item = std::make_shared( - uint256(12345), Slice(payload, sizeof(payload))); + auto item = + make_shamapitem(uint256(12345), Slice(payload, sizeof(payload))); skipList->processData(l->seq(), item); std::vector deltaStatuses; diff --git a/src/test/consensus/ByzantineFailureSim_test.cpp b/src/test/consensus/ByzantineFailureSim_test.cpp index 73104ab5e81..a907b7c224e 100644 --- a/src/test/consensus/ByzantineFailureSim_test.cpp +++ b/src/test/consensus/ByzantineFailureSim_test.cpp @@ -78,7 +78,7 @@ class ByzantineFailureSim_test : public beast::unit_test::suite // All peers see some TX 0 for (Peer* peer : network) { - peer->submit(Tx(0)); + peer->submit(Tx{0}); // Peers 0,1,2,6 will close the 
next ledger differently by injecting // a non-consensus approved transaciton if (byzantineNodes.contains(peer)) diff --git a/src/test/csf/Tx.h b/src/test/csf/Tx.h index 5ccd910b80d..d271338141a 100644 --- a/src/test/csf/Tx.h +++ b/src/test/csf/Tx.h @@ -25,6 +25,7 @@ #include #include #include +#include namespace ripple { namespace test { @@ -40,6 +41,11 @@ class Tx { } + template >> + Tx(T const* t) : id_{t->id_} + { + } + ID id() const { diff --git a/src/test/csf/ledgers.h b/src/test/csf/ledgers.h index 635fbec2117..987c5e706c6 100644 --- a/src/test/csf/ledgers.h +++ b/src/test/csf/ledgers.h @@ -348,7 +348,7 @@ struct LedgerHistoryHelper assert(seen.emplace(s.back()).second); Ledger const& parent = (*this)[s.substr(0, s.size() - 1)]; - return ledgers.emplace(s, oracle.accept(parent, ++nextTx)) + return ledgers.emplace(s, oracle.accept(parent, Tx{++nextTx})) .first->second; } }; diff --git a/src/test/shamap/FetchPack_test.cpp b/src/test/shamap/FetchPack_test.cpp index ae1174b4d5e..f15f3163e55 100644 --- a/src/test/shamap/FetchPack_test.cpp +++ b/src/test/shamap/FetchPack_test.cpp @@ -85,13 +85,13 @@ class FetchPack_test : public beast::unit_test::suite beast::Journal mJournal; }; - std::shared_ptr + boost::intrusive_ptr make_random_item(beast::xor_shift_engine& r) { Serializer s; for (int d = 0; d < 3; ++d) s.add32(ripple::rand_int(r)); - return std::make_shared(s.getSHA512Half(), s.slice()); + return make_shamapitem(s.getSHA512Half(), s.slice()); } void @@ -99,9 +99,8 @@ class FetchPack_test : public beast::unit_test::suite { while (n--) { - std::shared_ptr item(make_random_item(r)); - auto const result( - t.addItem(SHAMapNodeType::tnACCOUNT_STATE, std::move(*item))); + auto const result(t.addItem( + SHAMapNodeType::tnACCOUNT_STATE, make_random_item(r))); assert(result); (void)result; } diff --git a/src/test/shamap/SHAMapSync_test.cpp b/src/test/shamap/SHAMapSync_test.cpp index 6b2648a96c9..70e578b5fb5 100644 --- a/src/test/shamap/SHAMapSync_test.cpp +++ b/src/test/shamap/SHAMapSync_test.cpp @@ -34,14 +34,14 @@ class SHAMapSync_test : public beast::unit_test::suite public: beast::xor_shift_engine eng_; - std::shared_ptr + boost::intrusive_ptr makeRandomAS() { Serializer s; for (int d = 0; d < 3; ++d) s.add32(rand_int(eng_)); - return std::make_shared(s.getSHA512Half(), s.slice()); + return make_shamapitem(s.getSHA512Half(), s.slice()); } bool @@ -55,10 +55,10 @@ class SHAMapSync_test : public beast::unit_test::suite for (int i = 0; i < count; ++i) { - std::shared_ptr item = makeRandomAS(); + auto item = makeRandomAS(); items.push_back(item->key()); - if (!map.addItem(SHAMapNodeType::tnACCOUNT_STATE, std::move(*item))) + if (!map.addItem(SHAMapNodeType::tnACCOUNT_STATE, item)) { log << "Unable to add item to map\n"; return false; @@ -97,8 +97,7 @@ class SHAMapSync_test : public beast::unit_test::suite int items = 10000; for (int i = 0; i < items; ++i) { - source.addItem( - SHAMapNodeType::tnACCOUNT_STATE, std::move(*makeRandomAS())); + source.addItem(SHAMapNodeType::tnACCOUNT_STATE, makeRandomAS()); if (i % 100 == 0) source.invariants(); } diff --git a/src/test/shamap/SHAMap_test.cpp b/src/test/shamap/SHAMap_test.cpp index 182b443ce09..83bbc13253e 100644 --- a/src/test/shamap/SHAMap_test.cpp +++ b/src/test/shamap/SHAMap_test.cpp @@ -22,7 +22,6 @@ #include #include #include -#include #include #include @@ -45,10 +44,7 @@ static_assert(std::is_move_assignable{}, ""); static_assert(std::is_nothrow_destructible{}, ""); static_assert(!std::is_default_constructible{}, ""); 
-static_assert(std::is_copy_constructible{}, ""); -static_assert(std::is_copy_assignable{}, ""); -static_assert(std::is_move_constructible{}, ""); -static_assert(std::is_move_assignable{}, ""); +static_assert(!std::is_copy_constructible{}, ""); static_assert(std::is_nothrow_destructible{}, ""); static_assert(std::is_default_constructible{}, ""); @@ -155,37 +151,43 @@ class SHAMap_test : public beast::unit_test::suite if (!backed) sMap.setUnbacked(); - SHAMapItem i1(h1, IntToVUC(1)), i2(h2, IntToVUC(2)), - i3(h3, IntToVUC(3)), i4(h4, IntToVUC(4)), i5(h5, IntToVUC(5)); + auto i1 = make_shamapitem(h1, IntToVUC(1)); + auto i2 = make_shamapitem(h2, IntToVUC(2)); + auto i3 = make_shamapitem(h3, IntToVUC(3)); + auto i4 = make_shamapitem(h4, IntToVUC(4)); + auto i5 = make_shamapitem(h5, IntToVUC(5)); + unexpected( - !sMap.addItem(SHAMapNodeType::tnTRANSACTION_NM, SHAMapItem{i2}), + !sMap.addItem( + SHAMapNodeType::tnTRANSACTION_NM, make_shamapitem(*i2)), "no add"); sMap.invariants(); unexpected( - !sMap.addItem(SHAMapNodeType::tnTRANSACTION_NM, SHAMapItem{i1}), + !sMap.addItem( + SHAMapNodeType::tnTRANSACTION_NM, make_shamapitem(*i1)), "no add"); sMap.invariants(); auto i = sMap.begin(); auto e = sMap.end(); - unexpected(i == e || (*i != i1), "bad traverse"); + unexpected(i == e || (*i != *i1), "bad traverse"); ++i; - unexpected(i == e || (*i != i2), "bad traverse"); + unexpected(i == e || (*i != *i2), "bad traverse"); ++i; unexpected(i != e, "bad traverse"); - sMap.addItem(SHAMapNodeType::tnTRANSACTION_NM, SHAMapItem{i4}); + sMap.addItem(SHAMapNodeType::tnTRANSACTION_NM, make_shamapitem(*i4)); sMap.invariants(); - sMap.delItem(i2.key()); + sMap.delItem(i2->key()); sMap.invariants(); - sMap.addItem(SHAMapNodeType::tnTRANSACTION_NM, SHAMapItem{i3}); + sMap.addItem(SHAMapNodeType::tnTRANSACTION_NM, make_shamapitem(*i3)); sMap.invariants(); i = sMap.begin(); e = sMap.end(); - unexpected(i == e || (*i != i1), "bad traverse"); + unexpected(i == e || (*i != *i1), "bad traverse"); ++i; - unexpected(i == e || (*i != i3), "bad traverse"); + unexpected(i == e || (*i != *i3), "bad traverse"); ++i; - unexpected(i == e || (*i != i4), "bad traverse"); + unexpected(i == e || (*i != *i4), "bad traverse"); ++i; unexpected(i != e, "bad traverse"); @@ -265,9 +267,9 @@ class SHAMap_test : public beast::unit_test::suite BEAST_EXPECT(map.getHash() == beast::zero); for (int k = 0; k < keys.size(); ++k) { - SHAMapItem item(keys[k], IntToVUC(k)); BEAST_EXPECT(map.addItem( - SHAMapNodeType::tnTRANSACTION_NM, std::move(item))); + SHAMapNodeType::tnTRANSACTION_NM, + make_shamapitem(keys[k], IntToVUC(k)))); BEAST_EXPECT(map.getHash().as_uint256() == hashes[k]); map.invariants(); } @@ -312,7 +314,7 @@ class SHAMap_test : public beast::unit_test::suite { map.addItem( SHAMapNodeType::tnTRANSACTION_NM, - SHAMapItem{k, IntToVUC(0)}); + make_shamapitem(k, IntToVUC(0))); map.invariants(); } @@ -346,7 +348,7 @@ class SHAMapPathProof_test : public beast::unit_test::suite uint256 k(c); map.addItem( SHAMapNodeType::tnACCOUNT_STATE, - SHAMapItem{k, Slice{k.data(), k.size()}}); + make_shamapitem(k, Slice{k.data(), k.size()})); map.invariants(); auto root = map.getHash().as_uint256(); From 066f91ca07be9a31fad601844af057a0bd0b2508 Mon Sep 17 00:00:00 2001 From: Nik Bougalis Date: Tue, 11 Apr 2023 15:50:25 -0700 Subject: [PATCH 149/508] Avoid using std::shared_ptr when not necessary: (#4218) The `Ledger` class contains two `SHAMap` instances: the state and transaction maps. 
Previously, the maps were dynamically allocated using `std::make_shared` despite the fact that they did not require lifetime management separate from the lifetime of the `Ledger` instance to which they belong. The two `SHAMap` instances are now regular member variables. Some smart pointers and dynamic memory allocation was avoided by using stack-based alternatives. Commit 3 of 3 in #4218. --- src/ripple/app/ledger/Ledger.cpp | 132 +++++++++----------- src/ripple/app/ledger/Ledger.h | 27 ++-- src/ripple/app/ledger/LedgerHistory.cpp | 39 +++--- src/ripple/app/ledger/LedgerHistory.h | 2 +- src/ripple/app/ledger/impl/LedgerToJson.cpp | 12 +- src/ripple/app/rdb/impl/UnitaryShard.cpp | 16 +-- src/ripple/nodestore/impl/DecodedBlob.cpp | 1 - src/ripple/protocol/impl/TxMeta.cpp | 6 +- src/ripple/shamap/SHAMap.h | 4 + src/ripple/shamap/impl/SHAMap.cpp | 49 ++++---- 10 files changed, 142 insertions(+), 146 deletions(-) diff --git a/src/ripple/app/ledger/Ledger.cpp b/src/ripple/app/ledger/Ledger.cpp index fbe95e9c9d3..7cd6f89cad3 100644 --- a/src/ripple/app/ledger/Ledger.cpp +++ b/src/ripple/app/ledger/Ledger.cpp @@ -182,8 +182,8 @@ Ledger::Ledger( std::vector const& amendments, Family& family) : mImmutable(false) - , txMap_(std::make_shared(SHAMapType::TRANSACTION, family)) - , stateMap_(std::make_shared(SHAMapType::STATE, family)) + , txMap_(SHAMapType::TRANSACTION, family) + , stateMap_(SHAMapType::STATE, family) , rules_{config.features} , j_(beast::Journal(beast::Journal::getNullSink())) { @@ -235,7 +235,7 @@ Ledger::Ledger( rawInsert(sle); } - stateMap_->flushDirty(hotACCOUNT_NODE); + stateMap_.flushDirty(hotACCOUNT_NODE); setImmutable(); } @@ -247,12 +247,8 @@ Ledger::Ledger( Family& family, beast::Journal j) : mImmutable(true) - , txMap_(std::make_shared( - SHAMapType::TRANSACTION, - info.txHash, - family)) - , stateMap_( - std::make_shared(SHAMapType::STATE, info.accountHash, family)) + , txMap_(SHAMapType::TRANSACTION, info.txHash, family) + , stateMap_(SHAMapType::STATE, info.accountHash, family) , rules_(config.features) , info_(info) , j_(j) @@ -260,7 +256,7 @@ Ledger::Ledger( loaded = true; if (info_.txHash.isNonZero() && - !txMap_->fetchRoot(SHAMapHash{info_.txHash}, nullptr)) + !txMap_.fetchRoot(SHAMapHash{info_.txHash}, nullptr)) { if (config.reporting()) { @@ -272,7 +268,7 @@ Ledger::Ledger( } if (info_.accountHash.isNonZero() && - !stateMap_->fetchRoot(SHAMapHash{info_.accountHash}, nullptr)) + !stateMap_.fetchRoot(SHAMapHash{info_.accountHash}, nullptr)) { if (config.reporting()) { @@ -283,8 +279,8 @@ Ledger::Ledger( JLOG(j.warn()) << "Don't have state data root for ledger" << info_.seq; } - txMap_->setImmutable(); - stateMap_->setImmutable(); + txMap_.setImmutable(); + stateMap_.setImmutable(); defaultFees(config); if (!setup()) @@ -301,10 +297,8 @@ Ledger::Ledger( // Create a new ledger that follows this one Ledger::Ledger(Ledger const& prevLedger, NetClock::time_point closeTime) : mImmutable(false) - , txMap_(std::make_shared( - SHAMapType::TRANSACTION, - prevLedger.stateMap_->family())) - , stateMap_(prevLedger.stateMap_->snapShot(true)) + , txMap_(SHAMapType::TRANSACTION, prevLedger.txMap_.family()) + , stateMap_(prevLedger.stateMap_, true) , fees_(prevLedger.fees_) , rules_(prevLedger.rules_) , j_(beast::Journal(beast::Journal::getNullSink())) @@ -333,12 +327,8 @@ Ledger::Ledger(Ledger const& prevLedger, NetClock::time_point closeTime) Ledger::Ledger(LedgerInfo const& info, Config const& config, Family& family) : mImmutable(true) - , txMap_(std::make_shared( - 
SHAMapType::TRANSACTION, - info.txHash, - family)) - , stateMap_( - std::make_shared(SHAMapType::STATE, info.accountHash, family)) + , txMap_(SHAMapType::TRANSACTION, info.txHash, family) + , stateMap_(SHAMapType::STATE, info.accountHash, family) , rules_{config.features} , info_(info) , j_(beast::Journal(beast::Journal::getNullSink())) @@ -352,8 +342,8 @@ Ledger::Ledger( Config const& config, Family& family) : mImmutable(false) - , txMap_(std::make_shared(SHAMapType::TRANSACTION, family)) - , stateMap_(std::make_shared(SHAMapType::STATE, family)) + , txMap_(SHAMapType::TRANSACTION, family) + , stateMap_(SHAMapType::STATE, family) , rules_{config.features} , j_(beast::Journal(beast::Journal::getNullSink())) { @@ -371,16 +361,16 @@ Ledger::setImmutable(bool rehash) // place the hash transitions to valid if (!mImmutable && rehash) { - info_.txHash = txMap_->getHash().as_uint256(); - info_.accountHash = stateMap_->getHash().as_uint256(); + info_.txHash = txMap_.getHash().as_uint256(); + info_.accountHash = stateMap_.getHash().as_uint256(); } if (rehash) info_.hash = calculateLedgerHash(info_); mImmutable = true; - txMap_->setImmutable(); - stateMap_->setImmutable(); + txMap_.setImmutable(); + stateMap_.setImmutable(); setup(); } @@ -403,7 +393,7 @@ bool Ledger::addSLE(SLE const& sle) { auto const s = sle.getSerializer(); - return stateMap_->addItem( + return stateMap_.addItem( SHAMapNodeType::tnACCOUNT_STATE, make_shamapitem(sle.key(), s.slice())); } @@ -439,20 +429,20 @@ bool Ledger::exists(Keylet const& k) const { // VFALCO NOTE Perhaps check the type for debug builds? - return stateMap_->hasItem(k.key); + return stateMap_.hasItem(k.key); } bool Ledger::exists(uint256 const& key) const { - return stateMap_->hasItem(key); + return stateMap_.hasItem(key); } std::optional Ledger::succ(uint256 const& key, std::optional const& last) const { - auto item = stateMap_->upper_bound(key); - if (item == stateMap_->end()) + auto item = stateMap_.upper_bound(key); + if (item == stateMap_.end()) return std::nullopt; if (last && item->key() >= last) return std::nullopt; @@ -467,7 +457,7 @@ Ledger::read(Keylet const& k) const assert(false); return nullptr; } - auto const& item = stateMap_->peekItem(k.key); + auto const& item = stateMap_.peekItem(k.key); if (!item) return nullptr; auto sle = std::make_shared(SerialIter{item->slice()}, item->key()); @@ -481,45 +471,44 @@ Ledger::read(Keylet const& k) const auto Ledger::slesBegin() const -> std::unique_ptr { - return std::make_unique(stateMap_->begin()); + return std::make_unique(stateMap_.begin()); } auto Ledger::slesEnd() const -> std::unique_ptr { - return std::make_unique(stateMap_->end()); + return std::make_unique(stateMap_.end()); } auto Ledger::slesUpperBound(uint256 const& key) const -> std::unique_ptr { - return std::make_unique(stateMap_->upper_bound(key)); + return std::make_unique(stateMap_.upper_bound(key)); } auto Ledger::txsBegin() const -> std::unique_ptr { - return std::make_unique(!open(), txMap_->begin()); + return std::make_unique(!open(), txMap_.begin()); } auto Ledger::txsEnd() const -> std::unique_ptr { - return std::make_unique(!open(), txMap_->end()); + return std::make_unique(!open(), txMap_.end()); } bool Ledger::txExists(uint256 const& key) const { - return txMap_->hasItem(key); + return txMap_.hasItem(key); } auto Ledger::txRead(key_type const& key) const -> tx_type { - assert(txMap_); - auto const& item = txMap_->peekItem(key); + auto const& item = txMap_.peekItem(key); if (!item) return {}; if (!open()) @@ -536,7 +525,7 @@ 
Ledger::digest(key_type const& key) const -> std::optional SHAMapHash digest; // VFALCO Unfortunately this loads the item // from the NodeStore needlessly. - if (!stateMap_->peekItem(key, digest)) + if (!stateMap_.peekItem(key, digest)) return std::nullopt; return digest.as_uint256(); } @@ -546,14 +535,14 @@ Ledger::digest(key_type const& key) const -> std::optional void Ledger::rawErase(std::shared_ptr const& sle) { - if (!stateMap_->delItem(sle->key())) + if (!stateMap_.delItem(sle->key())) LogicError("Ledger::rawErase: key not found"); } void Ledger::rawErase(uint256 const& key) { - if (!stateMap_->delItem(key)) + if (!stateMap_.delItem(key)) LogicError("Ledger::rawErase: key not found"); } @@ -562,7 +551,7 @@ Ledger::rawInsert(std::shared_ptr const& sle) { Serializer ss; sle->add(ss); - if (!stateMap_->addGiveItem( + if (!stateMap_.addGiveItem( SHAMapNodeType::tnACCOUNT_STATE, make_shamapitem(sle->key(), ss.slice()))) LogicError("Ledger::rawInsert: key already exists"); @@ -573,7 +562,7 @@ Ledger::rawReplace(std::shared_ptr const& sle) { Serializer ss; sle->add(ss); - if (!stateMap_->updateGiveItem( + if (!stateMap_.updateGiveItem( SHAMapNodeType::tnACCOUNT_STATE, make_shamapitem(sle->key(), ss.slice()))) LogicError("Ledger::rawReplace: key not found"); @@ -591,7 +580,7 @@ Ledger::rawTxInsert( Serializer s(txn->getDataLength() + metaData->getDataLength() + 16); s.addVL(txn->peekData()); s.addVL(metaData->peekData()); - if (!txMap().addGiveItem( + if (!txMap_.addGiveItem( SHAMapNodeType::tnTRANSACTION_MD, make_shamapitem(key, s.slice()))) LogicError("duplicate_tx: " + to_string(key)); } @@ -610,7 +599,7 @@ Ledger::rawTxInsertWithHash( s.addVL(metaData->peekData()); auto item = make_shamapitem(key, s.slice()); auto hash = sha512Half(HashPrefix::txNode, item->slice(), item->key()); - if (!txMap().addGiveItem(SHAMapNodeType::tnTRANSACTION_MD, std::move(item))) + if (!txMap_.addGiveItem(SHAMapNodeType::tnTRANSACTION_MD, std::move(item))) LogicError("duplicate_tx: " + to_string(key)); return hash; @@ -710,7 +699,7 @@ Ledger::defaultFees(Config const& config) std::shared_ptr Ledger::peek(Keylet const& k) const { - auto const& value = stateMap_->peekItem(k.key); + auto const& value = stateMap_.peekItem(k.key); if (!value) return nullptr; auto sle = std::make_shared(SerialIter{value->slice()}, value->key()); @@ -832,8 +821,8 @@ Ledger::walkLedger(beast::Journal j, bool parallel) const std::vector missingNodes1; std::vector missingNodes2; - if (stateMap_->getHash().isZero() && !info_.accountHash.isZero() && - !stateMap_->fetchRoot(SHAMapHash{info_.accountHash}, nullptr)) + if (stateMap_.getHash().isZero() && !info_.accountHash.isZero() && + !stateMap_.fetchRoot(SHAMapHash{info_.accountHash}, nullptr)) { missingNodes1.emplace_back( SHAMapType::STATE, SHAMapHash{info_.accountHash}); @@ -841,9 +830,9 @@ Ledger::walkLedger(beast::Journal j, bool parallel) const else { if (parallel) - return stateMap_->walkMapParallel(missingNodes1, 32); + return stateMap_.walkMapParallel(missingNodes1, 32); else - stateMap_->walkMap(missingNodes1, 32); + stateMap_.walkMap(missingNodes1, 32); } if (!missingNodes1.empty()) @@ -855,15 +844,15 @@ Ledger::walkLedger(beast::Journal j, bool parallel) const } } - if (txMap_->getHash().isZero() && info_.txHash.isNonZero() && - !txMap_->fetchRoot(SHAMapHash{info_.txHash}, nullptr)) + if (txMap_.getHash().isZero() && info_.txHash.isNonZero() && + !txMap_.fetchRoot(SHAMapHash{info_.txHash}, nullptr)) { missingNodes2.emplace_back( SHAMapType::TRANSACTION, 
SHAMapHash{info_.txHash}); } else { - txMap_->walkMap(missingNodes2, 32); + txMap_.walkMap(missingNodes2, 32); } if (!missingNodes2.empty()) @@ -880,9 +869,9 @@ Ledger::walkLedger(beast::Journal j, bool parallel) const bool Ledger::assertSensible(beast::Journal ledgerJ) const { - if (info_.hash.isNonZero() && info_.accountHash.isNonZero() && stateMap_ && - txMap_ && (info_.accountHash == stateMap_->getHash().as_uint256()) && - (info_.txHash == txMap_->getHash().as_uint256())) + if (info_.hash.isNonZero() && info_.accountHash.isNonZero() && + (info_.accountHash == stateMap_.getHash().as_uint256()) && + (info_.txHash == txMap_.getHash().as_uint256())) { return true; } @@ -1044,15 +1033,14 @@ pendSaveValidated( return true; } - JobType const jobType{isCurrent ? jtPUBLEDGER : jtPUBOLDLEDGER}; - char const* const jobName{ - isCurrent ? "Ledger::pendSave" : "Ledger::pendOldSave"}; - // See if we can use the JobQueue. if (!isSynchronous && - app.getJobQueue().addJob(jobType, jobName, [&app, ledger, isCurrent]() { - saveValidatedLedger(app, ledger, isCurrent); - })) + app.getJobQueue().addJob( + isCurrent ? jtPUBLEDGER : jtPUBOLDLEDGER, + std::to_string(ledger->seq()), + [&app, ledger, isCurrent]() { + saveValidatedLedger(app, ledger, isCurrent); + })) { return true; } @@ -1064,15 +1052,15 @@ pendSaveValidated( void Ledger::unshare() const { - stateMap_->unshare(); - txMap_->unshare(); + stateMap_.unshare(); + txMap_.unshare(); } void Ledger::invariants() const { - stateMap_->invariants(); - txMap_->invariants(); + stateMap_.invariants(); + txMap_.invariants(); } //------------------------------------------------------------------------------ diff --git a/src/ripple/app/ledger/Ledger.h b/src/ripple/app/ledger/Ledger.h index 84e65ecffc7..051b322e27a 100644 --- a/src/ripple/app/ledger/Ledger.h +++ b/src/ripple/app/ledger/Ledger.h @@ -83,6 +83,10 @@ class Ledger final : public std::enable_shared_from_this, Ledger& operator=(Ledger const&) = delete; + Ledger(Ledger&&) = delete; + Ledger& + operator=(Ledger&&) = delete; + /** Create the Genesis ledger. The Genesis ledger contains a single account whose @@ -290,10 +294,10 @@ class Ledger final : public std::enable_shared_from_this, void setFull() const { - txMap_->setFull(); - stateMap_->setFull(); - txMap_->setLedgerSeq(info_.seq); - stateMap_->setLedgerSeq(info_.seq); + txMap_.setFull(); + txMap_.setLedgerSeq(info_.seq); + stateMap_.setFull(); + stateMap_.setLedgerSeq(info_.seq); } void @@ -305,25 +309,25 @@ class Ledger final : public std::enable_shared_from_this, SHAMap const& stateMap() const { - return *stateMap_; + return stateMap_; } SHAMap& stateMap() { - return *stateMap_; + return stateMap_; } SHAMap const& txMap() const { - return *txMap_; + return txMap_; } SHAMap& txMap() { - return *txMap_; + return txMap_; } // returns false on error @@ -401,8 +405,11 @@ class Ledger final : public std::enable_shared_from_this, bool mImmutable; - std::shared_ptr txMap_; - std::shared_ptr stateMap_; + // A SHAMap containing the transactions associated with this ledger. + SHAMap mutable txMap_; + + // A SHAMap containing the state objects for this ledger. 
+ SHAMap mutable stateMap_; // Protects fee variables std::mutex mutable mutex_; diff --git a/src/ripple/app/ledger/LedgerHistory.cpp b/src/ripple/app/ledger/LedgerHistory.cpp index 53c723e1469..ed2ccd07434 100644 --- a/src/ripple/app/ledger/LedgerHistory.cpp +++ b/src/ripple/app/ledger/LedgerHistory.cpp @@ -51,7 +51,9 @@ LedgerHistory::LedgerHistory( } bool -LedgerHistory::insert(std::shared_ptr ledger, bool validated) +LedgerHistory::insert( + std::shared_ptr const& ledger, + bool validated) { if (!ledger->isImmutable()) LogicError("mutable Ledger in insert"); @@ -72,12 +74,9 @@ LedgerHash LedgerHistory::getLedgerHash(LedgerIndex index) { std::unique_lock sl(m_ledgers_by_hash.peekMutex()); - auto it = mLedgersByIndex.find(index); - - if (it != mLedgersByIndex.end()) + if (auto it = mLedgersByIndex.find(index); it != mLedgersByIndex.end()) return it->second; - - return uint256(); + return {}; } std::shared_ptr @@ -167,19 +166,19 @@ log_metadata_difference( uint256 const& tx, beast::Journal j) { - auto getMeta = [](ReadView const& ledger, - uint256 const& txID) -> std::shared_ptr { - auto meta = ledger.txRead(txID).second; - if (!meta) - return {}; - return std::make_shared(txID, ledger.seq(), *meta); + auto getMeta = [](ReadView const& ledger, uint256 const& txID) { + std::optional ret; + if (auto meta = ledger.txRead(txID).second) + ret.emplace(txID, ledger.seq(), *meta); + return ret; }; auto validMetaData = getMeta(validLedger, tx); auto builtMetaData = getMeta(builtLedger, tx); - assert(validMetaData != nullptr || builtMetaData != nullptr); - if (validMetaData != nullptr && builtMetaData != nullptr) + assert(validMetaData || builtMetaData); + + if (validMetaData && builtMetaData) { auto const& validNodes = validMetaData->getNodes(); auto const& builtNodes = builtMetaData->getNodes(); @@ -280,17 +279,21 @@ log_metadata_difference( << validNodes.getJson(JsonOptions::none); } } + + return; } - else if (validMetaData != nullptr) + + if (validMetaData) { JLOG(j.error()) << "MISMATCH on TX " << tx - << ": Metadata Difference (built has none)\n" + << ": Metadata Difference. Valid=\n" << validMetaData->getJson(JsonOptions::none); } - else // builtMetaData != nullptr + + if (builtMetaData) { JLOG(j.error()) << "MISMATCH on TX " << tx - << ": Metadata Difference (valid has none)\n" + << ": Metadata Difference. 
Built=\n" << builtMetaData->getJson(JsonOptions::none); } } diff --git a/src/ripple/app/ledger/LedgerHistory.h b/src/ripple/app/ledger/LedgerHistory.h index be5c559beed..5733ca76375 100644 --- a/src/ripple/app/ledger/LedgerHistory.h +++ b/src/ripple/app/ledger/LedgerHistory.h @@ -44,7 +44,7 @@ class LedgerHistory @return `true` if the ledger was already tracked */ bool - insert(std::shared_ptr ledger, bool validated); + insert(std::shared_ptr const& ledger, bool validated); /** Get the ledgers_by_hash cache hit rate @return the hit rate diff --git a/src/ripple/app/ledger/impl/LedgerToJson.cpp b/src/ripple/app/ledger/impl/LedgerToJson.cpp index e32303c492d..558757d511b 100644 --- a/src/ripple/app/ledger/impl/LedgerToJson.cpp +++ b/src/ripple/app/ledger/impl/LedgerToJson.cpp @@ -131,14 +131,14 @@ fillJsonTx( if (stMeta) { txJson[jss::metaData] = stMeta->getJson(JsonOptions::none); + + // If applicable, insert delivered amount if (txnType == ttPAYMENT || txnType == ttCHECK_CASH) - { - // Insert delivered amount - auto txMeta = std::make_shared( - txn->getTransactionID(), fill.ledger.seq(), *stMeta); RPC::insertDeliveredAmount( - txJson[jss::metaData], fill.ledger, txn, *txMeta); - } + txJson[jss::metaData], + fill.ledger, + txn, + {txn->getTransactionID(), fill.ledger.seq(), *stMeta}); } } diff --git a/src/ripple/app/rdb/impl/UnitaryShard.cpp b/src/ripple/app/rdb/impl/UnitaryShard.cpp index 72441d0b75a..37cbfd55ac3 100644 --- a/src/ripple/app/rdb/impl/UnitaryShard.cpp +++ b/src/ripple/app/rdb/impl/UnitaryShard.cpp @@ -103,22 +103,24 @@ updateLedgerDBs( for (auto const& item : ledger->txs) { - if (stop) + if (stop.load(std::memory_order_relaxed)) return false; - auto const txID{item.first->getTransactionID()}; - auto const sTxID{to_string(txID)}; - auto const txMeta{std::make_shared( - txID, ledger->seq(), *item.second)}; + TxMeta const txMeta{ + item.first->getTransactionID(), + ledger->seq(), + *item.second}; + + auto const sTxID = to_string(txMeta.getTxID()); session << "DELETE FROM AccountTransactions " "WHERE TransID = :txID;", soci::use(sTxID); - auto const& accounts = txMeta->getAffectedAccounts(); + auto const& accounts = txMeta.getAffectedAccounts(); if (!accounts.empty()) { - auto const sTxnSeq{std::to_string(txMeta->getIndex())}; + auto const sTxnSeq{std::to_string(txMeta.getIndex())}; auto const s{boost::str( boost::format("('%s','%s',%s,%s)") % sTxID % "%s" % sSeq % sTxnSeq)}; diff --git a/src/ripple/nodestore/impl/DecodedBlob.cpp b/src/ripple/nodestore/impl/DecodedBlob.cpp index debb907b73b..13175d36295 100644 --- a/src/ripple/nodestore/impl/DecodedBlob.cpp +++ b/src/ripple/nodestore/impl/DecodedBlob.cpp @@ -38,7 +38,6 @@ DecodedBlob::DecodedBlob(void const* key, void const* value, int valueBytes) m_success = false; m_key = key; - // VFALCO NOTE Ledger indexes should hav e started at 1 m_objectType = hotUNKNOWN; m_objectData = nullptr; m_dataBytes = std::max(0, valueBytes - 9); diff --git a/src/ripple/protocol/impl/TxMeta.cpp b/src/ripple/protocol/impl/TxMeta.cpp index 9e199176515..20fa61de2a8 100644 --- a/src/ripple/protocol/impl/TxMeta.cpp +++ b/src/ripple/protocol/impl/TxMeta.cpp @@ -126,8 +126,7 @@ TxMeta::getAffectedAccounts() const if (index != -1) { - const STObject* inner = - dynamic_cast(&it.peekAtIndex(index)); + auto inner = dynamic_cast(&it.peekAtIndex(index)); assert(inner); if (inner) { @@ -145,8 +144,7 @@ TxMeta::getAffectedAccounts() const (field.getFName() == sfTakerPays) || (field.getFName() == sfTakerGets)) { - const STAmount* lim = - dynamic_cast(&field); + 
auto lim = dynamic_cast(&field); assert(lim); if (lim != nullptr) diff --git a/src/ripple/shamap/SHAMap.h b/src/ripple/shamap/SHAMap.h index 2170cf020e9..2d1aa192fc6 100644 --- a/src/ripple/shamap/SHAMap.h +++ b/src/ripple/shamap/SHAMap.h @@ -124,10 +124,14 @@ class SHAMap boost::intrusive_ptr>; using Delta = std::map; + SHAMap() = delete; SHAMap(SHAMap const&) = delete; SHAMap& operator=(SHAMap const&) = delete; + // Take a snapshot of the given map: + SHAMap(SHAMap const& other, bool isMutable); + // build new map SHAMap(SHAMapType t, Family& f); diff --git a/src/ripple/shamap/impl/SHAMap.cpp b/src/ripple/shamap/impl/SHAMap.cpp index 51a11680c93..d6348c86c48 100644 --- a/src/ripple/shamap/impl/SHAMap.cpp +++ b/src/ripple/shamap/impl/SHAMap.cpp @@ -66,28 +66,28 @@ SHAMap::SHAMap(SHAMapType t, uint256 const& hash, Family& f) root_ = std::make_shared(cowid_); } -std::shared_ptr -SHAMap::snapShot(bool isMutable) const +SHAMap::SHAMap(SHAMap const& other, bool isMutable) + : f_(other.f_) + , journal_(other.f_.journal()) + , cowid_(other.cowid_ + 1) + , ledgerSeq_(other.ledgerSeq_) + , root_(other.root_) + , state_(isMutable ? SHAMapState::Modifying : SHAMapState::Immutable) + , type_(other.type_) + , backed_(other.backed_) { - auto ret = std::make_shared(type_, f_); - SHAMap& newMap = *ret; - - if (!isMutable) - newMap.state_ = SHAMapState::Immutable; - - newMap.cowid_ = cowid_ + 1; - newMap.ledgerSeq_ = ledgerSeq_; - newMap.root_ = root_; - newMap.backed_ = backed_; - + // If either map may change, they cannot share nodes if ((state_ != SHAMapState::Immutable) || - (newMap.state_ != SHAMapState::Immutable)) + (other.state_ != SHAMapState::Immutable)) { - // If either map may change, they cannot share nodes - newMap.unshare(); + unshare(); } +} - return ret; +std::shared_ptr +SHAMap::snapShot(bool isMutable) const +{ + return std::make_shared(*this, isMutable); } void @@ -174,7 +174,6 @@ SHAMap::finishFetch( { assert(backed_); - std::shared_ptr node; try { if (!object) @@ -187,27 +186,23 @@ SHAMap::finishFetch( return {}; } - node = + auto node = SHAMapTreeNode::makeFromPrefix(makeSlice(object->getData()), hash); if (node) canonicalize(hash, node); return node; } - - catch (SHAMapMissingNode const& e) - { - JLOG(journal_.warn()) << "Missing node: " << hash << " : " << e.what(); - } catch (std::runtime_error const& e) { - JLOG(journal_.warn()) << __func__ << " : " << e.what(); + JLOG(journal_.warn()) << "finishFetch exception: " << e.what(); } catch (...) { - JLOG(journal_.warn()) << "Invalid DB node " << hash; + JLOG(journal_.warn()) + << "finishFetch exception: unknonw exception: " << hash; } - return std::shared_ptr(); + return {}; } // See if a sync filter has a node From 4f95b9d7a69e95e40126822514e5369fcd57fb1d Mon Sep 17 00:00:00 2001 From: RichardAH Date: Wed, 12 Apr 2023 02:11:17 +0200 Subject: [PATCH 150/508] Prevent replay attacks with NetworkID field: (#4370) Add a `NetworkID` field to help prevent replay attacks on and from side-chains. The new field must be used when the server is using a network id > 1024. To preserve legacy behavior, all chains with a network ID less than 1025 retain the existing behavior. This includes Mainnet, Testnet, Devnet, and hooks-testnet. If `sfNetworkID` is present in any transaction submitted to any of the nodes on one of these chains, then `telNETWORK_ID_MAKES_TX_NON_CANONICAL` is returned. Since chains with a network ID less than 1025, including Mainnet, retain the existing behavior, there is no need for an amendment. 
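As a purely illustrative example of how this rule is expected to be used (the values here are hypothetical and not part of this patch): a node joining a chain whose ID is above 1024 would declare that ID in its config via the new `[network_id]` section added below, which accepts `main`, `testnet`, `devnet`, or a number:

```
[network_id]
1025
```

Every transaction submitted to that node must then carry a matching `"NetworkID": 1025` field; transactions that omit it fail with `telREQUIRES_NETWORK_ID`, and transactions naming a different chain fail with `telWRONG_NETWORK`.
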
The `NetworkID` helps to prevent replay attacks because users specify a `NetworkID` field in every transaction for that chain. This change introduces a new UINT32 field, `sfNetworkID` ("NetworkID"). There are also three new local error codes for transaction results: - `telNETWORK_ID_MAKES_TX_NON_CANONICAL` - `telREQUIRES_NETWORK_ID` - `telWRONG_NETWORK` To learn about the other transaction result codes, see: https://xrpl.org/transaction-results.html Local error codes were chosen because a transaction is not necessarily malformed if it is submitted to a node running on the incorrect chain. This is a local error specific to that node and could be corrected by switching to a different node or by changing the `network_id` on that node. See: https://xrpl.org/connect-your-rippled-to-the-xrp-test-net.html In addition to using `NetworkID`, it is still generally recommended to use different accounts and keys on side-chains. However, people will undoubtedly use the same keys on multiple chains; for example, this is common practice on other blockchain networks. There are also some legitimate use cases for this. A `app.NetworkID` test suite has been added, and `core.Config` was updated to include some network_id tests. --- Builds/CMake/RippledCore.cmake | 1 + src/ripple/app/tx/impl/Transactor.cpp | 21 ++++ src/ripple/core/Config.h | 1 + src/ripple/core/ConfigSections.h | 1 + src/ripple/core/impl/Config.cpp | 12 ++ src/ripple/protocol/SField.h | 1 + src/ripple/protocol/TER.h | 3 + src/ripple/protocol/impl/SField.cpp | 1 + src/ripple/protocol/impl/TER.cpp | 3 + src/ripple/protocol/impl/TxFormats.cpp | 1 + src/ripple/protocol/jss.h | 1 + src/test/app/NetworkID_test.cpp | 154 +++++++++++++++++++++++++ src/test/core/Config_test.cpp | 66 +++++++++++ src/test/jtx/impl/Env.cpp | 3 + 14 files changed, 269 insertions(+) create mode 100644 src/test/app/NetworkID_test.cpp diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index c34b40392fa..b9e4653e653 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -705,6 +705,7 @@ if (tests) src/test/app/LoadFeeTrack_test.cpp src/test/app/Manifest_test.cpp src/test/app/MultiSign_test.cpp + src/test/app/NetworkID_test.cpp src/test/app/NFToken_test.cpp src/test/app/NFTokenBurn_test.cpp src/test/app/NFTokenDir_test.cpp diff --git a/src/ripple/app/tx/impl/Transactor.cpp b/src/ripple/app/tx/impl/Transactor.cpp index 1eecccedb25..66aa10227d4 100644 --- a/src/ripple/app/tx/impl/Transactor.cpp +++ b/src/ripple/app/tx/impl/Transactor.cpp @@ -40,6 +40,27 @@ namespace ripple { NotTEC preflight0(PreflightContext const& ctx) { + uint32_t const nodeNID = ctx.app.config().NETWORK_ID; + std::optional const txNID = ctx.tx[~sfNetworkID]; + + if (nodeNID <= 1024) + { + // legacy networks have IDs 1024 and below. These networks cannot + // specify NetworkID in txn + if (txNID) + return telNETWORK_ID_MAKES_TX_NON_CANONICAL; + } + else + { + // new networks both require the field to be present and require it to + // match + if (!txNID) + return telREQUIRES_NETWORK_ID; + + if (*txNID != nodeNID) + return telWRONG_NETWORK; + } + auto const txID = ctx.tx.getTransactionID(); if (txID == beast::zero) diff --git a/src/ripple/core/Config.h b/src/ripple/core/Config.h index e805faad989..e86157762b3 100644 --- a/src/ripple/core/Config.h +++ b/src/ripple/core/Config.h @@ -158,6 +158,7 @@ class Config : public BasicConfig std::string START_LEDGER; // Network parameters + uint32_t NETWORK_ID = 0; // DEPRECATED - Fee units for a reference transction. 
// Only provided for backwards compatibility in a couple of places diff --git a/src/ripple/core/ConfigSections.h b/src/ripple/core/ConfigSections.h index 6fd59680d8b..03c702f9a52 100644 --- a/src/ripple/core/ConfigSections.h +++ b/src/ripple/core/ConfigSections.h @@ -99,6 +99,7 @@ struct ConfigSection #define SECTION_LEDGER_REPLAY "ledger_replay" #define SECTION_BETA_RPC_API "beta_rpc_api" #define SECTION_SWEEP_INTERVAL "sweep_interval" +#define SECTION_NETWORK_ID "network_id" } // namespace ripple diff --git a/src/ripple/core/impl/Config.cpp b/src/ripple/core/impl/Config.cpp index 4e25552cb8b..ebeec9af36f 100644 --- a/src/ripple/core/impl/Config.cpp +++ b/src/ripple/core/impl/Config.cpp @@ -502,6 +502,18 @@ Config::loadFromString(std::string const& fileContents) std::string strTemp; + if (getSingleSection(secConfig, SECTION_NETWORK_ID, strTemp, j_)) + { + if (strTemp == "main") + NETWORK_ID = 0; + else if (strTemp == "testnet") + NETWORK_ID = 1; + else if (strTemp == "devnet") + NETWORK_ID = 2; + else + NETWORK_ID = beast::lexicalCastThrow(strTemp); + } + if (getSingleSection(secConfig, SECTION_PEER_PRIVATE, strTemp, j_)) PEER_PRIVATE = beast::lexicalCastThrow(strTemp); diff --git a/src/ripple/protocol/SField.h b/src/ripple/protocol/SField.h index 5821a43d1c0..5c899d3fad3 100644 --- a/src/ripple/protocol/SField.h +++ b/src/ripple/protocol/SField.h @@ -354,6 +354,7 @@ extern SF_UINT16 const sfHookExecutionIndex; extern SF_UINT16 const sfHookApiVersion; // 32-bit integers (common) +extern SF_UINT32 const sfNetworkID; extern SF_UINT32 const sfFlags; extern SF_UINT32 const sfSourceTag; extern SF_UINT32 const sfSequence; diff --git a/src/ripple/protocol/TER.h b/src/ripple/protocol/TER.h index 38342f0c139..1ece0f2abcd 100644 --- a/src/ripple/protocol/TER.h +++ b/src/ripple/protocol/TER.h @@ -61,6 +61,9 @@ enum TELcodes : TERUnderlyingType { telCAN_NOT_QUEUE_BLOCKED, telCAN_NOT_QUEUE_FEE, telCAN_NOT_QUEUE_FULL, + telWRONG_NETWORK, + telREQUIRES_NETWORK_ID, + telNETWORK_ID_MAKES_TX_NON_CANONICAL }; //------------------------------------------------------------------------------ diff --git a/src/ripple/protocol/impl/SField.cpp b/src/ripple/protocol/impl/SField.cpp index 14c2bd5c3de..d02b7ca4b7b 100644 --- a/src/ripple/protocol/impl/SField.cpp +++ b/src/ripple/protocol/impl/SField.cpp @@ -104,6 +104,7 @@ CONSTRUCT_TYPED_SFIELD(sfHookExecutionIndex, "HookExecutionIndex", UINT16, CONSTRUCT_TYPED_SFIELD(sfHookApiVersion, "HookApiVersion", UINT16, 20); // 32-bit integers (common) +CONSTRUCT_TYPED_SFIELD(sfNetworkID, "NetworkID", UINT32, 1); CONSTRUCT_TYPED_SFIELD(sfFlags, "Flags", UINT32, 2); CONSTRUCT_TYPED_SFIELD(sfSourceTag, "SourceTag", UINT32, 3); CONSTRUCT_TYPED_SFIELD(sfSequence, "Sequence", UINT32, 4); diff --git a/src/ripple/protocol/impl/TER.cpp b/src/ripple/protocol/impl/TER.cpp index a845bdaeebc..2dfb45fd728 100644 --- a/src/ripple/protocol/impl/TER.cpp +++ b/src/ripple/protocol/impl/TER.cpp @@ -124,6 +124,9 @@ transResults() MAKE_ERROR(telCAN_NOT_QUEUE_BLOCKED, "Can not queue at this time: blocking transaction in queue."), MAKE_ERROR(telCAN_NOT_QUEUE_FEE, "Can not queue at this time: fee insufficient to replace queued transaction."), MAKE_ERROR(telCAN_NOT_QUEUE_FULL, "Can not queue at this time: queue is full."), + MAKE_ERROR(telWRONG_NETWORK, "Transaction specifies a Network ID that differs from that of the local node."), + MAKE_ERROR(telREQUIRES_NETWORK_ID, "Transactions submitted to this node/network must include a correct NetworkID field."), + 
MAKE_ERROR(telNETWORK_ID_MAKES_TX_NON_CANONICAL, "Transactions submitted to this node/network must NOT include a NetworkID field."), MAKE_ERROR(temMALFORMED, "Malformed transaction."), MAKE_ERROR(temBAD_AMOUNT, "Can only send positive amounts."), diff --git a/src/ripple/protocol/impl/TxFormats.cpp b/src/ripple/protocol/impl/TxFormats.cpp index fe42fd53e3c..91a6bcb581b 100644 --- a/src/ripple/protocol/impl/TxFormats.cpp +++ b/src/ripple/protocol/impl/TxFormats.cpp @@ -40,6 +40,7 @@ TxFormats::TxFormats() {sfSigningPubKey, soeREQUIRED}, {sfTxnSignature, soeOPTIONAL}, {sfSigners, soeOPTIONAL}, // submit_multisigned + {sfNetworkID, soeOPTIONAL}, }; add(jss::AccountSet, diff --git a/src/ripple/protocol/jss.h b/src/ripple/protocol/jss.h index 1cc06806569..e2738309d3c 100644 --- a/src/ripple/protocol/jss.h +++ b/src/ripple/protocol/jss.h @@ -71,6 +71,7 @@ JSS(Invalid); // JSS(LastLedgerSequence); // in: TransactionSign; field JSS(LedgerHashes); // ledger type. JSS(LimitAmount); // field. +JSS(NetworkID); // field. JSS(NFTokenBurn); // transaction type. JSS(NFTokenMint); // transaction type. JSS(NFTokenOffer); // ledger type. diff --git a/src/test/app/NetworkID_test.cpp b/src/test/app/NetworkID_test.cpp new file mode 100644 index 00000000000..e650667f842 --- /dev/null +++ b/src/test/app/NetworkID_test.cpp @@ -0,0 +1,154 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Dev Null Productions + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include +#include + +namespace ripple { +namespace test { + +class NetworkID_test : public beast::unit_test::suite +{ +public: + void + run() override + { + testNetworkID(); + } + + std::unique_ptr + makeNetworkConfig(uint32_t networkID) + { + using namespace jtx; + return envconfig([&](std::unique_ptr cfg) { + cfg->NETWORK_ID = networkID; + return cfg; + }); + } + + void + testNetworkID() + { + testcase( + "Require txn NetworkID to be specified (or not) depending on the " + "network ID of the node"); + using namespace jtx; + + auto const alice = Account{"alice"}; + + auto const runTx = [&](test::jtx::Env& env, + Json::Value const& jv, + TER expectedOutcome) { + env.memoize(env.master); + env.memoize(alice); + + // fund alice + { + Json::Value jv; + jv[jss::Account] = env.master.human(); + jv[jss::Destination] = alice.human(); + jv[jss::TransactionType] = "Payment"; + jv[jss::Amount] = "10000000000"; + if (env.app().config().NETWORK_ID > 1024) + jv[jss::NetworkID] = + std::to_string(env.app().config().NETWORK_ID); + + env(jv, fee(1000), sig(env.master)); + } + + // run tx + env(jv, fee(1000), ter(expectedOutcome)); + env.close(); + }; + + // test mainnet + { + test::jtx::Env env{*this, makeNetworkConfig(0)}; + BEAST_EXPECT(env.app().config().NETWORK_ID == 0); + + // try to submit a txn without network id, this should work + Json::Value jv; + jv[jss::Account] = alice.human(); + jv[jss::TransactionType] = jss::AccountSet; + runTx(env, jv, tesSUCCESS); + + // try to submit a txn with NetworkID present against a mainnet + // node, this will fail + jv[jss::NetworkID] = 0; + runTx(env, jv, telNETWORK_ID_MAKES_TX_NON_CANONICAL); + + // change network id to something else, should still return same + // error + jv[jss::NetworkID] = 10000; + runTx(env, jv, telNETWORK_ID_MAKES_TX_NON_CANONICAL); + } + + // any network up to and including networkid 1024 cannot support + // NetworkID + { + test::jtx::Env env{*this, makeNetworkConfig(1024)}; + BEAST_EXPECT(env.app().config().NETWORK_ID == 1024); + + // try to submit a txn without network id, this should work + Json::Value jv; + jv[jss::Account] = alice.human(); + jv[jss::TransactionType] = jss::AccountSet; + runTx(env, jv, tesSUCCESS); + + // now submit with a network id, this will fail + jv[jss::NetworkID] = 1024; + runTx(env, jv, telNETWORK_ID_MAKES_TX_NON_CANONICAL); + + jv[jss::NetworkID] = 1000; + runTx(env, jv, telNETWORK_ID_MAKES_TX_NON_CANONICAL); + } + + // any network above networkid 1024 will produce an error if fed a txn + // absent networkid + { + test::jtx::Env env{*this, makeNetworkConfig(1025)}; + BEAST_EXPECT(env.app().config().NETWORK_ID == 1025); + + // try to submit a txn without network id, this should not work + Json::Value jv; + jv[jss::Account] = alice.human(); + jv[jss::TransactionType] = jss::AccountSet; + runTx(env, jv, telREQUIRES_NETWORK_ID); + + // try to submit with wrong network id + jv[jss::NetworkID] = 0; + runTx(env, jv, telWRONG_NETWORK); + + jv[jss::NetworkID] = 1024; + runTx(env, jv, telWRONG_NETWORK); + + // submit the correct network id + jv[jss::NetworkID] = 1025; + runTx(env, jv, tesSUCCESS); + } + } +}; + +BEAST_DEFINE_TESTSUITE(NetworkID, app, ripple); + +} // namespace test +} // namespace ripple diff --git a/src/test/core/Config_test.cpp b/src/test/core/Config_test.cpp index b455762dea4..c991f3b11a2 100644 --- a/src/test/core/Config_test.cpp +++ b/src/test/core/Config_test.cpp @@ -411,6 
+411,71 @@ port_wss_admin } } + void + testNetworkID() + { + testcase("network id"); + std::string error; + Config c; + try + { + c.loadFromString(R"rippleConfig( +[network_id] +main +)rippleConfig"); + } + catch (std::runtime_error& e) + { + error = e.what(); + } + + BEAST_EXPECT(error == ""); + BEAST_EXPECT(c.NETWORK_ID == 0); + + try + { + c.loadFromString(R"rippleConfig( +)rippleConfig"); + } + catch (std::runtime_error& e) + { + error = e.what(); + } + + BEAST_EXPECT(error == ""); + BEAST_EXPECT(c.NETWORK_ID == 0); + + try + { + c.loadFromString(R"rippleConfig( +[network_id] +255 +)rippleConfig"); + } + catch (std::runtime_error& e) + { + error = e.what(); + } + + BEAST_EXPECT(error == ""); + BEAST_EXPECT(c.NETWORK_ID == 255); + + try + { + c.loadFromString(R"rippleConfig( +[network_id] +10000 +)rippleConfig"); + } + catch (std::runtime_error& e) + { + error = e.what(); + } + + BEAST_EXPECT(error == ""); + BEAST_EXPECT(c.NETWORK_ID == 10000); + } + void testValidatorsFile() { @@ -1230,6 +1295,7 @@ r.ripple.com:51235 testGetters(); testAmendment(); testOverlay(); + testNetworkID(); } }; diff --git a/src/test/jtx/impl/Env.cpp b/src/test/jtx/impl/Env.cpp index 41eac3204ed..e0126d86854 100644 --- a/src/test/jtx/impl/Env.cpp +++ b/src/test/jtx/impl/Env.cpp @@ -163,7 +163,10 @@ Env::lookup(AccountID const& id) const { auto const iter = map_.find(id); if (iter == map_.end()) + { + std::cout << "Unknown account: " << id << "\n"; Throw("Env::lookup:: unknown account ID"); + } return iter->second; } From e75cd4931378f7e0b94afdb9fdee8d45e4159531 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Wed, 12 Apr 2023 18:32:37 -0500 Subject: [PATCH 151/508] Fix the fix for std::result_of (#4496) Newer compilers, such as Apple Clang 15.0, have removed `std::result_of` as part of C++20. The build instructions provided a fix for this (by adding a preprocessor definition), but the fix was broken. This fixes the fix by: * Adding the `conf` prefix for tool configurations (which had been forgotten). * Passing `extra_b2_flags` to `boost` package to fix its build. * Define `BOOST_ASIO_HAS_STD_INVOKE_RESULT` in order to build boost 1.77 with a newer compiler. --- BUILD.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/BUILD.md b/BUILD.md index 3a7283407d9..cafde96b6a6 100644 --- a/BUILD.md +++ b/BUILD.md @@ -258,10 +258,11 @@ part of C++20, e.g. Apple Clang 15.0, then you might need to add a preprocessor definition to your bulid: ``` +conan profile update 'options.boost:extra_b2_flags="define=BOOST_ASIO_HAS_STD_INVOKE_RESULT"' default conan profile update 'env.CFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default conan profile update 'env.CXXFLAGS="-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"' default -conan profile update 'tools.build:cflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default -conan profile update 'tools.build:cxxflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default +conan profile update 'conf.tools.build:cflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default +conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_RESULT"]' default ``` From 1f417764c3293adfd60e98c9d27da096f474d13d Mon Sep 17 00:00:00 2001 From: John Freeman Date: Thu, 13 Apr 2023 12:41:16 -0500 Subject: [PATCH 152/508] Add install instructions for package managers: (#4472) Add instructions for installing rippled using the package managers APT and YUM. Some steps were adapted from xrpl.org. 
--------- Co-authored-by: Michael Legleux --- docs/build/install.md | 146 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 146 insertions(+) diff --git a/docs/build/install.md b/docs/build/install.md index 1bdc456b6d8..af0d6f335c0 100644 --- a/docs/build/install.md +++ b/docs/build/install.md @@ -1,3 +1,12 @@ +This document contains instructions for installing rippled. +The APT package manager is common on Debian-based Linux distributions like +Ubuntu, +while the YUM package manager is common on Red Hat-based Linux distributions +like CentOS. +Installing from source is an option for all platforms, +and the only supported option for installing custom builds. + + ## From source From a source build, you can install rippled and libxrpl using CMake's @@ -11,3 +20,140 @@ The default [prefix][1] is typically `/usr/local` on Linux and macOS and `C:/Program Files/rippled` on Windows. [1]: https://cmake.org/cmake/help/latest/variable/CMAKE_INSTALL_PREFIX.html + + +## With the APT package manager + +1. Update repositories: + + sudo apt update -y + +2. Install utilities: + + sudo apt install -y apt-transport-https ca-certificates wget gnupg + +3. Add Ripple's package-signing GPG key to your list of trusted keys: + + sudo mkdir /usr/local/share/keyrings/ + wget -q -O - "https://repos.ripple.com/repos/api/gpg/key/public" | gpg --dearmor > ripple-key.gpg + sudo mv ripple-key.gpg /usr/local/share/keyrings + + +4. Check the fingerprint of the newly-added key: + + gpg /usr/local/share/keyrings/ripple-key.gpg + + The output should include an entry for Ripple such as the following: + + gpg: WARNING: no command supplied. Trying to guess what you mean ... + pub rsa3072 2019-02-14 [SC] [expires: 2026-02-17] + C0010EC205B35A3310DC90DE395F97FFCCAFD9A2 + uid TechOps Team at Ripple + sub rsa3072 2019-02-14 [E] [expires: 2026-02-17] + + + In particular, make sure that the fingerprint matches. (In the above example, the fingerprint is on the third line, starting with `C001`.) + +4. Add the appropriate Ripple repository for your operating system version: + + echo "deb [signed-by=/usr/local/share/keyrings/ripple-key.gpg] https://repos.ripple.com/repos/rippled-deb focal stable" | \ + sudo tee -a /etc/apt/sources.list.d/ripple.list + + The above example is appropriate for **Ubuntu 20.04 Focal Fossa**. For other operating systems, replace the word `focal` with one of the following: + + - `jammy` for **Ubuntu 22.04 Jammy Jellyfish** + - `bionic` for **Ubuntu 18.04 Bionic Beaver** + - `bullseye` for **Debian 11 Bullseye** + - `buster` for **Debian 10 Buster** + + If you want access to development or pre-release versions of `rippled`, use one of the following instead of `stable`: + + - `unstable` - Pre-release builds ([`release` branch](https://github.com/ripple/rippled/tree/release)) + - `nightly` - Experimental/development builds ([`develop` branch](https://github.com/ripple/rippled/tree/develop)) + + **Warning:** Unstable and nightly builds may be broken at any time. Do not use these builds for production servers. + +5. Fetch the Ripple repository. + + sudo apt -y update + +6. Install the `rippled` software package: + + sudo apt -y install rippled + +7. Check the status of the `rippled` service: + + systemctl status rippled.service + + The `rippled` service should start automatically. If not, you can start it manually: + + sudo systemctl start rippled.service + +8. Optional: allow `rippled` to bind to privileged ports. + + This allows you to serve incoming API requests on port 80 or 443. 
(If you want to do so, you must also update the config file's port settings.) + + sudo setcap 'cap_net_bind_service=+ep' /opt/ripple/bin/rippled + + +## With the YUM package manager + +1. Install the Ripple RPM repository: + + Choose the appropriate RPM repository for the stability of releases you want: + + - `stable` for the latest production release (`master` branch) + - `unstable` for pre-release builds (`release` branch) + - `nightly` for experimental/development builds (`develop` branch) + + *Stable* + + cat << REPOFILE | sudo tee /etc/yum.repos.d/ripple.repo + [ripple-stable] + name=XRP Ledger Packages + enabled=1 + gpgcheck=0 + repo_gpgcheck=1 + baseurl=https://repos.ripple.com/repos/rippled-rpm/stable/ + gpgkey=https://repos.ripple.com/repos/rippled-rpm/stable/repodata/repomd.xml.key + REPOFILE + + *Unstable* + + cat << REPOFILE | sudo tee /etc/yum.repos.d/ripple.repo + [ripple-unstable] + name=XRP Ledger Packages + enabled=1 + gpgcheck=0 + repo_gpgcheck=1 + baseurl=https://repos.ripple.com/repos/rippled-rpm/unstable/ + gpgkey=https://repos.ripple.com/repos/rippled-rpm/unstable/repodata/repomd.xml.key + REPOFILE + + *Nightly* + + cat << REPOFILE | sudo tee /etc/yum.repos.d/ripple.repo + [ripple-nightly] + name=XRP Ledger Packages + enabled=1 + gpgcheck=0 + repo_gpgcheck=1 + baseurl=https://repos.ripple.com/repos/rippled-rpm/nightly/ + gpgkey=https://repos.ripple.com/repos/rippled-rpm/nightly/repodata/repomd.xml.key + REPOFILE + +2. Fetch the latest repo updates: + + sudo yum -y update + +3. Install the new `rippled` package: + + sudo yum install -y rippled + +4. Configure the `rippled` service to start on boot: + + sudo systemctl enable rippled.service + +5. Start the `rippled` service: + + sudo systemctl start rippled.service From c5003969dee3fe7fbc1e0a14afab1ccbb8782d82 Mon Sep 17 00:00:00 2001 From: Mark Travis Date: Thu, 20 Apr 2023 11:36:18 -0700 Subject: [PATCH 153/508] Use quorum specified via command line: (#4489) If `--quorum` setting is present on the command line, use the specified value as the minimum quorum. This allows for the use of a potentially fork-unsafe quorum, but it is sometimes necessary for small and test networks. Fix #4488. --------- Co-authored-by: RichardAH --- src/ripple/app/misc/impl/ValidatorList.cpp | 24 +++++++++------------- src/test/app/ValidatorList_test.cpp | 9 ++++---- 2 files changed, 15 insertions(+), 18 deletions(-) diff --git a/src/ripple/app/misc/impl/ValidatorList.cpp b/src/ripple/app/misc/impl/ValidatorList.cpp index 09f774f8af9..d17b85c4840 100644 --- a/src/ripple/app/misc/impl/ValidatorList.cpp +++ b/src/ripple/app/misc/impl/ValidatorList.cpp @@ -1710,6 +1710,15 @@ ValidatorList::calculateQuorum( std::size_t effectiveUnlSize, std::size_t seenSize) { + // Use quorum if specified via command line. + if (minimumQuorum_ > 0) + { + JLOG(j_.warn()) << "Using potentially unsafe quorum of " + << *minimumQuorum_ + << " as specified on the command line"; + return *minimumQuorum_; + } + // Do not use achievable quorum until lists from all configured // publishers are available for (auto const& list : publisherLists_) @@ -1752,21 +1761,8 @@ ValidatorList::calculateQuorum( // Note that the negative UNL protocol introduced the // AbsoluteMinimumQuorum which is 60% of the original UNL size. The // effective quorum should not be lower than it. 
- auto quorum = static_cast(std::max( + return static_cast(std::max( std::ceil(effectiveUnlSize * 0.8f), std::ceil(unlSize * 0.6f))); - - // Use lower quorum specified via command line if the normal quorum - // appears unreachable based on the number of recently received - // validations. - if (minimumQuorum_ && *minimumQuorum_ < quorum && seenSize < quorum) - { - quorum = *minimumQuorum_; - - JLOG(j_.warn()) << "Using unsafe quorum of " << quorum - << " as specified in the command line"; - } - - return quorum; } TrustChanges diff --git a/src/test/app/ValidatorList_test.cpp b/src/test/app/ValidatorList_test.cpp index fead5563f21..ff9b57f3ced 100644 --- a/src/test/app/ValidatorList_test.cpp +++ b/src/test/app/ValidatorList_test.cpp @@ -1318,7 +1318,7 @@ class ValidatorList_test : public beast::unit_test::suite BEAST_EXPECT(changes.added == expectedTrusted); BEAST_EXPECT(trustedKeys->quorum() == minQuorum); - // Use normal quorum when seen validators >= quorum + // Use configured quorum even when seen validators >= quorum activeValidators.emplace(toBeSeen); changes = trustedKeys->updateTrusted( activeValidators, @@ -1328,7 +1328,7 @@ class ValidatorList_test : public beast::unit_test::suite env.app().getHashRouter()); BEAST_EXPECT(changes.removed.empty()); BEAST_EXPECT(changes.added.empty()); - BEAST_EXPECT(trustedKeys->quorum() == std::ceil(n * 0.8f)); + BEAST_EXPECT(trustedKeys->quorum() == minQuorum); } { // Remove expired published list @@ -1828,7 +1828,8 @@ class ValidatorList_test : public beast::unit_test::suite env.app().getOPs(), env.app().overlay(), env.app().getHashRouter()); - if (trustedKeys->quorum() == std::ceil(cfgKeys.size() * 0.8f)) + if (minimumQuorum == trustedKeys->quorum() || + trustedKeys->quorum() == std::ceil(cfgKeys.size() * 0.8f)) return trustedKeys; } return nullptr; @@ -1980,7 +1981,7 @@ class ValidatorList_test : public beast::unit_test::suite env.app().getOPs(), env.app().overlay(), env.app().getHashRouter()); - BEAST_EXPECT(validators->quorum() == 48); + BEAST_EXPECT(validators->quorum() == 30); hash_set nUnl; it = unl.begin(); for (std::uint32_t i = 0; i < 20; ++i) From 8d482d3557fb206eab5c7bbfc57216238b516d14 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Fri, 21 Apr 2023 14:20:35 -0500 Subject: [PATCH 154/508] Fix errors for Clang 16: (#4501) Address issues related to the removal of `std::{u,bi}nary_function` in C++17 and some warnings with Clang 16. Some warnings appeared with the upgrade to Apple clang version 14.0.3 (clang-1403.0.22.14.1). - `std::{u,bi}nary_function` were removed in C++17. They were empty classes with a few associated types. We already have conditional code to define the types. Just make it unconditional. - libc++ checks a cast in an unevaluated context to see if a type inherits from a binary function class in the standard library, e.g. `std::equal_to`, and this causes an error when the type privately inherits from such a class. Change these instances to public inheritance. - We don't need a middle-man for the empty base optimization. Prefer to inherit directly from an empty class than from `beast::detail::empty_base_optimization`. - Clang warns when all the uses of a variable are removed by conditional compilation of assertions. Add a `[[maybe_unused]]` annotation to suppress it. - As a drive-by clean-up, remove commented code. See related work in #4486. 
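To make the pattern concrete, here is a small standalone sketch (not the rippled classes changed below; `KeyValueLess` and `Element` are made-up names, and `std::less<int>` stands in for a container's `Compare` parameter). It shows a comparator that publicly inherits from its possibly-empty comparison type and declares for itself the associated typedefs that `std::binary_function` used to provide:

```cpp
#include <functional>
#include <string>
#include <utility>

struct Element
{
    std::pair<int, std::string> value;  // key/value payload
};

// Public (not private) inheritance avoids the libc++ error described above,
// and inheriting directly from the empty Compare keeps the empty-base
// optimization without routing calls through a member() helper.
template <class Compare = std::less<int>>
class KeyValueLess : public Compare
{
public:
    // Typedefs formerly supplied by std::binary_function, now spelled out.
    using first_argument_type = int;
    using second_argument_type = Element;
    using result_type = bool;

    KeyValueLess() = default;

    explicit KeyValueLess(Compare const& cmp) : Compare(cmp)
    {
    }

    bool
    operator()(int k, Element const& e) const
    {
        // Forward to the base comparator directly.
        return Compare::operator()(k, e.value.first);
    }

    Compare const&
    compare() const
    {
        return *this;  // the comparator is the base subobject
    }
};

int
main()
{
    KeyValueLess<> less;
    Element e{{42, "answer"}};
    return less(7, e) ? 0 : 1;  // 7 < 42, so this returns 0
}
```

Because the comparator derives directly from an empty `Compare`, the empty-base optimization still applies and no intermediate wrapper class is needed.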
--- .../container/detail/aged_ordered_container.h | 55 ++++--------------- .../detail/aged_unordered_container.h | 55 ++++--------------- src/ripple/peerfinder/impl/Bootcache.h | 7 --- src/ripple/peerfinder/impl/Livecache.h | 11 ---- src/ripple/shamap/impl/SHAMapInnerNode.cpp | 2 +- 5 files changed, 24 insertions(+), 106 deletions(-) diff --git a/src/ripple/beast/container/detail/aged_ordered_container.h b/src/ripple/beast/container/detail/aged_ordered_container.h index 23534a26bb3..10dca962b13 100644 --- a/src/ripple/beast/container/detail/aged_ordered_container.h +++ b/src/ripple/beast/container/detail/aged_ordered_container.h @@ -145,111 +145,78 @@ class aged_ordered_container }; // VFALCO TODO This should only be enabled for maps. - class pair_value_compare - : public beast::detail::empty_base_optimization -#ifdef _LIBCPP_VERSION - , - public std::binary_function -#endif + class pair_value_compare : public Compare { public: -#ifndef _LIBCPP_VERSION using first_argument = value_type; using second_argument = value_type; using result_type = bool; -#endif bool operator()(value_type const& lhs, value_type const& rhs) const { - return this->member()(lhs.first, rhs.first); + return Compare::operator()(lhs.first, rhs.first); } pair_value_compare() { } - pair_value_compare(pair_value_compare const& other) - : beast::detail::empty_base_optimization(other) + pair_value_compare(pair_value_compare const& other) : Compare(other) { } private: friend aged_ordered_container; - pair_value_compare(Compare const& compare) - : beast::detail::empty_base_optimization(compare) + pair_value_compare(Compare const& compare) : Compare(compare) { } }; // Compares value_type against element, used in insert_check // VFALCO TODO hoist to remove template argument dependencies - class KeyValueCompare - : public beast::detail::empty_base_optimization -#ifdef _LIBCPP_VERSION - , - public std::binary_function -#endif + class KeyValueCompare : public Compare { public: -#ifndef _LIBCPP_VERSION using first_argument = Key; using second_argument = element; using result_type = bool; -#endif KeyValueCompare() = default; - KeyValueCompare(Compare const& compare) - : beast::detail::empty_base_optimization(compare) - { - } - - // VFALCO NOTE WE might want only to enable these overloads - // if Compare has is_transparent -#if 0 - template - bool operator() (K const& k, element const& e) const + KeyValueCompare(Compare const& compare) : Compare(compare) { - return this->member() (k, extract (e.value)); } - template - bool operator() (element const& e, K const& k) const - { - return this->member() (extract (e.value), k); - } -#endif - bool operator()(Key const& k, element const& e) const { - return this->member()(k, extract(e.value)); + return Compare::operator()(k, extract(e.value)); } bool operator()(element const& e, Key const& k) const { - return this->member()(extract(e.value), k); + return Compare::operator()(extract(e.value), k); } bool operator()(element const& x, element const& y) const { - return this->member()(extract(x.value), extract(y.value)); + return Compare::operator()(extract(x.value), extract(y.value)); } Compare& compare() { - return beast::detail::empty_base_optimization::member(); + return *this; } Compare const& compare() const { - return beast::detail::empty_base_optimization::member(); + return *this; } }; diff --git a/src/ripple/beast/container/detail/aged_unordered_container.h b/src/ripple/beast/container/detail/aged_unordered_container.h index 920e6196bb9..fcdccd2a637 100644 --- 
a/src/ripple/beast/container/detail/aged_unordered_container.h +++ b/src/ripple/beast/container/detail/aged_unordered_container.h @@ -148,115 +148,84 @@ class aged_unordered_container }; // VFALCO TODO hoist to remove template argument dependencies - class ValueHash : private beast::detail::empty_base_optimization -#ifdef _LIBCPP_VERSION - , - public std::unary_function -#endif + class ValueHash : public Hash { public: -#ifndef _LIBCPP_VERSION using argument_type = element; using result_type = size_t; -#endif ValueHash() { } - ValueHash(Hash const& h) - : beast::detail::empty_base_optimization(h) + ValueHash(Hash const& h) : Hash(h) { } std::size_t operator()(element const& e) const { - return this->member()(extract(e.value)); + return Hash::operator()(extract(e.value)); } Hash& hash_function() { - return this->member(); + return *this; } Hash const& hash_function() const { - return this->member(); + return *this; } }; // Compares value_type against element, used in find/insert_check // VFALCO TODO hoist to remove template argument dependencies - class KeyValueEqual - : private beast::detail::empty_base_optimization -#ifdef _LIBCPP_VERSION - , - public std::binary_function -#endif + class KeyValueEqual : public KeyEqual { public: -#ifndef _LIBCPP_VERSION using first_argument_type = Key; using second_argument_type = element; using result_type = bool; -#endif KeyValueEqual() { } - KeyValueEqual(KeyEqual const& keyEqual) - : beast::detail::empty_base_optimization(keyEqual) + KeyValueEqual(KeyEqual const& keyEqual) : KeyEqual(keyEqual) { } - // VFALCO NOTE WE might want only to enable these overloads - // if KeyEqual has is_transparent -#if 0 - template - bool operator() (K const& k, element const& e) const - { - return this->member() (k, extract (e.value)); - } - - template - bool operator() (element const& e, K const& k) const - { - return this->member() (extract (e.value), k); - } -#endif - bool operator()(Key const& k, element const& e) const { - return this->member()(k, extract(e.value)); + return KeyEqual::operator()(k, extract(e.value)); } bool operator()(element const& e, Key const& k) const { - return this->member()(extract(e.value), k); + return KeyEqual::operator()(extract(e.value), k); } bool operator()(element const& lhs, element const& rhs) const { - return this->member()(extract(lhs.value), extract(rhs.value)); + return KeyEqual::operator()(extract(lhs.value), extract(rhs.value)); } KeyEqual& key_eq() { - return this->member(); + return *this; } KeyEqual const& key_eq() const { - return this->member(); + return *this; } }; diff --git a/src/ripple/peerfinder/impl/Bootcache.h b/src/ripple/peerfinder/impl/Bootcache.h index eb6455879c0..b48f248ae40 100644 --- a/src/ripple/peerfinder/impl/Bootcache.h +++ b/src/ripple/peerfinder/impl/Bootcache.h @@ -91,17 +91,10 @@ class Bootcache using value_type = map_type::value_type; struct Transform -#ifdef _LIBCPP_VERSION - : std::unary_function< - map_type::right_map::const_iterator::value_type const&, - beast::IP::Endpoint const&> -#endif { -#ifndef _LIBCPP_VERSION using first_argument_type = map_type::right_map::const_iterator::value_type const&; using result_type = beast::IP::Endpoint const&; -#endif explicit Transform() = default; diff --git a/src/ripple/peerfinder/impl/Livecache.h b/src/ripple/peerfinder/impl/Livecache.h index 12e2373faaf..8ecd68e845e 100644 --- a/src/ripple/peerfinder/impl/Livecache.h +++ b/src/ripple/peerfinder/impl/Livecache.h @@ -69,14 +69,9 @@ class LivecacheBase public: // Iterator transformation to extract the 
endpoint from Element struct Transform -#ifdef _LIBCPP_VERSION - : public std::unary_function -#endif { -#ifndef _LIBCPP_VERSION using first_argument = Element; using result_type = Endpoint; -#endif explicit Transform() = default; @@ -239,15 +234,9 @@ class Livecache : protected detail::LivecacheBase template struct Transform -#ifdef _LIBCPP_VERSION - : public std:: - unary_function> -#endif { -#ifndef _LIBCPP_VERSION using first_argument = typename lists_type::value_type; using result_type = Hop; -#endif explicit Transform() = default; diff --git a/src/ripple/shamap/impl/SHAMapInnerNode.cpp b/src/ripple/shamap/impl/SHAMapInnerNode.cpp index 6ea6f47eb37..1cac616b00c 100644 --- a/src/ripple/shamap/impl/SHAMapInnerNode.cpp +++ b/src/ripple/shamap/impl/SHAMapInnerNode.cpp @@ -398,7 +398,7 @@ SHAMapInnerNode::canonicalizeChild( void SHAMapInnerNode::invariants(bool is_root) const { - unsigned count = 0; + [[maybe_unused]] unsigned count = 0; auto [numAllocated, hashes, children] = hashesAndChildren_.getHashesAndChildren(); From 436de0e03aaad76ca3326a4d5a048e5441d80422 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Mon, 24 Apr 2023 18:17:51 -0500 Subject: [PATCH 155/508] Expand Linux test matrix: (#4454) This change makes progress on the plan in #4371. It does not replicate the full [matrix] implemented in #3851, but it does replicate the 1.ii section of the Linux matrix. It leverages "heavy" self-hosted runners, and demonstrates a repeatable pattern for future matrices. [matrix]: https://github.com/XRPLF/rippled/blob/d794a0f3f161bb30c74881172fc38f763d7d46e8/.github/README.md#continuous-integration --- .github/actions/build/action.yml | 42 +++++++++ .github/workflows/macos.yml | 69 ++++++++++++++ .github/workflows/nix.yml | 148 +++++++++++++++++-------------- .github/workflows/windows.yml | 6 +- 4 files changed, 197 insertions(+), 68 deletions(-) create mode 100644 .github/actions/build/action.yml create mode 100644 .github/workflows/macos.yml diff --git a/.github/actions/build/action.yml b/.github/actions/build/action.yml new file mode 100644 index 00000000000..e879fdbb5fd --- /dev/null +++ b/.github/actions/build/action.yml @@ -0,0 +1,42 @@ +name: build +inputs: + generator: + default: null + configuration: + required: true + cmake-args: + default: null +# An implicit input is the environment variable `build_dir`. +runs: + using: composite + steps: + - name: export custom recipes + shell: bash + run: conan export external/snappy snappy/1.1.9@ + - name: install dependencies + shell: bash + run: | + mkdir ${build_dir} + cd ${build_dir} + conan install \ + --output-folder . \ + --build missing \ + --settings build_type=${{ inputs.configuration }} \ + .. + - name: configure + shell: bash + run: | + cd ${build_dir} + cmake \ + ${{ inputs.generator && format('-G {0}', inputs.generator) || '' }} \ + -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \ + -DCMAKE_BUILD_TYPE=${{ inputs.configuration }} \ + ${{ inputs.cmake-args }} \ + .. 
+ - name: build + shell: bash + run: | + cmake \ + --build ${build_dir} \ + --config ${{ inputs.configuration }} \ + --parallel ${NUM_PROCESSORS:-$(nproc)} diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml new file mode 100644 index 00000000000..8b4aca0f4f7 --- /dev/null +++ b/.github/workflows/macos.yml @@ -0,0 +1,69 @@ +name: macos +on: [push, pull_request] + +jobs: + + test: + strategy: + matrix: + platform: + - macos-12 + generator: + - Ninja + configuration: + - Release + runs-on: ${{ matrix.platform }} + env: + # The `build` action requires these variables. + build_dir: .build + NUM_PROCESSORS: 2 + steps: + - name: checkout + uses: actions/checkout@v3 + - name: install Ninja + if: matrix.generator == 'Ninja' + run: brew install ninja + - name: choose Python + uses: actions/setup-python@v3 + with: + python-version: 3.9 + - name: learn Python cache directory + id: pip-cache + run: | + sudo pip install --upgrade pip + echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT + - name: restore Python cache directory + uses: actions/cache@v2 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-${{ hashFiles('.github/workflows/nix.yml') }} + - name: install Conan + run: pip install wheel 'conan<2' + - name: check environment + run: | + echo ${PATH} | tr ':' '\n' + python --version + conan --version + cmake --version + env + - name: configure Conan + run: | + conan profile new default --detect + conan profile update settings.compiler.cppstd=20 default + - name: learn Conan cache directory + id: conan-cache + run: | + echo "dir=$(conan config get storage.path)" >> $GITHUB_OUTPUT + - name: restore Conan cache directory + uses: actions/cache@v2 + with: + path: ${{ steps.conan-cache.outputs.dir }} + key: ${{ hashFiles('~/.conan/profiles/default', 'conanfile.py', 'external/rocksdb/*', '.github/workflows/nix.yml') }} + - name: build + uses: ./.github/actions/build + with: + generator: ${{ matrix.generator }} + configuration: ${{ matrix.configuration }} + - name: test + run: | + ${build_dir}/rippled --unittest diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index 8ccb1f52733..28d76231d1f 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -3,52 +3,35 @@ on: [push, pull_request] jobs: - test: + dependencies: strategy: fail-fast: false matrix: platform: - - ubuntu-latest - - macos-12 - generator: - - Ninja + - linux + compiler: + - gcc + - clang configuration: + - Debug - Release - runs-on: ${{ matrix.platform }} - env: - build_dir: .build + include: + - compiler: gcc + profile: + version: 11 + cc: /usr/bin/gcc + cxx: /usr/bin/g++ + - compiler: clang + profile: + version: 14 + cc: /usr/bin/clang-14 + cxx: /usr/bin/clang++-14 + runs-on: [self-hosted, heavy] + container: thejohnfreeman/rippled-build-ubuntu:12e19cd9034b steps: - - name: checkout - uses: actions/checkout@v3 - - name: install Ninja on Linux - if: matrix.generator == 'Ninja' && runner.os == 'Linux' - run: sudo apt install ninja-build - - name: install Ninja on OSX - if: matrix.generator == 'Ninja' && runner.os == 'macOS' - run: brew install ninja - - name: install nproc on OSX - if: runner.os == 'macOS' - run: brew install coreutils - - name: choose Python - uses: actions/setup-python@v3 - with: - python-version: 3.9 - - name: learn Python cache directory - id: pip-cache - run: | - sudo pip install --upgrade pip - echo "::set-output name=dir::$(pip cache dir)" - - name: restore Python cache directory - uses: actions/cache@v2 - with: - path: ${{ 
steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-${{ hashFiles('.github/workflows/nix.yml') }} - - name: install Conan - run: pip install wheel 'conan~=1.52' - name: check environment run: | echo ${PATH} | tr ':' '\n' - python --version conan --version cmake --version env @@ -56,41 +39,76 @@ jobs: run: | conan profile new default --detect conan profile update settings.compiler.cppstd=20 default - - name: configure Conan on Linux - if: runner.os == 'Linux' - run: | + conan profile update settings.compiler=${{ matrix.compiler }} default + conan profile update settings.compiler.version=${{ matrix.profile.version }} default conan profile update settings.compiler.libcxx=libstdc++11 default - - name: learn Conan cache directory - id: conan-cache + conan profile update env.CC=${{ matrix.profile.cc }} default + conan profile update env.CXX=${{ matrix.profile.cxx }} default + conan profile update conf.tools.build:compiler_executables='{"c": "${{ matrix.profile.cc }}", "cpp": "${{ matrix.profile.cxx }}"}' default + - name: checkout + uses: actions/checkout@v3 + - name: build dependencies run: | - echo "::set-output name=dir::$(conan config get storage.path)" - - name: restore Conan cache directory - uses: actions/cache@v2 + mkdir .build + cd .build + conan install \ + --output-folder . \ + --build missing \ + --settings build_type=${{ matrix.configuration }} \ + .. + - name: archive cache + run: tar -czf conan.tar -C ~/.conan . + - name: upload cache + uses: actions/upload-artifact@v3 with: - path: ${{ steps.conan-cache.outputs.dir }} - key: ${{ hashFiles('~/.conan/profiles/default', 'conanfile.py', 'external/rocksdb/*', '.github/workflows/nix.yml') }} - - name: export Snappy - run: conan export external/snappy snappy/1.1.9@ - - name: install dependencies + name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }} + path: conan.tar + + + test: + strategy: + fail-fast: false + matrix: + platform: + - linux + compiler: + - gcc + - clang + configuration: + - Debug + - Release + cmake-args: + - + - "-Dunity=ON" + needs: dependencies + runs-on: [self-hosted, heavy] + container: thejohnfreeman/rippled-build-ubuntu:12e19cd9034b + env: + build_dir: .build + steps: + - name: download cache + uses: actions/download-artifact@v3 + with: + name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }} + - name: extract cache run: | - mkdir ${build_dir} - cd ${build_dir} - conan install .. --build missing --settings build_type=${{ matrix.configuration }} - - name: configure + mkdir -p ~/.conan + tar -xzf conan.tar -C ~/.conan + - name: check environment run: | - cd ${build_dir} - cmake \ - -G ${{ matrix.generator }} \ - -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \ - -DCMAKE_BUILD_TYPE=${{ matrix.configuration }} \ - -Dassert=ON \ - -Dcoverage=OFF \ - -Dreporting=OFF \ - -Dunity=OFF \ - .. 
+ echo ${PATH} | tr ':' '\n' + conan --version + cmake --version + env + ls ~/.conan + - name: checkout + uses: actions/checkout@v3 - name: build - run: | - cmake --build ${build_dir} --target rippled --parallel $(nproc) + uses: ./.github/actions/build + with: + generator: Ninja + configuration: ${{ matrix.configuration }} + cmake-args: ${{ matrix.cmake-args }} - name: test run: | ${build_dir}/rippled --unittest --unittest-jobs $(nproc) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 51fdc5d27cb..b3a069a0eb5 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -39,14 +39,14 @@ jobs: id: pip-cache run: | pip install --upgrade pip - echo "::set-output name=dir::$(pip cache dir)" + echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT - name: restore Python cache directory uses: actions/cache@v2 with: path: ${{ steps.pip-cache.outputs.dir }} key: ${{ runner.os }}-${{ hashFiles('.github/workflows/windows.yml') }} - name: install Conan - run: pip install wheel 'conan~=1.52' + run: pip install wheel 'conan<2' - name: check environment run: | $env:PATH -split ';' @@ -63,7 +63,7 @@ jobs: - name: learn Conan cache directory id: conan-cache run: | - echo "::set-output name=dir::$(conan config get storage.path)" + echo "dir=$(conan config get storage.path)" >> $GITHUB_OUTPUT - name: restore Conan cache directory uses: actions/cache@v2 with: From b21a05d465961d50bee7f7858aa2b6ed063c0315 Mon Sep 17 00:00:00 2001 From: solmsted Date: Tue, 25 Apr 2023 17:11:08 -0400 Subject: [PATCH 156/508] Fix typo (#4508) --- SECURITY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SECURITY.md b/SECURITY.md index c60ef8c75cf..5db252247d7 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -68,7 +68,7 @@ This program allows us to recognize and reward individuals or groups that identi 2. **Relevant**. A security issue, posing a danger to user funds, privacy or the operation of the XRP Ledger. 3. **Original and previously unknown**. Bugs that are already known and discussed in public do not qualify. Previously reported bugs, even if publicly unknown, are not eligible. 4. **Specific**. We welcome general security advice or recommendations, but we cannot pay bounties for that. -5. **Fixable**. There has to be something we can do to permanently fix the problem. Note that bugs in other people’s software may still qualify in some cases. For example, if you find a bug in a library that we use which can compromises the security of software that is in scope and we can get it fixed, you may qualify for a bounty. +5. **Fixable**. There has to be something we can do to permanently fix the problem. Note that bugs in other people’s software may still qualify in some cases. For example, if you find a bug in a library that we use which can compromise the security of software that is in scope and we can get it fixed, you may qualify for a bounty. 6. **Unused**. If you use the exploit to attack the XRP Ledger, you do not qualify for a bounty. If you report a vulnerability used in an ongoing or past attack and there is specific, concrete evidence that suggests you are the attacker we reserve the right not to pay a bounty. The amount paid varies dramatically. Vulnerabilities that are harmless on their own, but could form part of a critical exploit will usually receive a bounty. Full-blown exploits can receive much higher bounties. Please don’t hold back partial vulnerabilities while trying to construct a full-blown exploit. 
We will pay a bounty to anyone who reports a complete chain of vulnerabilities even if they have reported each component of the exploit separately and those vulnerabilities have been fixed in the meantime. However, to qualify for the full bounty, you must have been the first to report each of the partial exploits. From c7ef4c978345bacf3894545e7c9d09e9f8a7b0bb Mon Sep 17 00:00:00 2001 From: John Freeman Date: Wed, 26 Apr 2023 00:24:41 -0500 Subject: [PATCH 157/508] Add patched recipe for SOCI: (#4510) SOCI is the C++ database access library. The SOCI recipe was updated in Conan Center Index (CCI), and it breaks for our choice of options. This breakage occurs when you build with a fresh Conan cache (e.g. when you submit a PR, or delete `~/.conan/data`). * Add a custom Conan recipe for SOCI v4.0.3 * Update dependency building to handle exporting and installing Snappy and SOCI * Fix workflows to use custom SOCI recipe * Update BUILD.md to include an instruction for exporting the SOCI Conan recipe: * `conan export external/soci soci/4.0.3@` This solution has been verified on Ubuntu 20.04 and macOS. Context: * There is a compiler error that the `sqlite3.h` header is not available when building soci. * When package B depends on package A, it finds the pieces it needs by importing the Package Configuration File (PCF) that Conan generates for package A. * Read the CMake written by package B to check that it is importing the PCF correctly and linking its exports correctly. * Since this can be difficult, it is often more efficient to check https://github.com/conan-io/conan-center-index/issues for package B to see if anyone else has seen a similar problem. * One of the issues points to a problem area in soci's CMake. To confirm the diagnosis, review soci's CMake (after any patches are applied) in the Conan build directory `build/$buildId/src/`. * Review the Conan-generated PCF in `build/$buildId/build/$buildType/generators/`. * In this case, the problem was likely (re)introduced by https://github.com/conan-io/conan-center-index/pull/17026 * If there is a problem in the source or in the Conan recipe, the fastest fix is to copy the recipe and either: * Add a source patch to fix any problems in the source. * Change the recipe to fix any problems in the recipe. * In this case, this can be done by finding soci's Conan recipe at https://github.com/conan-io/conan-center-index/tree/master/recipes/soci and then copying the `all` directory as `external/$packageName` in our project. Then, make any changes. * Test packages can be removed from the recipe folder as they are not needed. * If adding a patch in the `patches` directory, add a description for it to `conandata.yml`. * Since `conanfile.py` has no `version` property on the recipe class, builders need to pass a version on the command line (like they do for our `snappy` recipe). * Add an example command to `BUILD.md`. Future work: It may make sense to refer to recipes by revision, by checking in a lockfile.
--- .github/actions/build/action.yml | 17 +- .github/actions/dependencies/action.yml | 23 ++ .github/workflows/nix.yml | 15 +- .github/workflows/windows.yml | 6 +- BUILD.md | 7 + external/soci/conandata.yml | 12 + external/soci/conanfile.py | 212 ++++++++++++++++++ ...-INSTALL_NAME_DIR-for-relocatable-li.patch | 39 ++++ .../soci/patches/0002-Fix-soci_backend.patch | 24 ++ 9 files changed, 331 insertions(+), 24 deletions(-) create mode 100644 .github/actions/dependencies/action.yml create mode 100644 external/soci/conandata.yml create mode 100644 external/soci/conanfile.py create mode 100644 external/soci/patches/0001-Remove-hardcoded-INSTALL_NAME_DIR-for-relocatable-li.patch create mode 100644 external/soci/patches/0002-Fix-soci_backend.patch diff --git a/.github/actions/build/action.yml b/.github/actions/build/action.yml index e879fdbb5fd..2c4135e0cf1 100644 --- a/.github/actions/build/action.yml +++ b/.github/actions/build/action.yml @@ -10,19 +10,10 @@ inputs: runs: using: composite steps: - - name: export custom recipes - shell: bash - run: conan export external/snappy snappy/1.1.9@ - - name: install dependencies - shell: bash - run: | - mkdir ${build_dir} - cd ${build_dir} - conan install \ - --output-folder . \ - --build missing \ - --settings build_type=${{ inputs.configuration }} \ - .. + - name: dependencies + uses: ./.github/actions/dependencies + with: + configuration: ${{ inputs.configuration }} - name: configure shell: bash run: | diff --git a/.github/actions/dependencies/action.yml b/.github/actions/dependencies/action.yml new file mode 100644 index 00000000000..af570f21b82 --- /dev/null +++ b/.github/actions/dependencies/action.yml @@ -0,0 +1,23 @@ +name: dependencies +inputs: + configuration: + required: true +# An implicit input is the environment variable `build_dir`. +runs: + using: composite + steps: + - name: export custom recipes + shell: bash + run: | + conan export external/snappy snappy/1.1.9@ + conan export external/soci soci/4.0.3@ + - name: install dependencies + shell: bash + run: | + mkdir ${build_dir} + cd ${build_dir} + conan install \ + --output-folder . \ + --build missing \ + --settings build_type=${{ inputs.configuration }} \ + .. diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index 28d76231d1f..53c75a9f6fd 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -28,6 +28,8 @@ jobs: cxx: /usr/bin/clang++-14 runs-on: [self-hosted, heavy] container: thejohnfreeman/rippled-build-ubuntu:12e19cd9034b + env: + build_dir: .build steps: - name: check environment run: | @@ -47,15 +49,10 @@ jobs: conan profile update conf.tools.build:compiler_executables='{"c": "${{ matrix.profile.cc }}", "cpp": "${{ matrix.profile.cxx }}"}' default - name: checkout uses: actions/checkout@v3 - - name: build dependencies - run: | - mkdir .build - cd .build - conan install \ - --output-folder . \ - --build missing \ - --settings build_type=${{ matrix.configuration }} \ - .. + - name: dependencies + uses: ./.github/actions/dependencies + with: + configuration: ${{ matrix.configuration }} - name: archive cache run: tar -czf conan.tar -C ~/.conan . 
- name: upload cache diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index b3a069a0eb5..0f918a50959 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -69,8 +69,10 @@ jobs: with: path: ${{ steps.conan-cache.outputs.dir }} key: ${{ hashFiles('~/.conan/profiles/default', 'conanfile.py', 'external/rocksdb/*', '.github/workflows/windows.yml') }} - - name: export Snappy - run: conan export external/snappy snappy/1.1.9@ + - name: export custom recipes + run: | + conan export external/snappy snappy/1.1.9@ + conan export external/soci soci/4.0.3@ - name: install dependencies run: | mkdir $env:build_dir diff --git a/BUILD.md b/BUILD.md index cafde96b6a6..741ce2ba61d 100644 --- a/BUILD.md +++ b/BUILD.md @@ -127,6 +127,13 @@ which allows you to statically link it with GCC, if you want. conan export external/snappy snappy/1.1.9@ ``` +Export our [Conan recipe for SOCI](./external/soci). +It patches their CMake to correctly import its dependencies. + +``` +conan export external/soci soci/4.0.3@ +``` + ## How to build and test Let's start with a couple of examples of common workflows. diff --git a/external/soci/conandata.yml b/external/soci/conandata.yml new file mode 100644 index 00000000000..6eb59aaffa2 --- /dev/null +++ b/external/soci/conandata.yml @@ -0,0 +1,12 @@ +sources: + "4.0.3": + url: "https://github.com/SOCI/soci/archive/v4.0.3.tar.gz" + sha256: "4b1ff9c8545c5d802fbe06ee6cd2886630e5c03bf740e269bb625b45cf934928" +patches: + "4.0.3": + - patch_file: "patches/0001-Remove-hardcoded-INSTALL_NAME_DIR-for-relocatable-li.patch" + patch_description: "Generate relocatable libraries on MacOS" + patch_type: "portability" + - patch_file: "patches/0002-Fix-soci_backend.patch" + patch_description: "Fix variable names for dependencies" + patch_type: "conan" diff --git a/external/soci/conanfile.py b/external/soci/conanfile.py new file mode 100644 index 00000000000..67c572d5ad8 --- /dev/null +++ b/external/soci/conanfile.py @@ -0,0 +1,212 @@ +from conan import ConanFile +from conan.tools.build import check_min_cppstd +from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout +from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir +from conan.tools.microsoft import is_msvc +from conan.tools.scm import Version +from conan.errors import ConanInvalidConfiguration +import os + +required_conan_version = ">=1.55.0" + + +class SociConan(ConanFile): + name = "soci" + homepage = "https://github.com/SOCI/soci" + url = "https://github.com/conan-io/conan-center-index" + description = "The C++ Database Access Library " + topics = ("mysql", "odbc", "postgresql", "sqlite3") + license = "BSL-1.0" + + settings = "os", "arch", "compiler", "build_type" + options = { + "shared": [True, False], + "fPIC": [True, False], + "empty": [True, False], + "with_sqlite3": [True, False], + "with_db2": [True, False], + "with_odbc": [True, False], + "with_oracle": [True, False], + "with_firebird": [True, False], + "with_mysql": [True, False], + "with_postgresql": [True, False], + "with_boost": [True, False], + } + default_options = { + "shared": False, + "fPIC": True, + "empty": False, + "with_sqlite3": False, + "with_db2": False, + "with_odbc": False, + "with_oracle": False, + "with_firebird": False, + "with_mysql": False, + "with_postgresql": False, + "with_boost": False, + } + + def export_sources(self): + export_conandata_patches(self) + + def layout(self): + cmake_layout(self, src_folder="src") + + def 
config_options(self): + if self.settings.os == "Windows": + self.options.rm_safe("fPIC") + + def configure(self): + if self.options.shared: + self.options.rm_safe("fPIC") + + def requirements(self): + if self.options.with_sqlite3: + self.requires("sqlite3/3.41.1") + if self.options.with_odbc and self.settings.os != "Windows": + self.requires("odbc/2.3.11") + if self.options.with_mysql: + self.requires("libmysqlclient/8.0.31") + if self.options.with_postgresql: + self.requires("libpq/14.7") + if self.options.with_boost: + self.requires("boost/1.81.0") + + @property + def _minimum_compilers_version(self): + return { + "Visual Studio": "14", + "gcc": "4.8", + "clang": "3.8", + "apple-clang": "8.0" + } + + def validate(self): + if self.settings.compiler.get_safe("cppstd"): + check_min_cppstd(self, 11) + + compiler = str(self.settings.compiler) + compiler_version = Version(self.settings.compiler.version.value) + if compiler not in self._minimum_compilers_version: + self.output.warning("{} recipe lacks information about the {} compiler support.".format(self.name, self.settings.compiler)) + elif compiler_version < self._minimum_compilers_version[compiler]: + raise ConanInvalidConfiguration("{} requires a {} version >= {}".format(self.name, compiler, compiler_version)) + + prefix = "Dependencies for" + message = "not configured in this conan package." + if self.options.with_db2: + # self.requires("db2/0.0.0") # TODO add support for db2 + raise ConanInvalidConfiguration("{} DB2 {} ".format(prefix, message)) + if self.options.with_oracle: + # self.requires("oracle_db/0.0.0") # TODO add support for oracle + raise ConanInvalidConfiguration("{} ORACLE {} ".format(prefix, message)) + if self.options.with_firebird: + # self.requires("firebird/0.0.0") # TODO add support for firebird + raise ConanInvalidConfiguration("{} firebird {} ".format(prefix, message)) + + def source(self): + get(self, **self.conan_data["sources"][self.version], strip_root=True) + + def generate(self): + tc = CMakeToolchain(self) + + tc.variables["SOCI_SHARED"] = self.options.shared + tc.variables["SOCI_STATIC"] = not self.options.shared + tc.variables["SOCI_TESTS"] = False + tc.variables["SOCI_CXX11"] = True + tc.variables["SOCI_EMPTY"] = self.options.empty + tc.variables["WITH_SQLITE3"] = self.options.with_sqlite3 + tc.variables["WITH_DB2"] = self.options.with_db2 + tc.variables["WITH_ODBC"] = self.options.with_odbc + tc.variables["WITH_ORACLE"] = self.options.with_oracle + tc.variables["WITH_FIREBIRD"] = self.options.with_firebird + tc.variables["WITH_MYSQL"] = self.options.with_mysql + tc.variables["WITH_POSTGRESQL"] = self.options.with_postgresql + tc.variables["WITH_BOOST"] = self.options.with_boost + tc.generate() + + deps = CMakeDeps(self) + deps.generate() + + def build(self): + apply_conandata_patches(self) + cmake = CMake(self) + cmake.configure() + cmake.build() + + def package(self): + copy(self, "LICENSE_1_0.txt", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder) + + cmake = CMake(self) + cmake.install() + + rmdir(self, os.path.join(self.package_folder, "lib", "cmake")) + + def package_info(self): + self.cpp_info.set_property("cmake_file_name", "SOCI") + + target_suffix = "" if self.options.shared else "_static" + lib_prefix = "lib" if is_msvc(self) and not self.options.shared else "" + version = Version(self.version) + lib_suffix = "_{}_{}".format(version.major, version.minor) if self.settings.os == "Windows" else "" + + # soci_core + 
self.cpp_info.components["soci_core"].set_property("cmake_target_name", "SOCI::soci_core{}".format(target_suffix)) + self.cpp_info.components["soci_core"].libs = ["{}soci_core{}".format(lib_prefix, lib_suffix)] + if self.options.with_boost: + self.cpp_info.components["soci_core"].requires.append("boost::boost") + + # soci_empty + if self.options.empty: + self.cpp_info.components["soci_empty"].set_property("cmake_target_name", "SOCI::soci_empty{}".format(target_suffix)) + self.cpp_info.components["soci_empty"].libs = ["{}soci_empty{}".format(lib_prefix, lib_suffix)] + self.cpp_info.components["soci_empty"].requires = ["soci_core"] + + # soci_sqlite3 + if self.options.with_sqlite3: + self.cpp_info.components["soci_sqlite3"].set_property("cmake_target_name", "SOCI::soci_sqlite3{}".format(target_suffix)) + self.cpp_info.components["soci_sqlite3"].libs = ["{}soci_sqlite3{}".format(lib_prefix, lib_suffix)] + self.cpp_info.components["soci_sqlite3"].requires = ["soci_core", "sqlite3::sqlite3"] + + # soci_odbc + if self.options.with_odbc: + self.cpp_info.components["soci_odbc"].set_property("cmake_target_name", "SOCI::soci_odbc{}".format(target_suffix)) + self.cpp_info.components["soci_odbc"].libs = ["{}soci_odbc{}".format(lib_prefix, lib_suffix)] + self.cpp_info.components["soci_odbc"].requires = ["soci_core"] + if self.settings.os == "Windows": + self.cpp_info.components["soci_odbc"].system_libs.append("odbc32") + else: + self.cpp_info.components["soci_odbc"].requires.append("odbc::odbc") + + # soci_mysql + if self.options.with_mysql: + self.cpp_info.components["soci_mysql"].set_property("cmake_target_name", "SOCI::soci_mysql{}".format(target_suffix)) + self.cpp_info.components["soci_mysql"].libs = ["{}soci_mysql{}".format(lib_prefix, lib_suffix)] + self.cpp_info.components["soci_mysql"].requires = ["soci_core", "libmysqlclient::libmysqlclient"] + + # soci_postgresql + if self.options.with_postgresql: + self.cpp_info.components["soci_postgresql"].set_property("cmake_target_name", "SOCI::soci_postgresql{}".format(target_suffix)) + self.cpp_info.components["soci_postgresql"].libs = ["{}soci_postgresql{}".format(lib_prefix, lib_suffix)] + self.cpp_info.components["soci_postgresql"].requires = ["soci_core", "libpq::libpq"] + + # TODO: to remove in conan v2 once cmake_find_package* generators removed + self.cpp_info.names["cmake_find_package"] = "SOCI" + self.cpp_info.names["cmake_find_package_multi"] = "SOCI" + self.cpp_info.components["soci_core"].names["cmake_find_package"] = "soci_core{}".format(target_suffix) + self.cpp_info.components["soci_core"].names["cmake_find_package_multi"] = "soci_core{}".format(target_suffix) + if self.options.empty: + self.cpp_info.components["soci_empty"].names["cmake_find_package"] = "soci_empty{}".format(target_suffix) + self.cpp_info.components["soci_empty"].names["cmake_find_package_multi"] = "soci_empty{}".format(target_suffix) + if self.options.with_sqlite3: + self.cpp_info.components["soci_sqlite3"].names["cmake_find_package"] = "soci_sqlite3{}".format(target_suffix) + self.cpp_info.components["soci_sqlite3"].names["cmake_find_package_multi"] = "soci_sqlite3{}".format(target_suffix) + if self.options.with_odbc: + self.cpp_info.components["soci_odbc"].names["cmake_find_package"] = "soci_odbc{}".format(target_suffix) + self.cpp_info.components["soci_odbc"].names["cmake_find_package_multi"] = "soci_odbc{}".format(target_suffix) + if self.options.with_mysql: + self.cpp_info.components["soci_mysql"].names["cmake_find_package"] = "soci_mysql{}".format(target_suffix) 
+ self.cpp_info.components["soci_mysql"].names["cmake_find_package_multi"] = "soci_mysql{}".format(target_suffix) + if self.options.with_postgresql: + self.cpp_info.components["soci_postgresql"].names["cmake_find_package"] = "soci_postgresql{}".format(target_suffix) + self.cpp_info.components["soci_postgresql"].names["cmake_find_package_multi"] = "soci_postgresql{}".format(target_suffix) diff --git a/external/soci/patches/0001-Remove-hardcoded-INSTALL_NAME_DIR-for-relocatable-li.patch b/external/soci/patches/0001-Remove-hardcoded-INSTALL_NAME_DIR-for-relocatable-li.patch new file mode 100644 index 00000000000..5de0027f750 --- /dev/null +++ b/external/soci/patches/0001-Remove-hardcoded-INSTALL_NAME_DIR-for-relocatable-li.patch @@ -0,0 +1,39 @@ +From d491bf7b5040d314ffd0c6310ba01f78ff44c85e Mon Sep 17 00:00:00 2001 +From: Rasmus Thomsen +Date: Fri, 14 Apr 2023 09:16:29 +0200 +Subject: [PATCH] Remove hardcoded INSTALL_NAME_DIR for relocatable libraries + on MacOS + +--- + cmake/SociBackend.cmake | 2 +- + src/core/CMakeLists.txt | 1 - + 2 files changed, 1 insertion(+), 2 deletions(-) + +diff --git a/cmake/SociBackend.cmake b/cmake/SociBackend.cmake +index 5d4ef0df..39fe1f77 100644 +--- a/cmake/SociBackend.cmake ++++ b/cmake/SociBackend.cmake +@@ -171,7 +171,7 @@ macro(soci_backend NAME) + set_target_properties(${THIS_BACKEND_TARGET} + PROPERTIES + SOVERSION ${${PROJECT_NAME}_SOVERSION} +- INSTALL_NAME_DIR ${CMAKE_INSTALL_PREFIX}/lib) ++ ) + + if(APPLE) + set_target_properties(${THIS_BACKEND_TARGET} +diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt +index 3e7deeae..f9eae564 100644 +--- a/src/core/CMakeLists.txt ++++ b/src/core/CMakeLists.txt +@@ -59,7 +59,6 @@ if (SOCI_SHARED) + PROPERTIES + VERSION ${SOCI_VERSION} + SOVERSION ${SOCI_SOVERSION} +- INSTALL_NAME_DIR ${CMAKE_INSTALL_PREFIX}/lib + CLEAN_DIRECT_OUTPUT 1) + endif() + +-- +2.25.1 + diff --git a/external/soci/patches/0002-Fix-soci_backend.patch b/external/soci/patches/0002-Fix-soci_backend.patch new file mode 100644 index 00000000000..eab3c3763c0 --- /dev/null +++ b/external/soci/patches/0002-Fix-soci_backend.patch @@ -0,0 +1,24 @@ +diff --git a/cmake/SociBackend.cmake b/cmake/SociBackend.cmake +index 0a664667..3fa2ed95 100644 +--- a/cmake/SociBackend.cmake ++++ b/cmake/SociBackend.cmake +@@ -31,14 +31,13 @@ macro(soci_backend_deps_found NAME DEPS SUCCESS) + if(NOT DEPEND_FOUND) + list(APPEND DEPS_NOT_FOUND ${dep}) + else() +- string(TOUPPER "${dep}" DEPU) +- if( ${DEPU}_INCLUDE_DIR ) +- list(APPEND DEPS_INCLUDE_DIRS ${${DEPU}_INCLUDE_DIR}) ++ if( ${dep}_INCLUDE_DIR ) ++ list(APPEND DEPS_INCLUDE_DIRS ${${dep}_INCLUDE_DIR}) + endif() +- if( ${DEPU}_INCLUDE_DIRS ) +- list(APPEND DEPS_INCLUDE_DIRS ${${DEPU}_INCLUDE_DIRS}) ++ if( ${dep}_INCLUDE_DIRS ) ++ list(APPEND DEPS_INCLUDE_DIRS ${${dep}_INCLUDE_DIRS}) + endif() +- list(APPEND DEPS_LIBRARIES ${${DEPU}_LIBRARIES}) ++ list(APPEND DEPS_LIBRARIES ${${dep}_LIBRARIES}) + endif() + endforeach() + From 67238b9fa66c5f99fe2a969b2a8851a4bca2c74f Mon Sep 17 00:00:00 2001 From: David Fuelling Date: Thu, 27 Apr 2023 11:18:59 -0600 Subject: [PATCH 158/508] Update environment.md build doc to install `lzma`: (#4498) On macOS, if you have not installed something that depends on `xz`, then your system may lack `lzma`, resulting in a build error similar to: ``` Downloading libarchive-3.6.0.tar.xz completed [6250.61k] libarchive/3.6.0: ERROR: libarchive/3.6.0: Error in source() method, line 120 get(self, **self.conan_data["sources"][self.version], strip_root=True) ReadError: file could not 
be opened successfully: - method gz: ReadError('not a gzip file') - method bz2: ReadError('not a bzip2 file') - method xz: CompressionError('lzma module is not available') - method tar: ReadError('invalid header') ``` The solution is to ensure that `lzma` is installed by installing `xz`. --- docs/build/environment.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/build/environment.md b/docs/build/environment.md index 8e6ba45aacc..a204cd2c197 100644 --- a/docs/build/environment.md +++ b/docs/build/environment.md @@ -69,6 +69,7 @@ and use it to install Conan: ``` /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" brew update +brew install xz brew install pyenv pyenv install 3.10-dev pyenv global 3.10-dev From 78076a69038be72102bcf90076f69f79ce78987c Mon Sep 17 00:00:00 2001 From: drlongle Date: Wed, 17 May 2023 02:22:10 +0200 Subject: [PATCH 159/508] fix!: Prevent API from accepting seed or public key for account (#4404) The API would allow seeds (and public keys) to be used in place of accounts at several locations in the API. For example, when calling account_info, you could pass `"account": "foo"`. The string "foo" is treated like a seed, so the method returns `actNotFound` (instead of `actMalformed`, as most developers would expect). In the early days, this was a convenience to make testing easier. However, it allows for poor security practices, so it is no longer a good idea. Allowing a secret or passphrase is now considered a bug. Previously, it was controlled by the `strict` option on some methods. With this commit, since the API does not interpret `account` as `seed`, the option `strict` is no longer needed and is removed. Removing this behavior from the API is a [breaking change](https://xrpl.org/request-formatting.html#breaking-changes). One could argue that it shouldn't be done without bumping the API version; however, in this instance, there is no evidence that anyone is using the API in the "legacy" way. Furthermore, it is a potential security hole, as it allows users to send secrets to places where they are not needed, where they could end up in logs, error messages, etc. There's no reason to take such a risk with a seed/secret, since only the public address is needed. 
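As a rough standalone sketch of the stricter behavior described above (assumptions: `parseClassicAddress` is a simplified stand-in for rippled's `parseBase58<AccountID>`, and the shape check is illustrative only, not the real base58/checksum validation):

```cpp
#include <iostream>
#include <optional>
#include <string>

using AccountID = std::string;  // placeholder for rippled's AccountID type

// Stand-in for parseBase58<AccountID>: accept only something shaped like a
// classic address; anything else (a seed, a passphrase, random text) fails.
std::optional<AccountID>
parseClassicAddress(std::string const& s)
{
    if (s.size() < 25 || s.size() > 35 || s.front() != 'r')
        return std::nullopt;
    return s;
}

int
main()
{
    for (auto const& input :
         {std::string{"rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"},
          std::string{"foo"}})  // "foo" used to be interpreted as a seed
    {
        if (auto const id = parseClassicAddress(input))
            std::cout << input << ": account ok\n";
        else
            std::cout << input << ": actMalformed\n";  // handlers now reject up front
    }
    return 0;
}
```

With this pattern, a request such as `"account": "foo"` is rejected as `actMalformed` before any ledger lookup happens, instead of being quietly interpreted as a seed and reported as `actNotFound`.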
Resolves: #3329, #3330, #4337 BREAKING CHANGE: Remove non-strict account parsing (#3330) --- src/ripple/app/main/Main.cpp | 10 +- src/ripple/net/impl/RPCCall.cpp | 26 +-- src/ripple/rpc/handlers/AccountChannels.cpp | 34 ++- .../rpc/handlers/AccountCurrenciesHandler.cpp | 13 +- src/ripple/rpc/handlers/AccountInfo.cpp | 18 +- src/ripple/rpc/handlers/AccountLines.cpp | 53 ++--- src/ripple/rpc/handlers/AccountObjects.cpp | 30 +-- src/ripple/rpc/handlers/AccountOffers.cpp | 13 +- src/ripple/rpc/handlers/DepositAuthorized.cpp | 24 +- src/ripple/rpc/handlers/GatewayBalances.cpp | 28 +-- src/ripple/rpc/handlers/NoRippleCheck.cpp | 14 +- src/ripple/rpc/handlers/OwnerInfo.cpp | 20 +- src/test/rpc/AccountCurrencies_test.cpp | 15 +- src/test/rpc/AccountInfo_test.cpp | 17 +- src/test/rpc/AccountLinesRPC_test.cpp | 8 +- src/test/rpc/AccountObjects_test.cpp | 2 +- src/test/rpc/AccountOffers_test.cpp | 8 +- src/test/rpc/NoRippleCheck_test.cpp | 4 +- src/test/rpc/OwnerInfo_test.cpp | 10 +- src/test/rpc/RPCCall_test.cpp | 207 +++++------------- 20 files changed, 203 insertions(+), 351 deletions(-) diff --git a/src/ripple/app/main/Main.cpp b/src/ripple/app/main/Main.cpp index f25b83fd51f..6693ac0f7bb 100644 --- a/src/ripple/app/main/Main.cpp +++ b/src/ripple/app/main/Main.cpp @@ -125,14 +125,12 @@ printHelp(const po::options_description& desc) << systemName() << "d [options] \n" << desc << std::endl << "Commands: \n" - " account_currencies [] [strict]\n" - " account_info ||| [] " - "[strict]\n" + " account_currencies []\n" + " account_info | []\n" " account_lines |\"\" []\n" " account_channels |\"\" []\n" - " account_objects [] [strict]\n" - " account_offers | [] " - "[strict]\n" + " account_objects []\n" + " account_offers | []\n" " account_tx accountID [ledger_index_min [ledger_index_max " "[limit " "]]] [binary]\n" diff --git a/src/ripple/net/impl/RPCCall.cpp b/src/ripple/net/impl/RPCCall.cpp index b5a167f76b0..26e56b690fa 100644 --- a/src/ripple/net/impl/RPCCall.cpp +++ b/src/ripple/net/impl/RPCCall.cpp @@ -775,11 +775,9 @@ class RPCParser return jvRequest; } - // owner_info | [strict] - // owner_info || [] [strict] - // account_info | [strict] - // account_info || [] [strict] - // account_offers | [] [strict] + // owner_info + // account_info [] + // account_offers [] Json::Value parseAccountItems(Json::Value const& jvParams) { @@ -895,10 +893,7 @@ class RPCParser // Parameters 0 and 1 are accounts if (i < 2) { - if (parseBase58( - TokenType::AccountPublic, strParam) || - parseBase58(strParam) || - parseGenericSeed(strParam)) + if (parseBase58(strParam)) { jvRequest[accFields[i]] = std::move(strParam); } @@ -924,16 +919,8 @@ class RPCParser { std::string strIdent = jvParams[0u].asString(); unsigned int iCursor = jvParams.size(); - bool bStrict = false; - if (iCursor >= 2 && jvParams[iCursor - 1] == jss::strict) - { - bStrict = true; - --iCursor; - } - - if (!parseBase58(TokenType::AccountPublic, strIdent) && - !parseBase58(strIdent) && !parseGenericSeed(strIdent)) + if (!parseBase58(strIdent)) return rpcError(rpcACT_MALFORMED); // Get info on account. 
@@ -941,9 +928,6 @@ class RPCParser jvRequest[jss::account] = strIdent; - if (bStrict) - jvRequest[jss::strict] = 1; - if (iCursor == 2 && !jvParseLedger(jvRequest, jvParams[1u].asString())) return rpcError(rpcLGR_IDX_MALFORMED); diff --git a/src/ripple/rpc/handlers/AccountChannels.cpp b/src/ripple/rpc/handlers/AccountChannels.cpp index 9e5c9ca2c46..8f39bef164d 100644 --- a/src/ripple/rpc/handlers/AccountChannels.cpp +++ b/src/ripple/rpc/handlers/AccountChannels.cpp @@ -58,7 +58,7 @@ addChannel(Json::Value& jsonLines, SLE const& line) } // { -// account: | +// account: // ledger_hash : // ledger_index : // limit: integer // optional @@ -76,11 +76,12 @@ doAccountChannels(RPC::JsonContext& context) if (!ledger) return result; - std::string strIdent(params[jss::account].asString()); - AccountID accountID; - - if (auto const err = RPC::accountFromString(accountID, strIdent)) - return err; + auto id = parseBase58(params[jss::account].asString()); + if (!id) + { + return rpcError(rpcACT_MALFORMED); + } + AccountID const accountID{std::move(id.value())}; if (!ledger->exists(keylet::account(accountID))) return rpcError(rpcACT_NOT_FOUND); @@ -88,14 +89,12 @@ doAccountChannels(RPC::JsonContext& context) std::string strDst; if (params.isMember(jss::destination_account)) strDst = params[jss::destination_account].asString(); - auto hasDst = !strDst.empty(); - AccountID raDstAccount; - if (hasDst) - { - if (auto const err = RPC::accountFromString(raDstAccount, strDst)) - return err; - } + auto const raDstAccount = [&]() -> std::optional { + return strDst.empty() ? std::nullopt : parseBase58(strDst); + }(); + if (!strDst.empty() && !raDstAccount) + return rpcError(rpcACT_MALFORMED); unsigned int limit; if (auto err = readLimitField(limit, RPC::Tuning::accountChannels, context)) @@ -109,10 +108,9 @@ doAccountChannels(RPC::JsonContext& context) { std::vector> items; AccountID const& accountID; - bool hasDst; - AccountID const& raDstAccount; + std::optional const& raDstAccount; }; - VisitData visitData = {{}, accountID, hasDst, raDstAccount}; + VisitData visitData = {{}, accountID, raDstAccount}; visitData.items.reserve(limit); uint256 startAfter = beast::zero; std::uint64_t startHint = 0; @@ -180,8 +178,8 @@ doAccountChannels(RPC::JsonContext& context) if (count <= limit && sleCur->getType() == ltPAYCHAN && (*sleCur)[sfAccount] == accountID && - (!visitData.hasDst || - visitData.raDstAccount == (*sleCur)[sfDestination])) + (!visitData.raDstAccount || + *visitData.raDstAccount == (*sleCur)[sfDestination])) { visitData.items.emplace_back(sleCur); } diff --git a/src/ripple/rpc/handlers/AccountCurrenciesHandler.cpp b/src/ripple/rpc/handlers/AccountCurrenciesHandler.cpp index d735e5976f1..64956a7d0ad 100644 --- a/src/ripple/rpc/handlers/AccountCurrenciesHandler.cpp +++ b/src/ripple/rpc/handlers/AccountCurrenciesHandler.cpp @@ -46,13 +46,14 @@ doAccountCurrencies(RPC::JsonContext& context) params.isMember(jss::account) ? params[jss::account].asString() : params[jss::ident].asString()); - bool const bStrict = - params.isMember(jss::strict) && params[jss::strict].asBool(); - // Get info on account. 
- AccountID accountID; // out param - if (auto jvAccepted = RPC::accountFromString(accountID, strIdent, bStrict)) - return jvAccepted; + auto id = parseBase58(strIdent); + if (!id) + { + RPC::inject_error(rpcACT_MALFORMED, result); + return result; + } + auto const accountID{std::move(id.value())}; if (!ledger->exists(keylet::account(accountID))) return rpcError(rpcACT_NOT_FOUND); diff --git a/src/ripple/rpc/handlers/AccountInfo.cpp b/src/ripple/rpc/handlers/AccountInfo.cpp index ef130ef185f..3af70324bcd 100644 --- a/src/ripple/rpc/handlers/AccountInfo.cpp +++ b/src/ripple/rpc/handlers/AccountInfo.cpp @@ -34,8 +34,6 @@ namespace ripple { // { // account: , -// strict: // optional (default false) -// // if true only allow public keys and addresses. // ledger_hash : // ledger_index : // signer_lists : // optional (default false) @@ -67,15 +65,14 @@ doAccountInfo(RPC::JsonContext& context) if (!ledger) return result; - bool bStrict = params.isMember(jss::strict) && params[jss::strict].asBool(); - AccountID accountID; - // Get info on account. - - auto jvAccepted = RPC::accountFromString(accountID, strIdent, bStrict); - - if (jvAccepted) - return jvAccepted; + auto id = parseBase58(strIdent); + if (!id) + { + RPC::inject_error(rpcACT_MALFORMED, result); + return result; + } + auto const accountID{std::move(id.value())}; static constexpr std:: array, 9> @@ -113,6 +110,7 @@ doAccountInfo(RPC::JsonContext& context) return result; } + Json::Value jvAccepted(Json::objectValue); RPC::injectSLE(jvAccepted, *sleAccepted); result[jss::account_data] = jvAccepted; diff --git a/src/ripple/rpc/handlers/AccountLines.cpp b/src/ripple/rpc/handlers/AccountLines.cpp index adba2acaa72..f30a5d4b0a7 100644 --- a/src/ripple/rpc/handlers/AccountLines.cpp +++ b/src/ripple/rpc/handlers/AccountLines.cpp @@ -30,17 +30,6 @@ namespace ripple { -struct VisitData -{ - std::vector items; - AccountID const& accountID; - bool hasPeer; - AccountID const& raPeerAccount; - - bool ignoreDefault; - uint32_t foundCount; -}; - void addLine(Json::Value& jsonLines, RPCTrustLine const& line) { @@ -76,7 +65,7 @@ addLine(Json::Value& jsonLines, RPCTrustLine const& line) } // { -// account: | +// account: // ledger_hash : // ledger_index : // limit: integer // optional @@ -96,15 +85,13 @@ doAccountLines(RPC::JsonContext& context) if (!ledger) return result; - std::string strIdent(params[jss::account].asString()); - AccountID accountID; - - if (auto jv = RPC::accountFromString(accountID, strIdent)) + auto id = parseBase58(params[jss::account].asString()); + if (!id) { - for (auto it = jv.begin(); it != jv.end(); ++it) - result[it.memberName()] = *it; + RPC::inject_error(rpcACT_MALFORMED, result); return result; } + auto const accountID{std::move(id.value())}; if (!ledger->exists(keylet::account(accountID))) return rpcError(rpcACT_NOT_FOUND); @@ -112,17 +99,14 @@ doAccountLines(RPC::JsonContext& context) std::string strPeer; if (params.isMember(jss::peer)) strPeer = params[jss::peer].asString(); - auto hasPeer = !strPeer.empty(); - AccountID raPeerAccount; - if (hasPeer) + auto const raPeerAccount = [&]() -> std::optional { + return strPeer.empty() ? 
std::nullopt : parseBase58(strPeer); + }(); + if (!strPeer.empty() && !raPeerAccount) { - if (auto jv = RPC::accountFromString(raPeerAccount, strPeer)) - { - for (auto it = jv.begin(); it != jv.end(); ++it) - result[it.memberName()] = *it; - return result; - } + RPC::inject_error(rpcACT_MALFORMED, result); + return result; } unsigned int limit; @@ -138,8 +122,15 @@ doAccountLines(RPC::JsonContext& context) params[jss::ignore_default].asBool(); Json::Value& jsonLines(result[jss::lines] = Json::arrayValue); - VisitData visitData = { - {}, accountID, hasPeer, raPeerAccount, ignoreDefault, 0}; + struct VisitData + { + std::vector items; + AccountID const& accountID; + std::optional const& raPeerAccount; + bool ignoreDefault; + uint32_t foundCount; + }; + VisitData visitData = {{}, accountID, raPeerAccount, ignoreDefault, 0}; uint256 startAfter = beast::zero; std::uint64_t startHint = 0; @@ -227,8 +218,8 @@ doAccountLines(RPC::JsonContext& context) RPCTrustLine::makeItem(visitData.accountID, sleCur); if (line && - (!visitData.hasPeer || - visitData.raPeerAccount == + (!visitData.raPeerAccount || + *visitData.raPeerAccount == line->getAccountIDPeer())) { visitData.items.emplace_back(*line); diff --git a/src/ripple/rpc/handlers/AccountObjects.cpp b/src/ripple/rpc/handlers/AccountObjects.cpp index 687b8717970..e8304c670de 100644 --- a/src/ripple/rpc/handlers/AccountObjects.cpp +++ b/src/ripple/rpc/handlers/AccountObjects.cpp @@ -39,7 +39,7 @@ namespace ripple { /** General RPC command that can retrieve objects in the account root. { - account: | + account: ledger_hash: // optional ledger_index: // optional type: // optional, defaults to all account objects types @@ -60,17 +60,13 @@ doAccountNFTs(RPC::JsonContext& context) if (ledger == nullptr) return result; - AccountID accountID; + auto id = parseBase58(params[jss::account].asString()); + if (!id) { - auto const strIdent = params[jss::account].asString(); - if (auto jv = RPC::accountFromString(accountID, strIdent)) - { - for (auto it = jv.begin(); it != jv.end(); ++it) - result[it.memberName()] = *it; - - return result; - } + RPC::inject_error(rpcACT_MALFORMED, result); + return result; } + auto const accountID{std::move(id.value())}; if (!ledger->exists(keylet::account(accountID))) return rpcError(rpcACT_NOT_FOUND); @@ -177,17 +173,13 @@ doAccountObjects(RPC::JsonContext& context) if (ledger == nullptr) return result; - AccountID accountID; + auto const id = parseBase58(params[jss::account].asString()); + if (!id) { - auto const strIdent = params[jss::account].asString(); - if (auto jv = RPC::accountFromString(accountID, strIdent)) - { - for (auto it = jv.begin(); it != jv.end(); ++it) - result[it.memberName()] = *it; - - return result; - } + RPC::inject_error(rpcACT_MALFORMED, result); + return result; } + auto const accountID{std::move(id.value())}; if (!ledger->exists(keylet::account(accountID))) return rpcError(rpcACT_NOT_FOUND); diff --git a/src/ripple/rpc/handlers/AccountOffers.cpp b/src/ripple/rpc/handlers/AccountOffers.cpp index 409d071fb02..1afd2732550 100644 --- a/src/ripple/rpc/handlers/AccountOffers.cpp +++ b/src/ripple/rpc/handlers/AccountOffers.cpp @@ -47,7 +47,7 @@ appendOfferJson(std::shared_ptr const& offer, Json::Value& offers) }; // { -// account: | +// account: // ledger_hash : // ledger_index : // limit: integer // optional @@ -65,16 +65,13 @@ doAccountOffers(RPC::JsonContext& context) if (!ledger) return result; - std::string strIdent(params[jss::account].asString()); - AccountID accountID; - - if (auto jv = 
RPC::accountFromString(accountID, strIdent)) + auto id = parseBase58(params[jss::account].asString()); + if (!id) { - for (auto it = jv.begin(); it != jv.end(); ++it) - result[it.memberName()] = (*it); - + RPC::inject_error(rpcACT_MALFORMED, result); return result; } + auto const accountID{std::move(id.value())}; // Get info on account. result[jss::account] = toBase58(accountID); diff --git a/src/ripple/rpc/handlers/DepositAuthorized.cpp b/src/ripple/rpc/handlers/DepositAuthorized.cpp index a74db924370..a5c9c9a21fe 100644 --- a/src/ripple/rpc/handlers/DepositAuthorized.cpp +++ b/src/ripple/rpc/handlers/DepositAuthorized.cpp @@ -18,6 +18,7 @@ //============================================================================== #include +#include #include #include #include @@ -46,13 +47,10 @@ doDepositAuthorized(RPC::JsonContext& context) rpcINVALID_PARAMS, RPC::expected_field_message(jss::source_account, "a string")); - AccountID srcAcct; - { - Json::Value const jvAccepted = RPC::accountFromString( - srcAcct, params[jss::source_account].asString(), true); - if (jvAccepted) - return jvAccepted; - } + auto srcID = parseBase58(params[jss::source_account].asString()); + if (!srcID) + return rpcError(rpcACT_MALFORMED); + auto const srcAcct{std::move(srcID.value())}; // Validate destination_account. if (!params.isMember(jss::destination_account)) @@ -62,13 +60,11 @@ doDepositAuthorized(RPC::JsonContext& context) rpcINVALID_PARAMS, RPC::expected_field_message(jss::destination_account, "a string")); - AccountID dstAcct; - { - Json::Value const jvAccepted = RPC::accountFromString( - dstAcct, params[jss::destination_account].asString(), true); - if (jvAccepted) - return jvAccepted; - } + auto dstID = + parseBase58(params[jss::destination_account].asString()); + if (!dstID) + return rpcError(rpcACT_MALFORMED); + auto const dstAcct{std::move(dstID.value())}; // Validate ledger. std::shared_ptr ledger; diff --git a/src/ripple/rpc/handlers/GatewayBalances.cpp b/src/ripple/rpc/handlers/GatewayBalances.cpp index 3a422c6e965..77cec496ed0 100644 --- a/src/ripple/rpc/handlers/GatewayBalances.cpp +++ b/src/ripple/rpc/handlers/GatewayBalances.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -68,16 +69,11 @@ doGatewayBalances(RPC::JsonContext& context) params.isMember(jss::account) ? params[jss::account].asString() : params[jss::ident].asString()); - bool const bStrict = - params.isMember(jss::strict) && params[jss::strict].asBool(); - // Get info on account. 
- AccountID accountID; - auto jvAccepted = RPC::accountFromString(accountID, strIdent, bStrict); - - if (jvAccepted) - return jvAccepted; - + auto id = parseBase58(strIdent); + if (!id) + return rpcError(rpcACT_MALFORMED); + auto const accountID{std::move(id.value())}; context.loadType = Resource::feeHighBurdenRPC; result[jss::account] = toBase58(accountID); @@ -90,19 +86,9 @@ doGatewayBalances(RPC::JsonContext& context) auto addHotWallet = [&hotWallets](Json::Value const& j) { if (j.isString()) { - auto const pk = parseBase58( - TokenType::AccountPublic, j.asString()); - if (pk) - { - hotWallets.insert(calcAccountID(*pk)); - return true; - } - - auto const id = parseBase58(j.asString()); - - if (id) + if (auto id = parseBase58(j.asString()); id) { - hotWallets.insert(*id); + hotWallets.insert(std::move(id.value())); return true; } } diff --git a/src/ripple/rpc/handlers/NoRippleCheck.cpp b/src/ripple/rpc/handlers/NoRippleCheck.cpp index 18156ea4247..20137c985c9 100644 --- a/src/ripple/rpc/handlers/NoRippleCheck.cpp +++ b/src/ripple/rpc/handlers/NoRippleCheck.cpp @@ -50,7 +50,7 @@ fillTransaction( } // { -// account: | +// account: // ledger_hash : // ledger_index : // limit: integer // optional, number of problems @@ -92,17 +92,13 @@ doNoRippleCheck(RPC::JsonContext& context) Json::Value& jvTransactions = transactions ? (result[jss::transactions] = Json::arrayValue) : dummy; - std::string strIdent(params[jss::account].asString()); - AccountID accountID; - - if (auto jv = RPC::accountFromString(accountID, strIdent)) + auto id = parseBase58(params[jss::account].asString()); + if (!id) { - for (auto it(jv.begin()); it != jv.end(); ++it) - result[it.memberName()] = *it; - + RPC::inject_error(rpcACT_MALFORMED, result); return result; } - + auto const accountID{std::move(id.value())}; auto const sle = ledger->read(keylet::account(accountID)); if (!sle) return rpcError(rpcACT_NOT_FOUND); diff --git a/src/ripple/rpc/handlers/OwnerInfo.cpp b/src/ripple/rpc/handlers/OwnerInfo.cpp index b336107ec27..2bd9f258da8 100644 --- a/src/ripple/rpc/handlers/OwnerInfo.cpp +++ b/src/ripple/rpc/handlers/OwnerInfo.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -45,21 +46,16 @@ doOwnerInfo(RPC::JsonContext& context) Json::Value ret; // Get info on account. - auto const& closedLedger = context.ledgerMaster.getClosedLedger(); - AccountID accountID; - auto jAccepted = RPC::accountFromString(accountID, strIdent); - - ret[jss::accepted] = !jAccepted - ? context.netOps.getOwnerInfo(closedLedger, accountID) - : jAccepted; + std::optional const accountID = parseBase58(strIdent); + ret[jss::accepted] = accountID.has_value() + ? context.netOps.getOwnerInfo(closedLedger, accountID.value()) + : rpcError(rpcACT_MALFORMED); auto const& currentLedger = context.ledgerMaster.getCurrentLedger(); - auto jCurrent = RPC::accountFromString(accountID, strIdent); - - ret[jss::current] = !jCurrent - ? context.netOps.getOwnerInfo(currentLedger, accountID) - : jCurrent; + ret[jss::current] = accountID.has_value() + ? 
context.netOps.getOwnerInfo(currentLedger, *accountID) + : rpcError(rpcACT_MALFORMED); return ret; } diff --git a/src/test/rpc/AccountCurrencies_test.cpp b/src/test/rpc/AccountCurrencies_test.cpp index ac4adcf5167..c3e46a3e66c 100644 --- a/src/test/rpc/AccountCurrencies_test.cpp +++ b/src/test/rpc/AccountCurrencies_test.cpp @@ -56,11 +56,22 @@ class AccountCurrencies_test : public beast::unit_test::suite result[jss::error_message] == "Missing field 'account'."); } - { // strict mode, invalid bitcoin token + { Json::Value params; params[jss::account] = "llIIOO"; // these are invalid in bitcoin alphabet - params[jss::strict] = true; + auto const result = env.rpc( + "json", + "account_currencies", + boost::lexical_cast(params))[jss::result]; + BEAST_EXPECT(result[jss::error] == "actMalformed"); + BEAST_EXPECT(result[jss::error_message] == "Account malformed."); + } + + { + // Cannot use a seed as account + Json::Value params; + params[jss::account] = "Bob"; auto const result = env.rpc( "json", "account_currencies", diff --git a/src/test/rpc/AccountInfo_test.cpp b/src/test/rpc/AccountInfo_test.cpp index 0cda0632edc..6ec4740bac2 100644 --- a/src/test/rpc/AccountInfo_test.cpp +++ b/src/test/rpc/AccountInfo_test.cpp @@ -53,7 +53,9 @@ class AccountInfo_test : public beast::unit_test::suite "{\"account\": " "\"n94JNrQYkDrpt62bbSR7nVEhdyAvcJXRAsjEkFYyqRkh9SUTYEqV\"}"); BEAST_EXPECT( - info[jss::result][jss::error_message] == "Disallowed seed."); + info[jss::result][jss::error_code] == rpcACT_MALFORMED); + BEAST_EXPECT( + info[jss::result][jss::error_message] == "Account malformed."); } { // account_info with an account that's not in the ledger. @@ -61,10 +63,21 @@ class AccountInfo_test : public beast::unit_test::suite auto const info = env.rpc( "json", "account_info", - std::string("{ ") + "\"account\": \"" + bogie.human() + "\"}"); + R"({ "account": ")" + bogie.human() + R"("})"); + BEAST_EXPECT( + info[jss::result][jss::error_code] == rpcACT_NOT_FOUND); BEAST_EXPECT( info[jss::result][jss::error_message] == "Account not found."); } + { + // Cannot use a seed as account + auto const info = + env.rpc("json", "account_info", R"({"account": "foo"})"); + BEAST_EXPECT( + info[jss::result][jss::error_code] == rpcACT_MALFORMED); + BEAST_EXPECT( + info[jss::result][jss::error_message] == "Account malformed."); + } } // Test the "signer_lists" argument in account_info. diff --git a/src/test/rpc/AccountLinesRPC_test.cpp b/src/test/rpc/AccountLinesRPC_test.cpp index 1b099f7b7b4..04688156d12 100644 --- a/src/test/rpc/AccountLinesRPC_test.cpp +++ b/src/test/rpc/AccountLinesRPC_test.cpp @@ -53,7 +53,7 @@ class AccountLinesRPC_test : public beast::unit_test::suite R"("n9MJkEKHDhy5eTLuHUQeAAjo382frHNbFK4C8hcwN4nwM2SrLdBj"})"); BEAST_EXPECT( lines[jss::result][jss::error_message] == - RPC::make_error(rpcBAD_SEED)[jss::error_message]); + RPC::make_error(rpcACT_MALFORMED)[jss::error_message]); } Account const alice{"alice"}; { @@ -239,7 +239,7 @@ class AccountLinesRPC_test : public beast::unit_test::suite R"("n9MJkEKHDhy5eTLuHUQeAAjo382frHNbFK4C8hcwN4nwM2SrLdBj"})"); BEAST_EXPECT( lines[jss::result][jss::error_message] == - RPC::make_error(rpcBAD_SEED)[jss::error_message]); + RPC::make_error(rpcACT_MALFORMED)[jss::error_message]); } { // A negative limit should fail. 
@@ -815,7 +815,7 @@ class AccountLinesRPC_test : public beast::unit_test::suite R"("n9MJkEKHDhy5eTLuHUQeAAjo382frHNbFK4C8hcwN4nwM2SrLdBj"}})"); BEAST_EXPECT( lines[jss::error][jss::message] == - RPC::make_error(rpcBAD_SEED)[jss::error_message]); + RPC::make_error(rpcACT_MALFORMED)[jss::error_message]); BEAST_EXPECT( lines.isMember(jss::jsonrpc) && lines[jss::jsonrpc] == "2.0"); BEAST_EXPECT( @@ -1122,7 +1122,7 @@ class AccountLinesRPC_test : public beast::unit_test::suite R"("n9MJkEKHDhy5eTLuHUQeAAjo382frHNbFK4C8hcwN4nwM2SrLdBj"}})"); BEAST_EXPECT( lines[jss::error][jss::message] == - RPC::make_error(rpcBAD_SEED)[jss::error_message]); + RPC::make_error(rpcACT_MALFORMED)[jss::error_message]); BEAST_EXPECT( lines.isMember(jss::jsonrpc) && lines[jss::jsonrpc] == "2.0"); BEAST_EXPECT( diff --git a/src/test/rpc/AccountObjects_test.cpp b/src/test/rpc/AccountObjects_test.cpp index 07fd5d6ddd7..7de5b73671e 100644 --- a/src/test/rpc/AccountObjects_test.cpp +++ b/src/test/rpc/AccountObjects_test.cpp @@ -131,7 +131,7 @@ class AccountObjects_test : public beast::unit_test::suite "n94JNrQYkDrpt62bbSR7nVEhdyAvcJXRAsjEkFYyqRkh9SUTYEqV"; auto resp = env.rpc("json", "account_objects", to_string(params)); BEAST_EXPECT( - resp[jss::result][jss::error_message] == "Disallowed seed."); + resp[jss::result][jss::error_message] == "Account malformed."); } // test error on account that's not in the ledger. { diff --git a/src/test/rpc/AccountOffers_test.cpp b/src/test/rpc/AccountOffers_test.cpp index f4ad0a72595..a90566d9c37 100644 --- a/src/test/rpc/AccountOffers_test.cpp +++ b/src/test/rpc/AccountOffers_test.cpp @@ -248,15 +248,15 @@ class AccountOffers_test : public beast::unit_test::suite "json", "account_offers", jvParams.toStyledString())[jss::result]; - BEAST_EXPECT(jrr[jss::error] == "badSeed"); + BEAST_EXPECT(jrr[jss::error] == "actMalformed"); BEAST_EXPECT(jrr[jss::status] == "error"); - BEAST_EXPECT(jrr[jss::error_message] == "Disallowed seed."); + BEAST_EXPECT(jrr[jss::error_message] == "Account malformed."); } { // bogus account value - auto const jrr = - env.rpc("account_offers", "rNOT_AN_ACCOUNT")[jss::result]; + auto const jrr = env.rpc( + "account_offers", Account("bogus").human())[jss::result]; BEAST_EXPECT(jrr[jss::error] == "actNotFound"); BEAST_EXPECT(jrr[jss::status] == "error"); BEAST_EXPECT(jrr[jss::error_message] == "Account not found."); diff --git a/src/test/rpc/NoRippleCheck_test.cpp b/src/test/rpc/NoRippleCheck_test.cpp index 73934899e06..3d34f55c90d 100644 --- a/src/test/rpc/NoRippleCheck_test.cpp +++ b/src/test/rpc/NoRippleCheck_test.cpp @@ -127,8 +127,8 @@ class NoRippleCheck_test : public beast::unit_test::suite "json", "noripple_check", boost::lexical_cast(params))[jss::result]; - BEAST_EXPECT(result[jss::error] == "badSeed"); - BEAST_EXPECT(result[jss::error_message] == "Disallowed seed."); + BEAST_EXPECT(result[jss::error] == "actMalformed"); + BEAST_EXPECT(result[jss::error_message] == "Account malformed."); } } diff --git a/src/test/rpc/OwnerInfo_test.cpp b/src/test/rpc/OwnerInfo_test.cpp index 0de4ef2bd55..c510c35afc8 100644 --- a/src/test/rpc/OwnerInfo_test.cpp +++ b/src/test/rpc/OwnerInfo_test.cpp @@ -56,14 +56,16 @@ class OwnerInfo_test : public beast::unit_test::suite result.isMember(jss::accepted) && result.isMember(jss::current))) { - BEAST_EXPECT(result[jss::accepted][jss::error] == "badSeed"); + BEAST_EXPECT( + result[jss::accepted][jss::error] == "actMalformed"); BEAST_EXPECT( result[jss::accepted][jss::error_message] == - "Disallowed seed."); - 
BEAST_EXPECT(result[jss::current][jss::error] == "badSeed"); + "Account malformed."); + BEAST_EXPECT( + result[jss::current][jss::error] == "actMalformed"); BEAST_EXPECT( result[jss::current][jss::error_message] == - "Disallowed seed."); + "Account malformed."); } } diff --git a/src/test/rpc/RPCCall_test.cpp b/src/test/rpc/RPCCall_test.cpp index 966d325f427..1ae15afa303 100644 --- a/src/test/rpc/RPCCall_test.cpp +++ b/src/test/rpc/RPCCall_test.cpp @@ -86,7 +86,7 @@ static RPCCallTestData const rpcCallTestArray[] = { __LINE__, {"account_channels", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210"}, + "rD5MbavGfiSC5m7mkxy1FANuT7s3HxqpoF"}, RPCCallTestData::no_exception, R"({ "method" : "account_channels", @@ -94,13 +94,15 @@ static RPCCallTestData const rpcCallTestArray[] = { { "api_version" : %MAX_API_VER%, "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "destination_account" : "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210" + "destination_account" : "rD5MbavGfiSC5m7mkxy1FANuT7s3HxqpoF" } ] })"}, {"account_channels: account and ledger index.", __LINE__, - {"account_channels", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "closed"}, + {"account_channels", + "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", + "r9emE59aTWb85t64dAebKrxYMBTpzK5yR7"}, RPCCallTestData::no_exception, R"({ "method" : "account_channels", @@ -108,7 +110,7 @@ static RPCCallTestData const rpcCallTestArray[] = { { "api_version" : %MAX_API_VER%, "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "destination_account" : "closed" + "destination_account" : "r9emE59aTWb85t64dAebKrxYMBTpzK5yR7" } ] })"}, @@ -186,7 +188,7 @@ static RPCCallTestData const rpcCallTestArray[] = { "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "rnUy2SHTrB9DubsPmkJZUXTf5FcNDGrYEA", "current", - "strict"}, + "extra"}, RPCCallTestData::no_exception, R"({ "method" : "account_channels", @@ -218,7 +220,7 @@ static RPCCallTestData const rpcCallTestArray[] = { // account_currencies // ---------------------------------------------------------- - {"account_currencies: minimal.", + {"account_currencies: minimal 1.", __LINE__, {"account_currencies", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh"}, RPCCallTestData::no_exception, @@ -231,17 +233,16 @@ static RPCCallTestData const rpcCallTestArray[] = { } ] })"}, - {"account_currencies: strict.", + {"account_currencies: minimal 2.", __LINE__, - {"account_currencies", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "strict"}, + {"account_currencies", "racb4o3DrdYxuCfyVa6vsLb7vgju9RFbBr"}, RPCCallTestData::no_exception, R"({ "method" : "account_currencies", "params" : [ { "api_version" : %MAX_API_VER%, - "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "strict" : 1 + "account" : "racb4o3DrdYxuCfyVa6vsLb7vgju9RFbBr" } ] })"}, @@ -275,10 +276,7 @@ static RPCCallTestData const rpcCallTestArray[] = { })"}, {"account_currencies: current ledger.", __LINE__, - {"account_currencies", - "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "current", - "strict"}, + {"account_currencies", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "current"}, RPCCallTestData::no_exception, R"({ "method" : "account_currencies", @@ -286,8 +284,7 @@ static RPCCallTestData const rpcCallTestArray[] = { { "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "api_version" : %MAX_API_VER%, - "ledger_index" : "current", - "strict" : 1 + "ledger_index" : "current" } ] })"}, @@ -312,8 +309,8 @@ static RPCCallTestData const rpcCallTestArray[] = { {"account_currencies", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "current", - "strict", - "spare"}, 
+ "spare1", + "spare2"}, RPCCallTestData::no_exception, R"({ "method" : "account_currencies", @@ -358,20 +355,6 @@ static RPCCallTestData const rpcCallTestArray[] = { ] })", }, - {"account_currencies: floating point first argument.", - __LINE__, - {"account_currencies", "3.14159", "strict"}, - RPCCallTestData::no_exception, - R"({ - "method" : "account_currencies", - "params" : [ - { - "api_version" : %MAX_API_VER%, - "account" : "3.14159", - "strict" : 1 - } - ] - })"}, // account_info // ---------------------------------------------------------------- @@ -432,26 +415,9 @@ static RPCCallTestData const rpcCallTestArray[] = { } ] })"}, - {"account_info: strict.", + {"account_info: with ledger index.", __LINE__, - {"account_info", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "strict"}, - RPCCallTestData::no_exception, - R"({ - "method" : "account_info", - "params" : [ - { - "api_version" : %MAX_API_VER%, - "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "strict" : 1 - } - ] - })"}, - {"account_info: with ledger index and strict.", - __LINE__, - {"account_info", - "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "validated", - "strict"}, + {"account_info", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "validated"}, RPCCallTestData::no_exception, R"({ "method" : "account_info", @@ -459,8 +425,7 @@ static RPCCallTestData const rpcCallTestArray[] = { { "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "api_version" : %MAX_API_VER%, - "ledger_index" : "validated", - "strict" : 1 + "ledger_index" : "validated" } ] })"}, @@ -485,8 +450,8 @@ static RPCCallTestData const rpcCallTestArray[] = { {"account_info", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "current", - "strict", - "extra"}, + "extra1", + "extra2"}, RPCCallTestData::no_exception, R"({ "method" : "account_info", @@ -798,26 +763,9 @@ static RPCCallTestData const rpcCallTestArray[] = { } ] })"}, - {"account_objects: strict.", - __LINE__, - {"account_objects", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "strict"}, - RPCCallTestData::no_exception, - R"({ - "method" : "account_objects", - "params" : [ - { - "api_version" : %MAX_API_VER%, - "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "strict" : 1 - } - ] - })"}, - {"account_objects: with ledger index and strict.", + {"account_objects: with ledger index.", __LINE__, - {"account_objects", - "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "validated", - "strict"}, + {"account_objects", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "validated"}, RPCCallTestData::no_exception, R"({ "method" : "account_objects", @@ -825,8 +773,7 @@ static RPCCallTestData const rpcCallTestArray[] = { { "api_version" : %MAX_API_VER%, "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "ledger_index" : "validated", - "strict" : 1 + "ledger_index" : "validated" } ] })"}, @@ -853,8 +800,8 @@ static RPCCallTestData const rpcCallTestArray[] = { "account_objects", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "current", - "extra", - "strict", + "extra1", + "extra2", }, RPCCallTestData::no_exception, R"({ @@ -862,8 +809,7 @@ static RPCCallTestData const rpcCallTestArray[] = { "params" : [ { "api_version" : %MAX_API_VER%, - "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "strict" : 1 + "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh" } ] })"}, @@ -876,7 +822,7 @@ static RPCCallTestData const rpcCallTestArray[] = { "current", "extra1", "extra2", - "strict", + "extra3", }, RPCCallTestData::no_exception, R"({ @@ -884,8 +830,7 @@ static RPCCallTestData const rpcCallTestArray[] = { "params" : [ { "api_version" : %MAX_API_VER%, - "account" : 
"rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "strict" : 1 + "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh" } ] })"}, @@ -898,7 +843,7 @@ static RPCCallTestData const rpcCallTestArray[] = { "extra1", "extra2", "extra3", - "strict", + "extra4", }, RPCCallTestData::no_exception, R"({ @@ -953,10 +898,7 @@ static RPCCallTestData const rpcCallTestArray[] = { // cannot currently occur because jvParseLedger() always returns true. "account_objects: invalid ledger selection 2.", __LINE__, - {"account_objects", - "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "no_ledger", - "strict"}, + {"account_objects", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "no_ledger"}, RPCCallTestData::no_exception, R"({ "method" : "account_objects", @@ -964,8 +906,7 @@ static RPCCallTestData const rpcCallTestArray[] = { { "api_version" : %MAX_API_VER%, "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "ledger_index" : 0, - "strict" : 1 + "ledger_index" : 0 } ] })", @@ -1030,26 +971,9 @@ static RPCCallTestData const rpcCallTestArray[] = { } ] })"}, - {"account_offers: strict.", + {"account_offers: with ledger index.", __LINE__, - {"account_offers", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "strict"}, - RPCCallTestData::no_exception, - R"({ - "method" : "account_offers", - "params" : [ - { - "api_version" : %MAX_API_VER%, - "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "strict" : 1 - } - ] - })"}, - {"account_offers: with ledger index and strict.", - __LINE__, - {"account_offers", - "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "validated", - "strict"}, + {"account_offers", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "validated"}, RPCCallTestData::no_exception, R"({ "method" : "account_offers", @@ -1057,8 +981,7 @@ static RPCCallTestData const rpcCallTestArray[] = { { "api_version" : %MAX_API_VER%, "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "ledger_index" : "validated", - "strict" : 1 + "ledger_index" : "validated" } ] })"}, @@ -1081,21 +1004,17 @@ static RPCCallTestData const rpcCallTestArray[] = { {// Note: I believe this _ought_ to be detected as too many arguments. "account_offers: four arguments.", __LINE__, - { - "account_offers", - "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "current", - "extra", - "strict", - }, + {"account_offers", + "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", + "current", + "extra"}, RPCCallTestData::no_exception, R"({ "method" : "account_offers", "params" : [ { "api_version" : %MAX_API_VER%, - "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "strict" : 1 + "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh" } ] })"}, @@ -1107,7 +1026,7 @@ static RPCCallTestData const rpcCallTestArray[] = { "current", "extra1", "extra2", - "strict", + "extra3", }, RPCCallTestData::no_exception, R"({ @@ -1162,10 +1081,7 @@ static RPCCallTestData const rpcCallTestArray[] = { // cannot currently occur because jvParseLedger() always returns true. 
"account_offers: invalid ledger selection 2.", __LINE__, - {"account_offers", - "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "no_ledger", - "strict"}, + {"account_offers", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "no_ledger"}, RPCCallTestData::no_exception, R"({ "method" : "account_offers", @@ -1173,8 +1089,7 @@ static RPCCallTestData const rpcCallTestArray[] = { { "api_version" : %MAX_API_VER%, "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "ledger_index" : 0, - "strict" : 1 + "ledger_index" : 0 } ] })", @@ -4429,26 +4344,9 @@ static RPCCallTestData const rpcCallTestArray[] = { } ] })"}, - {"owner_info: strict.", - __LINE__, - {"owner_info", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "strict"}, - RPCCallTestData::no_exception, - R"({ - "method" : "owner_info", - "params" : [ - { - "api_version" : %MAX_API_VER%, - "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "strict" : 1 - } - ] - })"}, - {"owner_info: with ledger index and strict.", + {"owner_info: with ledger index.", __LINE__, - {"owner_info", - "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "validated", - "strict"}, + {"owner_info", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "validated"}, RPCCallTestData::no_exception, R"({ "method" : "owner_info", @@ -4456,8 +4354,7 @@ static RPCCallTestData const rpcCallTestArray[] = { { "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "api_version" : %MAX_API_VER%, - "ledger_index" : "validated", - "strict" : 1 + "ledger_index" : "validated" } ] })"}, @@ -4483,8 +4380,8 @@ static RPCCallTestData const rpcCallTestArray[] = { "owner_info", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "current", - "extra", - "strict", + "extra1", + "extra2", }, RPCCallTestData::no_exception, R"({ @@ -4537,12 +4434,9 @@ static RPCCallTestData const rpcCallTestArray[] = { { // Note: there is code in place to return rpcLGR_IDX_MALFORMED. That // cannot currently occur because jvParseLedger() always returns true. - "owner_info: invalid ledger selection and strict.", + "owner_info: invalid ledger selection.", __LINE__, - {"owner_info", - "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", - "no_ledger", - "strict"}, + {"owner_info", "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "no_ledger"}, RPCCallTestData::no_exception, R"({ "method" : "owner_info", @@ -4550,8 +4444,7 @@ static RPCCallTestData const rpcCallTestArray[] = { { "account" : "rHb9CJAWyB4rj91VRWn96DkukG4bwdtyTh", "api_version" : %MAX_API_VER%, - "ledger_index" : 0, - "strict" : 1 + "ledger_index" : 0 } ] })", From 629ed5c691fc454140a532d872c2420a93a6d564 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Wed, 17 May 2023 16:51:42 -0500 Subject: [PATCH 160/508] Switch to self-hosted runners for macOS (#4511) --- .github/workflows/macos.yml | 35 +++-------------------------------- 1 file changed, 3 insertions(+), 32 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 8b4aca0f4f7..94151bcf7eb 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -7,38 +7,22 @@ jobs: strategy: matrix: platform: - - macos-12 + - macos generator: - Ninja configuration: - Release - runs-on: ${{ matrix.platform }} + runs-on: [self-hosted, macOS] env: # The `build` action requires these variables. 
build_dir: .build - NUM_PROCESSORS: 2 + NUM_PROCESSORS: 12 steps: - name: checkout uses: actions/checkout@v3 - name: install Ninja if: matrix.generator == 'Ninja' run: brew install ninja - - name: choose Python - uses: actions/setup-python@v3 - with: - python-version: 3.9 - - name: learn Python cache directory - id: pip-cache - run: | - sudo pip install --upgrade pip - echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT - - name: restore Python cache directory - uses: actions/cache@v2 - with: - path: ${{ steps.pip-cache.outputs.dir }} - key: ${{ runner.os }}-${{ hashFiles('.github/workflows/nix.yml') }} - - name: install Conan - run: pip install wheel 'conan<2' - name: check environment run: | echo ${PATH} | tr ':' '\n' @@ -46,19 +30,6 @@ jobs: conan --version cmake --version env - - name: configure Conan - run: | - conan profile new default --detect - conan profile update settings.compiler.cppstd=20 default - - name: learn Conan cache directory - id: conan-cache - run: | - echo "dir=$(conan config get storage.path)" >> $GITHUB_OUTPUT - - name: restore Conan cache directory - uses: actions/cache@v2 - with: - path: ${{ steps.conan-cache.outputs.dir }} - key: ${{ hashFiles('~/.conan/profiles/default', 'conanfile.py', 'external/rocksdb/*', '.github/workflows/nix.yml') }} - name: build uses: ./.github/actions/build with: From 3620ac287e5a0b7206504a2032a6d4fb185bcf97 Mon Sep 17 00:00:00 2001 From: Shawn Xie <35279399+shawnxie999@users.noreply.github.com> Date: Thu, 18 May 2023 19:38:18 -0400 Subject: [PATCH 161/508] Add nftoken_id, nftoken_ids, offer_id fields for NFTokens (#4447) Three new fields are added to the `Tx` responses for NFTs: 1. `nftoken_id`: This field is included in the `Tx` responses for `NFTokenMint` and `NFTokenAcceptOffer`. This field indicates the `NFTokenID` for the `NFToken` that was modified on the ledger by the transaction. 2. `nftoken_ids`: This array is included in the `Tx` response for `NFTokenCancelOffer`. This field provides a list of all the `NFTokenID`s for the `NFToken`s that were modified on the ledger by the transaction. 3. `offer_id`: This field is included in the `Tx` response for `NFTokenCreateOffer` transactions and shows the OfferID of the `NFTokenOffer` created. The fields make it easier to track specific tokens and offers. The implementation includes code (by @ledhed2222) from the Clio project to extract NFTokenIDs from mint transactions. 
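For illustration only (not part of this commit): the sketch below shows one way the three synthetic fields might be read out of a `tx` response. It assumes the same fixtures used by the tests added further down, i.e. a `test::jtx::Env` named `env` and the hash `txHash` of a validated NFToken transaction; every other name is standard C++. Each field is guarded with `isMember` because it is only present for its corresponding transaction type, and only when the transaction succeeded.

    // Sketch, assuming a test::jtx::Env `env` and a std::string `txHash`.
    Json::Value const meta = env.rpc("tx", txHash)[jss::result][jss::meta];

    // NFTokenMint / NFTokenAcceptOffer: the single token that was affected.
    std::optional<std::string> affectedToken;
    if (meta.isMember(jss::nftoken_id))
        affectedToken = meta[jss::nftoken_id].asString();

    // NFTokenCancelOffer: every token whose offer(s) were cancelled.
    std::vector<std::string> cancelledTokens;
    if (meta.isMember(jss::nftoken_ids))
        for (auto const& id : meta[jss::nftoken_ids])
            cancelledTokens.push_back(id.asString());

    // NFTokenCreateOffer: the index of the NFTokenOffer that was created.
    std::optional<std::string> createdOffer;
    if (meta.isMember(jss::offer_id))
        createdOffer = meta[jss::offer_id].asString();
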
--- Builds/CMake/RippledCore.cmake | 3 + src/ripple/protocol/jss.h | 3 + src/ripple/rpc/NFTSyntheticSerializer.h | 58 +++++ src/ripple/rpc/NFTokenID.h | 68 +++++ src/ripple/rpc/NFTokenOfferID.h | 64 +++++ src/ripple/rpc/handlers/AccountTx.cpp | 3 + src/ripple/rpc/handlers/Tx.cpp | 3 + .../rpc/impl/NFTSyntheticSerializer.cpp | 50 ++++ src/ripple/rpc/impl/NFTokenID.cpp | 202 +++++++++++++++ src/ripple/rpc/impl/NFTokenOfferID.cpp | 85 +++++++ src/test/app/NFToken_test.cpp | 233 ++++++++++++++++++ 11 files changed, 772 insertions(+) create mode 100644 src/ripple/rpc/NFTSyntheticSerializer.h create mode 100644 src/ripple/rpc/NFTokenID.h create mode 100644 src/ripple/rpc/NFTokenOfferID.h create mode 100644 src/ripple/rpc/impl/NFTSyntheticSerializer.cpp create mode 100644 src/ripple/rpc/impl/NFTokenID.cpp create mode 100644 src/ripple/rpc/impl/NFTokenOfferID.cpp diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index b9e4653e653..db7757f9c2f 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -653,6 +653,9 @@ target_sources (rippled PRIVATE src/ripple/rpc/impl/ShardVerificationScheduler.cpp src/ripple/rpc/impl/Status.cpp src/ripple/rpc/impl/TransactionSign.cpp + src/ripple/rpc/impl/NFTokenID.cpp + src/ripple/rpc/impl/NFTokenOfferID.cpp + src/ripple/rpc/impl/NFTSyntheticSerializer.cpp #[===============================[ main sources: subdir: perflog diff --git a/src/ripple/protocol/jss.h b/src/ripple/protocol/jss.h index e2738309d3c..92d9096da92 100644 --- a/src/ripple/protocol/jss.h +++ b/src/ripple/protocol/jss.h @@ -416,6 +416,8 @@ JSS(nft_offer_index); // out nft_buy_offers, nft_sell_offers JSS(nft_page); // in: LedgerEntry JSS(nft_serial); // out: account_nfts JSS(nft_taxon); // out: nft_info (clio) +JSS(nftoken_id); // out: insertNFTokenID +JSS(nftoken_ids); // out: insertNFTokenID JSS(no_ripple); // out: AccountLines JSS(no_ripple_peer); // out: AccountLines JSS(node); // out: LedgerEntry @@ -436,6 +438,7 @@ JSS(node_writes_delayed); // out::GetCounts JSS(obligations); // out: GatewayBalances JSS(offer); // in: LedgerEntry JSS(offers); // out: NetworkOPs, AccountOffers, Subscribe +JSS(offer_id); // out: insertNFTokenOfferID JSS(offline); // in: TransactionSign JSS(offset); // in/out: AccountTxOld JSS(open); // out: handlers/Ledger diff --git a/src/ripple/rpc/NFTSyntheticSerializer.h b/src/ripple/rpc/NFTSyntheticSerializer.h new file mode 100644 index 00000000000..090e8937869 --- /dev/null +++ b/src/ripple/rpc/NFTSyntheticSerializer.h @@ -0,0 +1,58 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#ifndef RIPPLE_RPC_NFTSYNTHETICSERIALIZER_H_INCLUDED +#define RIPPLE_RPC_NFTSYNTHETICSERIALIZER_H_INCLUDED + +#include +#include + +#include +#include + +namespace Json { +class Value; +} + +namespace ripple { + +class TxMeta; +class STTx; + +namespace RPC { + +struct JsonContext; + +/** + Adds common synthetic fields to transaction-related JSON responses + + @{ + */ +void +insertNFTSyntheticInJson( + Json::Value&, + RPC::JsonContext const&, + std::shared_ptr const&, + TxMeta const&); +/** @} */ + +} // namespace RPC +} // namespace ripple + +#endif diff --git a/src/ripple/rpc/NFTokenID.h b/src/ripple/rpc/NFTokenID.h new file mode 100644 index 00000000000..cb218966fda --- /dev/null +++ b/src/ripple/rpc/NFTokenID.h @@ -0,0 +1,68 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_RPC_NFTOKENID_H_INCLUDED +#define RIPPLE_RPC_NFTOKENID_H_INCLUDED + +#include + +#include +#include + +namespace Json { +class Value; +} + +namespace ripple { + +class TxMeta; +class STTx; + +namespace RPC { + +/** + Add a `nftoken_ids` field to the `meta` output parameter. + The field is only added to successful NFTokenMint, NFTokenAcceptOffer, + and NFTokenCancelOffer transactions. + + Helper functions are not static because they can be used by Clio. + @{ + */ +bool +canHaveNFTokenID( + std::shared_ptr const& serializedTx, + TxMeta const& transactionMeta); + +std::optional +getNFTokenIDFromPage(TxMeta const& transactionMeta); + +std::vector +getNFTokenIDFromDeletedOffer(TxMeta const& transactionMeta); + +void +insertNFTokenID( + Json::Value& response, + std::shared_ptr const& transaction, + TxMeta const& transactionMeta); +/** @} */ + +} // namespace RPC +} // namespace ripple + +#endif diff --git a/src/ripple/rpc/NFTokenOfferID.h b/src/ripple/rpc/NFTokenOfferID.h new file mode 100644 index 00000000000..6c1bef3d127 --- /dev/null +++ b/src/ripple/rpc/NFTokenOfferID.h @@ -0,0 +1,64 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_RPC_NFTOKENOFFERID_H_INCLUDED +#define RIPPLE_RPC_NFTOKENOFFERID_H_INCLUDED + +#include + +#include +#include + +namespace Json { +class Value; +} + +namespace ripple { + +class TxMeta; +class STTx; + +namespace RPC { + +/** + Add an `offer_id` field to the `meta` output parameter. + The field is only added to successful NFTokenCreateOffer transactions. + + Helper functions are not static because they can be used by Clio. + @{ + */ +bool +canHaveNFTokenOfferID( + std::shared_ptr const& serializedTx, + TxMeta const& transactionMeta); + +std::optional +getOfferIDFromCreatedOffer(TxMeta const& transactionMeta); + +void +insertNFTokenOfferID( + Json::Value& response, + std::shared_ptr const& transaction, + TxMeta const& transactionMeta); +/** @} */ + +} // namespace RPC +} // namespace ripple + +#endif diff --git a/src/ripple/rpc/handlers/AccountTx.cpp b/src/ripple/rpc/handlers/AccountTx.cpp index 67c80ad9bdc..f65657d92ea 100644 --- a/src/ripple/rpc/handlers/AccountTx.cpp +++ b/src/ripple/rpc/handlers/AccountTx.cpp @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -307,6 +308,8 @@ populateJsonResponse( jvObj[jss::validated] = true; insertDeliveredAmount( jvObj[jss::meta], context, txn, *txnMeta); + insertNFTSyntheticInJson( + jvObj, context, txn->getSTransaction(), *txnMeta); } } } diff --git a/src/ripple/rpc/handlers/Tx.cpp b/src/ripple/rpc/handlers/Tx.cpp index 4a70f1fe061..e79997ec8f1 100644 --- a/src/ripple/rpc/handlers/Tx.cpp +++ b/src/ripple/rpc/handlers/Tx.cpp @@ -28,6 +28,7 @@ #include #include #include +#include #include namespace ripple { @@ -295,6 +296,8 @@ populateJsonResponse( response[jss::meta] = meta->getJson(JsonOptions::none); insertDeliveredAmount( response[jss::meta], context, result.txn, *meta); + insertNFTSyntheticInJson( + response, context, result.txn->getSTransaction(), *meta); } } response[jss::validated] = result.validated; diff --git a/src/ripple/rpc/impl/NFTSyntheticSerializer.cpp b/src/ripple/rpc/impl/NFTSyntheticSerializer.cpp new file mode 100644 index 00000000000..f4692cfd4f8 --- /dev/null +++ b/src/ripple/rpc/impl/NFTSyntheticSerializer.cpp @@ -0,0 +1,50 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { +namespace RPC { + +void +insertNFTSyntheticInJson( + Json::Value& response, + RPC::JsonContext const& context, + std::shared_ptr const& transaction, + TxMeta const& transactionMeta) +{ + insertNFTokenID(response[jss::meta], transaction, transactionMeta); + insertNFTokenOfferID(response[jss::meta], transaction, transactionMeta); +} + +} // namespace RPC +} // namespace ripple diff --git a/src/ripple/rpc/impl/NFTokenID.cpp b/src/ripple/rpc/impl/NFTokenID.cpp new file mode 100644 index 00000000000..d0be439ec6a --- /dev/null +++ b/src/ripple/rpc/impl/NFTokenID.cpp @@ -0,0 +1,202 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { +namespace RPC { + +bool +canHaveNFTokenID( + std::shared_ptr const& serializedTx, + TxMeta const& transactionMeta) +{ + if (!serializedTx) + return false; + + TxType const tt = serializedTx->getTxnType(); + if (tt != ttNFTOKEN_MINT && tt != ttNFTOKEN_ACCEPT_OFFER && + tt != ttNFTOKEN_CANCEL_OFFER) + return false; + + // if the transaction failed nothing could have been delivered. + if (transactionMeta.getResultTER() != tesSUCCESS) + return false; + + return true; +} + +std::optional +getNFTokenIDFromPage(TxMeta const& transactionMeta) +{ + // The metadata does not make it obvious which NFT was added. To figure + // that out we gather up all of the previous NFT IDs and all of the final + // NFT IDs and compare them to find what changed. + std::vector prevIDs; + std::vector finalIDs; + + for (STObject const& node : transactionMeta.getNodes()) + { + if (node.getFieldU16(sfLedgerEntryType) != ltNFTOKEN_PAGE) + continue; + + SField const& fName = node.getFName(); + if (fName == sfCreatedNode) + { + STArray const& toAddPrevNFTs = node.peekAtField(sfNewFields) + .downcast() + .getFieldArray(sfNFTokens); + std::transform( + toAddPrevNFTs.begin(), + toAddPrevNFTs.end(), + std::back_inserter(finalIDs), + [](STObject const& nft) { + return nft.getFieldH256(sfNFTokenID); + }); + } + else if (fName == sfModifiedNode) + { + // When a mint results in splitting an existing page, + // it results in a created page and a modified node. 
Sometimes, + // the created node needs to be linked to a third page, resulting + // in modifying that third page's PreviousPageMin or NextPageMin + // field changing, but no NFTs within that page changing. In this + // case, there will be no previous NFTs and we need to skip. + // However, there will always be NFTs listed in the final fields, + // as rippled outputs all fields in final fields even if they were + // not changed. + STObject const& previousFields = + node.peekAtField(sfPreviousFields).downcast(); + if (!previousFields.isFieldPresent(sfNFTokens)) + continue; + + STArray const& toAddPrevNFTs = + previousFields.getFieldArray(sfNFTokens); + std::transform( + toAddPrevNFTs.begin(), + toAddPrevNFTs.end(), + std::back_inserter(prevIDs), + [](STObject const& nft) { + return nft.getFieldH256(sfNFTokenID); + }); + + STArray const& toAddFinalNFTs = node.peekAtField(sfFinalFields) + .downcast() + .getFieldArray(sfNFTokens); + std::transform( + toAddFinalNFTs.begin(), + toAddFinalNFTs.end(), + std::back_inserter(finalIDs), + [](STObject const& nft) { + return nft.getFieldH256(sfNFTokenID); + }); + } + } + + // We expect NFTs to be added one at a time. So finalIDs should be one + // longer than prevIDs. If that's not the case something is messed up. + if (finalIDs.size() != prevIDs.size() + 1) + return std::nullopt; + + // Find the first NFT ID that doesn't match. We're looking for an + // added NFT, so the one we want will be the mismatch in finalIDs. + auto const diff = std::mismatch( + finalIDs.begin(), finalIDs.end(), prevIDs.begin(), prevIDs.end()); + + // There should always be a difference so the returned finalIDs + // iterator should never be end(). But better safe than sorry. + if (diff.first == finalIDs.end()) + return std::nullopt; + + return *diff.first; +} + +std::vector +getNFTokenIDFromDeletedOffer(TxMeta const& transactionMeta) +{ + std::vector tokenIDResult; + for (STObject const& node : transactionMeta.getNodes()) + { + if (node.getFieldU16(sfLedgerEntryType) != ltNFTOKEN_OFFER || + node.getFName() != sfDeletedNode) + continue; + + auto const& toAddNFT = node.peekAtField(sfFinalFields) + .downcast() + .getFieldH256(sfNFTokenID); + tokenIDResult.push_back(toAddNFT); + } + + // Deduplicate the NFT IDs because multiple offers could affect the same NFT + // and hence we would get duplicate NFT IDs + sort(tokenIDResult.begin(), tokenIDResult.end()); + tokenIDResult.erase( + unique(tokenIDResult.begin(), tokenIDResult.end()), + tokenIDResult.end()); + return tokenIDResult; +} + +void +insertNFTokenID( + Json::Value& response, + std::shared_ptr const& transaction, + TxMeta const& transactionMeta) +{ + if (!canHaveNFTokenID(transaction, transactionMeta)) + return; + + // We extract the NFTokenID from metadata by comparing affected nodes + if (auto const type = transaction->getTxnType(); type == ttNFTOKEN_MINT) + { + std::optional result = getNFTokenIDFromPage(transactionMeta); + if (result.has_value()) + response[jss::nftoken_id] = to_string(result.value()); + } + else if (type == ttNFTOKEN_ACCEPT_OFFER) + { + std::vector result = + getNFTokenIDFromDeletedOffer(transactionMeta); + + if (result.size() > 0) + response[jss::nftoken_id] = to_string(result.front()); + } + else if (type == ttNFTOKEN_CANCEL_OFFER) + { + std::vector result = + getNFTokenIDFromDeletedOffer(transactionMeta); + + response[jss::nftoken_ids] = Json::Value(Json::arrayValue); + for (auto const& nftID : result) + response[jss::nftoken_ids].append(to_string(nftID)); + } +} + +} // namespace RPC +} // namespace 
ripple diff --git a/src/ripple/rpc/impl/NFTokenOfferID.cpp b/src/ripple/rpc/impl/NFTokenOfferID.cpp new file mode 100644 index 00000000000..05d110aac56 --- /dev/null +++ b/src/ripple/rpc/impl/NFTokenOfferID.cpp @@ -0,0 +1,85 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { +namespace RPC { + +bool +canHaveNFTokenOfferID( + std::shared_ptr const& serializedTx, + TxMeta const& transactionMeta) +{ + if (!serializedTx) + return false; + + TxType const tt = serializedTx->getTxnType(); + if (tt != ttNFTOKEN_CREATE_OFFER) + return false; + + // if the transaction failed nothing could have been delivered. + if (transactionMeta.getResultTER() != tesSUCCESS) + return false; + + return true; +} + +std::optional +getOfferIDFromCreatedOffer(TxMeta const& transactionMeta) +{ + for (STObject const& node : transactionMeta.getNodes()) + { + if (node.getFieldU16(sfLedgerEntryType) != ltNFTOKEN_OFFER || + node.getFName() != sfCreatedNode) + continue; + + return node.getFieldH256(sfLedgerIndex); + } + return std::nullopt; +} + +void +insertNFTokenOfferID( + Json::Value& response, + std::shared_ptr const& transaction, + TxMeta const& transactionMeta) +{ + if (!canHaveNFTokenOfferID(transaction, transactionMeta)) + return; + + std::optional result = getOfferIDFromCreatedOffer(transactionMeta); + + if (result.has_value()) + response[jss::offer_id] = to_string(result.value()); +} + +} // namespace RPC +} // namespace ripple diff --git a/src/test/app/NFToken_test.cpp b/src/test/app/NFToken_test.cpp index d050579b1e7..1f3636e4e39 100644 --- a/src/test/app/NFToken_test.cpp +++ b/src/test/app/NFToken_test.cpp @@ -6566,6 +6566,238 @@ class NFToken_test : public beast::unit_test::suite } } + void + testTxJsonMetaFields(FeatureBitset features) + { + // `nftoken_id` is added in the `tx` response for NFTokenMint and + // NFTokenAcceptOffer. + // + // `nftoken_ids` is added in the `tx` response for NFTokenCancelOffer + // + // `offer_id` is added in the `tx` response for NFTokenCreateOffer + // + // The values of these fields are dependent on the NFTokenID/OfferID + // changed in its corresponding transaction. We want to validate each + // transaction to make sure the synethic fields hold the right values. 
+ + testcase("Test synthetic fields from JSON response"); + + using namespace test::jtx; + + Account const alice{"alice"}; + Account const bob{"bob"}; + Account const broker{"broker"}; + + Env env{*this, features}; + env.fund(XRP(10000), alice, bob, broker); + env.close(); + + // Verify `nftoken_id` value equals to the NFTokenID that was + // changed in the most recent NFTokenMint or NFTokenAcceptOffer + // transaction + auto verifyNFTokenID = [&](uint256 const& actualNftID) { + // Get the hash for the most recent transaction. + std::string const txHash{ + env.tx()->getJson(JsonOptions::none)[jss::hash].asString()}; + + env.close(); + Json::Value const meta = + env.rpc("tx", txHash)[jss::result][jss::meta]; + + // Expect nftokens_id field + if (!BEAST_EXPECT(meta.isMember(jss::nftoken_id))) + return; + + // Check the value of NFT ID in the meta with the + // actual value + uint256 nftID; + BEAST_EXPECT(nftID.parseHex(meta[jss::nftoken_id].asString())); + BEAST_EXPECT(nftID == actualNftID); + }; + + // Verify `nftoken_ids` value equals to the NFTokenIDs that were + // changed in the most recent NFTokenCancelOffer transaction + auto verifyNFTokenIDsInCancelOffer = + [&](std::vector actualNftIDs) { + // Get the hash for the most recent transaction. + std::string const txHash{ + env.tx()->getJson(JsonOptions::none)[jss::hash].asString()}; + + env.close(); + Json::Value const meta = + env.rpc("tx", txHash)[jss::result][jss::meta]; + + // Expect nftokens_ids field and verify the values + if (!BEAST_EXPECT(meta.isMember(jss::nftoken_ids))) + return; + + // Convert NFT IDs from Json::Value to uint256 + std::vector metaIDs; + std::transform( + meta[jss::nftoken_ids].begin(), + meta[jss::nftoken_ids].end(), + std::back_inserter(metaIDs), + [this](Json::Value id) { + uint256 nftID; + BEAST_EXPECT(nftID.parseHex(id.asString())); + return nftID; + }); + + // Sort both array to prepare for comparison + std::sort(metaIDs.begin(), metaIDs.end()); + std::sort(actualNftIDs.begin(), actualNftIDs.end()); + + // Make sure the expect number of NFTs is correct + BEAST_EXPECT(metaIDs.size() == actualNftIDs.size()); + + // Check the value of NFT ID in the meta with the + // actual values + for (size_t i = 0; i < metaIDs.size(); ++i) + BEAST_EXPECT(metaIDs[i] == actualNftIDs[i]); + }; + + // Verify `offer_id` value equals to the offerID that was + // changed in the most recent NFTokenCreateOffer tx + auto verifyNFTokenOfferID = [&](uint256 const& offerID) { + // Get the hash for the most recent transaction. 
+ std::string const txHash{ + env.tx()->getJson(JsonOptions::none)[jss::hash].asString()}; + + env.close(); + Json::Value const meta = + env.rpc("tx", txHash)[jss::result][jss::meta]; + + // Expect offer_id field and verify the value + if (!BEAST_EXPECT(meta.isMember(jss::offer_id))) + return; + + uint256 metaOfferID; + BEAST_EXPECT(metaOfferID.parseHex(meta[jss::offer_id].asString())); + BEAST_EXPECT(metaOfferID == offerID); + }; + + // Check new fields in tx meta when for all NFTtransactions + { + // Alice mints 2 NFTs + // Verify the NFTokenIDs are correct in the NFTokenMint tx meta + uint256 const nftId1{ + token::getNextID(env, alice, 0u, tfTransferable)}; + env(token::mint(alice, 0u), txflags(tfTransferable)); + env.close(); + verifyNFTokenID(nftId1); + + uint256 const nftId2{ + token::getNextID(env, alice, 0u, tfTransferable)}; + env(token::mint(alice, 0u), txflags(tfTransferable)); + env.close(); + verifyNFTokenID(nftId2); + + // Alice creates one sell offer for each NFT + // Verify the offer indexes are correct in the NFTokenCreateOffer tx + // meta + uint256 const aliceOfferIndex1 = + keylet::nftoffer(alice, env.seq(alice)).key; + env(token::createOffer(alice, nftId1, drops(1)), + txflags(tfSellNFToken)); + env.close(); + verifyNFTokenOfferID(aliceOfferIndex1); + + uint256 const aliceOfferIndex2 = + keylet::nftoffer(alice, env.seq(alice)).key; + env(token::createOffer(alice, nftId2, drops(1)), + txflags(tfSellNFToken)); + env.close(); + verifyNFTokenOfferID(aliceOfferIndex2); + + // Alice cancels two offers she created + // Verify the NFTokenIDs are correct in the NFTokenCancelOffer tx + // meta + env(token::cancelOffer( + alice, {aliceOfferIndex1, aliceOfferIndex2})); + env.close(); + verifyNFTokenIDsInCancelOffer({nftId1, nftId2}); + + // Bobs creates a buy offer for nftId1 + // Verify the offer id is correct in the NFTokenCreateOffer tx meta + auto const bobBuyOfferIndex = + keylet::nftoffer(bob, env.seq(bob)).key; + env(token::createOffer(bob, nftId1, drops(1)), token::owner(alice)); + env.close(); + verifyNFTokenOfferID(bobBuyOfferIndex); + + // Alice accepts bob's buy offer + // Verify the NFTokenID is correct in the NFTokenAcceptOffer tx meta + env(token::acceptBuyOffer(alice, bobBuyOfferIndex)); + env.close(); + verifyNFTokenID(nftId1); + } + + // Check `nftoken_ids` in brokered mode + { + // Alice mints a NFT + uint256 const nftId{ + token::getNextID(env, alice, 0u, tfTransferable)}; + env(token::mint(alice, 0u), txflags(tfTransferable)); + env.close(); + verifyNFTokenID(nftId); + + // Alice creates sell offer and set broker as destination + uint256 const offerAliceToBroker = + keylet::nftoffer(alice, env.seq(alice)).key; + env(token::createOffer(alice, nftId, drops(1)), + token::destination(broker), + txflags(tfSellNFToken)); + env.close(); + verifyNFTokenOfferID(offerAliceToBroker); + + // Bob creates buy offer + uint256 const offerBobToBroker = + keylet::nftoffer(bob, env.seq(bob)).key; + env(token::createOffer(bob, nftId, drops(1)), token::owner(alice)); + env.close(); + verifyNFTokenOfferID(offerBobToBroker); + + // Check NFTokenID meta for NFTokenAcceptOffer in brokered mode + env(token::brokerOffers( + broker, offerBobToBroker, offerAliceToBroker)); + env.close(); + verifyNFTokenID(nftId); + } + + // Check if there are no duplicate nft id in Cancel transactions where + // multiple offers are cancelled for the same NFT + { + // Alice mints a NFT + uint256 const nftId{ + token::getNextID(env, alice, 0u, tfTransferable)}; + env(token::mint(alice, 0u), 
txflags(tfTransferable)); + env.close(); + verifyNFTokenID(nftId); + + // Alice creates 2 sell offers for the same NFT + uint256 const aliceOfferIndex1 = + keylet::nftoffer(alice, env.seq(alice)).key; + env(token::createOffer(alice, nftId, drops(1)), + txflags(tfSellNFToken)); + env.close(); + verifyNFTokenOfferID(aliceOfferIndex1); + + uint256 const aliceOfferIndex2 = + keylet::nftoffer(alice, env.seq(alice)).key; + env(token::createOffer(alice, nftId, drops(1)), + txflags(tfSellNFToken)); + env.close(); + verifyNFTokenOfferID(aliceOfferIndex2); + + // Make sure the metadata only has 1 nft id, since both offers are + // for the same nft + env(token::cancelOffer( + alice, {aliceOfferIndex1, aliceOfferIndex2})); + env.close(); + verifyNFTokenIDsInCancelOffer({nftId}); + } + } + void testWithFeats(FeatureBitset features) { @@ -6598,6 +6830,7 @@ class NFToken_test : public beast::unit_test::suite testIOUWithTransferFee(features); testBrokeredSaleToSelf(features); testFixNFTokenRemint(features); + testTxJsonMetaFields(features); } public: From ce997a6de8b82aa979632960bbf37efc44fd99bc Mon Sep 17 00:00:00 2001 From: Scott Determan Date: Mon, 22 May 2023 22:36:46 -0400 Subject: [PATCH 162/508] Ensure that switchover vars are initialized before use: (#4527) Global variables in different TUs are initialized in an undefined order. At least one global variable was accessing a global switchover variable. This caused the switchover variable to be accessed in an uninitialized state. Since the switchover is always explicitly set before transaction processing, this bug can not effect transaction processing, but could effect unit tests (and potentially the value of some global variables). Note: at the time of this patch the offending bug is not yet in production. --- src/ripple/basics/IOUAmount.h | 15 +++++++++--- src/ripple/basics/impl/IOUAmount.cpp | 27 ++++++++++++++++++--- src/ripple/protocol/STAmount.h | 13 +++++++--- src/ripple/protocol/impl/STAmount.cpp | 35 +++++++++++++++++++++------ 4 files changed, 72 insertions(+), 18 deletions(-) diff --git a/src/ripple/basics/IOUAmount.h b/src/ripple/basics/IOUAmount.h index 764aa38aae3..2380a7d15e1 100644 --- a/src/ripple/basics/IOUAmount.h +++ b/src/ripple/basics/IOUAmount.h @@ -186,7 +186,14 @@ mulRatio( std::uint32_t den, bool roundUp); -extern LocalValue stNumberSwitchover; +// Since many uses of the number class do not have access to a ledger, +// getSTNumberSwitchover needs to be globally accessible. + +bool +getSTNumberSwitchover(); + +void +setSTNumberSwitchover(bool v); /** RAII class to set and restore the Number switchover. 
*/ @@ -198,16 +205,16 @@ class NumberSO public: ~NumberSO() { - *stNumberSwitchover = saved_; + setSTNumberSwitchover(saved_); } NumberSO(NumberSO const&) = delete; NumberSO& operator=(NumberSO const&) = delete; - explicit NumberSO(bool v) : saved_(*stNumberSwitchover) + explicit NumberSO(bool v) : saved_(getSTNumberSwitchover()) { - *stNumberSwitchover = v; + setSTNumberSwitchover(v); } }; diff --git a/src/ripple/basics/impl/IOUAmount.cpp b/src/ripple/basics/impl/IOUAmount.cpp index c9b52874abd..e3c3411057b 100644 --- a/src/ripple/basics/impl/IOUAmount.cpp +++ b/src/ripple/basics/impl/IOUAmount.cpp @@ -27,7 +27,28 @@ namespace ripple { -LocalValue stNumberSwitchover(true); +namespace { + +// Use a static inside a function to help prevent order-of-initialzation issues +LocalValue& +getStaticSTNumberSwitchover() +{ + static LocalValue r{true}; + return r; +} +} // namespace + +bool +getSTNumberSwitchover() +{ + return *getStaticSTNumberSwitchover(); +} + +void +setSTNumberSwitchover(bool v) +{ + *getStaticSTNumberSwitchover() = v; +} /* The range for the mantissa when normalized */ static std::int64_t constexpr minMantissa = 1000000000000000ull; @@ -51,7 +72,7 @@ IOUAmount::normalize() return; } - if (*stNumberSwitchover) + if (getSTNumberSwitchover()) { Number const v{mantissa_, exponent_}; mantissa_ = v.mantissa(); @@ -117,7 +138,7 @@ IOUAmount::operator+=(IOUAmount const& other) return *this; } - if (*stNumberSwitchover) + if (getSTNumberSwitchover()) { *this = IOUAmount{Number{*this} + Number{other}}; return *this; diff --git a/src/ripple/protocol/STAmount.h b/src/ripple/protocol/STAmount.h index 0b9ca953f1a..f04b6bb0e50 100644 --- a/src/ripple/protocol/STAmount.h +++ b/src/ripple/protocol/STAmount.h @@ -536,7 +536,12 @@ isXRP(STAmount const& amount) // the low-level routine stAmountCanonicalize on an amendment switch. Only // transactions need to use this switchover. Outside of a transaction it's safe // to unconditionally use the new behavior. -extern LocalValue stAmountCanonicalizeSwitchover; + +bool +getSTAmountCanonicalizeSwitchover(); + +void +setSTAmountCanonicalizeSwitchover(bool v); /** RAII class to set and restore the STAmount canonicalize switchover. 
*/ @@ -544,14 +549,14 @@ extern LocalValue stAmountCanonicalizeSwitchover; class STAmountSO { public: - explicit STAmountSO(bool v) : saved_(*stAmountCanonicalizeSwitchover) + explicit STAmountSO(bool v) : saved_(getSTAmountCanonicalizeSwitchover()) { - *stAmountCanonicalizeSwitchover = v; + setSTAmountCanonicalizeSwitchover(v); } ~STAmountSO() { - *stAmountCanonicalizeSwitchover = saved_; + setSTAmountCanonicalizeSwitchover(saved_); } private: diff --git a/src/ripple/protocol/impl/STAmount.cpp b/src/ripple/protocol/impl/STAmount.cpp index d1a878c8b4f..02e3345944e 100644 --- a/src/ripple/protocol/impl/STAmount.cpp +++ b/src/ripple/protocol/impl/STAmount.cpp @@ -34,7 +34,28 @@ namespace ripple { -LocalValue stAmountCanonicalizeSwitchover(true); +namespace { + +// Use a static inside a function to help prevent order-of-initialzation issues +LocalValue& +getStaticSTAmountCanonicalizeSwitchover() +{ + static LocalValue r{true}; + return r; +} +} // namespace + +bool +getSTAmountCanonicalizeSwitchover() +{ + return *getStaticSTAmountCanonicalizeSwitchover(); +} + +void +setSTAmountCanonicalizeSwitchover(bool v) +{ + *getStaticSTAmountCanonicalizeSwitchover() = v; +} static const std::uint64_t tenTo14 = 100000000000000ull; static const std::uint64_t tenTo14m1 = tenTo14 - 1; @@ -395,7 +416,7 @@ operator+(STAmount const& v1, STAmount const& v2) if (v1.native()) return {v1.getFName(), getSNValue(v1) + getSNValue(v2)}; - if (*stNumberSwitchover) + if (getSTNumberSwitchover()) { auto x = v1; x = v1.iou() + v2.iou(); @@ -717,7 +738,7 @@ STAmount::canonicalize() return; } - if (*stAmountCanonicalizeSwitchover) + if (getSTAmountCanonicalizeSwitchover()) { // log(cMaxNativeN, 10) == 17 if (mOffset > 17) @@ -725,7 +746,7 @@ STAmount::canonicalize() "Native currency amount out of range"); } - if (*stNumberSwitchover && *stAmountCanonicalizeSwitchover) + if (getSTNumberSwitchover() && getSTAmountCanonicalizeSwitchover()) { Number num( mIsNegative ? -mValue : mValue, mOffset, Number::unchecked{}); @@ -744,7 +765,7 @@ STAmount::canonicalize() while (mOffset > 0) { - if (*stAmountCanonicalizeSwitchover) + if (getSTAmountCanonicalizeSwitchover()) { // N.B. do not move the overflow check to after the // multiplication @@ -765,7 +786,7 @@ STAmount::canonicalize() mIsNative = false; - if (*stNumberSwitchover) + if (getSTNumberSwitchover()) { *this = iou(); return; @@ -1208,7 +1229,7 @@ multiply(STAmount const& v1, STAmount const& v2, Issue const& issue) return STAmount(v1.getFName(), minV * maxV); } - if (*stNumberSwitchover) + if (getSTNumberSwitchover()) return {IOUAmount{Number{v1} * Number{v2}}, issue}; std::uint64_t value1 = v1.mantissa(); From 32f8ae1af1f7ef34897baea355d8b2fc59aaf6a9 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Tue, 23 May 2023 14:25:18 -0700 Subject: [PATCH 163/508] Move faulty assert (#4533) This assert was put in the wrong place, but it only triggers if shards are configured. This change moves the assert to the right place and updates it to ensure correctness. The assert could be hit after the server downloads some shards. It may be necessary to restart after the shards are downloaded. Note that asserts are normally checked only in debug builds, so release packages should not be affected. 
Introduced in: #4319 (66627b26cfae8e1c902546f0778dd9b013aedc5a) --- src/ripple/nodestore/impl/Shard.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/ripple/nodestore/impl/Shard.cpp b/src/ripple/nodestore/impl/Shard.cpp index 14bfe487303..8d0eab81153 100644 --- a/src/ripple/nodestore/impl/Shard.cpp +++ b/src/ripple/nodestore/impl/Shard.cpp @@ -688,9 +688,6 @@ Shard::finalize(bool writeSQLite, std::optional const& referenceHash) ledger->stateMap().setLedgerSeq(ledgerSeq); ledger->txMap().setLedgerSeq(ledgerSeq); - assert( - ledger->info().seq < XRP_LEDGER_EARLIEST_FEES || - ledger->read(keylet::fees())); ledger->setImmutable(); if (!ledger->stateMap().fetchRoot( SHAMapHash{ledger->info().accountHash}, nullptr)) @@ -713,6 +710,11 @@ Shard::finalize(bool writeSQLite, std::optional const& referenceHash) if (writeSQLite && !storeSQLite(ledger)) return fail("failed storing to SQLite databases"); + assert( + ledger->info().seq == ledgerSeq && + (ledger->info().seq < XRP_LEDGER_EARLIEST_FEES || + ledger->read(keylet::fees()))); + hash = ledger->info().parentHash; next = std::move(ledger); From 029580886eb7f53e9aed9f1dcf18f396253c669c Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Tue, 23 May 2023 14:29:51 -0700 Subject: [PATCH 164/508] Set version to 1.11.0-rc2 --- src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index b1c1bab7fb5..430f805f7e2 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.10.1" +char const* const versionString = "1.11.0-rc2" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From adf672ff83d473043485f8a48d39bb5ecbd892b5 Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Tue, 30 May 2023 09:48:12 -0700 Subject: [PATCH 165/508] docs(README): add link to Clio (#4535) --- README.md | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 2b6aa5512f1..45dc2005ea2 100644 --- a/README.md +++ b/README.md @@ -6,11 +6,14 @@ The [XRP Ledger](https://xrpl.org/) is a decentralized cryptographic ledger powe [XRP](https://xrpl.org/xrp.html) is a public, counterparty-free asset native to the XRP Ledger, and is designed to bridge the many different currencies in use worldwide. XRP is traded on the open-market and is available for anyone to access. The XRP Ledger was created in 2012 with a finite supply of 100 billion units of XRP. ## rippled -The server software that powers the XRP Ledger is called `rippled` and is available in this repository under the permissive [ISC open-source license](LICENSE.md). The `rippled` server software is written primarily in C++ and runs on a variety of platforms. The `rippled` server software can run in several modes depending on its [configuration](https://xrpl.org/rippled-server-modes.html). +The server software that powers the XRP Ledger is called `rippled` and is available in this repository under the permissive [ISC open-source license](LICENSE.md). The `rippled` server software is written primarily in C++ and runs on a variety of platforms. 
The `rippled` server software can run in several modes depending on its [configuration](https://xrpl.org/rippled-server-modes.html). + +If you are interested in running an **API Server** (including a **Full History Server**) or a **Reporting Mode** server, take a look at [Clio](https://github.com/XRPLF/clio). rippled Reporting Mode is expected to be replaced by Clio. ### Build from Source * [Read the build instructions in `BUILD.md`](BUILD.md) +* If you encounter any issues, please [open an issue](https://github.com/XRPLF/rippled/issues) ## Key Features of the XRP Ledger @@ -53,10 +56,14 @@ Some of the directories under `src` are external repositories included using git-subtree. See those directories' README files for more details. -## See Also +## Additional Documentation * [XRP Ledger Dev Portal](https://xrpl.org/) * [Setup and Installation](https://xrpl.org/install-rippled.html) * [Source Documentation (Doxygen)](https://xrplf.github.io/rippled/) + +## See Also + +* [Clio API Server for the XRP Ledger](https://github.com/XRPLF/clio) * [Mailing List for Release Announcements](https://groups.google.com/g/ripple-server) * [Learn more about the XRP Ledger (YouTube)](https://www.youtube.com/playlist?list=PLJQ55Tj1hIVZtJ_JdTvSum2qMTsedWkNi) From adde0c2d11f60d0dff66e086ac32ff7847088a29 Mon Sep 17 00:00:00 2001 From: oeggert <117319296+oeggert@users.noreply.github.com> Date: Wed, 31 May 2023 11:55:47 -0700 Subject: [PATCH 166/508] docs(BUILD): restructure content for better readability (#4514) Follow-up to discussion #4433 --- BUILD.md | 402 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 203 insertions(+), 199 deletions(-) diff --git a/BUILD.md b/BUILD.md index 741ce2ba61d..4ef5e5aad2c 100644 --- a/BUILD.md +++ b/BUILD.md @@ -1,3 +1,12 @@ +> These instructions assume you have a C++ development environment ready +> with Git, Python, Conan, CMake, and a C++ compiler. For help setting one up +> on Linux, macOS, or Windows, see [our guide](./docs/build/environment.md). +> +> These instructions also assume a basic familiarity with Conan and CMake. +> If you are unfamiliar with Conan, +> you can read our [crash course](./docs/build/conan.md) +> or the official [Getting Started][3] walkthrough. + ## Branches For a stable release, choose the `master` branch or one of the [tagged @@ -13,256 +22,253 @@ For the latest release candidate, choose the `release` branch. git checkout release ``` -If you are contributing or want the latest set of untested features, -then use the `develop` branch. +For the latest set of untested features, or to contribute, choose the `develop` +branch. ``` git checkout develop ``` -## Platforms +## Minimum Requirements -rippled is written in the C++20 dialect and includes the `` header. -The [minimum compiler versions][2] that can compile this dialect are given -below: +- [Python 3.7](https://www.python.org/downloads/) +- [Conan 1.55](https://conan.io/downloads.html) +- [CMake 3.16](https://cmake.org/download/) -| Compiler | Minimum Version -|---|--- -| GCC | 10 -| Clang | 13 -| Apple Clang | 13.1.6 -| MSVC | 19.23 +`rippled` is written in the C++20 dialect and includes the `` header. +The [minimum compiler versions][2] required are: -We do not recommend Windows for rippled production use at this time. -As of January 2023, the Ubuntu platform has received the highest level of -quality assurance, testing, and support. -Additionally, 32-bit Windows development is not supported. 
+| Compiler | Version | +|-------------|---------| +| GCC | 10 | +| Clang | 13 | +| Apple Clang | 13.1.6 | +| MSVC | 19.23 | -Visual Studio 2022 is not yet supported. -This is because rippled is not compatible with [Boost][] versions 1.78 or 1.79, -but Conan cannot build Boost versions released earlier than them with VS 2022. -We expect that rippled will be compatible with Boost 1.80, which should be -released in August 2022. -Until then, we advise Windows developers to use Visual Studio 2019. +We don't recommend Windows for `rippled` production at this time. As of +January 2023, Ubuntu has the highest level of quality assurance, testing, +and support. -[Boost]: https://www.boost.org/ +Windows developers should use Visual Studio 2019. `rippled` isn't +compatible with [Boost](https://www.boost.org/) 1.78 or 1.79, and Conan +can't build earlier Boost versions. +**Note:** 32-bit Windows development isn't supported. -## Prerequisites -> **Warning** -> These instructions assume you have a C++ development environment ready -> with Git, Python, Conan, CMake, and a C++ compiler. -> For help setting one up on Linux, macOS, or Windows, -> please see [our guide](./docs/build/environment.md). -> -> These instructions further assume a basic familiarity with Conan and CMake. -> If you are unfamiliar with Conan, -> then please read our [crash course](./docs/build/conan.md) -> or the official [Getting Started][3] walkthrough. +## Steps -To build this package, you will need Python (>= 3.7), -[Conan][] (>= 1.55, < 2), and [CMake][] (>= 3.16). -[Conan]: https://conan.io/downloads.html -[CMake]: https://cmake.org/download/ +### Set Up Conan -You'll need at least one Conan profile: +1. (Optional) If you've never used Conan, use autodetect to set up a default profile. -``` -conan profile new default --detect -``` + ``` + conan profile new default --detect + ``` -You'll need to compile in the C++20 dialect: +2. Update the compiler settings. -``` -conan profile update settings.compiler.cppstd=20 default -``` + ``` + conan profile update settings.compiler.cppstd=20 default + ``` -Linux developers will commonly have a default Conan [profile][] that compiles -with GCC and links with libstdc++. -If you are linking with libstdc++ (see profile setting `compiler.libcxx`), -then you will need to choose the `libstdc++11` ABI: + Linux developers will commonly have a default Conan [profile][] that compiles + with GCC and links with libstdc++. + If you are linking with libstdc++ (see profile setting `compiler.libcxx`), + then you will need to choose the `libstdc++11` ABI. -``` -conan profile update settings.compiler.libcxx=libstdc++11 default -``` + ``` + conan profile update settings.compiler.libcxx=libstdc++11 default + ``` -We find it necessary to use the x64 native build tools on Windows. -An easy way to do that is to run the shortcut "x64 Native Tools Command -Prompt" for the version of Visual Studio that you have installed. + On Windows, you should use the x64 native build tools. + An easy way to do that is to run the shortcut "x64 Native Tools Command + Prompt" for the version of Visual Studio that you have installed. -Windows developers must build rippled and its dependencies for the x64 -architecture: + Windows developers must also build `rippled` and its dependencies for the x64 + architecture. 
-``` -conan profile update settings.arch=x86_64 default -``` + ``` + conan profile update settings.arch=x86_64 default + ``` -If you have multiple compilers installed on your platform, -then you'll need to make sure that Conan and CMake select the one you want to -use. -This setting will set the correct variables (`CMAKE__COMPILER`) in the -generated CMake toolchain file: +3. (Optional) If you have multiple compilers installed on your platform, + make sure that Conan and CMake select the one you want to use. + This setting will set the correct variables (`CMAKE__COMPILER`) + in the generated CMake toolchain file. -``` -conan profile update 'conf.tools.build:compiler_executables={"c": "", "cpp": ""}' default -``` + ``` + conan profile update 'conf.tools.build:compiler_executables={"c": "", "cpp": ""}' default + ``` -It should choose the compiler for dependencies as well, -but not all of them have a Conan recipe that respects this setting (yet). -For the rest, you can set these environment variables: + It should choose the compiler for dependencies as well, + but not all of them have a Conan recipe that respects this setting (yet). + For the rest, you can set these environment variables: -``` -conan profile update env.CC= default -conan profile update env.CXX= default -``` + ``` + conan profile update env.CC= default + conan profile update env.CXX= default + ``` -Export our [Conan recipe for Snappy](./external/snappy). -It does not explicitly link the C++ standard library, -which allows you to statically link it with GCC, if you want. +4. Export our [Conan recipe for Snappy](./external/snappy). + It doesn't explicitly link the C++ standard library, + which allows you to statically link it with GCC, if you want. -``` -conan export external/snappy snappy/1.1.9@ -``` + ``` + conan export external/snappy snappy/1.1.9@ + ``` -Export our [Conan recipe for SOCI](./external/soci). -It patches their CMake to correctly import its dependencies. +5. Export our [Conan recipe for SOCI](./external/soci). + It patches their CMake to correctly import its dependencies. -``` -conan export external/soci soci/4.0.3@ -``` + ``` + conan export external/soci soci/4.0.3@ + ``` -## How to build and test +### Build and Test -Let's start with a couple of examples of common workflows. -The first is for a single-configuration generator (e.g. Unix Makefiles) on -Linux or macOS: +1. Create a build directory and move into it. -``` -mkdir .build -cd .build -conan install .. --output-folder . --build missing --settings build_type=Release -cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release .. -cmake --build . -./rippled --unittest -``` + ``` + mkdir .build + cd .build + ``` -The second is for a multi-configuration generator (e.g. Visual Studio) on -Windows: + You can use any directory name. Conan treats your working directory as an + install folder and generates files with implementation details. + You don't need to worry about these files, but make sure to change + your working directory to your build directory before calling Conan. -``` -mkdir .build -cd .build -conan install .. --output-folder . --build missing --settings build_type=Release --settings compiler.runtime=MT -conan install .. --output-folder . --build missing --settings build_type=Debug --settings compiler.runtime=MTd -cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake .. -cmake --build . --config Release -cmake --build . 
--config Debug -./Release/rippled --unittest -./Debug/rippled --unittest -``` + **Note:** You can specify a directory for the installation files by adding + the `install-folder` or `-if` option to every `conan install` command + in the next step. -Now to explain the individual steps in each example: +2. Generate CMake files for every configuration you want to build. -1. Create a build directory (and move into it). - - You can choose any name you want. - - Conan will generate some files in what it calls the "install folder". - These files are implementation details that you don't need to worry about. - By default, the install folder is your current working directory. - If you don't move into your build directory before calling Conan, - then you may be annoyed to see it polluting your project root directory - with these files. - To make Conan put them in your build directory, - you'll have to add the option - `--install-folder` or `-if` to every `conan install` command. - -1. Generate CMake files for every configuration you want to build. + ``` + conan install .. --output-folder . --build missing --settings build_type=Release + conan install .. --output-folder . --build missing --settings build_type=Debug + ``` For a single-configuration generator, e.g. `Unix Makefiles` or `Ninja`, you only need to run this command once. For a multi-configuration generator, e.g. `Visual Studio`, you may want to run it more than once. - Each of these commands should have a different `build_type` setting. - A second command with the same `build_type` setting will just overwrite - the files generated by the first. - You can pass the build type on the command line with `--settings - build_type=$BUILD_TYPE` or in the profile itself, under the section - `[settings]`, with the key `build_type`. - - If you are using a Microsoft Visual C++ compiler, then you will need to - ensure consistency between the `build_type` setting and the - `compiler.runtime` setting. + Each of these commands should also have a different `build_type` setting. + A second command with the same `build_type` setting will overwrite the files + generated by the first. You can pass the build type on the command line with + `--settings build_type=$BUILD_TYPE` or in the profile itself, + under the section `[settings]` with the key `build_type`. + + If you are using a Microsoft Visual C++ compiler, + then you will need to ensure consistency between the `build_type` setting + and the `compiler.runtime` setting. + When `build_type` is `Release`, `compiler.runtime` should be `MT`. + When `build_type` is `Debug`, `compiler.runtime` should be `MTd`. -1. Configure CMake once. + ``` + conan install .. --output-folder . --build missing --settings build_type=Release --settings compiler.runtime=MT + conan install .. --output-folder . --build missing --settings build_type=Debug --settings compiler.runtime=MTd + ``` + +3. Configure CMake and pass the toolchain file generated by Conan, located at + `$OUTPUT_FOLDER/build/generators/conan_toolchain.cmake`. - For all choices of generator, pass the toolchain file generated by Conan. - It will be located at - `$OUTPUT_FOLDER/build/generators/conan_toolchain.cmake`. - If you are using a single-configuration generator, then pass the CMake - variable [`CMAKE_BUILD_TYPE`][build_type] and make sure it matches the - `build_type` setting you chose in the previous step. + Single-config generators: + + ``` + cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake -DCMAKE_BUILD_TYPE=Release .. 
+ ``` - This step is where you may pass build options for rippled. + Pass the CMake variable [`CMAKE_BUILD_TYPE`][build_type] + and make sure it matches the `build_type` setting you chose in the previous + step. -1. Build rippled. + Multi-config gnerators: - For a multi-configuration generator, you must pass the option `--config` - to select the build configuration. - For a single-configuration generator, it will build whatever configuration - you passed for `CMAKE_BUILD_TYPE`. + ``` + cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake .. + ``` -1. Test rippled. + **Note:** You can pass build options for `rippled` in this step. - The exact location of rippled in your build directory - depends on your choice of CMake generator. - You can run unit tests by passing `--unittest`. - Pass `--help` to see the rest of the command line options. +4. Build `rippled`. + For a single-configuration generator, it will build whatever configuration + you passed for `CMAKE_BUILD_TYPE`. For a multi-configuration generator, + you must pass the option `--config` to select the build configuration. -### Options + Single-config generators: -The `unity` option allows you to select between [unity][5] and non-unity -builds. -Unity builds may be faster for the first build (at the cost of much -more memory) since they concatenate sources into fewer translation -units. -Non-unity builds may be faster for incremental builds, and can be helpful for -detecting `#include` omissions. + ``` + cmake --build . + ``` -Below are the most commonly used options, -with their default values in parentheses. + Multi-config generators: + + ``` + cmake --build . --config Release + cmake --build . --config Debug + ``` -- `assert` (OFF): Enable assertions. -- `reporting` (OFF): Build the reporting mode feature. -- `tests` (ON): Build tests. -- `unity` (ON): Configure a [unity build][5]. -- `san` (): Enable a sanitizer with Clang. Choices are `thread` and `address`. +5. Test rippled. + Single-config generators: -### Troubleshooting + ``` + ./rippled --unittest + ``` -#### Conan + Multi-config generators: -If you find trouble building dependencies after changing Conan settings, -then you should retry after removing the Conan cache: + ``` + ./Release/rippled --unittest + ./Debug/rippled --unittest + ``` + + The location of `rippled` in your build directory depends on your CMake + generator. Pass `--help` to see the rest of the command line options. + + +## Options + +| Option | Default Value | Description | +| --- | ---| ---| +| `assert` | OFF | Enable assertions. +| `reporting` | OFF | Build the reporting mode feature. | +| `tests` | ON | Build tests. | +| `unity` | ON | Configure a unity build. | +| `san` | N/A | Enable a sanitizer with Clang. Choices are `thread` and `address`. | + +[Unity builds][5] may be faster for the first build +(at the cost of much more memory) since they concatenate sources into fewer +translation units. Non-unity builds may be faster for incremental builds, +and can be helpful for detecting `#include` omissions. + + +## Troubleshooting + + +### Conan + +If you have trouble building dependencies after changing Conan settings, +try removing the Conan cache. ``` rm -rf ~/.conan/data ``` -#### no std::result_of +### no std::result_of If your compiler version is recent enough to have removed `std::result_of` as -part of C++20, e.g. Apple Clang 15.0, -then you might need to add a preprocessor definition to your bulid: +part of C++20, e.g. 
Apple Clang 15.0, then you might need to add a preprocessor +definition to your build. ``` conan profile update 'options.boost:extra_b2_flags="define=BOOST_ASIO_HAS_STD_INVOKE_RESULT"' default @@ -273,42 +279,40 @@ conan profile update 'conf.tools.build:cxxflags+=["-DBOOST_ASIO_HAS_STD_INVOKE_R ``` -#### recompile with -fPIC +### recompile with -fPIC + +If you get a linker error suggesting that you recompile Boost with +position-independent code, such as: ``` /usr/bin/ld.gold: error: /home/username/.conan/data/boost/1.77.0/_/_/package/.../lib/libboost_container.a(alloc_lib.o): requires unsupported dynamic reloc 11; recompile with -fPIC ``` -If you get a linker error like the one above suggesting that you recompile -Boost with position-independent code, the reason is most likely that Conan -downloaded a bad binary distribution of the dependency. -For now, this seems to be a [bug][1] in Conan just for Boost 1.77.0 compiled -with GCC for Linux. -The solution is to build the dependency locally by passing `--build boost` -when calling `conan install`: +Conan most likely downloaded a bad binary distribution of the dependency. +This seems to be a [bug][1] in Conan just for Boost 1.77.0 compiled with GCC +for Linux. The solution is to build the dependency locally by passing +`--build boost` when calling `conan install`. ``` conan install --build boost ... ``` -## How to add a dependency +## Add a Dependency -If you want to experiment with a new package, here are the steps to get it -working: +If you want to experiment with a new package, follow these steps: 1. Search for the package on [Conan Center](https://conan.io/center/). -1. In [`conanfile.py`](./conanfile.py): - 1. Add a version of the package to the `requires` property. - 1. Change any default options for the package by adding them to the - `default_options` property (with syntax `'$package:$option': $value`) -1. In [`CMakeLists.txt`](./CMakeLists.txt): - 1. Add a call to `find_package($package REQUIRED)`. - 1. Link a library from the package to the target `ripple_libs` (search for - the existing call to `target_link_libraries(ripple_libs INTERFACE ...)`). -1. Start coding! Don't forget to include whatever headers you need from the - package. +2. Modify [`conanfile.py`](./conanfile.py): + - Add a version of the package to the `requires` property. + - Change any default options for the package by adding them to the + `default_options` property (with syntax `'$package:$option': $value`). +3. Modify [`CMakeLists.txt`](./CMakeLists.txt): + - Add a call to `find_package($package REQUIRED)`. + - Link a library from the package to the target `ripple_libs` + (search for the existing call to `target_link_libraries(ripple_libs INTERFACE ...)`). +4. Start coding! Don't forget to include whatever headers you need from the package. [1]: https://github.com/conan-io/conan-center-index/issues/13168 From f709311762f4a4c97dbc47748980242ae9d1c466 Mon Sep 17 00:00:00 2001 From: Scott Determan Date: Wed, 31 May 2023 16:28:33 -0400 Subject: [PATCH 167/508] Fix unaligned load and stores: (#4528) (#4531) Misaligned load and store operations are supported by both Intel and ARM CPUs. However, in C++, these operations are undefined behavior (UB). Substituting these operations with a `memcpy` fixes this UB. The compiled assembly code is equivalent to the original, so there is no performance penalty to using memcpy. For context: The unaligned load and store operations fixed here were originally introduced in the slab allocator (#4218). 
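For readers who have not seen the idiom, here is a minimal standalone sketch of the pattern this commit applies (illustrative only; `readU64` is a hypothetical helper, not code taken from the diff):

```
#include <cstdint>
#include <cstring>

// Undefined behavior if p is not suitably aligned for std::uint64_t:
//     return *reinterpret_cast<std::uint64_t const*>(p);
//
// Well-defined replacement. Per the commit message, optimizing compilers emit
// equivalent assembly for the memcpy, so there is no performance penalty.
std::uint64_t
readU64(unsigned char const* p)
{
    std::uint64_t v;
    std::memcpy(&v, p, sizeof(v));
    return v;
}
```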
--- src/ripple/basics/SlabAllocator.h | 25 +++++++++++++------ .../basics/impl/partitioned_unordered_map.cpp | 6 ++++- src/ripple/beast/hash/impl/xxhash.cpp | 12 +++++++-- 3 files changed, 33 insertions(+), 10 deletions(-) diff --git a/src/ripple/basics/SlabAllocator.h b/src/ripple/basics/SlabAllocator.h index c966af318b6..ece96d0b873 100644 --- a/src/ripple/basics/SlabAllocator.h +++ b/src/ripple/basics/SlabAllocator.h @@ -21,16 +21,18 @@ #define RIPPLE_BASICS_SLABALLOCATOR_H_INCLUDED #include + +#include +#include +#include + #include #include #include #include +#include #include -#include -#include -#include - #if BOOST_OS_LINUX #include #endif @@ -76,7 +78,9 @@ class SlabAllocator while (data + item <= p_ + size_) { - *reinterpret_cast(data) = l_; + // Use memcpy to avoid unaligned UB + // (will optimize to equivalent code) + std::memcpy(data, &l_, sizeof(std::uint8_t*)); l_ = data; data += item; } @@ -115,7 +119,11 @@ class SlabAllocator ret = l_; if (ret) - l_ = *reinterpret_cast(ret); + { + // Use memcpy to avoid unaligned UB + // (will optimize to equivalent code) + std::memcpy(&l_, ret, sizeof(std::uint8_t*)); + } } return ret; @@ -136,7 +144,10 @@ class SlabAllocator assert(own(ptr)); std::lock_guard l(m_); - *reinterpret_cast(ptr) = l_; + + // Use memcpy to avoid unaligned UB + // (will optimize to equivalent code) + std::memcpy(ptr, &l_, sizeof(std::uint8_t*)); l_ = ptr; } }; diff --git a/src/ripple/basics/impl/partitioned_unordered_map.cpp b/src/ripple/basics/impl/partitioned_unordered_map.cpp index 6fb2cbec1d4..3ced32eddff 100644 --- a/src/ripple/basics/impl/partitioned_unordered_map.cpp +++ b/src/ripple/basics/impl/partitioned_unordered_map.cpp @@ -31,7 +31,11 @@ namespace ripple { static std::size_t extract(uint256 const& key) { - return *reinterpret_cast(key.data()); + std::size_t result; + // Use memcpy to avoid unaligned UB + // (will optimize to equivalent code) + std::memcpy(&result, key.data(), sizeof(std::size_t)); + return result; } static std::size_t diff --git a/src/ripple/beast/hash/impl/xxhash.cpp b/src/ripple/beast/hash/impl/xxhash.cpp index 76d5e7997f5..4a6c85db815 100644 --- a/src/ripple/beast/hash/impl/xxhash.cpp +++ b/src/ripple/beast/hash/impl/xxhash.cpp @@ -33,6 +33,8 @@ You can contact the author at : #include +#include + //************************************** // Tuning parameters //************************************** @@ -87,7 +89,7 @@ You can contact the author at : //************************************** // Includes & Memory related functions //************************************** -//#include "xxhash.h" +// #include "xxhash.h" // Modify the local functions below should you wish to use some other memory // routines for malloc(), free() #include @@ -260,7 +262,13 @@ FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) { if (align == XXH_unaligned) - return endian == XXH_littleEndian ? A64(ptr) : XXH_swap64(A64(ptr)); + { + // Use memcpy to avoid unaligned UB + U64 tmp_aligned; + std::memcpy(&tmp_aligned, ptr, sizeof(U64)); + return endian == XXH_littleEndian ? tmp_aligned + : XXH_swap64(tmp_aligned); + } else return endian == XXH_littleEndian ? *(U64*)ptr : XXH_swap64(*(U64*)ptr); } From c9a586c2437bc8ffd22e946c82e1cbe906e1fc40 Mon Sep 17 00:00:00 2001 From: Scott Determan Date: Mon, 5 Jun 2023 18:50:03 -0400 Subject: [PATCH 168/508] Add missing includes for gcc 13.1: (#4555) gcc 13.1 failed to compile due to missing headers. This patch adds the needed headers. 
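For background, a hypothetical illustration of the class of breakage (the mechanism is an assumption drawn from the GCC 13 release notes, not something stated in the diff): libstdc++ 13 stopped providing `<cstdint>` transitively through other standard headers, so translation units that name fixed-width integer types must include it themselves.

```
// Not code from this repository; a minimal "include what you use" sketch.
#include <cstdint>   // must be explicit with gcc 13.1; older toolchains tolerated its absence
#include <string>

std::uint64_t
byteLength(std::string const& s)
{
    return static_cast<std::uint64_t>(s.size());
}
```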
--- src/ripple/basics/StringUtilities.h | 2 ++ src/ripple/basics/base64.h | 1 + src/ripple/json/impl/json_reader.cpp | 2 ++ 3 files changed, 5 insertions(+) diff --git a/src/ripple/basics/StringUtilities.h b/src/ripple/basics/StringUtilities.h index 48de772ca41..8af81a37403 100644 --- a/src/ripple/basics/StringUtilities.h +++ b/src/ripple/basics/StringUtilities.h @@ -25,7 +25,9 @@ #include #include + #include +#include #include #include #include diff --git a/src/ripple/basics/base64.h b/src/ripple/basics/base64.h index ef34192d0b0..05a61133f83 100644 --- a/src/ripple/basics/base64.h +++ b/src/ripple/basics/base64.h @@ -57,6 +57,7 @@ #ifndef RIPPLE_BASICS_BASE64_H_INCLUDED #define RIPPLE_BASICS_BASE64_H_INCLUDED +#include #include namespace ripple { diff --git a/src/ripple/json/impl/json_reader.cpp b/src/ripple/json/impl/json_reader.cpp index 6686b38f49e..c92bea6d7e0 100644 --- a/src/ripple/json/impl/json_reader.cpp +++ b/src/ripple/json/impl/json_reader.cpp @@ -19,8 +19,10 @@ #include #include + #include #include +#include #include #include From 5644c8704fc2120509cfcb8c9c15f205d7344972 Mon Sep 17 00:00:00 2001 From: Scott Schurr Date: Thu, 8 Jun 2023 17:31:19 -0700 Subject: [PATCH 169/508] Trivial: add comments for NFToken-related invariants (#4558) --- src/ripple/app/tx/impl/InvariantCheck.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/ripple/app/tx/impl/InvariantCheck.h b/src/ripple/app/tx/impl/InvariantCheck.h index c3bb0216426..fe17db44fa7 100644 --- a/src/ripple/app/tx/impl/InvariantCheck.h +++ b/src/ripple/app/tx/impl/InvariantCheck.h @@ -318,6 +318,17 @@ class ValidNewAccountRoot beast::Journal const&); }; +/** + * @brief Invariant: Validates several invariants for NFToken pages. + * + * The following checks are made: + * - The page is correctly associated with the owner. + * - The page is correctly ordered between the next and previous links. + * - The page contains at least one and no more than 32 NFTokens. + * - The NFTokens on this page do not belong on a lower or higher page. + * - The NFTokens are correctly sorted on the page. + * - Each URI, if present, is not empty. + */ class ValidNFTokenPage { bool badEntry_ = false; @@ -342,6 +353,19 @@ class ValidNFTokenPage beast::Journal const&); }; +/** + * @brief Invariant: Validates counts of NFTokens after all transaction types. + * + * The following checks are made: + * - The number of minted or burned NFTokens can only be changed by + * NFTokenMint or NFTokenBurn transactions. + * - A successful NFTokenMint must increase the number of NFTokens. + * - A failed NFTokenMint must not change the number of minted NFTokens. + * - An NFTokenMint transaction cannot change the number of burned NFTokens. + * - A successful NFTokenBurn must increase the number of burned NFTokens. + * - A failed NFTokenBurn must not change the number of burned NFTokens. + * - An NFTokenBurn transaction cannot change the number of minted NFTokens. + */ class NFTokenCountTracking { std::uint32_t beforeMintedTotal = 0; From 5d011c7e6b399b988ad193d199e6e3dba92e49ad Mon Sep 17 00:00:00 2001 From: Denis Angell Date: Fri, 9 Jun 2023 16:37:18 +0000 Subject: [PATCH 170/508] fix node size estimation (#4536) Fix a bug in the `NODE_SIZE` auto-detection feature in `Config.cpp`. Specifically, this patch corrects the calculation for the total amount of RAM available, which was previously returned in bytes, but is now being returned in units of the system's memory unit. 
Additionally, the patch adjusts the node size based on the number of available hardware threads of execution. --- src/ripple/core/impl/Config.cpp | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/src/ripple/core/impl/Config.cpp b/src/ripple/core/impl/Config.cpp index ebeec9af36f..f835ca8df04 100644 --- a/src/ripple/core/impl/Config.cpp +++ b/src/ripple/core/impl/Config.cpp @@ -68,10 +68,8 @@ namespace detail { [[nodiscard]] std::uint64_t getMemorySize() { - struct sysinfo si; - - if (sysinfo(&si) == 0) - return static_cast(si.totalram); + if (struct sysinfo si; sysinfo(&si) == 0) + return static_cast(si.totalram) * si.mem_unit; return 0; } @@ -128,7 +126,7 @@ sizedItems {SizedItem::lgrDBCache, {{ 4, 8, 16, 32, 128 }}}, {SizedItem::openFinalLimit, {{ 8, 16, 32, 64, 128 }}}, {SizedItem::burstSize, {{ 4, 8, 16, 32, 48 }}}, - {SizedItem::ramSizeGB, {{ 8, 12, 16, 24, 32 }}}, + {SizedItem::ramSizeGB, {{ 6, 8, 12, 24, 0 }}}, {SizedItem::accountIdCacheSize, {{ 20047, 50053, 77081, 150061, 300007 }}} }}; @@ -265,7 +263,8 @@ getEnvVar(char const* name) } Config::Config() - : j_(beast::Journal::getNullSink()), ramSize_(detail::getMemorySize()) + : j_(beast::Journal::getNullSink()) + , ramSize_(detail::getMemorySize() / (1024 * 1024 * 1024)) { } @@ -290,22 +289,18 @@ Config::setupControl(bool bQuiet, bool bSilent, bool bStandalone) threshold.second.begin(), threshold.second.end(), [this](std::size_t limit) { - return (ramSize_ / (1024 * 1024 * 1024)) < limit; + return (limit == 0) || (ramSize_ < limit); }); + assert(ns != threshold.second.end()); + if (ns != threshold.second.end()) NODE_SIZE = std::distance(threshold.second.begin(), ns); // Adjust the size based on the number of hardware threads of // execution available to us: - if (auto const hc = std::thread::hardware_concurrency()) - { - if (hc == 1) - NODE_SIZE = 0; - - if (hc < 4) - NODE_SIZE = std::min(NODE_SIZE, 1); - } + if (auto const hc = std::thread::hardware_concurrency(); hc != 0) + NODE_SIZE = std::min(hc / 2, NODE_SIZE); } assert(NODE_SIZE <= 4); From 77c0a62a7472dbc4dc9ed97d3141840a3acb5ce8 Mon Sep 17 00:00:00 2001 From: Chenna Keshava B S <21219765+ckeshava@users.noreply.github.com> Date: Fri, 9 Jun 2023 17:33:28 -0700 Subject: [PATCH 171/508] fix: remove redundant moves (#4565) - Resolve gcc compiler warning: AccountObjects.cpp:182:47: warning: redundant move in initialization [-Wredundant-move] - The std::move() operation on trivially copyable types may generate a compile warning in newer versions of gcc. - Remove extraneous header (unused imports) from a unit test file. 
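A minimal self-contained sketch of the warning being silenced; the type and function names below are hypothetical stand-ins, and the quoted diagnostic is the one cited in the commit message:

```
#include <optional>
#include <utility>

struct AccountID            // stand-in for a small, trivially copyable value type
{
    unsigned char data[20];
};

std::optional<AccountID>
parseAccount()
{
    return AccountID{};
}

void
example()
{
    auto id = parseAccount();

    // Newer gcc reports
    //   "warning: redundant move in initialization [-Wredundant-move]"
    // because moving a trivially copyable type is just a copy:
    //   auto const accountID{std::move(id.value())};

    auto const accountID{id.value()};   // equivalent and warning-free
    (void)accountID;
}
```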
--- src/ripple/rpc/handlers/AccountObjects.cpp | 5 ++--- src/test/rpc/ReportingETL_test.cpp | 1 - 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/ripple/rpc/handlers/AccountObjects.cpp b/src/ripple/rpc/handlers/AccountObjects.cpp index e8304c670de..65cd12f2d41 100644 --- a/src/ripple/rpc/handlers/AccountObjects.cpp +++ b/src/ripple/rpc/handlers/AccountObjects.cpp @@ -19,7 +19,6 @@ #include #include -#include #include #include #include @@ -66,7 +65,7 @@ doAccountNFTs(RPC::JsonContext& context) RPC::inject_error(rpcACT_MALFORMED, result); return result; } - auto const accountID{std::move(id.value())}; + auto const accountID{id.value()}; if (!ledger->exists(keylet::account(accountID))) return rpcError(rpcACT_NOT_FOUND); @@ -179,7 +178,7 @@ doAccountObjects(RPC::JsonContext& context) RPC::inject_error(rpcACT_MALFORMED, result); return result; } - auto const accountID{std::move(id.value())}; + auto const accountID{id.value()}; if (!ledger->exists(keylet::account(accountID))) return rpcError(rpcACT_NOT_FOUND); diff --git a/src/test/rpc/ReportingETL_test.cpp b/src/test/rpc/ReportingETL_test.cpp index 77284dd776d..ed055d0fd93 100644 --- a/src/test/rpc/ReportingETL_test.cpp +++ b/src/test/rpc/ReportingETL_test.cpp @@ -18,7 +18,6 @@ */ //============================================================================== -#include #include #include #include From beb0904a322bcaef777bedfe17c92c2d2f56e916 Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Fri, 9 Jun 2023 09:54:02 -0700 Subject: [PATCH 172/508] Set version to 1.11.0-rc3 --- src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index 430f805f7e2..031ca744701 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.11.0-rc2" +char const* const versionString = "1.11.0-rc3" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 264280edd79b7f764536e02459f33f66a59c0531 Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Tue, 20 Jun 2023 11:40:11 -0700 Subject: [PATCH 173/508] Set version to 1.11.0 * Add release notes --- RELEASENOTES.md | 124 +++++++++++++++++++++++++ src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 2 files changed, 125 insertions(+), 1 deletion(-) diff --git a/RELEASENOTES.md b/RELEASENOTES.md index 08ceb5b3dd4..e83855b79db 100644 --- a/RELEASENOTES.md +++ b/RELEASENOTES.md @@ -8,6 +8,130 @@ This document contains the release notes for `rippled`, the reference server imp Have new ideas? Need help with setting up your node? [Please open an issue here](https://github.com/xrplf/rippled/issues/new/choose). +# Introducing XRP Ledger version 1.11.0 + +Version 1.11.0 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. + +This release reduces memory usage, introduces the `fixNFTokenRemint` amendment, and adds new features and bug fixes. For example, the new NetworkID field in transactions helps to prevent replay attacks with side-chains. 
+ +[Sign Up for Future Release Announcements](https://groups.google.com/g/ripple-server) + + + +## Action Required + +The `fixNFTokenRemint` amendment is now open for voting according to the XRP Ledger's [amendment process](https://xrpl.org/amendments.html), which enables protocol changes following two weeks of >80% support from trusted validators. + +If you operate an XRP Ledger server, upgrade to version 1.11.0 by July 5 to ensure service continuity. The exact time that protocol changes take effect depends on the voting decisions of the decentralized network. + + +## Install / Upgrade + +On supported platforms, see the [instructions on installing or updating `rippled`](https://xrpl.org/install-rippled.html). + + +## What's Changed + +### New Features and Improvements + +* Allow port numbers be be specified using a either a colon or a space by @RichardAH in https://github.com/XRPLF/rippled/pull/4328 +* Eliminate memory allocation from critical path: by @nbougalis in https://github.com/XRPLF/rippled/pull/4353 +* Make it easy for projects to depend on libxrpl by @thejohnfreeman in https://github.com/XRPLF/rippled/pull/4449 +* Add the ability to mark amendments as obsolete by @ximinez in https://github.com/XRPLF/rippled/pull/4291 +* Always create the FeeSettings object in genesis ledger by @ximinez in https://github.com/XRPLF/rippled/pull/4319 +* Log exception messages in several locations by @drlongle in https://github.com/XRPLF/rippled/pull/4400 +* Parse flags in account_info method by @drlongle in https://github.com/XRPLF/rippled/pull/4459 +* Add NFTokenPages to account_objects RPC by @RichardAH in https://github.com/XRPLF/rippled/pull/4352 +* add jss fields used by clio `nft_info` by @ledhed2222 in https://github.com/XRPLF/rippled/pull/4320 +* Introduce a slab-based memory allocator and optimize SHAMapItem by @nbougalis in https://github.com/XRPLF/rippled/pull/4218 +* Add NetworkID field to transactions to help prevent replay attacks on and from side-chains by @RichardAH in https://github.com/XRPLF/rippled/pull/4370 +* If present, set quorum based on command line. 
by @mtrippled in https://github.com/XRPLF/rippled/pull/4489 +* API does not accept seed or public key for account by @drlongle in https://github.com/XRPLF/rippled/pull/4404 +* Add `nftoken_id`, `nftoken_ids` and `offer_id` meta fields into NFT `Tx` responses by @shawnxie999 in https://github.com/XRPLF/rippled/pull/4447 + +### Bug Fixes + +* fix(gateway_balances): handle overflow exception by @RichardAH in https://github.com/XRPLF/rippled/pull/4355 +* fix(ValidatorSite): handle rare null pointer dereference in timeout by @ximinez in https://github.com/XRPLF/rippled/pull/4420 +* RPC commands understand markers derived from all ledger object types by @ximinez in https://github.com/XRPLF/rippled/pull/4361 +* `fixNFTokenRemint`: prevent NFT re-mint: by @shawnxie999 in https://github.com/XRPLF/rippled/pull/4406 +* Fix a case where ripple::Expected returned a json array, not a value by @scottschurr in https://github.com/XRPLF/rippled/pull/4401 +* fix: Ledger data returns an empty list (instead of null) when all entries are filtered out by @drlongle in https://github.com/XRPLF/rippled/pull/4398 +* Fix unit test ripple.app.LedgerData by @drlongle in https://github.com/XRPLF/rippled/pull/4484 +* Fix the fix for std::result_of by @thejohnfreeman in https://github.com/XRPLF/rippled/pull/4496 +* Fix errors for Clang 16 by @thejohnfreeman in https://github.com/XRPLF/rippled/pull/4501 +* Ensure that switchover vars are initialized before use: by @seelabs in https://github.com/XRPLF/rippled/pull/4527 +* Move faulty assert by @ximinez in https://github.com/XRPLF/rippled/pull/4533 +* Fix unaligned load and stores: (#4528) by @seelabs in https://github.com/XRPLF/rippled/pull/4531 +* fix node size estimation by @dangell7 in https://github.com/XRPLF/rippled/pull/4536 +* fix: remove redundant moves by @ckeshava in https://github.com/XRPLF/rippled/pull/4565 + +### Code Cleanup and Testing + +* Replace compare() with the three-way comparison operator in base_uint, Issue and Book by @drlongle in https://github.com/XRPLF/rippled/pull/4411 +* Rectify the import paths of boost::function_output_iterator by @ckeshava in https://github.com/XRPLF/rippled/pull/4293 +* Expand Linux test matrix by @thejohnfreeman in https://github.com/XRPLF/rippled/pull/4454 +* Add patched recipe for SOCI by @thejohnfreeman in https://github.com/XRPLF/rippled/pull/4510 +* Switch to self-hosted runners for macOS by @thejohnfreeman in https://github.com/XRPLF/rippled/pull/4511 +* [TRIVIAL] Add missing includes by @seelabs in https://github.com/XRPLF/rippled/pull/4555 + +### Docs + +* Refactor build instructions by @thejohnfreeman in https://github.com/XRPLF/rippled/pull/4381 +* Add install instructions for package managers by @thejohnfreeman in https://github.com/XRPLF/rippled/pull/4472 +* Fix typo by @solmsted in https://github.com/XRPLF/rippled/pull/4508 +* Update environment.md by @sappenin in https://github.com/XRPLF/rippled/pull/4498 +* Update BUILD.md by @oeggert in https://github.com/XRPLF/rippled/pull/4514 +* Trivial: add comments for NFToken-related invariants by @scottschurr in https://github.com/XRPLF/rippled/pull/4558 + +## New Contributors +* @drlongle made their first contribution in https://github.com/XRPLF/rippled/pull/4411 +* @ckeshava made their first contribution in https://github.com/XRPLF/rippled/pull/4293 +* @solmsted made their first contribution in https://github.com/XRPLF/rippled/pull/4508 +* @sappenin made their first contribution in https://github.com/XRPLF/rippled/pull/4498 +* @oeggert made their first contribution 
in https://github.com/XRPLF/rippled/pull/4514 + +**Full Changelog**: https://github.com/XRPLF/rippled/compare/1.10.1...1.11.0 + + +### GitHub + +The public source code repository for `rippled` is hosted on GitHub at . + +We welcome all contributions and invite everyone to join the community of XRP Ledger developers to help build the Internet of Value. + +### Credits + +The following people contributed directly to this release: +- Alloy Networks <45832257+alloynetworks@users.noreply.github.com> +- Brandon Wilson +- Chenna Keshava B S <21219765+ckeshava@users.noreply.github.com> +- David Fuelling +- Denis Angell +- Ed Hennis +- Elliot Lee +- John Freeman +- Mark Travis +- Nik Bougalis +- RichardAH +- Scott Determan +- Scott Schurr +- Shawn Xie <35279399+shawnxie999@users.noreply.github.com> +- drlongle +- ledhed2222 +- oeggert <117319296+oeggert@users.noreply.github.com> +- solmsted + + +Bug Bounties and Responsible Disclosures: +We welcome reviews of the rippled code and urge researchers to +responsibly disclose any issues they may find. + +To report a bug, please send a detailed report to: + + bugs@xrpl.org + + # Introducing XRP Ledger version 1.10.1 Version 1.10.1 of `rippled`, the reference server implementation of the XRP Ledger protocol, is now available. This release restores packages for Ubuntu 18.04. diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index 031ca744701..734773fa03e 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.11.0-rc3" +char const* const versionString = "1.11.0" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 71d7d67fa39af3af5f36e136a7b6e5ddced08e49 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Wed, 21 Jun 2023 11:51:37 -0700 Subject: [PATCH 174/508] Enable the Beta RPC API (v2) for all unit tests: (#4573) * Enable api_version 2, which is currently in beta. It is expected to be marked stable by the next stable release. * This does not change any defaults. * The only existing tests changed were one that set the same flag, which was now redundant, and a couple that tested versioning explicitly. 
--- src/test/jtx/impl/envconfig.cpp | 3 +++ src/test/rpc/AccountInfo_test.cpp | 5 +---- src/test/rpc/Version_test.cpp | 16 +++++++++++++--- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/src/test/jtx/impl/envconfig.cpp b/src/test/jtx/impl/envconfig.cpp index 77c43f5e40a..7f8163f5ee7 100644 --- a/src/test/jtx/impl/envconfig.cpp +++ b/src/test/jtx/impl/envconfig.cpp @@ -49,6 +49,9 @@ setupConfigForUnitTests(Config& cfg) cfg.FEES.account_reserve = XRP(200).value().xrp().drops(); cfg.FEES.owner_reserve = XRP(50).value().xrp().drops(); + // The Beta API (currently v2) is always available to tests + cfg.BETA_RPC_API = true; + cfg.overwrite(ConfigSection::nodeDatabase(), "type", "memory"); cfg.overwrite(ConfigSection::nodeDatabase(), "path", "main"); cfg.deprecatedClearSection(ConfigSection::importNodeDatabase()); diff --git a/src/test/rpc/AccountInfo_test.cpp b/src/test/rpc/AccountInfo_test.cpp index 6ec4740bac2..b8e479225d1 100644 --- a/src/test/rpc/AccountInfo_test.cpp +++ b/src/test/rpc/AccountInfo_test.cpp @@ -206,10 +206,7 @@ class AccountInfo_test : public beast::unit_test::suite testSignerListsApiVersion2() { using namespace jtx; - Env env{*this, envconfig([](std::unique_ptr c) { - c->loadFromString("\n[beta_rpc_api]\n1\n"); - return c; - })}; + Env env{*this}; Account const alice{"alice"}; env.fund(XRP(1000), alice); diff --git a/src/test/rpc/Version_test.cpp b/src/test/rpc/Version_test.cpp index 360b29664a1..60ffd30fcf6 100644 --- a/src/test/rpc/Version_test.cpp +++ b/src/test/rpc/Version_test.cpp @@ -76,11 +76,16 @@ class Version_test : public beast::unit_test::suite std::to_string(RPC::apiMinimumSupportedVersion - 1) + "}"); BEAST_EXPECT(badVersion(re)); + BEAST_EXPECT(env.app().config().BETA_RPC_API); re = env.rpc( "json", "version", "{\"api_version\": " + - std::to_string(RPC::apiMaximumSupportedVersion + 1) + "}"); + std::to_string( + std::max( + RPC::apiMaximumSupportedVersion, RPC::apiBetaVersion) + + 1) + + "}"); BEAST_EXPECT(badVersion(re)); re = env.rpc("json", "version", "{\"api_version\": \"a\"}"); @@ -190,20 +195,25 @@ class Version_test : public beast::unit_test::suite using namespace test::jtx; Env env{*this}; + BEAST_EXPECT(env.app().config().BETA_RPC_API); auto const without_api_verion = std::string("{ ") + "\"jsonrpc\": \"2.0\", " "\"ripplerpc\": \"2.0\", " "\"id\": 5, " "\"method\": \"version\", " "\"params\": {}}"; - auto const with_wrong_api_verion = std::string("{ ") + + auto const with_wrong_api_verion = + std::string("{ ") + "\"jsonrpc\": \"2.0\", " "\"ripplerpc\": \"2.0\", " "\"id\": 6, " "\"method\": \"version\", " "\"params\": { " "\"api_version\": " + - std::to_string(RPC::apiMaximumSupportedVersion + 1) + "}}"; + std::to_string( + std::max(RPC::apiMaximumSupportedVersion, RPC::apiBetaVersion) + + 1) + + "}}"; auto re = env.rpc( "json2", '[' + without_api_verion + ", " + with_wrong_api_verion + ']'); From 724a3015998bfc7b301fd0bd285f27e6f9e1b1b9 Mon Sep 17 00:00:00 2001 From: Scott Schurr Date: Fri, 23 Jun 2023 08:20:25 +0300 Subject: [PATCH 175/508] `fixReducedOffersV1`: prevent offers from blocking order books: (#4512) Curtail the occurrence of order books that are blocked by reduced offers with the implementation of the fixReducedOffersV1 amendment. This commit identifies three ways in which offers can be reduced: 1. A new offer can be partially crossed by existing offers, so the new offer is reduced when placed in the ledger. 2. An in-ledger offer can be partially crossed by a new offer in a transaction. 
So the in-ledger offer is reduced by the new offer. 3. An in-ledger offer may be under-funded. In this case the in-ledger offer is scaled down to match the available funds. Reduced offers can block order books if the effective quality of the reduced offer is worse than the quality of the original offer (from the perspective of the taker). It turns out that, for small values, the quality of the reduced offer can be significantly affected by the rounding mode used during scaling computations. This commit adjusts some rounding modes so that the quality of a reduced offer is always at least as good (from the taker's perspective) as the original offer. The amendment is titled fixReducedOffersV1 because additional ways of producing reduced offers may come to light. Therefore, there may be a future need for a V2 amendment. --- Builds/CMake/RippledCore.cmake | 1 + src/ripple/app/paths/impl/BookStep.cpp | 30 +- src/ripple/app/tx/impl/CreateOffer.cpp | 18 +- src/ripple/app/tx/impl/OfferStream.cpp | 18 +- src/ripple/protocol/Feature.h | 3 +- src/ripple/protocol/Quality.h | 23 + src/ripple/protocol/STAmount.h | 19 +- src/ripple/protocol/impl/Feature.cpp | 1 + src/ripple/protocol/impl/Quality.cpp | 29 +- src/ripple/protocol/impl/STAmount.cpp | 189 +++++++- src/test/app/Offer_test.cpp | 13 +- src/test/app/ReducedOffer_test.cpp | 622 +++++++++++++++++++++++++ 12 files changed, 938 insertions(+), 28 deletions(-) create mode 100644 src/test/app/ReducedOffer_test.cpp diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index db7757f9c2f..a853a6cff53 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -721,6 +721,7 @@ if (tests) src/test/app/PseudoTx_test.cpp src/test/app/RCLCensorshipDetector_test.cpp src/test/app/RCLValidations_test.cpp + src/test/app/ReducedOffer_test.cpp src/test/app/Regression_test.cpp src/test/app/SHAMapStore_test.cpp src/test/app/SetAuth_test.cpp diff --git a/src/ripple/app/paths/impl/BookStep.cpp b/src/ripple/app/paths/impl/BookStep.cpp index a6b2c59611e..555d90fac8c 100644 --- a/src/ripple/app/paths/impl/BookStep.cpp +++ b/src/ripple/app/paths/impl/BookStep.cpp @@ -531,14 +531,22 @@ limitStepOut( TOut& ownerGives, std::uint32_t transferRateIn, std::uint32_t transferRateOut, - TOut const& limit) + TOut const& limit, + Rules const& rules) { if (limit < stpAmt.out) { stpAmt.out = limit; ownerGives = mulRatio( stpAmt.out, transferRateOut, QUALITY_ONE, /*roundUp*/ false); - ofrAmt = ofrQ.ceil_out(ofrAmt, stpAmt.out); + if (rules.enabled(fixReducedOffersV1)) + // It turns out that the ceil_out implementation has some slop in + // it. ceil_out_strict removes that slop. But removing that slop + // affects transaction outcomes, so the change must be made using + // an amendment. 
+ ofrAmt = ofrQ.ceil_out_strict(ofrAmt, stpAmt.out, /*roundUp*/ true); + else + ofrAmt = ofrQ.ceil_out(ofrAmt, stpAmt.out); stpAmt.in = mulRatio(ofrAmt.in, transferRateIn, QUALITY_ONE, /*roundUp*/ true); } @@ -577,6 +585,7 @@ BookStep::forEachOffer( sb, afView, book_, sb.parentCloseTime(), counter, j_); bool const flowCross = afView.rules().enabled(featureFlowCross); + bool const fixReduced = afView.rules().enabled(fixReducedOffersV1); bool offerAttempted = false; std::optional ofrQ; while (offers.step()) @@ -654,7 +663,16 @@ BookStep::forEachOffer( ownerGives = funds; stpAmt.out = mulRatio( ownerGives, QUALITY_ONE, ofrOutRate, /*roundUp*/ false); - ofrAmt = ofrQ->ceil_out(ofrAmt, stpAmt.out); + + // It turns out we can prevent order book blocking by (strictly) + // rounding down the ceil_out() result. This adjustment changes + // transaction outcomes, so it must be made under an amendment. + if (fixReduced) + ofrAmt = ofrQ->ceil_out_strict( + ofrAmt, stpAmt.out, /* roundUp */ false); + else + ofrAmt = ofrQ->ceil_out(ofrAmt, stpAmt.out); + stpAmt.in = mulRatio(ofrAmt.in, ofrInRate, QUALITY_ONE, /*roundUp*/ true); } @@ -770,7 +788,8 @@ BookStep::revImp( ownerGivesAdj, transferRateIn, transferRateOut, - remainingOut); + remainingOut, + afView.rules()); remainingOut = beast::zero; savedIns.insert(stpAdjAmt.in); savedOuts.insert(remainingOut); @@ -922,7 +941,8 @@ BookStep::fwdImp( ownerGivesAdjRev, transferRateIn, transferRateOut, - remainingOut); + remainingOut, + afView.rules()); if (stpAdjAmtRev.in == remainingIn) { diff --git a/src/ripple/app/tx/impl/CreateOffer.cpp b/src/ripple/app/tx/impl/CreateOffer.cpp index 4f1d9108bca..dd01a64b5f2 100644 --- a/src/ripple/app/tx/impl/CreateOffer.cpp +++ b/src/ripple/app/tx/impl/CreateOffer.cpp @@ -824,8 +824,22 @@ CreateOffer::flowCross( // what is a good threshold to check? afterCross.in.clear(); - afterCross.out = divRound( - afterCross.in, rate, takerAmount.out.issue(), true); + afterCross.out = [&]() { + // Careful analysis showed that rounding up this + // divRound result could lead to placing a reduced + // offer in the ledger that blocks order books. So + // the fixReducedOffersV1 amendment changes the + // behavior to round down instead. + if (psb.rules().enabled(fixReducedOffersV1)) + return divRoundStrict( + afterCross.in, + rate, + takerAmount.out.issue(), + false); + + return divRound( + afterCross.in, rate, takerAmount.out.issue(), true); + }(); } else { diff --git a/src/ripple/app/tx/impl/OfferStream.cpp b/src/ripple/app/tx/impl/OfferStream.cpp index 58fd209ca0b..5933d9c3838 100644 --- a/src/ripple/app/tx/impl/OfferStream.cpp +++ b/src/ripple/app/tx/impl/OfferStream.cpp @@ -182,17 +182,33 @@ TOfferStreamBase::shouldRmSmallIncreasedQOffer() const } TTakerGets const ownerFunds = toAmount(*ownerFunds_); + bool const fixReduced = view_.rules().enabled(fixReducedOffersV1); auto const effectiveAmounts = [&] { if (offer_.owner() != offer_.issueOut().account && ownerFunds < ofrAmts.out) { - // adjust the amounts by owner funds + // adjust the amounts by owner funds. + // + // It turns out we can prevent order book blocking by rounding down + // the ceil_out() result. This adjustment changes transaction + // results, so it must be made under an amendment. + if (fixReduced) + return offer_.quality().ceil_out_strict( + ofrAmts, ownerFunds, /* roundUp */ false); + return offer_.quality().ceil_out(ofrAmts, ownerFunds); } return ofrAmts; }(); + // If either the effective in or out are zero then remove the offer. 
+ // This can happen with fixReducedOffersV1 since it rounds down. + if (fixReduced && + (effectiveAmounts.in.signum() <= 0 || + effectiveAmounts.out.signum() <= 0)) + return true; + if (effectiveAmounts.in > TTakerPays::minPositiveAmount()) return false; diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index e4b0e3d4acd..48198e38315 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -74,7 +74,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. A LogicError on startup will verify this. -static constexpr std::size_t numFeatures = 58; +static constexpr std::size_t numFeatures = 59; /** Amendments that this server supports and the default voting behavior. Whether they are enabled depends on the Rules defined in the validated @@ -345,6 +345,7 @@ extern uint256 const featureXRPFees; extern uint256 const fixUniversalNumber; extern uint256 const fixNonFungibleTokensV1_2; extern uint256 const fixNFTokenRemint; +extern uint256 const fixReducedOffersV1; } // namespace ripple diff --git a/src/ripple/protocol/Quality.h b/src/ripple/protocol/Quality.h index 9de137d8770..840d8d444e1 100644 --- a/src/ripple/protocol/Quality.h +++ b/src/ripple/protocol/Quality.h @@ -223,6 +223,29 @@ class Quality toAmount(stRes.in), toAmount(stRes.out)); } + Amounts + ceil_out_strict(Amounts const& amount, STAmount const& limit, bool roundUp) + const; + + template + TAmounts + ceil_out_strict( + TAmounts const& amount, + Out const& limit, + bool roundUp) const + { + if (amount.out <= limit) + return amount; + + // Use the existing STAmount implementation for now, but consider + // replacing with code specific to IOUAMount and XRPAmount + Amounts stAmt(toSTAmount(amount.in), toSTAmount(amount.out)); + STAmount stLim(toSTAmount(limit)); + auto const stRes = ceil_out_strict(stAmt, stLim, roundUp); + return TAmounts( + toAmount(stRes.in), toAmount(stRes.out)); + } + /** Returns `true` if lhs is lower quality than `rhs`. Lower quality means the taker receives a worse deal. Higher quality is better for the taker. diff --git a/src/ripple/protocol/STAmount.h b/src/ripple/protocol/STAmount.h index f04b6bb0e50..63f97bb48fe 100644 --- a/src/ripple/protocol/STAmount.h +++ b/src/ripple/protocol/STAmount.h @@ -503,7 +503,7 @@ divide(STAmount const& v1, STAmount const& v2, Issue const& issue); STAmount multiply(STAmount const& v1, STAmount const& v2, Issue const& issue); -// multiply, or divide rounding result in specified direction +// multiply rounding result in specified direction STAmount mulRound( STAmount const& v1, @@ -511,6 +511,15 @@ mulRound( Issue const& issue, bool roundUp); +// multiply following the rounding directions more precisely. +STAmount +mulRoundStrict( + STAmount const& v1, + STAmount const& v2, + Issue const& issue, + bool roundUp); + +// divide rounding result in specified direction STAmount divRound( STAmount const& v1, @@ -518,6 +527,14 @@ divRound( Issue const& issue, bool roundUp); +// divide following the rounding directions more precisely. +STAmount +divRoundStrict( + STAmount const& v1, + STAmount const& v2, + Issue const& issue, + bool roundUp); + // Someone is offering X for Y, what is the rate? 
// Rate: smaller is better, the taker wants the most out: in/out // VFALCO TODO Return a Quality object diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index 6b8e7719d00..09923109671 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -452,6 +452,7 @@ REGISTER_FEATURE(XRPFees, Supported::yes, VoteBehavior::De REGISTER_FIX (fixUniversalNumber, Supported::yes, VoteBehavior::DefaultNo); REGISTER_FIX (fixNonFungibleTokensV1_2, Supported::yes, VoteBehavior::DefaultNo); REGISTER_FIX (fixNFTokenRemint, Supported::yes, VoteBehavior::DefaultNo); +REGISTER_FIX (fixReducedOffersV1, Supported::yes, VoteBehavior::DefaultNo); // The following amendments are obsolete, but must remain supported // because they could potentially get enabled. diff --git a/src/ripple/protocol/impl/Quality.cpp b/src/ripple/protocol/impl/Quality.cpp index 97e1b8a9fd7..f7b9d6b3c41 100644 --- a/src/ripple/protocol/impl/Quality.cpp +++ b/src/ripple/protocol/impl/Quality.cpp @@ -81,12 +81,20 @@ Quality::ceil_in(Amounts const& amount, STAmount const& limit) const return amount; } -Amounts -Quality::ceil_out(Amounts const& amount, STAmount const& limit) const +template +static Amounts +ceil_out_impl( + Amounts const& amount, + STAmount const& limit, + bool roundUp, + Quality const& quality) { if (amount.out > limit) { - Amounts result(mulRound(limit, rate(), amount.in.issue(), true), limit); + Amounts result( + MulRoundFunc(limit, quality.rate(), amount.in.issue(), roundUp), + limit); // Clamp in if (result.in > amount.in) result.in = amount.in; @@ -97,6 +105,21 @@ Quality::ceil_out(Amounts const& amount, STAmount const& limit) const return amount; } +Amounts +Quality::ceil_out(Amounts const& amount, STAmount const& limit) const +{ + return ceil_out_impl(amount, limit, /* roundUp */ true, *this); +} + +Amounts +Quality::ceil_out_strict( + Amounts const& amount, + STAmount const& limit, + bool roundUp) const +{ + return ceil_out_impl(amount, limit, roundUp, *this); +} + Quality composed_quality(Quality const& lhs, Quality const& rhs) { diff --git a/src/ripple/protocol/impl/STAmount.cpp b/src/ripple/protocol/impl/STAmount.cpp index 02e3345944e..90a646787ae 100644 --- a/src/ripple/protocol/impl/STAmount.cpp +++ b/src/ripple/protocol/impl/STAmount.cpp @@ -1266,8 +1266,28 @@ multiply(STAmount const& v1, STAmount const& v2, Issue const& issue) v1.negative() != v2.negative()); } +// This is the legacy version of canonicalizeRound. It's been in use +// for years, so it is deeply embedded in the behavior of cross-currency +// transactions. +// +// However in 2022 it was noticed that the rounding characteristics were +// surprising. When the code converts from IOU-like to XRP-like there may +// be a fraction of the IOU-like representation that is too small to be +// represented in drops. `canonicalizeRound()` currently does some unusual +// rounding. +// +// 1. If the fractional part is greater than or equal to 0.1, then the +// number of drops is rounded up. +// +// 2. However, if the fractional part is less than 0.1 (for example, +// 0.099999), then the number of drops is rounded down. +// +// The XRP Ledger has this rounding behavior baked in. But there are +// situations where this rounding behavior led to undesirable outcomes. +// So an alternative rounding approach was introduced. You'll see that +// alternative below. 
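// For a concrete feel of the two cases above (hypothetical numbers):
// converting 123.4567, measured in drops, yields 124 drops, because the
// fractional part 0.4567 is at least 0.1 and rounds up; converting
// 123.0999 yields 123 drops, because 0.0999 is below 0.1 and rounds down
// even though the intent of the call was to round away from zero. The
// canonicalizeRoundStrict() below keeps track of the digits discarded
// along the way (hadRemainder), so a requested round-up takes effect even
// when the leftover fraction is smaller than 0.1.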
static void -canonicalizeRound(bool native, std::uint64_t& value, int& offset) +canonicalizeRound(bool native, std::uint64_t& value, int& offset, bool) { if (native) { @@ -1301,8 +1321,100 @@ canonicalizeRound(bool native, std::uint64_t& value, int& offset) } } -STAmount -mulRound( +// The original canonicalizeRound did not allow the rounding direction to +// be specified. It also ignored some of the bits that could contribute to +// rounding decisions. canonicalizeRoundStrict() tracks all of the bits in +// the value being rounded. +static void +canonicalizeRoundStrict( + bool native, + std::uint64_t& value, + int& offset, + bool roundUp) +{ + if (native) + { + if (offset < 0) + { + bool hadRemainder = false; + + while (offset < -1) + { + // It would be better to use std::lldiv than to separately + // compute the remainder. But std::lldiv does not support + // unsigned arguments. + std::uint64_t const newValue = value / 10; + hadRemainder |= (value != (newValue * 10)); + value = newValue; + ++offset; + } + value += + (hadRemainder && roundUp) ? 10 : 9; // Add before last divide + value /= 10; + ++offset; + } + } + else if (value > STAmount::cMaxValue) + { + while (value > (10 * STAmount::cMaxValue)) + { + value /= 10; + ++offset; + } + value += 9; // add before last divide + value /= 10; + ++offset; + } +} + +namespace { + +// saveNumberRoundMode doesn't do quite enough for us. What we want is a +// Number::RoundModeGuard that sets the new mode and restores the old mode +// when it leaves scope. Since Number doesn't have that facility, we'll +// build it here. +class NumberRoundModeGuard +{ + saveNumberRoundMode saved_; + +public: + explicit NumberRoundModeGuard(Number::rounding_mode mode) noexcept + : saved_{Number::setround(mode)} + { + } + + NumberRoundModeGuard(NumberRoundModeGuard const&) = delete; + + NumberRoundModeGuard& + operator=(NumberRoundModeGuard const&) = delete; +}; + +// We need a class that has an interface similar to NumberRoundModeGuard +// but does nothing. +class DontAffectNumberRoundMode +{ +public: + explicit DontAffectNumberRoundMode(Number::rounding_mode mode) noexcept + { + } + + DontAffectNumberRoundMode(DontAffectNumberRoundMode const&) = delete; + + DontAffectNumberRoundMode& + operator=(DontAffectNumberRoundMode const&) = delete; +}; + +} // anonymous namespace + +// Pass the canonicalizeRound function pointer as a template parameter. +// +// We might need to use NumberRoundModeGuard. Allow the caller +// to pass either that or a replacement as a template parameter. +template < + void (*CanonicalizeFunc)(bool, std::uint64_t&, int&, bool), + typename MightSaveRound> +static STAmount +mulRoundImpl( STAmount const& v1, STAmount const& v2, Issue const& issue, @@ -1365,8 +1477,15 @@ mulRound( int offset = offset1 + offset2 + 14; if (resultNegative != roundUp) - canonicalizeRound(xrp, amount, offset); - STAmount result(issue, amount, offset, resultNegative); + { + CanonicalizeFunc(xrp, amount, offset, roundUp); + } + STAmount result = [&]() { + // If appropriate, tell Number to round down. This gives the desired + // result from STAmount::canonicalize. 
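        // MightSaveRound is the template parameter introduced above:
        // NumberRoundModeGuard for the new strict entry points, or the
        // no-op DontAffectNumberRoundMode for the legacy ones. The guard
        // is plain RAII -- construction calls Number::setround() and
        // remembers the previous mode, which is restored when the guard
        // leaves scope at the end of this lambda. A rough usage sketch
        // (illustrative only):
        //
        //     {
        //         NumberRoundModeGuard g(Number::towards_zero);
        //         // Number arithmetic here rounds toward zero
        //     }   // previous rounding mode restored
        //
        // That way mulRound keeps its pre-amendment behavior while
        // mulRoundStrict gets deterministic rounding.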
+ MightSaveRound const savedRound(Number::towards_zero); + return STAmount(issue, amount, offset, resultNegative); + }(); if (roundUp && !resultNegative && !result) { @@ -1388,7 +1507,32 @@ mulRound( } STAmount -divRound( +mulRound( + STAmount const& v1, + STAmount const& v2, + Issue const& issue, + bool roundUp) +{ + return mulRoundImpl( + v1, v2, issue, roundUp); +} + +STAmount +mulRoundStrict( + STAmount const& v1, + STAmount const& v2, + Issue const& issue, + bool roundUp) +{ + return mulRoundImpl( + v1, v2, issue, roundUp); +} + +// We might need to use NumberRoundModeGuard. Allow the caller +// to pass either that or a replacement as a template parameter. +template +static STAmount +divRoundImpl( STAmount const& num, STAmount const& den, Issue const& issue, @@ -1437,9 +1581,18 @@ divRound( int offset = numOffset - denOffset - 17; if (resultNegative != roundUp) - canonicalizeRound(isXRP(issue), amount, offset); + canonicalizeRound(isXRP(issue), amount, offset, roundUp); + + STAmount result = [&]() { + // If appropriate, tell Number the rounding mode we are using. + // Note that "roundUp == true" actually means "round away from zero". + // Otherwise round toward zero. + using enum Number::rounding_mode; + MightSaveRound const savedRound( + roundUp ^ resultNegative ? upward : downward); + return STAmount(issue, amount, offset, resultNegative); + }(); - STAmount result(issue, amount, offset, resultNegative); if (roundUp && !resultNegative && !result) { if (isXRP(issue)) @@ -1459,4 +1612,24 @@ divRound( return result; } +STAmount +divRound( + STAmount const& num, + STAmount const& den, + Issue const& issue, + bool roundUp) +{ + return divRoundImpl(num, den, issue, roundUp); +} + +STAmount +divRoundStrict( + STAmount const& num, + STAmount const& den, + Issue const& issue, + bool roundUp) +{ + return divRoundImpl(num, den, issue, roundUp); +} + } // namespace ripple diff --git a/src/test/app/Offer_test.cpp b/src/test/app/Offer_test.cpp index 0d1a4326440..9d97fb692a6 100644 --- a/src/test/app/Offer_test.cpp +++ b/src/test/app/Offer_test.cpp @@ -2126,18 +2126,17 @@ class Offer_test : public beast::unit_test::suite BEAST_EXPECT( jrr[jss::node][sfBalance.fieldName][jss::value] == "49.96666666666667"); + jrr = ledgerEntryState(env, bob, gw, "USD"); - if (NumberSwitchOver) + Json::Value const bobsUSD = + jrr[jss::node][sfBalance.fieldName][jss::value]; + if (!NumberSwitchOver) { - BEAST_EXPECT( - jrr[jss::node][sfBalance.fieldName][jss::value] == - "-0.9665000000333333"); + BEAST_EXPECT(bobsUSD == "-0.966500000033334"); } else { - BEAST_EXPECT( - jrr[jss::node][sfBalance.fieldName][jss::value] == - "-0.966500000033334"); + BEAST_EXPECT(bobsUSD == "-0.9665000000333333"); } } } diff --git a/src/test/app/ReducedOffer_test.cpp b/src/test/app/ReducedOffer_test.cpp new file mode 100644 index 00000000000..f82efcb7fc8 --- /dev/null +++ b/src/test/app/ReducedOffer_test.cpp @@ -0,0 +1,622 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2022 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include + +namespace ripple { +namespace test { + +class ReducedOffer_test : public beast::unit_test::suite +{ + static auto + ledgerEntryOffer( + jtx::Env& env, + jtx::Account const& acct, + std::uint32_t offer_seq) + { + Json::Value jvParams; + jvParams[jss::offer][jss::account] = acct.human(); + jvParams[jss::offer][jss::seq] = offer_seq; + return env.rpc( + "json", "ledger_entry", to_string(jvParams))[jss::result]; + } + + static bool + offerInLedger( + jtx::Env& env, + jtx::Account const& acct, + std::uint32_t offerSeq) + { + Json::Value ledgerOffer = ledgerEntryOffer(env, acct, offerSeq); + return !( + ledgerOffer.isMember(jss::error) && + ledgerOffer[jss::error].asString() == "entryNotFound"); + } + + // Common code to clean up unneeded offers. + static void + cleanupOldOffers( + jtx::Env& env, + jtx::Account const& acct1, + jtx::Account const& acct2, + std::uint32_t acct1OfferSeq, + std::uint32_t acct2OfferSeq) + { + env(offer_cancel(acct1, acct1OfferSeq)); + env(offer_cancel(acct2, acct2OfferSeq)); + env.close(); + } + +public: + void + testPartialCrossNewXrpIouQChange() + { + testcase("exercise partial cross new XRP/IOU offer Q change"); + + using namespace jtx; + + auto const gw = Account{"gateway"}; + auto const alice = Account{"alice"}; + auto const bob = Account{"bob"}; + auto const USD = gw["USD"]; + + // Make one test run without fixReducedOffersV1 and one with. + for (FeatureBitset features : + {supported_amendments() - fixReducedOffersV1, + supported_amendments() | fixReducedOffersV1}) + { + Env env{*this, features}; + + // Make sure none of the offers we generate are under funded. + env.fund(XRP(10'000'000), gw, alice, bob); + env.close(); + + env(trust(alice, USD(10'000'000))); + env(trust(bob, USD(10'000'000))); + env.close(); + + env(pay(gw, bob, USD(10'000'000))); + env.close(); + + // Lambda that: + // 1. Exercises one offer pair, + // 2. Collects the results, and + // 3. Cleans up for the next offer pair. + // Returns 1 if the crossed offer has a bad rate for the book. + auto exerciseOfferPair = + [this, &env, &alice, &bob]( + Amounts const& inLedger, + Amounts const& newOffer) -> unsigned int { + // Put inLedger offer in the ledger so newOffer can cross it. + std::uint32_t const aliceOfferSeq = env.seq(alice); + env(offer(alice, inLedger.in, inLedger.out)); + env.close(); + + // Now alice's offer will partially cross bob's offer. + STAmount const initialRate = Quality(newOffer).rate(); + std::uint32_t const bobOfferSeq = env.seq(bob); + STAmount const bobInitialBalance = env.balance(bob); + STAmount const bobsFee = drops(10); + env(offer(bob, newOffer.in, newOffer.out, tfSell), + fee(bobsFee)); + env.close(); + STAmount const bobFinalBalance = env.balance(bob); + + // alice's offer should be fully crossed and so gone from + // the ledger. + if (!BEAST_EXPECT(!offerInLedger(env, alice, aliceOfferSeq))) + // If the in-ledger offer was not consumed then further + // results are meaningless. + return 1; + + // bob's offer should be in the ledger, but reduced in size. 
+ unsigned int badRate = 1; + { + Json::Value bobOffer = + ledgerEntryOffer(env, bob, bobOfferSeq); + + STAmount const reducedTakerGets = amountFromJson( + sfTakerGets, bobOffer[jss::node][sfTakerGets.jsonName]); + STAmount const reducedTakerPays = amountFromJson( + sfTakerPays, bobOffer[jss::node][sfTakerPays.jsonName]); + STAmount const bobGot = + env.balance(bob) + bobsFee - bobInitialBalance; + BEAST_EXPECT(reducedTakerPays < newOffer.in); + BEAST_EXPECT(reducedTakerGets < newOffer.out); + STAmount const inLedgerRate = + Quality(Amounts{reducedTakerPays, reducedTakerGets}) + .rate(); + + badRate = inLedgerRate > initialRate ? 1 : 0; + + // If the inLedgerRate is less than initial rate, then + // incrementing the mantissa of the reduced taker pays + // should result in a rate higher than initial. Check + // this to verify that the largest allowable TakerPays + // was computed. + if (badRate == 0) + { + STAmount const tweakedTakerPays = + reducedTakerPays + drops(1); + STAmount const tweakedRate = + Quality(Amounts{tweakedTakerPays, reducedTakerGets}) + .rate(); + BEAST_EXPECT(tweakedRate > initialRate); + } +#if 0 + std::cout << "Placed rate: " << initialRate + << "; in-ledger rate: " << inLedgerRate + << "; TakerPays: " << reducedTakerPays + << "; TakerGets: " << reducedTakerGets + << "; bob already got: " << bobGot << std::endl; +// #else + std::string_view filler = + inLedgerRate > initialRate ? "**" : " "; + std::cout << "| `" << reducedTakerGets << "` | `" + << reducedTakerPays << "` | `" << initialRate + << "` | " << filler << "`" << inLedgerRate << "`" + << filler << " |`" << std::endl; +#endif + } + + // In preparation for the next iteration make sure the two + // offers are gone from the ledger. + cleanupOldOffers(env, alice, bob, aliceOfferSeq, bobOfferSeq); + return badRate; + }; + + // bob's offer (the new offer) is the same every time: + Amounts const bobsOffer{ + STAmount(XRP(1)), STAmount(USD.issue(), 1, 0)}; + + // alice's offer has a slightly smaller TakerPays with each + // iteration. This should mean that the size of the offer bob + // places in the ledger should increase with each iteration. + unsigned int blockedCount = 0; + for (std::uint64_t mantissaReduce = 1'000'000'000ull; + mantissaReduce <= 5'000'000'000ull; + mantissaReduce += 20'000'000ull) + { + STAmount aliceUSD{ + bobsOffer.out.issue(), + bobsOffer.out.mantissa() - mantissaReduce, + bobsOffer.out.exponent()}; + STAmount aliceXRP{ + bobsOffer.in.issue(), bobsOffer.in.mantissa() - 1}; + Amounts alicesOffer{aliceUSD, aliceXRP}; + blockedCount += exerciseOfferPair(alicesOffer, bobsOffer); + } + + // If fixReducedOffersV1 is enabled, then none of the test cases + // should produce a potentially blocking rate. + // + // Also verify that if fixReducedOffersV1 is not enabled then + // some of the test cases produced a potentially blocking rate. + if (features[fixReducedOffersV1]) + { + BEAST_EXPECT(blockedCount == 0); + } + else + { + BEAST_EXPECT(blockedCount >= 170); + } + } + } + + void + testPartialCrossOldXrpIouQChange() + { + testcase("exercise partial cross old XRP/IOU offer Q change"); + + using namespace jtx; + + auto const gw = Account{"gateway"}; + auto const alice = Account{"alice"}; + auto const bob = Account{"bob"}; + auto const USD = gw["USD"]; + + // Make one test run without fixReducedOffersV1 and one with. 
+ for (FeatureBitset features : + {supported_amendments() - fixReducedOffersV1, + supported_amendments() | fixReducedOffersV1}) + { + // Make sure none of the offers we generate are under funded. + Env env{*this, features}; + env.fund(XRP(10'000'000), gw, alice, bob); + env.close(); + + env(trust(alice, USD(10'000'000))); + env(trust(bob, USD(10'000'000))); + env.close(); + + env(pay(gw, alice, USD(10'000'000))); + env.close(); + + // Lambda that: + // 1. Exercises one offer pair, + // 2. Collects the results, and + // 3. Cleans up for the next offer pair. + auto exerciseOfferPair = + [this, &env, &alice, &bob]( + Amounts const& inLedger, + Amounts const& newOffer) -> unsigned int { + // Get the inLedger offer into the ledger so newOffer can cross + // it. + STAmount const initialRate = Quality(inLedger).rate(); + std::uint32_t const aliceOfferSeq = env.seq(alice); + env(offer(alice, inLedger.in, inLedger.out)); + env.close(); + + // Now bob's offer will partially cross alice's offer. + std::uint32_t const bobOfferSeq = env.seq(bob); + STAmount const aliceInitialBalance = env.balance(alice); + env(offer(bob, newOffer.in, newOffer.out)); + env.close(); + STAmount const aliceFinalBalance = env.balance(alice); + + // bob's offer should not have made it into the ledger. + if (!BEAST_EXPECT(!offerInLedger(env, bob, bobOfferSeq))) + { + // If the in-ledger offer was not consumed then further + // results are meaningless. + cleanupOldOffers( + env, alice, bob, aliceOfferSeq, bobOfferSeq); + return 1; + } + // alice's offer should still be in the ledger, but reduced in + // size. + unsigned int badRate = 1; + { + Json::Value aliceOffer = + ledgerEntryOffer(env, alice, aliceOfferSeq); + + STAmount const reducedTakerGets = amountFromJson( + sfTakerGets, + aliceOffer[jss::node][sfTakerGets.jsonName]); + STAmount const reducedTakerPays = amountFromJson( + sfTakerPays, + aliceOffer[jss::node][sfTakerPays.jsonName]); + STAmount const aliceGot = + env.balance(alice) - aliceInitialBalance; + BEAST_EXPECT(reducedTakerPays < inLedger.in); + BEAST_EXPECT(reducedTakerGets < inLedger.out); + STAmount const inLedgerRate = + Quality(Amounts{reducedTakerPays, reducedTakerGets}) + .rate(); + badRate = inLedgerRate > initialRate ? 1 : 0; + + // If the inLedgerRate is less than initial rate, then + // incrementing the mantissa of the reduced taker pays + // should result in a rate higher than initial. Check + // this to verify that the largest allowable TakerPays + // was computed. + if (badRate == 0) + { + STAmount const tweakedTakerPays = + reducedTakerPays + drops(1); + STAmount const tweakedRate = + Quality(Amounts{tweakedTakerPays, reducedTakerGets}) + .rate(); + BEAST_EXPECT(tweakedRate > initialRate); + } +#if 0 + std::cout << "Placed rate: " << initialRate + << "; in-ledger rate: " << inLedgerRate + << "; TakerPays: " << reducedTakerPays + << "; TakerGets: " << reducedTakerGets + << "; alice already got: " << aliceGot + << std::endl; +// #else + std::string_view filler = badRate ? "**" : " "; + std::cout << "| `" << reducedTakerGets << "` | `" + << reducedTakerPays << "` | `" << initialRate + << "` | " << filler << "`" << inLedgerRate << "`" + << filler << " | `" << aliceGot << "` |" + << std::endl; +#endif + } + + // In preparation for the next iteration make sure the two + // offers are gone from the ledger. 
+ cleanupOldOffers(env, alice, bob, aliceOfferSeq, bobOfferSeq); + return badRate; + }; + + // alice's offer (the old offer) is the same every time: + Amounts const aliceOffer{ + STAmount(XRP(1)), STAmount(USD.issue(), 1, 0)}; + + // bob's offer has a slightly smaller TakerPays with each iteration. + // This should mean that the size of the offer alice leaves in the + // ledger should increase with each iteration. + unsigned int blockedCount = 0; + for (std::uint64_t mantissaReduce = 1'000'000'000ull; + mantissaReduce <= 4'000'000'000ull; + mantissaReduce += 20'000'000ull) + { + STAmount bobUSD{ + aliceOffer.out.issue(), + aliceOffer.out.mantissa() - mantissaReduce, + aliceOffer.out.exponent()}; + STAmount bobXRP{ + aliceOffer.in.issue(), aliceOffer.in.mantissa() - 1}; + Amounts bobsOffer{bobUSD, bobXRP}; + + blockedCount += exerciseOfferPair(aliceOffer, bobsOffer); + } + + // If fixReducedOffersV1 is enabled, then none of the test cases + // should produce a potentially blocking rate. + // + // Also verify that if fixReducedOffersV1 is not enabled then + // some of the test cases produced a potentially blocking rate. + if (features[fixReducedOffersV1]) + { + BEAST_EXPECT(blockedCount == 0); + } + else + { + BEAST_EXPECT(blockedCount > 10); + } + } + } + + void + testUnderFundedXrpIouQChange() + { + testcase("exercise underfunded XRP/IOU offer Q change"); + + // Bob places an offer that is not fully funded. + // + // This unit test compares the behavior of this situation before and + // after applying the fixReducedOffersV1 amendment. + + using namespace jtx; + auto const alice = Account{"alice"}; + auto const bob = Account{"bob"}; + auto const gw = Account{"gw"}; + auto const USD = gw["USD"]; + + // Make one test run without fixReducedOffersV1 and one with. + for (FeatureBitset features : + {supported_amendments() - fixReducedOffersV1, + supported_amendments() | fixReducedOffersV1}) + { + Env env{*this, features}; + + env.fund(XRP(10000), alice, bob, gw); + env.close(); + env.trust(USD(1000), alice, bob); + + int blockedOrderBookCount = 0; + for (STAmount initialBobUSD = USD(0.45); initialBobUSD <= USD(1); + initialBobUSD += USD(0.025)) + { + // underfund bob's offer + env(pay(gw, bob, initialBobUSD)); + env.close(); + + std::uint32_t const bobOfferSeq = env.seq(bob); + env(offer(bob, drops(2), USD(1))); + env.close(); + + // alice places an offer that would cross bob's if bob's were + // well funded. + std::uint32_t const aliceOfferSeq = env.seq(alice); + env(offer(alice, USD(1), drops(2))); + env.close(); + + // We want to detect order book blocking. If: + // 1. bob's offer is still in the ledger and + // 2. alice received no USD + // then we use that as evidence that bob's offer blocked the + // order book. + { + bool const bobsOfferGone = + !offerInLedger(env, bob, bobOfferSeq); + STAmount const aliceBalanceUSD = env.balance(alice, USD); + + // Sanity check the ledger if alice got USD. + if (aliceBalanceUSD.signum() > 0) + { + BEAST_EXPECT(aliceBalanceUSD == initialBobUSD); + BEAST_EXPECT(env.balance(bob, USD) == USD(0)); + BEAST_EXPECT(bobsOfferGone); + } + + // Track occurrences of order book blocking. + if (!bobsOfferGone && aliceBalanceUSD.signum() == 0) + { + ++blockedOrderBookCount; + } + + // In preparation for the next iteration clean up any + // leftover offers. + cleanupOldOffers( + env, alice, bob, aliceOfferSeq, bobOfferSeq); + + // Zero out alice's and bob's USD balances. 
+ if (STAmount const aliceBalance = env.balance(alice, USD); + aliceBalance.signum() > 0) + env(pay(alice, gw, aliceBalance)); + + if (STAmount const bobBalance = env.balance(bob, USD); + bobBalance.signum() > 0) + env(pay(bob, gw, bobBalance)); + + env.close(); + } + } + + // If fixReducedOffersV1 is enabled, then none of the test cases + // should produce a potentially blocking rate. + // + // Also verify that if fixReducedOffersV1 is not enabled then + // some of the test cases produced a potentially blocking rate. + if (features[fixReducedOffersV1]) + { + BEAST_EXPECT(blockedOrderBookCount == 0); + } + else + { + BEAST_EXPECT(blockedOrderBookCount > 15); + } + } + } + + void + testUnderFundedIouIouQChange() + { + testcase("exercise underfunded IOU/IOU offer Q change"); + + // Bob places an IOU/IOU offer that is not fully funded. + // + // This unit test compares the behavior of this situation before and + // after applying the fixReducedOffersV1 amendment. + + using namespace jtx; + using namespace std::chrono_literals; + auto const alice = Account{"alice"}; + auto const bob = Account{"bob"}; + auto const gw = Account{"gw"}; + + auto const USD = gw["USD"]; + auto const EUR = gw["EUR"]; + + STAmount const tinyUSD(USD.issue(), /*mantissa*/ 1, /*exponent*/ -81); + + // Make one test run without fixReducedOffersV1 and one with. + for (FeatureBitset features : + {supported_amendments() - fixReducedOffersV1, + supported_amendments() | fixReducedOffersV1}) + { + Env env{*this, features}; + + env.fund(XRP(10000), alice, bob, gw); + env.close(); + env.trust(USD(1000), alice, bob); + env.trust(EUR(1000), alice, bob); + + STAmount const eurOffer( + EUR.issue(), /*mantissa*/ 2957, /*exponent*/ -76); + STAmount const usdOffer( + USD.issue(), /*mantissa*/ 7109, /*exponent*/ -76); + + STAmount const endLoop( + USD.issue(), /*mantissa*/ 50, /*exponent*/ -81); + + int blockedOrderBookCount = 0; + for (STAmount initialBobUSD = tinyUSD; initialBobUSD <= endLoop; + initialBobUSD += tinyUSD) + { + // underfund bob's offer + env(pay(gw, bob, initialBobUSD)); + env(pay(gw, alice, EUR(100))); + env.close(); + + // This offer is underfunded + std::uint32_t bobOfferSeq = env.seq(bob); + env(offer(bob, eurOffer, usdOffer)); + env.close(); + env.require(offers(bob, 1)); + + // alice places an offer that crosses bob's. + std::uint32_t aliceOfferSeq = env.seq(alice); + env(offer(alice, usdOffer, eurOffer)); + env.close(); + + // Examine the aftermath of alice's offer. + { + bool const bobsOfferGone = + !offerInLedger(env, bob, bobOfferSeq); + STAmount aliceBalanceUSD = env.balance(alice, USD); +#if 0 + std::cout + << "bobs initial: " << initialBobUSD + << "; alice final: " << aliceBalanceUSD + << "; bobs offer: " << bobsOfferJson.toStyledString() + << std::endl; +#endif + // Sanity check the ledger if alice got USD. + if (aliceBalanceUSD.signum() > 0) + { + BEAST_EXPECT(aliceBalanceUSD == initialBobUSD); + BEAST_EXPECT(env.balance(bob, USD) == USD(0)); + BEAST_EXPECT(bobsOfferGone); + } + + // Track occurrences of order book blocking. + if (!bobsOfferGone && aliceBalanceUSD.signum() == 0) + { + ++blockedOrderBookCount; + } + } + + // In preparation for the next iteration clean up any + // leftover offers. + cleanupOldOffers(env, alice, bob, aliceOfferSeq, bobOfferSeq); + + // Zero out alice's and bob's IOU balances. 
+ auto zeroBalance = [&env, &gw]( + Account const& acct, IOU const& iou) { + if (STAmount const balance = env.balance(acct, iou); + balance.signum() > 0) + env(pay(acct, gw, balance)); + }; + + zeroBalance(alice, EUR); + zeroBalance(alice, USD); + zeroBalance(bob, EUR); + zeroBalance(bob, USD); + env.close(); + } + + // If fixReducedOffersV1 is enabled, then none of the test cases + // should produce a potentially blocking rate. + // + // Also verify that if fixReducedOffersV1 is not enabled then + // some of the test cases produced a potentially blocking rate. + if (features[fixReducedOffersV1]) + { + BEAST_EXPECT(blockedOrderBookCount == 0); + } + else + { + BEAST_EXPECT(blockedOrderBookCount > 20); + } + } + } + + void + run() override + { + testPartialCrossNewXrpIouQChange(); + testPartialCrossOldXrpIouQChange(); + testUnderFundedXrpIouQChange(); + testUnderFundedIouIouQChange(); + } +}; + +BEAST_DEFINE_TESTSUITE_PRIO(ReducedOffer, tx, ripple, 2); + +} // namespace test +} // namespace ripple From 0b812cdece975ba9eb2705a4834b673f5cf23041 Mon Sep 17 00:00:00 2001 From: drlongle Date: Fri, 23 Jun 2023 19:19:26 +0200 Subject: [PATCH 176/508] Add RPC/WS ports to server_info (#4427) Enhance the /crawl endpoint by publishing WebSocket/RPC ports in the server_info response. The function processing requests to the /crawl endpoint actually calls server_info internally, so this change enables a server to advertise its WebSocket/RPC port(s) to peers via the /crawl endpoint. `grpc` and `peer` ports are included as well. The new `ports` array contains objects, each containing a `port` for the listening port (number string), and an array `protocol` listing the supported protocol(s). This allows crawlers to build a richer topology without needing to port-scan nodes. For non-admin users (including peers), the info about *admin* ports is excluded. Also increase test coverage for RPC ServerInfo. Fix #2837. 
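As a rough illustration of the shape described above (the port numbers here
are placeholders, not defaults), a server exposing an HTTP/WS2 RPC port, a
public WebSocket port, and a gRPC port might report something like:

    "ports": [
        { "port": "5005",  "protocol": ["http", "ws2"] },
        { "port": "6006",  "protocol": ["ws"] },
        { "port": "50051", "protocol": ["grpc"] }
    ]

Admin-only ports are left out of this array unless the caller itself has
admin access.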
--- src/ripple/app/main/Application.cpp | 7 ++++ src/ripple/app/main/Application.h | 3 ++ src/ripple/app/main/GRPCServer.cpp | 2 +- src/ripple/app/misc/NetworkOPs.cpp | 47 ++++++++++++++++++++++ src/ripple/protocol/jss.h | 5 ++- src/test/rpc/ServerInfo_test.cpp | 61 ++++++++++++++++++++++++++++- 6 files changed, 121 insertions(+), 4 deletions(-) diff --git a/src/ripple/app/main/Application.cpp b/src/ripple/app/main/Application.cpp index 16781ac09d4..8ed328df440 100644 --- a/src/ripple/app/main/Application.cpp +++ b/src/ripple/app/main/Application.cpp @@ -602,6 +602,13 @@ class ApplicationImp : public Application, public BasicApp return *m_networkOPs; } + virtual ServerHandlerImp& + getServerHandler() override + { + assert(serverHandler_); + return *serverHandler_; + } + boost::asio::io_service& getIOService() override { diff --git a/src/ripple/app/main/Application.h b/src/ripple/app/main/Application.h index d8cb7d31815..d2ba8f7cc75 100644 --- a/src/ripple/app/main/Application.h +++ b/src/ripple/app/main/Application.h @@ -89,6 +89,7 @@ class Overlay; class PathRequests; class PendingSaves; class PublicKey; +class ServerHandlerImp; class SecretKey; class STLedgerEntry; class TimeKeeper; @@ -231,6 +232,8 @@ class Application : public beast::PropertyStream::Source getOPs() = 0; virtual OrderBookDB& getOrderBookDB() = 0; + virtual ServerHandlerImp& + getServerHandler() = 0; virtual TransactionMaster& getMasterTransaction() = 0; virtual perf::PerfLog& diff --git a/src/ripple/app/main/GRPCServer.cpp b/src/ripple/app/main/GRPCServer.cpp index fdef8c1cec8..3a5e96b0ed9 100644 --- a/src/ripple/app/main/GRPCServer.cpp +++ b/src/ripple/app/main/GRPCServer.cpp @@ -429,7 +429,7 @@ GRPCServerImpl::GRPCServerImpl(Application& app) // if present, get endpoint from config if (app_.config().exists("port_grpc")) { - Section section = app_.config().section("port_grpc"); + const auto& section = app_.config().section("port_grpc"); auto const optIp = section.get("ip"); if (!optIp) diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index 6be11c7dd6c..6f51f811055 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -65,9 +65,11 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -2661,6 +2663,51 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) info["reporting"] = app_.getReportingETL().getInfo(); } + // This array must be sorted in increasing order. 
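    // (The ordering matters because std::set_intersection, used below to
    // filter each port's protocol list against this whitelist, requires
    // both of its input ranges to be sorted; the static_assert that
    // follows enforces this at compile time, so a protocol added out of
    // order fails the build instead of producing wrong results at run
    // time.)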
+ static constexpr std::array protocols{ + "http", "https", "peer", "ws", "ws2", "wss", "wss2"}; + static_assert(std::is_sorted(std::begin(protocols), std::end(protocols))); + { + Json::Value ports{Json::arrayValue}; + for (auto const& port : app_.getServerHandler().setup().ports) + { + // Don't publish admin ports for non-admin users + if (!admin && + !(port.admin_nets_v4.empty() && port.admin_nets_v6.empty() && + port.admin_user.empty() && port.admin_password.empty())) + continue; + std::vector proto; + std::set_intersection( + std::begin(port.protocol), + std::end(port.protocol), + std::begin(protocols), + std::end(protocols), + std::back_inserter(proto)); + if (!proto.empty()) + { + auto& jv = ports.append(Json::Value(Json::objectValue)); + jv[jss::port] = std::to_string(port.port); + jv[jss::protocol] = Json::Value{Json::arrayValue}; + for (auto const& p : proto) + jv[jss::protocol].append(p); + } + } + + if (app_.config().exists("port_grpc")) + { + auto const& grpcSection = app_.config().section("port_grpc"); + auto const optPort = grpcSection.get("port"); + if (optPort && grpcSection.get("ip")) + { + auto& jv = ports.append(Json::Value(Json::objectValue)); + jv[jss::port] = *optPort; + jv[jss::protocol] = Json::Value{Json::arrayValue}; + jv[jss::protocol].append("grpc"); + } + } + info[jss::ports] = std::move(ports); + } + return info; } diff --git a/src/ripple/protocol/jss.h b/src/ripple/protocol/jss.h index 92d9096da92..8fa1b6cc24c 100644 --- a/src/ripple/protocol/jss.h +++ b/src/ripple/protocol/jss.h @@ -465,13 +465,14 @@ JSS(peers); // out: InboundLedger, handlers/Peers, Overlay JSS(peer_disconnects); // Severed peer connection counter. JSS(peer_disconnects_resources); // Severed peer connections because of // excess resource consumption. -JSS(port); // in: Connect +JSS(port); // in: Connect, out: NetworkOPs +JSS(ports); // out: NetworkOPs JSS(previous); // out: Reservations JSS(previous_ledger); // out: LedgerPropose JSS(proof); // in: BookOffers JSS(propose_seq); // out: LedgerPropose JSS(proposers); // out: NetworkOPs, LedgerConsensus -JSS(protocol); // out: PeerImp +JSS(protocol); // out: NetworkOPs, PeerImp JSS(proxied); // out: RPC ping JSS(pubkey_node); // out: NetworkOPs JSS(pubkey_publisher); // out: ValidatorList diff --git a/src/test/rpc/ServerInfo_test.cpp b/src/test/rpc/ServerInfo_test.cpp index 24cfd12299a..a69483cb130 100644 --- a/src/test/rpc/ServerInfo_test.cpp +++ b/src/test/rpc/ServerInfo_test.cpp @@ -17,6 +17,7 @@ */ //============================================================================== +#include #include #include #include @@ -55,6 +56,16 @@ class ServerInfo_test : public beast::unit_test::suite [validators] %2% + +[port_grpc] +ip = 0.0.0.0 +port = 50051 + +[port_admin] +ip = 0.0.0.0 +port = 50052 +protocol = wss2 +admin = 127.0.0.1 )rippleConfig"); p->loadFromString(boost::str( @@ -77,8 +88,30 @@ class ServerInfo_test : public beast::unit_test::suite BEAST_EXPECT(result[jss::result][jss::status] == "success"); BEAST_EXPECT(result[jss::result].isMember(jss::info)); } + { - Env env(*this, makeValidatorConfig()); + Env env(*this); + + // Call NetworkOPs directly and set the admin flag to false. + // Expect that the admin ports are not included in the result. 
+ auto const result = + env.app().getOPs().getServerInfo(true, false, 0); + auto const& ports = result[jss::ports]; + BEAST_EXPECT(ports.isArray() && ports.size() == 0); + } + + { + auto config = makeValidatorConfig(); + auto const rpc_port = + (*config)["port_rpc"].get("port"); + auto const grpc_port = + (*config)["port_grpc"].get("port"); + auto const ws_port = (*config)["port_ws"].get("port"); + BEAST_EXPECT(grpc_port); + BEAST_EXPECT(rpc_port); + BEAST_EXPECT(ws_port); + + Env env(*this, std::move(config)); auto const result = env.rpc("server_info"); BEAST_EXPECT(!result[jss::result].isMember(jss::error)); BEAST_EXPECT(result[jss::result][jss::status] == "success"); @@ -86,6 +119,32 @@ class ServerInfo_test : public beast::unit_test::suite BEAST_EXPECT( result[jss::result][jss::info][jss::pubkey_validator] == validator_data::public_key); + + auto const& ports = result[jss::result][jss::info][jss::ports]; + BEAST_EXPECT(ports.isArray() && ports.size() == 3); + for (auto const& port : ports) + { + auto const& proto = port[jss::protocol]; + BEAST_EXPECT(proto.isArray()); + auto const p = port[jss::port].asUInt(); + BEAST_EXPECT(p == rpc_port || p == ws_port || p == grpc_port); + if (p == grpc_port) + { + BEAST_EXPECT(proto.size() == 1); + BEAST_EXPECT(proto[0u].asString() == "grpc"); + } + if (p == rpc_port) + { + BEAST_EXPECT(proto.size() == 2); + BEAST_EXPECT(proto[0u].asString() == "http"); + BEAST_EXPECT(proto[1u].asString() == "ws2"); + } + if (p == ws_port) + { + BEAST_EXPECT(proto.size() == 1); + BEAST_EXPECT(proto[0u].asString() == "ws"); + } + } } } From 8fdad0d7fd62dc5965759888db51afc6115f04b6 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Fri, 23 Jun 2023 16:20:20 -0500 Subject: [PATCH 177/508] ci: use Artifactory remote in nix workflow (#4556) There is now an Artifactory (thanks @shichengripple001 and team!) to hold dependency binaries for the builds. * Rewrite the `nix` workflow to use it and cut the time down to a mere 21 minutes. * This workflow should continue to work (just more slowly) for forks that do not have access to the Artifactory. --- .github/workflows/nix.yml | 53 +++++++++++++++++++++++++++++++++++---- 1 file changed, 48 insertions(+), 5 deletions(-) diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index 53c75a9f6fd..706bdbe103b 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -1,6 +1,25 @@ name: nix on: [push, pull_request] +# This workflow has two job matrixes. +# They can be considered phases because the second matrix ("test") +# depends on the first ("dependencies"). +# +# The first phase has a job in the matrix for each combination of +# variables that affects dependency ABI: +# platform, compiler, and configuration. +# It creates a GitHub artifact holding the Conan profile, +# and builds and caches binaries for all the dependencies. +# If an Artifactory remote is configured, they are cached there. +# If not, they are added to the GitHub artifact. +# GitHub's "cache" action has a size limit (10 GB) that is too small +# to hold the binaries if they are built locally. +# We must use the "{upload,download}-artifact" actions instead. +# +# The second phase has a job in the matrix for each test configuration. +# It installs dependency binaries from the cache, whichever was used, +# and builds and tests rippled. 
+ jobs: dependencies: @@ -31,6 +50,8 @@ jobs: env: build_dir: .build steps: + - name: checkout + uses: actions/checkout@v3 - name: check environment run: | echo ${PATH} | tr ':' '\n' @@ -38,6 +59,8 @@ jobs: cmake --version env - name: configure Conan + env: + CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod run: | conan profile new default --detect conan profile update settings.compiler.cppstd=20 default @@ -47,19 +70,39 @@ jobs: conan profile update env.CC=${{ matrix.profile.cc }} default conan profile update env.CXX=${{ matrix.profile.cxx }} default conan profile update conf.tools.build:compiler_executables='{"c": "${{ matrix.profile.cc }}", "cpp": "${{ matrix.profile.cxx }}"}' default - - name: checkout - uses: actions/checkout@v3 - - name: dependencies + # Do not quote the URL. An empty string will be accepted (with + # a non-fatal warning), but a missing argument will not. + conan remote add ripple ${{ env.CONAN_URL }} --insert 0 + - name: try to authenticate to ripple Conan remote + id: remote + run: | + echo outcome=$(conan user --remote ripple ${{ secrets.CONAN_USERNAME }} --password ${{ secrets.CONAN_TOKEN }} && echo success || echo failure) | tee ${GITHUB_OUTPUT} + - name: archive profile + # Create this archive before dependencies are added to the local cache. + run: tar -czf conan.tar -C ~/.conan . + - name: list missing binaries + id: binaries + # Print the list of dependencies that would need to be built locally. + # A non-empty list means we have "failed" to cache binaries remotely. + run: | + echo missing=$(conan info . --build missing --json 2>/dev/null | grep '^\[') | tee ${GITHUB_OUTPUT} + - name: build dependencies + if: (steps.binaries.outputs.missing != '[]') uses: ./.github/actions/dependencies with: configuration: ${{ matrix.configuration }} - - name: archive cache + - name: upload dependencies to remote + if: (steps.binaries.outputs.missing != '[]') && (steps.remote.outputs.outcome == 'success') + run: conan upload --remote ripple '*' --all --parallel --confirm + - name: recreate archive with dependencies + if: (steps.binaries.outputs.missing != '[]') && (steps.remote.outputs.outcome == 'failure') run: tar -czf conan.tar -C ~/.conan . - - name: upload cache + - name: upload archive uses: actions/upload-artifact@v3 with: name: ${{ matrix.platform }}-${{ matrix.compiler }}-${{ matrix.configuration }} path: conan.tar + if-no-files-found: error test: From 9eb30d43169e2d083210cadb3ab81706bf834f3a Mon Sep 17 00:00:00 2001 From: Howard Hinnant Date: Mon, 26 Jun 2023 15:32:10 -0400 Subject: [PATCH 178/508] refactor: remove TypedField's move constructor (#4567) Apply a minor cleanup in `TypedField`: * Remove a non-working and unused move constructor. * Constrain the remaining constructor to not be overly generic enough as to be used as a copy or move constructor. --- src/ripple/protocol/SField.h | 8 +------- src/ripple/protocol/impl/SField.cpp | 7 +++++++ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/src/ripple/protocol/SField.h b/src/ripple/protocol/SField.h index 5c899d3fad3..d1e300e2ca5 100644 --- a/src/ripple/protocol/SField.h +++ b/src/ripple/protocol/SField.h @@ -273,13 +273,7 @@ struct TypedField : SField using type = T; template - explicit TypedField(Args&&... args) : SField(std::forward(args)...) - { - } - - TypedField(TypedField&& u) : SField(std::move(u)) - { - } + explicit TypedField(private_access_tag_t pat, Args&&... args); }; /** Indicate std::optional field semantics. 
*/ diff --git a/src/ripple/protocol/impl/SField.cpp b/src/ripple/protocol/impl/SField.cpp index d02b7ca4b7b..e537fb48cf3 100644 --- a/src/ripple/protocol/impl/SField.cpp +++ b/src/ripple/protocol/impl/SField.cpp @@ -38,6 +38,13 @@ struct SField::private_access_tag_t static SField::private_access_tag_t access; +template +template +TypedField::TypedField(private_access_tag_t pat, Args&&... args) + : SField(pat, std::forward(args)...) +{ +} + // Construct all compile-time SFields, and register them in the knownCodeToField // database: From b7e902dccc5e8bdc31afc8220ffbc1232eec783a Mon Sep 17 00:00:00 2001 From: Shawn Xie <35279399+shawnxie999@users.noreply.github.com> Date: Mon, 26 Jun 2023 17:07:20 -0400 Subject: [PATCH 179/508] XLS-39 Clawback: (#4553) Introduces: * AccountRoot flag: lsfAllowClawback * New Clawback transaction * More info on clawback spec: https://github.com/XRPLF/XRPL-Standards/tree/master/XLS-39d-clawback --- Builds/CMake/RippledCore.cmake | 2 + src/ripple/app/tx/impl/Clawback.cpp | 138 +++ src/ripple/app/tx/impl/Clawback.h | 48 ++ src/ripple/app/tx/impl/InvariantCheck.cpp | 59 ++ src/ripple/app/tx/impl/InvariantCheck.h | 31 +- src/ripple/app/tx/impl/SetAccount.cpp | 39 + src/ripple/app/tx/impl/applySteps.cpp | 11 + src/ripple/protocol/Feature.h | 3 +- src/ripple/protocol/LedgerFormats.h | 3 + src/ripple/protocol/TxFlags.h | 4 + src/ripple/protocol/TxFormats.h | 3 + src/ripple/protocol/impl/Feature.cpp | 1 + src/ripple/protocol/impl/TxFormats.cpp | 8 + src/ripple/protocol/jss.h | 1 + src/test/app/Clawback_test.cpp | 971 ++++++++++++++++++++++ src/test/jtx/flags.h | 3 + src/test/jtx/impl/trust.cpp | 11 + src/test/jtx/trust.h | 3 + src/test/rpc/AccountSet_test.cpp | 6 + 19 files changed, 1343 insertions(+), 2 deletions(-) create mode 100644 src/ripple/app/tx/impl/Clawback.cpp create mode 100644 src/ripple/app/tx/impl/Clawback.h create mode 100644 src/test/app/Clawback_test.cpp diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index a853a6cff53..3b77e35c8c0 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -427,6 +427,7 @@ target_sources (rippled PRIVATE src/ripple/app/tx/impl/CancelOffer.cpp src/ripple/app/tx/impl/CashCheck.cpp src/ripple/app/tx/impl/Change.cpp + src/ripple/app/tx/impl/Clawback.cpp src/ripple/app/tx/impl/CreateCheck.cpp src/ripple/app/tx/impl/CreateOffer.cpp src/ripple/app/tx/impl/CreateTicket.cpp @@ -692,6 +693,7 @@ if (tests) src/test/app/AccountTxPaging_test.cpp src/test/app/AmendmentTable_test.cpp src/test/app/Check_test.cpp + src/test/app/Clawback_test.cpp src/test/app/CrossingLimits_test.cpp src/test/app/DeliverMin_test.cpp src/test/app/DepositAuth_test.cpp diff --git a/src/ripple/app/tx/impl/Clawback.cpp b/src/ripple/app/tx/impl/Clawback.cpp new file mode 100644 index 00000000000..d4a16e7adbc --- /dev/null +++ b/src/ripple/app/tx/impl/Clawback.cpp @@ -0,0 +1,138 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +NotTEC +Clawback::preflight(PreflightContext const& ctx) +{ + if (!ctx.rules.enabled(featureClawback)) + return temDISABLED; + + if (auto const ret = preflight1(ctx); !isTesSuccess(ret)) + return ret; + + if (ctx.tx.getFlags() & tfClawbackMask) + return temINVALID_FLAG; + + AccountID const issuer = ctx.tx[sfAccount]; + STAmount const clawAmount = ctx.tx[sfAmount]; + + // The issuer field is used for the token holder instead + AccountID const& holder = clawAmount.getIssuer(); + + if (issuer == holder || isXRP(clawAmount) || clawAmount <= beast::zero) + return temBAD_AMOUNT; + + return preflight2(ctx); +} + +TER +Clawback::preclaim(PreclaimContext const& ctx) +{ + AccountID const issuer = ctx.tx[sfAccount]; + STAmount const clawAmount = ctx.tx[sfAmount]; + AccountID const& holder = clawAmount.getIssuer(); + + auto const sleIssuer = ctx.view.read(keylet::account(issuer)); + auto const sleHolder = ctx.view.read(keylet::account(holder)); + if (!sleIssuer || !sleHolder) + return terNO_ACCOUNT; + + std::uint32_t const issuerFlagsIn = sleIssuer->getFieldU32(sfFlags); + + // If AllowClawback is not set or NoFreeze is set, return no permission + if (!(issuerFlagsIn & lsfAllowClawback) || (issuerFlagsIn & lsfNoFreeze)) + return tecNO_PERMISSION; + + auto const sleRippleState = + ctx.view.read(keylet::line(holder, issuer, clawAmount.getCurrency())); + if (!sleRippleState) + return tecNO_LINE; + + STAmount const balance = (*sleRippleState)[sfBalance]; + + // If balance is positive, issuer must have higher address than holder + if (balance > beast::zero && issuer < holder) + return tecNO_PERMISSION; + + // If balance is negative, issuer must have lower address than holder + if (balance < beast::zero && issuer > holder) + return tecNO_PERMISSION; + + // At this point, we know that issuer and holder accounts + // are correct and a trustline exists between them. + // + // Must now explicitly check the balance to make sure + // available balance is non-zero. + // + // We can't directly check the balance of trustline because + // the available balance of a trustline is prone to new changes (eg. + // XLS-34). So we must use `accountHolds`. + if (accountHolds( + ctx.view, + holder, + clawAmount.getCurrency(), + issuer, + fhIGNORE_FREEZE, + ctx.j) <= beast::zero) + return tecINSUFFICIENT_FUNDS; + + return tesSUCCESS; +} + +TER +Clawback::doApply() +{ + AccountID const& issuer = account_; + STAmount clawAmount = ctx_.tx[sfAmount]; + AccountID const holder = clawAmount.getIssuer(); // cannot be reference + + // Replace the `issuer` field with issuer's account + clawAmount.setIssuer(issuer); + if (holder == issuer) + return tecINTERNAL; + + // Get the spendable balance. Must use `accountHolds`. 
+ STAmount const spendableAmount = accountHolds( + view(), + holder, + clawAmount.getCurrency(), + clawAmount.getIssuer(), + fhIGNORE_FREEZE, + j_); + + return rippleCredit( + view(), + holder, + issuer, + std::min(spendableAmount, clawAmount), + true, + j_); +} + +} // namespace ripple diff --git a/src/ripple/app/tx/impl/Clawback.h b/src/ripple/app/tx/impl/Clawback.h new file mode 100644 index 00000000000..c5f072c8463 --- /dev/null +++ b/src/ripple/app/tx/impl/Clawback.h @@ -0,0 +1,48 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_TX_CLAWBACK_H_INCLUDED +#define RIPPLE_TX_CLAWBACK_H_INCLUDED + +#include + +namespace ripple { + +class Clawback : public Transactor +{ +public: + static constexpr ConsequencesFactoryType ConsequencesFactory{Normal}; + + explicit Clawback(ApplyContext& ctx) : Transactor(ctx) + { + } + + static NotTEC + preflight(PreflightContext const& ctx); + + static TER + preclaim(PreclaimContext const& ctx); + + TER + doApply() override; +}; + +} // namespace ripple + +#endif diff --git a/src/ripple/app/tx/impl/InvariantCheck.cpp b/src/ripple/app/tx/impl/InvariantCheck.cpp index 8664c6492b9..a2e452d4c44 100644 --- a/src/ripple/app/tx/impl/InvariantCheck.cpp +++ b/src/ripple/app/tx/impl/InvariantCheck.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -717,4 +718,62 @@ NFTokenCountTracking::finalize( return true; } +//------------------------------------------------------------------------------ + +void +ValidClawback::visitEntry( + bool, + std::shared_ptr const& before, + std::shared_ptr const&) +{ + if (before && before->getType() == ltRIPPLE_STATE) + trustlinesChanged++; +} + +bool +ValidClawback::finalize( + STTx const& tx, + TER const result, + XRPAmount const, + ReadView const& view, + beast::Journal const& j) +{ + if (tx.getTxnType() != ttCLAWBACK) + return true; + + if (result == tesSUCCESS) + { + if (trustlinesChanged > 1) + { + JLOG(j.fatal()) + << "Invariant failed: more than one trustline changed."; + return false; + } + + AccountID const issuer = tx.getAccountID(sfAccount); + STAmount const amount = tx.getFieldAmount(sfAmount); + AccountID const& holder = amount.getIssuer(); + STAmount const holderBalance = accountHolds( + view, holder, amount.getCurrency(), issuer, fhIGNORE_FREEZE, j); + + if (holderBalance.signum() < 0) + { + JLOG(j.fatal()) + << "Invariant failed: trustline balance is negative"; + return false; + } + } + else + { + if (trustlinesChanged != 0) + { + JLOG(j.fatal()) << "Invariant failed: some trustlines were changed " + "despite failure of the 
transaction."; + return false; + } + } + + return true; +} + } // namespace ripple diff --git a/src/ripple/app/tx/impl/InvariantCheck.h b/src/ripple/app/tx/impl/InvariantCheck.h index fe17db44fa7..5194a9c34c6 100644 --- a/src/ripple/app/tx/impl/InvariantCheck.h +++ b/src/ripple/app/tx/impl/InvariantCheck.h @@ -389,6 +389,34 @@ class NFTokenCountTracking beast::Journal const&); }; +/** + * @brief Invariant: Token holder's trustline balance cannot be negative after + * Clawback. + * + * We iterate all the trust lines affected by this transaction and ensure + * that no more than one trustline is modified, and also holder's balance is + * non-negative. + */ +class ValidClawback +{ + std::uint32_t trustlinesChanged = 0; + +public: + void + visitEntry( + bool, + std::shared_ptr const&, + std::shared_ptr const&); + + bool + finalize( + STTx const&, + TER const, + XRPAmount const, + ReadView const&, + beast::Journal const&); +}; + // additional invariant checks can be declared above and then added to this // tuple using InvariantChecks = std::tuple< @@ -402,7 +430,8 @@ using InvariantChecks = std::tuple< NoZeroEscrow, ValidNewAccountRoot, ValidNFTokenPage, - NFTokenCountTracking>; + NFTokenCountTracking, + ValidClawback>; /** * @brief get a tuple of all invariant checks diff --git a/src/ripple/app/tx/impl/SetAccount.cpp b/src/ripple/app/tx/impl/SetAccount.cpp index 167644f45fb..123a5bdc91e 100644 --- a/src/ripple/app/tx/impl/SetAccount.cpp +++ b/src/ripple/app/tx/impl/SetAccount.cpp @@ -218,6 +218,37 @@ SetAccount::preclaim(PreclaimContext const& ctx) } } + // + // Clawback + // + if (ctx.view.rules().enabled(featureClawback)) + { + if (uSetFlag == asfAllowClawback) + { + if (uFlagsIn & lsfNoFreeze) + { + JLOG(ctx.j.trace()) << "Can't set Clawback if NoFreeze is set"; + return tecNO_PERMISSION; + } + + if (!dirIsEmpty(ctx.view, keylet::ownerDir(id))) + { + JLOG(ctx.j.trace()) << "Owner directory not empty."; + return tecOWNERS; + } + } + else if (uSetFlag == asfNoFreeze) + { + // Cannot set NoFreeze if clawback is enabled + if (uFlagsIn & lsfAllowClawback) + { + JLOG(ctx.j.trace()) + << "Can't set NoFreeze if clawback is enabled"; + return tecNO_PERMISSION; + } + } + } + return tesSUCCESS; } @@ -562,6 +593,14 @@ SetAccount::doApply() uFlagsOut &= ~lsfDisallowIncomingTrustline; } + // Set flag for clawback + if (ctx_.view().rules().enabled(featureClawback) && + uSetFlag == asfAllowClawback) + { + JLOG(j_.trace()) << "set allow clawback"; + uFlagsOut |= lsfAllowClawback; + } + if (uFlagsIn != uFlagsOut) sle->setFieldU32(sfFlags, uFlagsOut); diff --git a/src/ripple/app/tx/impl/applySteps.cpp b/src/ripple/app/tx/impl/applySteps.cpp index 85959862dba..d0a09c31029 100644 --- a/src/ripple/app/tx/impl/applySteps.cpp +++ b/src/ripple/app/tx/impl/applySteps.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -147,6 +148,8 @@ invoke_preflight(PreflightContext const& ctx) return invoke_preflight_helper(ctx); case ttNFTOKEN_ACCEPT_OFFER: return invoke_preflight_helper(ctx); + case ttCLAWBACK: + return invoke_preflight_helper(ctx); default: assert(false); return {temUNKNOWN, TxConsequences{temUNKNOWN}}; @@ -248,6 +251,8 @@ invoke_preclaim(PreclaimContext const& ctx) return invoke_preclaim(ctx); case ttNFTOKEN_ACCEPT_OFFER: return invoke_preclaim(ctx); + case ttCLAWBACK: + return invoke_preclaim(ctx); default: assert(false); return temUNKNOWN; @@ -311,6 +316,8 @@ invoke_calculateBaseFee(ReadView const& view, STTx const& tx) return NFTokenCancelOffer::calculateBaseFee(view, 
tx); case ttNFTOKEN_ACCEPT_OFFER: return NFTokenAcceptOffer::calculateBaseFee(view, tx); + case ttCLAWBACK: + return Clawback::calculateBaseFee(view, tx); default: assert(false); return XRPAmount{0}; @@ -463,6 +470,10 @@ invoke_apply(ApplyContext& ctx) NFTokenAcceptOffer p(ctx); return p(); } + case ttCLAWBACK: { + Clawback p(ctx); + return p(); + } default: assert(false); return {temUNKNOWN, false}; diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index 48198e38315..a8a71775202 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -74,7 +74,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. A LogicError on startup will verify this. -static constexpr std::size_t numFeatures = 59; +static constexpr std::size_t numFeatures = 60; /** Amendments that this server supports and the default voting behavior. Whether they are enabled depends on the Rules defined in the validated @@ -346,6 +346,7 @@ extern uint256 const fixUniversalNumber; extern uint256 const fixNonFungibleTokensV1_2; extern uint256 const fixNFTokenRemint; extern uint256 const fixReducedOffersV1; +extern uint256 const featureClawback; } // namespace ripple diff --git a/src/ripple/protocol/LedgerFormats.h b/src/ripple/protocol/LedgerFormats.h index 8245f352c8e..c72b769ef2a 100644 --- a/src/ripple/protocol/LedgerFormats.h +++ b/src/ripple/protocol/LedgerFormats.h @@ -243,6 +243,9 @@ enum LedgerSpecificFlags { 0x10000000, // True, reject new paychans lsfDisallowIncomingTrustline = 0x20000000, // True, reject new trustlines (only if no issued assets) + lsfAMM [[maybe_unused]] = 0x40000000, // True, AMM account + lsfAllowClawback = + 0x80000000, // True, enable clawback // ltOFFER lsfPassive = 0x00010000, diff --git a/src/ripple/protocol/TxFlags.h b/src/ripple/protocol/TxFlags.h index c0dd080f6f7..9eaf5de7860 100644 --- a/src/ripple/protocol/TxFlags.h +++ b/src/ripple/protocol/TxFlags.h @@ -88,6 +88,7 @@ constexpr std::uint32_t asfDisallowIncomingNFTokenOffer = 12; constexpr std::uint32_t asfDisallowIncomingCheck = 13; constexpr std::uint32_t asfDisallowIncomingPayChan = 14; constexpr std::uint32_t asfDisallowIncomingTrustline = 15; +constexpr std::uint32_t asfAllowClawback = 16; // OfferCreate flags: constexpr std::uint32_t tfPassive = 0x00010000; @@ -159,6 +160,9 @@ constexpr std::uint32_t const tfNFTokenCancelOfferMask = ~(tfUniversal); // NFTokenAcceptOffer flags: constexpr std::uint32_t const tfNFTokenAcceptOfferMask = ~tfUniversal; +// Clawback flags: +constexpr std::uint32_t const tfClawbackMask = ~tfUniversal; + // clang-format on } // namespace ripple diff --git a/src/ripple/protocol/TxFormats.h b/src/ripple/protocol/TxFormats.h index 250c29d69c1..11df0af7b23 100644 --- a/src/ripple/protocol/TxFormats.h +++ b/src/ripple/protocol/TxFormats.h @@ -139,6 +139,9 @@ enum TxType : std::uint16_t /** This transaction accepts an existing offer to buy or sell an existing NFT. */ ttNFTOKEN_ACCEPT_OFFER = 29, + /** This transaction claws back issued tokens. */ + ttCLAWBACK = 30, + /** This system-generated transaction type is used to update the status of the various amendments. 
For details, see: https://xrpl.org/amendments.html diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index 09923109671..14170674d94 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -453,6 +453,7 @@ REGISTER_FIX (fixUniversalNumber, Supported::yes, VoteBehavior::De REGISTER_FIX (fixNonFungibleTokensV1_2, Supported::yes, VoteBehavior::DefaultNo); REGISTER_FIX (fixNFTokenRemint, Supported::yes, VoteBehavior::DefaultNo); REGISTER_FIX (fixReducedOffersV1, Supported::yes, VoteBehavior::DefaultNo); +REGISTER_FEATURE(Clawback, Supported::yes, VoteBehavior::DefaultNo); // The following amendments are obsolete, but must remain supported // because they could potentially get enabled. diff --git a/src/ripple/protocol/impl/TxFormats.cpp b/src/ripple/protocol/impl/TxFormats.cpp index 91a6bcb581b..d6d017508f2 100644 --- a/src/ripple/protocol/impl/TxFormats.cpp +++ b/src/ripple/protocol/impl/TxFormats.cpp @@ -328,6 +328,14 @@ TxFormats::TxFormats() {sfTicketSequence, soeOPTIONAL}, }, commonFields); + + add(jss::Clawback, + ttCLAWBACK, + { + {sfAmount, soeREQUIRED}, + {sfTicketSequence, soeOPTIONAL}, + }, + commonFields); } TxFormats const& diff --git a/src/ripple/protocol/jss.h b/src/ripple/protocol/jss.h index 8fa1b6cc24c..4612bd7811d 100644 --- a/src/ripple/protocol/jss.h +++ b/src/ripple/protocol/jss.h @@ -53,6 +53,7 @@ JSS(Check); // ledger type. JSS(CheckCancel); // transaction type. JSS(CheckCash); // transaction type. JSS(CheckCreate); // transaction type. +JSS(Clawback); // transaction type. JSS(ClearFlag); // field. JSS(DeliverMin); // in: TransactionSign JSS(DepositPreauth); // transaction and ledger type. diff --git a/src/test/app/Clawback_test.cpp b/src/test/app/Clawback_test.cpp new file mode 100644 index 00000000000..33b9049790e --- /dev/null +++ b/src/test/app/Clawback_test.cpp @@ -0,0 +1,971 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +class Clawback_test : public beast::unit_test::suite +{ + template + static std::string + to_string(T const& t) + { + return boost::lexical_cast(t); + } + + // Helper function that returns the owner count of an account root. + static std::uint32_t + ownerCount(test::jtx::Env const& env, test::jtx::Account const& acct) + { + std::uint32_t ret{0}; + if (auto const sleAcct = env.le(acct)) + ret = sleAcct->at(sfOwnerCount); + return ret; + } + + // Helper function that returns the number of tickets held by an account. 
+ static std::uint32_t + ticketCount(test::jtx::Env const& env, test::jtx::Account const& acct) + { + std::uint32_t ret{0}; + if (auto const sleAcct = env.le(acct)) + ret = sleAcct->at(~sfTicketCount).value_or(0); + return ret; + } + + // Helper function that returns the freeze status of a trustline + static bool + getLineFreezeFlag( + test::jtx::Env const& env, + test::jtx::Account const& src, + test::jtx::Account const& dst, + Currency const& cur) + { + if (auto sle = env.le(keylet::line(src, dst, cur))) + { + auto const useHigh = src.id() > dst.id(); + return sle->isFlag(useHigh ? lsfHighFreeze : lsfLowFreeze); + } + Throw("No line in getLineFreezeFlag"); + return false; // silence warning + } + + void + testAllowClawbackFlag(FeatureBitset features) + { + testcase("Enable AllowClawback flag"); + using namespace test::jtx; + + // Test that one can successfully set asfAllowClawback flag. + // If successful, asfNoFreeze can no longer be set. + // Also, asfAllowClawback cannot be cleared. + { + Env env(*this, features); + Account alice{"alice"}; + + env.fund(XRP(1000), alice); + env.close(); + + // set asfAllowClawback + env(fset(alice, asfAllowClawback)); + env.close(); + env.require(flags(alice, asfAllowClawback)); + + // clear asfAllowClawback does nothing + env(fclear(alice, asfAllowClawback)); + env.close(); + env.require(flags(alice, asfAllowClawback)); + + // asfNoFreeze cannot be set when asfAllowClawback is set + env.require(nflags(alice, asfNoFreeze)); + env(fset(alice, asfNoFreeze), ter(tecNO_PERMISSION)); + env.close(); + } + + // Test that asfAllowClawback cannot be set when + // asfNoFreeze has been set + { + Env env(*this, features); + Account alice{"alice"}; + + env.fund(XRP(1000), alice); + env.close(); + + env.require(nflags(alice, asfNoFreeze)); + + // set asfNoFreeze + env(fset(alice, asfNoFreeze)); + env.close(); + + // NoFreeze is set + env.require(flags(alice, asfNoFreeze)); + + // asfAllowClawback cannot be set if asfNoFreeze is set + env(fset(alice, asfAllowClawback), ter(tecNO_PERMISSION)); + env.close(); + + env.require(nflags(alice, asfAllowClawback)); + } + + // Test that asfAllowClawback is not allowed when owner dir is non-empty + { + Env env(*this, features); + + Account alice{"alice"}; + Account bob{"bob"}; + + env.fund(XRP(1000), alice, bob); + env.close(); + + auto const USD = alice["USD"]; + env.require(nflags(alice, asfAllowClawback)); + + // alice issues 10 USD to bob + env.trust(USD(1000), bob); + env(pay(alice, bob, USD(10))); + env.close(); + + BEAST_EXPECT(ownerCount(env, alice) == 0); + BEAST_EXPECT(ownerCount(env, bob) == 1); + + // alice fails to enable clawback because she has trustline with bob + env(fset(alice, asfAllowClawback), ter(tecOWNERS)); + env.close(); + + // bob sets trustline to default limit and pays alice back to delete + // the trustline + env(trust(bob, USD(0), 0)); + env(pay(bob, alice, USD(10))); + + BEAST_EXPECT(ownerCount(env, alice) == 0); + BEAST_EXPECT(ownerCount(env, bob) == 0); + + // alice now is able to set asfAllowClawback + env(fset(alice, asfAllowClawback)); + env.close(); + env.require(flags(alice, asfAllowClawback)); + + BEAST_EXPECT(ownerCount(env, alice) == 0); + BEAST_EXPECT(ownerCount(env, bob) == 0); + } + + // Test that one cannot enable asfAllowClawback when + // featureClawback amendment is disabled + { + Env env(*this, features - featureClawback); + + Account alice{"alice"}; + + env.fund(XRP(1000), alice); + env.close(); + + env.require(nflags(alice, asfAllowClawback)); + + // alice attempts to set 
asfAllowClawback flag while amendment is + // disabled. no error is returned, but the flag remains to be unset. + env(fset(alice, asfAllowClawback)); + env.close(); + env.require(nflags(alice, asfAllowClawback)); + + // now enable clawback amendment + env.enableFeature(featureClawback); + env.close(); + + // asfAllowClawback can be set + env(fset(alice, asfAllowClawback)); + env.close(); + env.require(flags(alice, asfAllowClawback)); + } + } + + void + testValidation(FeatureBitset features) + { + testcase("Validation"); + using namespace test::jtx; + + // Test that Clawback tx fails for the following: + // 1. when amendment is disabled + // 2. when asfAllowClawback flag has not been set + { + Env env(*this, features - featureClawback); + + Account alice{"alice"}; + Account bob{"bob"}; + + env.fund(XRP(1000), alice, bob); + env.close(); + + env.require(nflags(alice, asfAllowClawback)); + + auto const USD = alice["USD"]; + + // alice issues 10 USD to bob + env.trust(USD(1000), bob); + env(pay(alice, bob, USD(10))); + env.close(); + + env.require(balance(bob, alice["USD"](10))); + env.require(balance(alice, bob["USD"](-10))); + + // clawback fails because amendment is disabled + env(claw(alice, bob["USD"](5)), ter(temDISABLED)); + env.close(); + + // now enable clawback amendment + env.enableFeature(featureClawback); + env.close(); + + // clawback fails because asfAllowClawback has not been set + env(claw(alice, bob["USD"](5)), ter(tecNO_PERMISSION)); + env.close(); + + env.require(balance(bob, alice["USD"](10))); + env.require(balance(alice, bob["USD"](-10))); + } + + // Test that Clawback tx fails for the following: + // 1. invalid flag + // 2. negative STAmount + // 3. zero STAmount + // 4. XRP amount + // 5. `account` and `issuer` fields are same account + // 6. trustline has a balance of 0 + // 7. 
trustline does not exist + { + Env env(*this, features); + + Account alice{"alice"}; + Account bob{"bob"}; + + env.fund(XRP(1000), alice, bob); + env.close(); + + // alice sets asfAllowClawback + env(fset(alice, asfAllowClawback)); + env.close(); + env.require(flags(alice, asfAllowClawback)); + + auto const USD = alice["USD"]; + + // alice issues 10 USD to bob + env.trust(USD(1000), bob); + env(pay(alice, bob, USD(10))); + env.close(); + + env.require(balance(bob, alice["USD"](10))); + env.require(balance(alice, bob["USD"](-10))); + + // fails due to invalid flag + env(claw(alice, bob["USD"](5)), + txflags(0x00008000), + ter(temINVALID_FLAG)); + env.close(); + + // fails due to negative amount + env(claw(alice, bob["USD"](-5)), ter(temBAD_AMOUNT)); + env.close(); + + // fails due to zero amount + env(claw(alice, bob["USD"](0)), ter(temBAD_AMOUNT)); + env.close(); + + // fails because amount is in XRP + env(claw(alice, XRP(10)), ter(temBAD_AMOUNT)); + env.close(); + + // fails when `issuer` field in `amount` is not token holder + // NOTE: we are using the `issuer` field for the token holder + env(claw(alice, alice["USD"](5)), ter(temBAD_AMOUNT)); + env.close(); + + // bob pays alice back, trustline has a balance of 0 + env(pay(bob, alice, USD(10))); + env.close(); + + // bob still owns the trustline that has 0 balance + BEAST_EXPECT(ownerCount(env, alice) == 0); + BEAST_EXPECT(ownerCount(env, bob) == 1); + env.require(balance(bob, alice["USD"](0))); + env.require(balance(alice, bob["USD"](0))); + + // clawback fails because because balance is 0 + env(claw(alice, bob["USD"](5)), ter(tecINSUFFICIENT_FUNDS)); + env.close(); + + // set the limit to default, which should delete the trustline + env(trust(bob, USD(0), 0)); + env.close(); + + // bob no longer owns the trustline + BEAST_EXPECT(ownerCount(env, alice) == 0); + BEAST_EXPECT(ownerCount(env, bob) == 0); + + // clawback fails because trustline does not exist + env(claw(alice, bob["USD"](5)), ter(tecNO_LINE)); + env.close(); + } + } + + void + testPermission(FeatureBitset features) + { + // Checks the tx submitter has the permission to clawback. 
+ // Exercises preclaim code + testcase("Permission"); + using namespace test::jtx; + + // Clawing back from an non-existent account returns error + { + Env env(*this, features); + + Account alice{"alice"}; + Account bob{"bob"}; + + // bob's account is not funded and does not exist + env.fund(XRP(1000), alice); + env.close(); + + // alice sets asfAllowClawback + env(fset(alice, asfAllowClawback)); + env.close(); + env.require(flags(alice, asfAllowClawback)); + + // bob, the token holder, does not exist + env(claw(alice, bob["USD"](5)), ter(terNO_ACCOUNT)); + env.close(); + } + + // Test that trustline cannot be clawed by someone who is + // not the issuer of the currency + { + Env env(*this, features); + + Account alice{"alice"}; + Account bob{"bob"}; + Account cindy{"cindy"}; + + env.fund(XRP(1000), alice, bob, cindy); + env.close(); + + auto const USD = alice["USD"]; + + // alice sets asfAllowClawback + env(fset(alice, asfAllowClawback)); + env.close(); + env.require(flags(alice, asfAllowClawback)); + + // cindy sets asfAllowClawback + env(fset(cindy, asfAllowClawback)); + env.close(); + env.require(flags(cindy, asfAllowClawback)); + + // alice issues 1000 USD to bob + env.trust(USD(1000), bob); + env(pay(alice, bob, USD(1000))); + env.close(); + + env.require(balance(bob, alice["USD"](1000))); + env.require(balance(alice, bob["USD"](-1000))); + + // cindy tries to claw from bob, and fails because trustline does + // not exist + env(claw(cindy, bob["USD"](200)), ter(tecNO_LINE)); + env.close(); + } + + // When a trustline is created between issuer and holder, + // we must make sure the holder is unable to claw back from + // the issuer by impersonating the issuer account. + // + // This must be tested bidirectionally for both accounts because the + // issuer could be either the low or high account in the trustline + // object + { + Env env(*this, features); + + Account alice{"alice"}; + Account bob{"bob"}; + + env.fund(XRP(1000), alice, bob); + env.close(); + + auto const USD = alice["USD"]; + auto const CAD = bob["CAD"]; + + // alice sets asfAllowClawback + env(fset(alice, asfAllowClawback)); + env.close(); + env.require(flags(alice, asfAllowClawback)); + + // bob sets asfAllowClawback + env(fset(bob, asfAllowClawback)); + env.close(); + env.require(flags(bob, asfAllowClawback)); + + // alice issues 10 USD to bob. + // bob then attempts to submit a clawback tx to claw USD from alice. + // this must FAIL, because bob is not the issuer for this + // trustline!!! + { + // bob creates a trustline with alice, and alice sends 10 USD to + // bob + env.trust(USD(1000), bob); + env(pay(alice, bob, USD(10))); + env.close(); + + env.require(balance(bob, alice["USD"](10))); + env.require(balance(alice, bob["USD"](-10))); + + // bob cannot claw back USD from alice because he's not the + // issuer + env(claw(bob, alice["USD"](5)), ter(tecNO_PERMISSION)); + env.close(); + } + + // bob issues 10 CAD to alice. + // alice then attempts to submit a clawback tx to claw CAD from bob. + // this must FAIL, because alice is not the issuer for this + // trustline!!! 
+ { + // alice creates a trustline with bob, and bob sends 10 CAD to + // alice + env.trust(CAD(1000), alice); + env(pay(bob, alice, CAD(10))); + env.close(); + + env.require(balance(bob, alice["CAD"](-10))); + env.require(balance(alice, bob["CAD"](10))); + + // alice cannot claw back CAD from bob because she's not the + // issuer + env(claw(alice, bob["CAD"](5)), ter(tecNO_PERMISSION)); + env.close(); + } + } + } + + void + testEnabled(FeatureBitset features) + { + testcase("Enable clawback"); + using namespace test::jtx; + + // Test that alice is able to successfully clawback tokens from bob + Env env(*this, features); + + Account alice{"alice"}; + Account bob{"bob"}; + + env.fund(XRP(1000), alice, bob); + env.close(); + + auto const USD = alice["USD"]; + + // alice sets asfAllowClawback + env(fset(alice, asfAllowClawback)); + env.close(); + env.require(flags(alice, asfAllowClawback)); + + // alice issues 1000 USD to bob + env.trust(USD(1000), bob); + env(pay(alice, bob, USD(1000))); + env.close(); + + env.require(balance(bob, alice["USD"](1000))); + env.require(balance(alice, bob["USD"](-1000))); + + // alice claws back 200 USD from bob + env(claw(alice, bob["USD"](200))); + env.close(); + + // bob should have 800 USD left + env.require(balance(bob, alice["USD"](800))); + env.require(balance(alice, bob["USD"](-800))); + + // alice claws back 800 USD from bob again + env(claw(alice, bob["USD"](800))); + env.close(); + + // trustline has a balance of 0 + env.require(balance(bob, alice["USD"](0))); + env.require(balance(alice, bob["USD"](0))); + } + + void + testMultiLine(FeatureBitset features) + { + // Test scenarios where multiple trustlines are involved + testcase("Multi line"); + using namespace test::jtx; + + // Both alice and bob issues their own "USD" to cindy. + // When alice and bob tries to claw back, they will only + // claw back from their respective trustline. + { + Env env(*this, features); + + Account alice{"alice"}; + Account bob{"bob"}; + Account cindy{"cindy"}; + + env.fund(XRP(1000), alice, bob, cindy); + env.close(); + + // alice sets asfAllowClawback + env(fset(alice, asfAllowClawback)); + env.close(); + env.require(flags(alice, asfAllowClawback)); + + // bob sets asfAllowClawback + env(fset(bob, asfAllowClawback)); + env.close(); + env.require(flags(bob, asfAllowClawback)); + + // alice sends 1000 USD to cindy + env.trust(alice["USD"](1000), cindy); + env(pay(alice, cindy, alice["USD"](1000))); + env.close(); + + // bob sends 1000 USD to cindy + env.trust(bob["USD"](1000), cindy); + env(pay(bob, cindy, bob["USD"](1000))); + env.close(); + + // alice claws back 200 USD from cindy + env(claw(alice, cindy["USD"](200))); + env.close(); + + // cindy has 800 USD left in alice's trustline after clawed by alice + env.require(balance(cindy, alice["USD"](800))); + env.require(balance(alice, cindy["USD"](-800))); + + // cindy still has 1000 USD in bob's trustline + env.require(balance(cindy, bob["USD"](1000))); + env.require(balance(bob, cindy["USD"](-1000))); + + // bob claws back 600 USD from cindy + env(claw(bob, cindy["USD"](600))); + env.close(); + + // cindy has 400 USD left in bob's trustline after clawed by bob + env.require(balance(cindy, bob["USD"](400))); + env.require(balance(bob, cindy["USD"](-400))); + + // cindy still has 800 USD in alice's trustline + env.require(balance(cindy, alice["USD"](800))); + env.require(balance(alice, cindy["USD"](-800))); + } + + // alice issues USD to both bob and cindy. 
+ // when alice claws back from bob, only bob's USD balance is + // affected, and cindy's balance remains unchanged, and vice versa. + { + Env env(*this, features); + + Account alice{"alice"}; + Account bob{"bob"}; + Account cindy{"cindy"}; + + env.fund(XRP(1000), alice, bob, cindy); + env.close(); + + auto const USD = alice["USD"]; + + // alice sets asfAllowClawback + env(fset(alice, asfAllowClawback)); + env.close(); + env.require(flags(alice, asfAllowClawback)); + + // alice sends 600 USD to bob + env.trust(USD(1000), bob); + env(pay(alice, bob, USD(600))); + env.close(); + + env.require(balance(alice, bob["USD"](-600))); + env.require(balance(bob, alice["USD"](600))); + + // alice sends 1000 USD to cindy + env.trust(USD(1000), cindy); + env(pay(alice, cindy, USD(1000))); + env.close(); + + env.require(balance(alice, cindy["USD"](-1000))); + env.require(balance(cindy, alice["USD"](1000))); + + // alice claws back 500 USD from bob + env(claw(alice, bob["USD"](500))); + env.close(); + + // bob's balance is reduced + env.require(balance(alice, bob["USD"](-100))); + env.require(balance(bob, alice["USD"](100))); + + // cindy's balance is unchanged + env.require(balance(alice, cindy["USD"](-1000))); + env.require(balance(cindy, alice["USD"](1000))); + + // alice claws back 300 USD from cindy + env(claw(alice, cindy["USD"](300))); + env.close(); + + // bob's balance is unchanged + env.require(balance(alice, bob["USD"](-100))); + env.require(balance(bob, alice["USD"](100))); + + // cindy's balance is reduced + env.require(balance(alice, cindy["USD"](-700))); + env.require(balance(cindy, alice["USD"](700))); + } + } + + void + testBidirectionalLine(FeatureBitset features) + { + testcase("Bidirectional line"); + using namespace test::jtx; + + // Test when both alice and bob issues USD to each other. + // This scenario creates only one trustline. + // In this case, both alice and bob can be seen as the "issuer" + // and they can send however many USDs to each other. + // We test that only the person who has a negative balance from their + // perspective is allowed to clawback + Env env(*this, features); + + Account alice{"alice"}; + Account bob{"bob"}; + + env.fund(XRP(1000), alice, bob); + env.close(); + + // alice sets asfAllowClawback + env(fset(alice, asfAllowClawback)); + env.close(); + env.require(flags(alice, asfAllowClawback)); + + // bob sets asfAllowClawback + env(fset(bob, asfAllowClawback)); + env.close(); + env.require(flags(bob, asfAllowClawback)); + + // alice issues 1000 USD to bob + env.trust(alice["USD"](1000), bob); + env(pay(alice, bob, alice["USD"](1000))); + env.close(); + + BEAST_EXPECT(ownerCount(env, alice) == 0); + BEAST_EXPECT(ownerCount(env, bob) == 1); + + // bob is the holder, and alice is the issuer + env.require(balance(bob, alice["USD"](1000))); + env.require(balance(alice, bob["USD"](-1000))); + + // bob issues 1500 USD to alice + env.trust(bob["USD"](1500), alice); + env(pay(bob, alice, bob["USD"](1500))); + env.close(); + + BEAST_EXPECT(ownerCount(env, alice) == 1); + BEAST_EXPECT(ownerCount(env, bob) == 1); + + // bob has negative 500 USD because bob issued 500 USD more than alice + // bob can now been seen as the issuer, while alice is the holder + env.require(balance(bob, alice["USD"](-500))); + env.require(balance(alice, bob["USD"](500))); + + // At this point, both alice and bob are the issuers of USD + // and can send USD to each other through one trustline + + // alice fails to clawback. 
Even though she is also an issuer, + // the trustline balance is positive from her perspective + env(claw(alice, bob["USD"](200)), ter(tecNO_PERMISSION)); + env.close(); + + // bob is able to successfully clawback from alice because + // the trustline balance is negative from his perspective + env(claw(bob, alice["USD"](200))); + env.close(); + + env.require(balance(bob, alice["USD"](-300))); + env.require(balance(alice, bob["USD"](300))); + + // alice pays bob 1000 USD + env(pay(alice, bob, alice["USD"](1000))); + env.close(); + + // bob's balance becomes positive from his perspective because + // alice issued more USD than the balance + env.require(balance(bob, alice["USD"](700))); + env.require(balance(alice, bob["USD"](-700))); + + // bob is now the holder and fails to clawback + env(claw(bob, alice["USD"](200)), ter(tecNO_PERMISSION)); + env.close(); + + // alice successfully claws back + env(claw(alice, bob["USD"](200))); + env.close(); + + env.require(balance(bob, alice["USD"](500))); + env.require(balance(alice, bob["USD"](-500))); + } + + void + testDeleteDefaultLine(FeatureBitset features) + { + testcase("Delete default trustline"); + using namespace test::jtx; + + // If clawback results the trustline to be default, + // trustline should be automatically deleted + Env env(*this, features); + Account alice{"alice"}; + Account bob{"bob"}; + + env.fund(XRP(1000), alice, bob); + env.close(); + + auto const USD = alice["USD"]; + + // alice sets asfAllowClawback + env(fset(alice, asfAllowClawback)); + env.close(); + env.require(flags(alice, asfAllowClawback)); + + // alice issues 1000 USD to bob + env.trust(USD(1000), bob); + env(pay(alice, bob, USD(1000))); + env.close(); + + BEAST_EXPECT(ownerCount(env, alice) == 0); + BEAST_EXPECT(ownerCount(env, bob) == 1); + + env.require(balance(bob, alice["USD"](1000))); + env.require(balance(alice, bob["USD"](-1000))); + + // set limit to default, + env(trust(bob, USD(0), 0)); + env.close(); + + BEAST_EXPECT(ownerCount(env, alice) == 0); + BEAST_EXPECT(ownerCount(env, bob) == 1); + + // alice claws back full amount from bob, and should also delete + // trustline + env(claw(alice, bob["USD"](1000))); + env.close(); + + // bob no longer owns the trustline because it was deleted + BEAST_EXPECT(ownerCount(env, alice) == 0); + BEAST_EXPECT(ownerCount(env, bob) == 0); + } + + void + testFrozenLine(FeatureBitset features) + { + testcase("Frozen trustline"); + using namespace test::jtx; + + // Claws back from frozen trustline + // and the trustline should remain frozen + Env env(*this, features); + Account alice{"alice"}; + Account bob{"bob"}; + + env.fund(XRP(1000), alice, bob); + env.close(); + + auto const USD = alice["USD"]; + + // alice sets asfAllowClawback + env(fset(alice, asfAllowClawback)); + env.close(); + env.require(flags(alice, asfAllowClawback)); + + // alice issues 1000 USD to bob + env.trust(USD(1000), bob); + env(pay(alice, bob, USD(1000))); + env.close(); + + env.require(balance(bob, alice["USD"](1000))); + env.require(balance(alice, bob["USD"](-1000))); + + // freeze trustline + env(trust(alice, bob["USD"](0), tfSetFreeze)); + env.close(); + + // alice claws back 200 USD from bob + env(claw(alice, bob["USD"](200))); + env.close(); + + // bob should have 800 USD left + env.require(balance(bob, alice["USD"](800))); + env.require(balance(alice, bob["USD"](-800))); + + // trustline remains frozen + BEAST_EXPECT(getLineFreezeFlag(env, alice, bob, USD.currency)); + } + + void + testAmountExceedsAvailable(FeatureBitset features) + { + 
testcase("Amount exceeds available"); + using namespace test::jtx; + + // When alice tries to claw back an amount that is greater + // than what bob holds, only the max available balance is clawed + Env env(*this, features); + Account alice{"alice"}; + Account bob{"bob"}; + + env.fund(XRP(1000), alice, bob); + env.close(); + + auto const USD = alice["USD"]; + + // alice sets asfAllowClawback + env(fset(alice, asfAllowClawback)); + env.close(); + env.require(flags(alice, asfAllowClawback)); + + // alice issues 1000 USD to bob + env.trust(USD(1000), bob); + env(pay(alice, bob, USD(1000))); + env.close(); + + env.require(balance(bob, alice["USD"](1000))); + env.require(balance(alice, bob["USD"](-1000))); + + // alice tries to claw back 2000 USD + env(claw(alice, bob["USD"](2000))); + env.close(); + + // check alice and bob's balance. + // alice was only able to claw back 1000 USD at maximum + env.require(balance(bob, alice["USD"](0))); + env.require(balance(alice, bob["USD"](0))); + + // bob still owns the trustline because trustline is not in default + // state + BEAST_EXPECT(ownerCount(env, alice) == 0); + BEAST_EXPECT(ownerCount(env, bob) == 1); + + // set limit to default, + env(trust(bob, USD(0), 0)); + env.close(); + + // verify that bob's trustline was deleted + BEAST_EXPECT(ownerCount(env, alice) == 0); + BEAST_EXPECT(ownerCount(env, bob) == 0); + } + + void + testTickets(FeatureBitset features) + { + testcase("Tickets"); + using namespace test::jtx; + + // Tests clawback with tickets + Env env(*this, features); + Account alice{"alice"}; + Account bob{"bob"}; + + env.fund(XRP(1000), alice, bob); + env.close(); + + auto const USD = alice["USD"]; + + // alice sets asfAllowClawback + env(fset(alice, asfAllowClawback)); + env.close(); + env.require(flags(alice, asfAllowClawback)); + + // alice issues 100 USD to bob + env.trust(USD(1000), bob); + env(pay(alice, bob, USD(100))); + env.close(); + + env.require(balance(bob, alice["USD"](100))); + env.require(balance(alice, bob["USD"](-100))); + + // alice creates 10 tickets + std::uint32_t ticketCnt = 10; + std::uint32_t aliceTicketSeq{env.seq(alice) + 1}; + env(ticket::create(alice, ticketCnt)); + env.close(); + std::uint32_t const aliceSeq{env.seq(alice)}; + BEAST_EXPECT(ticketCount(env, alice) == ticketCnt); + BEAST_EXPECT(ownerCount(env, alice) == ticketCnt); + + while (ticketCnt > 0) + { + // alice claws back 5 USD using a ticket + env(claw(alice, bob["USD"](5)), ticket::use(aliceTicketSeq++)); + env.close(); + + ticketCnt--; + BEAST_EXPECT(ticketCount(env, alice) == ticketCnt); + BEAST_EXPECT(ownerCount(env, alice) == ticketCnt); + } + + // alice clawed back 50 USD total, trustline has 50 USD remaining + env.require(balance(bob, alice["USD"](50))); + env.require(balance(alice, bob["USD"](-50))); + + // Verify that the account sequence numbers did not advance. 
+ BEAST_EXPECT(env.seq(alice) == aliceSeq); + } + + void + testWithFeats(FeatureBitset features) + { + testAllowClawbackFlag(features); + testValidation(features); + testPermission(features); + testEnabled(features); + testMultiLine(features); + testBidirectionalLine(features); + testDeleteDefaultLine(features); + testFrozenLine(features); + testAmountExceedsAvailable(features); + testTickets(features); + } + +public: + void + run() override + { + using namespace test::jtx; + FeatureBitset const all{supported_amendments()}; + + testWithFeats(all); + } +}; + +BEAST_DEFINE_TESTSUITE(Clawback, app, ripple); +} // namespace ripple diff --git a/src/test/jtx/flags.h b/src/test/jtx/flags.h index a9ecaf8e2e0..a6f4345cfd2 100644 --- a/src/test/jtx/flags.h +++ b/src/test/jtx/flags.h @@ -80,6 +80,9 @@ class flags_helper case asfDepositAuth: mask_ |= lsfDepositAuth; break; + case asfAllowClawback: + mask_ |= lsfAllowClawback; + break; default: Throw("unknown flag"); } diff --git a/src/test/jtx/impl/trust.cpp b/src/test/jtx/impl/trust.cpp index 4fd0ad5967d..cce4657e025 100644 --- a/src/test/jtx/impl/trust.cpp +++ b/src/test/jtx/impl/trust.cpp @@ -59,6 +59,17 @@ trust( return jv; } +Json::Value +claw(Account const& account, STAmount const& amount) +{ + Json::Value jv; + jv[jss::Account] = account.human(); + jv[jss::Amount] = amount.getJson(JsonOptions::none); + jv[jss::TransactionType] = jss::Clawback; + + return jv; +} + } // namespace jtx } // namespace test } // namespace ripple diff --git a/src/test/jtx/trust.h b/src/test/jtx/trust.h index ba0bc995914..5b6dd78b3cd 100644 --- a/src/test/jtx/trust.h +++ b/src/test/jtx/trust.h @@ -40,6 +40,9 @@ trust( Account const& peer, std::uint32_t flags); +Json::Value +claw(Account const& account, STAmount const& amount); + } // namespace jtx } // namespace test } // namespace ripple diff --git a/src/test/rpc/AccountSet_test.cpp b/src/test/rpc/AccountSet_test.cpp index f935e0a846f..fc2b4ffe116 100644 --- a/src/test/rpc/AccountSet_test.cpp +++ b/src/test/rpc/AccountSet_test.cpp @@ -93,6 +93,12 @@ class AccountSet_test : public beast::unit_test::suite // and are tested elsewhere continue; } + if (flag == asfAllowClawback) + { + // The asfAllowClawback flag can't be cleared. It is tested + // elsewhere. + continue; + } if (std::find(goodFlags.begin(), goodFlags.end(), flag) != goodFlags.end()) From beba87129e6de43576b1f90a71370b16a081f40e Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Mon, 26 Jun 2023 14:24:00 -0700 Subject: [PATCH 180/508] Set version to 1.12.0-b1 --- src/ripple/protocol/impl/BuildInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index 734773fa03e..dbe59c1763f 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.11.0" +char const* const versionString = "1.12.0-b1" // clang-format on #if defined(DEBUG) || defined(SANITIZER) From 534a36536cf25d0b1307ee793405f76d2385c463 Mon Sep 17 00:00:00 2001 From: Denis Angell Date: Tue, 27 Jun 2023 02:50:03 -0400 Subject: [PATCH 181/508] refactor: replace hand-rolled lexicalCast (#4473) Replace hand-rolled code with std::from_chars for better maintainability. 
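For illustration, a minimal standalone sketch of the std::from_chars pattern this change adopts (simplified; the names below are illustrative, and the actual code is in the diff that follows):

    #include <charconv>
    #include <string>
    #include <system_error>

    // Parse an unsigned integer from the whole string. Returns false on
    // invalid input, overflow, or trailing characters. Note that
    // std::from_chars does not accept a leading '+', so a caller that
    // wants to allow one must strip it before calling.
    bool parseUnsigned(std::string const& in, unsigned int& out)
    {
        char const* first = in.data();
        char const* const last = in.data() + in.size();
        auto const result = std::from_chars(first, last, out);
        return result.ec == std::errc() && result.ptr == last;
    }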
The C++ std::from_chars function is intended to be as fast as possible, so it is unlikely to be slower than the code it replaces. This change is a net gain because it reduces the amount of hand-rolled code. --- .../app/rdb/backend/detail/impl/Node.cpp | 13 +- src/ripple/beast/core/LexicalCast.h | 140 +++--------------- 2 files changed, 22 insertions(+), 131 deletions(-) diff --git a/src/ripple/app/rdb/backend/detail/impl/Node.cpp b/src/ripple/app/rdb/backend/detail/impl/Node.cpp index b3b354ebe72..8948360a3ad 100644 --- a/src/ripple/app/rdb/backend/detail/impl/Node.cpp +++ b/src/ripple/app/rdb/backend/detail/impl/Node.cpp @@ -518,7 +518,7 @@ getHashByIndex(soci::session& session, LedgerIndex ledgerIndex) std::string sql = "SELECT LedgerHash FROM Ledgers INDEXED BY SeqLedger WHERE LedgerSeq='"; - sql.append(beast::lexicalCastThrow(ledgerIndex)); + sql.append(std::to_string(ledgerIndex)); sql.append("';"); std::string hash; @@ -581,9 +581,9 @@ getHashesByIndex( { std::string sql = "SELECT LedgerSeq,LedgerHash,PrevHash FROM Ledgers WHERE LedgerSeq >= "; - sql.append(beast::lexicalCastThrow(minSeq)); + sql.append(std::to_string(minSeq)); sql.append(" AND LedgerSeq <= "); - sql.append(beast::lexicalCastThrow(maxSeq)); + sql.append(std::to_string(maxSeq)); sql.append(";"); std::uint64_t ls; @@ -761,8 +761,7 @@ transactionsSQL( boost::format("SELECT %s FROM AccountTransactions " "WHERE Account = '%s' %s %s LIMIT %u, %u;") % selection % toBase58(options.account) % maxClause % minClause % - beast::lexicalCastThrow(options.offset) % - beast::lexicalCastThrow(numberOfResults)); + options.offset % numberOfResults); else sql = boost::str( boost::format( @@ -775,9 +774,7 @@ transactionsSQL( "LIMIT %u, %u;") % selection % toBase58(options.account) % maxClause % minClause % (descending ? "DESC" : "ASC") % (descending ? "DESC" : "ASC") % - (descending ? "DESC" : "ASC") % - beast::lexicalCastThrow(options.offset) % - beast::lexicalCastThrow(numberOfResults)); + (descending ? 
"DESC" : "ASC") % options.offset % numberOfResults); JLOG(j.trace()) << "txSQL query: " << sql; return sql; } diff --git a/src/ripple/beast/core/LexicalCast.h b/src/ripple/beast/core/LexicalCast.h index de1a6a396e9..f4c78341b91 100644 --- a/src/ripple/beast/core/LexicalCast.h +++ b/src/ripple/beast/core/LexicalCast.h @@ -23,8 +23,8 @@ #include #include #include +#include #include -#include #include #include #include @@ -32,111 +32,10 @@ #include #include -#include - namespace beast { namespace detail { -#if BOOST_COMP_MSVC -#pragma warning(push) -#pragma warning(disable : 4800) -#pragma warning(disable : 4804) -#endif - -template -bool -parse_integral(Int& num, FwdIt first, FwdIt last, Accumulator accumulator) -{ - num = 0; - - if (first == last) - return false; - - while (first != last) - { - auto const c = *first++; - if (c < '0' || c > '9') - return false; - if (!accumulator(num, Int(c - '0'))) - return false; - } - - return true; -} - -template -bool -parse_negative_integral(Int& num, FwdIt first, FwdIt last) -{ - Int limit_value = std::numeric_limits::min() / 10; - Int limit_digit = std::numeric_limits::min() % 10; - - if (limit_digit < 0) - limit_digit = -limit_digit; - - return parse_integral( - num, first, last, [limit_value, limit_digit](Int& value, Int digit) { - assert((digit >= 0) && (digit <= 9)); - if (value < limit_value || - (value == limit_value && digit > limit_digit)) - return false; - value = (value * 10) - digit; - return true; - }); -} - -template -bool -parse_positive_integral(Int& num, FwdIt first, FwdIt last) -{ - Int limit_value = std::numeric_limits::max() / 10; - Int limit_digit = std::numeric_limits::max() % 10; - - return parse_integral( - num, first, last, [limit_value, limit_digit](Int& value, Int digit) { - assert((digit >= 0) && (digit <= 9)); - if (value > limit_value || - (value == limit_value && digit > limit_digit)) - return false; - value = (value * 10) + digit; - return true; - }); -} - -template -bool -parseSigned(IntType& result, FwdIt first, FwdIt last) -{ - static_assert( - std::is_signed::value, - "You may only call parseSigned with a signed integral type."); - - if (first != last && *first == '-') - return parse_negative_integral(result, first + 1, last); - - if (first != last && *first == '+') - return parse_positive_integral(result, first + 1, last); - - return parse_positive_integral(result, first, last); -} - -template -bool -parseUnsigned(UIntType& result, FwdIt first, FwdIt last) -{ - static_assert( - std::is_unsigned::value, - "You may only call parseUnsigned with an unsigned integral type."); - - if (first != last && *first == '+') - return parse_positive_integral(result, first + 1, last); - - return parse_positive_integral(result, first, last); -} - -//------------------------------------------------------------------------------ - // These specializatons get called by the non-member functions to do the work template struct LexicalCast; @@ -148,7 +47,7 @@ struct LexicalCast explicit LexicalCast() = default; template - std::enable_if_t::value, bool> + std::enable_if_t, bool> operator()(std::string& out, Arithmetic in) { out = std::to_string(in); @@ -156,7 +55,7 @@ struct LexicalCast } template - std::enable_if_t::value, bool> + std::enable_if_t, bool> operator()(std::string& out, Enumeration in) { out = std::to_string( @@ -172,21 +71,24 @@ struct LexicalCast explicit LexicalCast() = default; static_assert( - std::is_integral::value, + std::is_integral_v, "beast::LexicalCast can only be used with integral types"); template - 
std::enable_if_t::value, bool> + std::enable_if_t< + std::is_integral_v && !std::is_same_v, + bool> operator()(Integral& out, std::string const& in) const { - return parseUnsigned(out, in.begin(), in.end()); - } + auto first = in.data(); + auto last = in.data() + in.size(); - template - std::enable_if_t::value, bool> - operator()(Integral& out, std::string const& in) const - { - return parseSigned(out, in.begin(), in.end()); + if (first != last && *first == '+') + ++first; + + auto ret = std::from_chars(first, last, out); + + return ret.ec == std::errc() && ret.ptr == last; } bool @@ -242,10 +144,6 @@ struct LexicalCast } }; -#if BOOST_COMP_MSVC -#pragma warning(pop) -#endif - } // namespace detail //------------------------------------------------------------------------------ @@ -278,9 +176,7 @@ template Out lexicalCastThrow(In in) { - Out out; - - if (lexicalCastChecked(out, in)) + if (Out out; lexicalCastChecked(out, in)) return out; throw BadLexicalCast(); @@ -295,9 +191,7 @@ template Out lexicalCast(In in, Out defaultValue = Out()) { - Out out; - - if (lexicalCastChecked(out, in)) + if (Out out; lexicalCastChecked(out, in)) return out; return defaultValue; From 6b4437db3951a60d53ec3e4c165b084500a4a4c5 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Tue, 27 Jun 2023 03:23:52 -0500 Subject: [PATCH 182/508] Fix package definition for Conan (#4485) Fix the libxrpl library target for consumers using Conan. * Fix installation issues and update includes. * Update requirements in the Conan package info. * libxrpl requires openssl::crypto. (Conan is a software package manager for C++.) --- Builds/CMake/RippledCore.cmake | 57 ++++++++++++++++--- conanfile.py | 6 +- src/ripple/protocol/impl/secp256k1.h | 2 +- src/secp256k1/CMakeLists.txt | 2 +- src/secp256k1/src/bench_ecdh.c | 4 +- src/secp256k1/src/bench_internal.c | 2 +- src/secp256k1/src/bench_recover.c | 4 +- src/secp256k1/src/bench_schnorr_verify.c | 4 +- src/secp256k1/src/bench_sign.c | 2 +- src/secp256k1/src/bench_verify.c | 2 +- src/secp256k1/src/gen_context.c | 2 +- .../src/java/org_bitcoin_NativeSecp256k1.c | 6 +- .../src/java/org_bitcoin_NativeSecp256k1.h | 2 +- .../src/java/org_bitcoin_Secp256k1Context.c | 2 +- .../src/java/org_bitcoin_Secp256k1Context.h | 2 +- src/secp256k1/src/modules/ecdh/main_impl.h | 2 +- .../src/modules/recovery/main_impl.h | 2 +- src/secp256k1/src/secp256k1.c | 2 +- src/secp256k1/src/tests.c | 2 +- src/secp256k1/src/tests_exhaustive.c | 4 +- 20 files changed, 79 insertions(+), 32 deletions(-) diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 3b77e35c8c0..9f5813297b2 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -149,31 +149,54 @@ target_link_libraries (xrpl_core #]=================================] install ( FILES + src/ripple/basics/algorithm.h + src/ripple/basics/Archive.h src/ripple/basics/base64.h + src/ripple/basics/base_uint.h + src/ripple/basics/BasicConfig.h src/ripple/basics/Blob.h src/ripple/basics/Buffer.h + src/ripple/basics/ByteUtilities.h + src/ripple/basics/chrono.h + src/ripple/basics/comparators.h + src/ripple/basics/CompressionAlgorithms.h + src/ripple/basics/contract.h src/ripple/basics/CountedObject.h + src/ripple/basics/DecayingSample.h + src/ripple/basics/Expected.h + src/ripple/basics/FeeUnits.h src/ripple/basics/FileUtilities.h + src/ripple/basics/hardened_hash.h src/ripple/basics/IOUAmount.h + src/ripple/basics/join.h + src/ripple/basics/KeyCache.h src/ripple/basics/LocalValue.h src/ripple/basics/Log.h + 
src/ripple/basics/make_SSLContext.h src/ripple/basics/MathUtilities.h + src/ripple/basics/mulDiv.h src/ripple/basics/Number.h + src/ripple/basics/partitioned_unordered_map.h + src/ripple/basics/PerfLog.h + src/ripple/basics/random.h + src/ripple/basics/RangeSet.h + src/ripple/basics/README.md + src/ripple/basics/ResolverAsio.h + src/ripple/basics/Resolver.h src/ripple/basics/safe_cast.h + src/ripple/basics/scope.h + src/ripple/basics/SHAMapHash.h src/ripple/basics/Slice.h src/ripple/basics/spinlock.h + src/ripple/basics/strHex.h src/ripple/basics/StringUtilities.h + src/ripple/basics/TaggedCache.h + src/ripple/basics/tagged_integer.h src/ripple/basics/ThreadSafetyAnalysis.h src/ripple/basics/ToString.h src/ripple/basics/UnorderedContainers.h + src/ripple/basics/UptimeClock.h src/ripple/basics/XRPAmount.h - src/ripple/basics/algorithm.h - src/ripple/basics/base_uint.h - src/ripple/basics/chrono.h - src/ripple/basics/contract.h - src/ripple/basics/FeeUnits.h - src/ripple/basics/hardened_hash.h - src/ripple/basics/strHex.h DESTINATION include/ripple/basics) install ( FILES @@ -287,6 +310,26 @@ install ( src/ripple/beast/unit_test.h src/ripple/beast/xor_shift_engine.h DESTINATION include/ripple/beast) +install ( + FILES + src/ripple/beast/unit_test/amount.hpp + src/ripple/beast/unit_test/dstream.hpp + src/ripple/beast/unit_test/global_suites.hpp + src/ripple/beast/unit_test/main.cpp + src/ripple/beast/unit_test/match.hpp + src/ripple/beast/unit_test/recorder.hpp + src/ripple/beast/unit_test/reporter.hpp + src/ripple/beast/unit_test/results.hpp + src/ripple/beast/unit_test/runner.hpp + src/ripple/beast/unit_test/suite.hpp + src/ripple/beast/unit_test/suite_info.hpp + src/ripple/beast/unit_test/suite_list.hpp + src/ripple/beast/unit_test/thread.hpp + DESTINATION include/ripple/beast/unit_test) +install ( + FILES + src/ripple/beast/unit_test/detail/const_container.hpp + DESTINATION include/ripple/beast/unit_test/detail) install ( FILES src/ripple/beast/utility/Journal.h diff --git a/conanfile.py b/conanfile.py index 6e616b3c9f4..9c135d9e445 100644 --- a/conanfile.py +++ b/conanfile.py @@ -151,4 +151,8 @@ def package_info(self): 'libsecp256k1.a', ] libxrpl.includedirs = ['include'] - libxrpl.requires = ['boost::boost'] + libxrpl.requires = [ + 'boost::boost', + 'openssl::crypto', + 'date::date', + ] diff --git a/src/ripple/protocol/impl/secp256k1.h b/src/ripple/protocol/impl/secp256k1.h index 8877fff28cf..36f4d39a133 100644 --- a/src/ripple/protocol/impl/secp256k1.h +++ b/src/ripple/protocol/impl/secp256k1.h @@ -20,7 +20,7 @@ #ifndef RIPPLE_PROTOCOL_SECP256K1_H_INCLUDED #define RIPPLE_PROTOCOL_SECP256K1_H_INCLUDED -#include +#include namespace ripple { diff --git a/src/secp256k1/CMakeLists.txt b/src/secp256k1/CMakeLists.txt index 6b41b66c8ab..304ae00cb5f 100644 --- a/src/secp256k1/CMakeLists.txt +++ b/src/secp256k1/CMakeLists.txt @@ -24,7 +24,7 @@ target_compile_definitions(secp256k1 PRIVATE ) target_include_directories(secp256k1 PUBLIC $ - PRIVATE $ + PUBLIC $ ) target_compile_options(secp256k1 PRIVATE $<$:-wd4319> diff --git a/src/secp256k1/src/bench_ecdh.c b/src/secp256k1/src/bench_ecdh.c index cde5e2dbb4e..0cf47f7f652 100644 --- a/src/secp256k1/src/bench_ecdh.c +++ b/src/secp256k1/src/bench_ecdh.c @@ -6,8 +6,8 @@ #include -#include "include/secp256k1.h" -#include "include/secp256k1_ecdh.h" +#include "secp256k1.h" +#include "secp256k1_ecdh.h" #include "util.h" #include "bench.h" diff --git a/src/secp256k1/src/bench_internal.c b/src/secp256k1/src/bench_internal.c index 0809f77bda1..66b690109b7 
100644 --- a/src/secp256k1/src/bench_internal.c +++ b/src/secp256k1/src/bench_internal.c @@ -5,7 +5,7 @@ **********************************************************************/ #include -#include "include/secp256k1.h" +#include "secp256k1.h" #include "util.h" #include "hash_impl.h" diff --git a/src/secp256k1/src/bench_recover.c b/src/secp256k1/src/bench_recover.c index 6489378cc64..e59120333ae 100644 --- a/src/secp256k1/src/bench_recover.c +++ b/src/secp256k1/src/bench_recover.c @@ -4,8 +4,8 @@ * file COPYING or http://www.opensource.org/licenses/mit-license.php.* **********************************************************************/ -#include "include/secp256k1.h" -#include "include/secp256k1_recovery.h" +#include "secp256k1.h" +#include "secp256k1_recovery.h" #include "util.h" #include "bench.h" diff --git a/src/secp256k1/src/bench_schnorr_verify.c b/src/secp256k1/src/bench_schnorr_verify.c index 5f137dda23e..f5c3312d8a5 100644 --- a/src/secp256k1/src/bench_schnorr_verify.c +++ b/src/secp256k1/src/bench_schnorr_verify.c @@ -7,8 +7,8 @@ #include #include -#include "include/secp256k1.h" -#include "include/secp256k1_schnorr.h" +#include "secp256k1.h" +#include "secp256k1_schnorr.h" #include "util.h" #include "bench.h" diff --git a/src/secp256k1/src/bench_sign.c b/src/secp256k1/src/bench_sign.c index ed7224d757e..2ff60e2a14b 100644 --- a/src/secp256k1/src/bench_sign.c +++ b/src/secp256k1/src/bench_sign.c @@ -4,7 +4,7 @@ * file COPYING or http://www.opensource.org/licenses/mit-license.php.* **********************************************************************/ -#include "include/secp256k1.h" +#include "secp256k1.h" #include "util.h" #include "bench.h" diff --git a/src/secp256k1/src/bench_verify.c b/src/secp256k1/src/bench_verify.c index 418defa0aa2..ed454e90baf 100644 --- a/src/secp256k1/src/bench_verify.c +++ b/src/secp256k1/src/bench_verify.c @@ -7,7 +7,7 @@ #include #include -#include "include/secp256k1.h" +#include "secp256k1.h" #include "util.h" #include "bench.h" diff --git a/src/secp256k1/src/gen_context.c b/src/secp256k1/src/gen_context.c index 1835fd491d1..7247fbee0ef 100644 --- a/src/secp256k1/src/gen_context.c +++ b/src/secp256k1/src/gen_context.c @@ -7,7 +7,7 @@ #define USE_BASIC_CONFIG 1 #include "basic-config.h" -#include "include/secp256k1.h" +#include "secp256k1.h" #include "field_impl.h" #include "scalar_impl.h" #include "group_impl.h" diff --git a/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c b/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c index bcef7b32ce3..3970b655a49 100644 --- a/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c +++ b/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.c @@ -2,9 +2,9 @@ #include #include #include "org_bitcoin_NativeSecp256k1.h" -#include "include/secp256k1.h" -#include "include/secp256k1_ecdh.h" -#include "include/secp256k1_recovery.h" +#include "secp256k1.h" +#include "secp256k1_ecdh.h" +#include "secp256k1_recovery.h" SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ctx_1clone diff --git a/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h b/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h index fe613c9e9e7..665cf4dd5c7 100644 --- a/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h +++ b/src/secp256k1/src/java/org_bitcoin_NativeSecp256k1.h @@ -1,6 +1,6 @@ /* DO NOT EDIT THIS FILE - it is machine generated */ #include -#include "include/secp256k1.h" +#include "secp256k1.h" /* Header for class org_bitcoin_NativeSecp256k1 */ #ifndef _Included_org_bitcoin_NativeSecp256k1 diff --git 
a/src/secp256k1/src/java/org_bitcoin_Secp256k1Context.c b/src/secp256k1/src/java/org_bitcoin_Secp256k1Context.c index a52939e7e7d..81a87a10e93 100644 --- a/src/secp256k1/src/java/org_bitcoin_Secp256k1Context.c +++ b/src/secp256k1/src/java/org_bitcoin_Secp256k1Context.c @@ -1,7 +1,7 @@ #include #include #include "org_bitcoin_Secp256k1Context.h" -#include "include/secp256k1.h" +#include "secp256k1.h" SECP256K1_API jlong JNICALL Java_org_bitcoin_Secp256k1Context_secp256k1_1init_1context (JNIEnv* env, jclass classObject) diff --git a/src/secp256k1/src/java/org_bitcoin_Secp256k1Context.h b/src/secp256k1/src/java/org_bitcoin_Secp256k1Context.h index 0d2bc84b7f3..f1ed91ad87d 100644 --- a/src/secp256k1/src/java/org_bitcoin_Secp256k1Context.h +++ b/src/secp256k1/src/java/org_bitcoin_Secp256k1Context.h @@ -1,6 +1,6 @@ /* DO NOT EDIT THIS FILE - it is machine generated */ #include -#include "include/secp256k1.h" +#include "secp256k1.h" /* Header for class org_bitcoin_Secp256k1Context */ #ifndef _Included_org_bitcoin_Secp256k1Context diff --git a/src/secp256k1/src/modules/ecdh/main_impl.h b/src/secp256k1/src/modules/ecdh/main_impl.h index 9e30fb73dd7..bd22b2fd1ca 100644 --- a/src/secp256k1/src/modules/ecdh/main_impl.h +++ b/src/secp256k1/src/modules/ecdh/main_impl.h @@ -7,7 +7,7 @@ #ifndef _SECP256K1_MODULE_ECDH_MAIN_ #define _SECP256K1_MODULE_ECDH_MAIN_ -#include "include/secp256k1_ecdh.h" +#include "secp256k1_ecdh.h" #include "ecmult_const_impl.h" int secp256k1_ecdh(const secp256k1_context* ctx, unsigned char *result, const secp256k1_pubkey *point, const unsigned char *scalar) { diff --git a/src/secp256k1/src/modules/recovery/main_impl.h b/src/secp256k1/src/modules/recovery/main_impl.h index c6fbe239813..c2c66e3990d 100755 --- a/src/secp256k1/src/modules/recovery/main_impl.h +++ b/src/secp256k1/src/modules/recovery/main_impl.h @@ -7,7 +7,7 @@ #ifndef _SECP256K1_MODULE_RECOVERY_MAIN_ #define _SECP256K1_MODULE_RECOVERY_MAIN_ -#include "include/secp256k1_recovery.h" +#include "secp256k1_recovery.h" static void secp256k1_ecdsa_recoverable_signature_load(const secp256k1_context* ctx, secp256k1_scalar* r, secp256k1_scalar* s, int* recid, const secp256k1_ecdsa_recoverable_signature* sig) { (void)ctx; diff --git a/src/secp256k1/src/secp256k1.c b/src/secp256k1/src/secp256k1.c index fb8b882faaf..1db0b6b6a0a 100644 --- a/src/secp256k1/src/secp256k1.c +++ b/src/secp256k1/src/secp256k1.c @@ -4,7 +4,7 @@ * file COPYING or http://www.opensource.org/licenses/mit-license.php.* **********************************************************************/ -#include "include/secp256k1.h" +#include "secp256k1.h" #include "util.h" #include "num_impl.h" diff --git a/src/secp256k1/src/tests.c b/src/secp256k1/src/tests.c index 9ae7d302813..1ca75297b54 100644 --- a/src/secp256k1/src/tests.c +++ b/src/secp256k1/src/tests.c @@ -14,7 +14,7 @@ #include #include "secp256k1.c" -#include "include/secp256k1.h" +#include "secp256k1.h" #include "testrand_impl.h" #ifdef ENABLE_OPENSSL_TESTS diff --git a/src/secp256k1/src/tests_exhaustive.c b/src/secp256k1/src/tests_exhaustive.c index b040bb0733d..645fb9b3022 100644 --- a/src/secp256k1/src/tests_exhaustive.c +++ b/src/secp256k1/src/tests_exhaustive.c @@ -21,14 +21,14 @@ #define EXHAUSTIVE_TEST_LAMBDA 9 /* cube root of 1 mod 13 */ #endif -#include "include/secp256k1.h" +#include "secp256k1.h" #include "group.h" #include "secp256k1.c" #include "testrand_impl.h" #ifdef ENABLE_MODULE_RECOVERY #include "src/modules/recovery/main_impl.h" -#include "include/secp256k1_recovery.h" +#include 
"secp256k1_recovery.h" #endif /** stolen from tests.c */ From 0e983528e19567a103f7ff48093481130c570070 Mon Sep 17 00:00:00 2001 From: Chenna Keshava B S <21219765+ckeshava@users.noreply.github.com> Date: Tue, 27 Jun 2023 17:52:15 -0700 Subject: [PATCH 183/508] fix: remove deprecated fields in `ledger` method (#4244) Remove deprecated fields from the ledger command: * accepted * hash (use ledger_hash instead) * seqNum (use ledger_index instead) * totalCoins (use total_coins instead) Update SHAMapStore unit tests to use `jss:ledger_hash` instead of the deprecated `hash` field. Fix #3214 --- src/ripple/app/ledger/impl/LedgerToJson.cpp | 5 ----- src/ripple/protocol/jss.h | 2 -- src/test/app/SHAMapStore_test.cpp | 9 +++++---- src/test/rpc/LedgerRPC_test.cpp | 8 -------- 4 files changed, 5 insertions(+), 19 deletions(-) diff --git a/src/ripple/app/ledger/impl/LedgerToJson.cpp b/src/ripple/app/ledger/impl/LedgerToJson.cpp index 558757d511b..8234ba16f9e 100644 --- a/src/ripple/app/ledger/impl/LedgerToJson.cpp +++ b/src/ripple/app/ledger/impl/LedgerToJson.cpp @@ -53,7 +53,6 @@ fillJson(Object& json, bool closed, LedgerInfo const& info, bool bFull) { json[jss::parent_hash] = to_string(info.parentHash); json[jss::ledger_index] = to_string(info.seq); - json[jss::seqNum] = to_string(info.seq); // DEPRECATED if (closed) { @@ -70,10 +69,6 @@ fillJson(Object& json, bool closed, LedgerInfo const& info, bool bFull) json[jss::account_hash] = to_string(info.accountHash); json[jss::total_coins] = to_string(info.drops); - // These next three are DEPRECATED. - json[jss::hash] = to_string(info.hash); - json[jss::totalCoins] = to_string(info.drops); - json[jss::accepted] = closed; json[jss::close_flags] = info.closeFlags; // Always show fields that contribute to the ledger hash diff --git a/src/ripple/protocol/jss.h b/src/ripple/protocol/jss.h index 4612bd7811d..44508a8a781 100644 --- a/src/ripple/protocol/jss.h +++ b/src/ripple/protocol/jss.h @@ -528,7 +528,6 @@ JSS(send_max); // in: PathRequest, RipplePathFind JSS(seq); // in: LedgerEntry; // out: NetworkOPs, RPCSub, AccountOffers, // ValidatorList, ValidatorInfo, Manifest -JSS(seqNum); // out: LedgerToJson JSS(sequence); // in: UNL JSS(sequence_count); // out: AccountInfo JSS(server_domain); // out: NetworkOPs @@ -583,7 +582,6 @@ JSS(timeouts); // out: InboundLedger JSS(track); // out: PeerImp JSS(traffic); // out: Overlay JSS(total); // out: counters -JSS(totalCoins); // out: LedgerToJson JSS(total_bytes_recv); // out: Peers JSS(total_bytes_sent); // out: Peers JSS(total_coins); // out: LedgerToJson diff --git a/src/test/app/SHAMapStore_test.cpp b/src/test/app/SHAMapStore_test.cpp index 010c83a4210..8a47b186957 100644 --- a/src/test/app/SHAMapStore_test.cpp +++ b/src/test/app/SHAMapStore_test.cpp @@ -85,7 +85,8 @@ class SHAMapStore_test : public beast::unit_test::suite const std::string outTxHash = to_string(info.txHash); auto const& ledger = json[jss::result][jss::ledger]; - return outHash == ledger[jss::hash].asString() && outSeq == seq && + return outHash == ledger[jss::ledger_hash].asString() && + outSeq == seq && outParentHash == ledger[jss::parent_hash].asString() && outDrops == ledger[jss::total_coins].asString() && outCloseTime == ledger[jss::close_time].asUInt() && @@ -111,9 +112,9 @@ class SHAMapStore_test : public beast::unit_test::suite BEAST_EXPECT( json.isMember(jss::result) && json[jss::result].isMember(jss::ledger) && - json[jss::result][jss::ledger].isMember(jss::hash) && - json[jss::result][jss::ledger][jss::hash].isString()); - return 
json[jss::result][jss::ledger][jss::hash].asString(); + json[jss::result][jss::ledger].isMember(jss::ledger_hash) && + json[jss::result][jss::ledger][jss::ledger_hash].isString()); + return json[jss::result][jss::ledger][jss::ledger_hash].asString(); } void diff --git a/src/test/rpc/LedgerRPC_test.cpp b/src/test/rpc/LedgerRPC_test.cpp index 6644e15e959..8a09f2a23b6 100644 --- a/src/test/rpc/LedgerRPC_test.cpp +++ b/src/test/rpc/LedgerRPC_test.cpp @@ -78,10 +78,6 @@ class LedgerRPC_test : public beast::unit_test::suite env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr[jss::ledger][jss::closed] == true); BEAST_EXPECT(jrr[jss::ledger][jss::ledger_index] == "1"); - BEAST_EXPECT(jrr[jss::ledger][jss::accepted] == true); - BEAST_EXPECT( - jrr[jss::ledger][jss::totalCoins] == - env.balance(env.master).value().getText()); } { @@ -91,10 +87,6 @@ class LedgerRPC_test : public beast::unit_test::suite env.rpc("json", "ledger", to_string(jvParams))[jss::result]; BEAST_EXPECT(jrr[jss::ledger][jss::closed] == true); BEAST_EXPECT(jrr[jss::ledger][jss::ledger_index] == "1"); - BEAST_EXPECT(jrr[jss::ledger][jss::accepted] == true); - BEAST_EXPECT( - jrr[jss::ledger][jss::totalCoins] == - env.balance(env.master).value().getText()); } { From 11e914fbe97184e0566085948b04942a8301ac36 Mon Sep 17 00:00:00 2001 From: Scott Schurr Date: Wed, 28 Jun 2023 03:57:52 +0300 Subject: [PATCH 184/508] refactor: rename ServerHandlerImp to ServerHandler (#4516) Rename `ServerHandlerImp` to `ServerHandler`. There was no other ServerHandler definition despite the existence of a header suggesting that there was. This resolves a piece of historical confusion in the code, which was identified during a code review. The changes in the diff may look more extensive than they actually are. The contents of `impl/ServerHandlerImp.h` were merged into `ServerHandler.h`, making the latter file appear to have undergone significant modifications. However, this is a non-breaking refactor that only restructures code. 
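One detail worth calling out in the merged header that follows: the `ServerHandler` constructor stays public so that `std::make_unique` can call it, but construction is still restricted by requiring a private tag type (`ServerHandlerCreator`) as the first argument, which only the friended `make_ServerHandler` factory can name. A minimal, self-contained sketch of that passkey idiom, using hypothetical names rather than the rippled types, looks like this:

```
#include <memory>

class Widget
{
    // Only code that can name this private tag can construct a Widget.
    struct CreatorTag
    {
        explicit CreatorTag() = default;
    };

    // The factory function is granted access to the private tag.
    friend std::unique_ptr<Widget> makeWidget(int size);

public:
    // Public so std::make_unique can call it, yet callers still need a tag.
    Widget(CreatorTag const&, int size) : size_(size)
    {
    }

private:
    int size_;
};

std::unique_ptr<Widget>
makeWidget(int size)
{
    // Only this friend can create the tag that unlocks the constructor.
    return std::make_unique<Widget>(Widget::CreatorTag{}, size);
}

int
main()
{
    auto w = makeWidget(42);  // OK: goes through the factory
    // Widget direct{Widget::CreatorTag{}, 42};  // would not compile: the tag is private
    return w ? 0 : 1;
}
```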
--- Builds/CMake/RippledCore.cmake | 2 +- src/ripple/rpc/ServerHandler.h | 199 ++++++++++++++++- ...ServerHandlerImp.cpp => ServerHandler.cpp} | 43 ++-- src/ripple/rpc/impl/ServerHandlerImp.h | 201 ------------------ 4 files changed, 215 insertions(+), 230 deletions(-) rename src/ripple/rpc/impl/{ServerHandlerImp.cpp => ServerHandler.cpp} (98%) delete mode 100644 src/ripple/rpc/impl/ServerHandlerImp.h diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 9f5813297b2..53a5e61a7b7 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -692,7 +692,7 @@ target_sources (rippled PRIVATE src/ripple/rpc/impl/RPCHandler.cpp src/ripple/rpc/impl/RPCHelpers.cpp src/ripple/rpc/impl/Role.cpp - src/ripple/rpc/impl/ServerHandlerImp.cpp + src/ripple/rpc/impl/ServerHandler.cpp src/ripple/rpc/impl/ShardArchiveHandler.cpp src/ripple/rpc/impl/ShardVerificationScheduler.cpp src/ripple/rpc/impl/Status.cpp diff --git a/src/ripple/rpc/ServerHandler.h b/src/ripple/rpc/ServerHandler.h index 54cccdff64e..07fb61362a0 100644 --- a/src/ripple/rpc/ServerHandler.h +++ b/src/ripple/rpc/ServerHandler.h @@ -20,21 +20,200 @@ #ifndef RIPPLE_RPC_SERVERHANDLER_H_INCLUDED #define RIPPLE_RPC_SERVERHANDLER_H_INCLUDED -#include -#include -#include +#include #include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include namespace ripple { -using ServerHandler = ServerHandlerImp; +inline bool +operator<(Port const& lhs, Port const& rhs) +{ + return lhs.name < rhs.name; +} + +class ServerHandler +{ +public: + struct Setup + { + explicit Setup() = default; + + std::vector ports; + + // Memberspace + struct client_t + { + explicit client_t() = default; + + bool secure = false; + std::string ip; + std::uint16_t port = 0; + std::string user; + std::string password; + std::string admin_user; + std::string admin_password; + }; + + // Configuration when acting in client role + client_t client; + + // Configuration for the Overlay + struct overlay_t + { + explicit overlay_t() = default; + + boost::asio::ip::address ip; + std::uint16_t port = 0; + }; + + overlay_t overlay; + + void + makeContexts(); + }; + +private: + using socket_type = boost::beast::tcp_stream; + using stream_type = boost::beast::ssl_stream; + + Application& app_; + Resource::Manager& m_resourceManager; + beast::Journal m_journal; + NetworkOPs& m_networkOPs; + std::unique_ptr m_server; + Setup setup_; + JobQueue& m_jobQueue; + beast::insight::Counter rpc_requests_; + beast::insight::Event rpc_size_; + beast::insight::Event rpc_time_; + std::mutex mutex_; + std::condition_variable condition_; + bool stopped_{false}; + std::map, int> count_; + + // A private type used to restrict access to the ServerHandler constructor. + struct ServerHandlerCreator + { + explicit ServerHandlerCreator() = default; + }; + + // Friend declaration that allows make_ServerHandler to access the + // private type that restricts access to the ServerHandler ctor. + friend std::unique_ptr + make_ServerHandler( + Application& app, + boost::asio::io_service&, + JobQueue&, + NetworkOPs&, + Resource::Manager&, + CollectorManager& cm); + +public: + // Must be public so make_unique can call it. 
+ ServerHandler( + ServerHandlerCreator const&, + Application& app, + boost::asio::io_service& io_service, + JobQueue& jobQueue, + NetworkOPs& networkOPs, + Resource::Manager& resourceManager, + CollectorManager& cm); + + ~ServerHandler(); + + using Output = Json::Output; + + void + setup(Setup const& setup, beast::Journal journal); + + Setup const& + setup() const + { + return setup_; + } + + void + stop(); + + // + // Handler + // + + bool + onAccept(Session& session, boost::asio::ip::tcp::endpoint endpoint); + + Handoff + onHandoff( + Session& session, + std::unique_ptr&& bundle, + http_request_type&& request, + boost::asio::ip::tcp::endpoint const& remote_address); + + Handoff + onHandoff( + Session& session, + http_request_type&& request, + boost::asio::ip::tcp::endpoint const& remote_address) + { + return onHandoff( + session, + {}, + std::forward(request), + remote_address); + } + + void + onRequest(Session& session); + + void + onWSMessage( + std::shared_ptr session, + std::vector const& buffers); + + void + onClose(Session& session, boost::system::error_code const&); + + void + onStopped(Server&); + +private: + Json::Value + processSession( + std::shared_ptr const& session, + std::shared_ptr const& coro, + Json::Value const& jv); + + void + processSession( + std::shared_ptr const&, + std::shared_ptr coro); + + void + processRequest( + Port const& port, + std::string const& request, + beast::IP::Endpoint const& remoteIPAddress, + Output&&, + std::shared_ptr coro, + boost::string_view forwardedFor, + boost::string_view user); + + Handoff + statusResponse(http_request_type const& request) const; +}; ServerHandler::Setup setup_ServerHandler(Config const& c, std::ostream&& log); diff --git a/src/ripple/rpc/impl/ServerHandlerImp.cpp b/src/ripple/rpc/impl/ServerHandler.cpp similarity index 98% rename from src/ripple/rpc/impl/ServerHandlerImp.cpp rename to src/ripple/rpc/impl/ServerHandler.cpp index f269283b83a..85bff54232b 100644 --- a/src/ripple/rpc/impl/ServerHandlerImp.cpp +++ b/src/ripple/rpc/impl/ServerHandler.cpp @@ -17,6 +17,8 @@ */ //============================================================================== +#include + #include #include #include @@ -35,9 +37,7 @@ #include #include #include -#include #include -#include #include #include #include @@ -101,7 +101,8 @@ authorized(Port const& port, std::map const& h) return strUser == port.user && strPassword == port.password; } -ServerHandlerImp::ServerHandlerImp( +ServerHandler::ServerHandler( + ServerHandlerCreator const&, Application& app, boost::asio::io_service& io_service, JobQueue& jobQueue, @@ -121,13 +122,13 @@ ServerHandlerImp::ServerHandlerImp( rpc_time_ = group->make_event("time"); } -ServerHandlerImp::~ServerHandlerImp() +ServerHandler::~ServerHandler() { m_server = nullptr; } void -ServerHandlerImp::setup(Setup const& setup, beast::Journal journal) +ServerHandler::setup(Setup const& setup, beast::Journal journal) { setup_ = setup; m_server->ports(setup.ports); @@ -136,7 +137,7 @@ ServerHandlerImp::setup(Setup const& setup, beast::Journal journal) //------------------------------------------------------------------------------ void -ServerHandlerImp::stop() +ServerHandler::stop() { m_server->close(); { @@ -148,7 +149,7 @@ ServerHandlerImp::stop() //------------------------------------------------------------------------------ bool -ServerHandlerImp::onAccept( +ServerHandler::onAccept( Session& session, boost::asio::ip::tcp::endpoint endpoint) { @@ -170,7 +171,7 @@ ServerHandlerImp::onAccept( } Handoff 
-ServerHandlerImp::onHandoff( +ServerHandler::onHandoff( Session& session, std::unique_ptr&& bundle, http_request_type&& request, @@ -272,7 +273,7 @@ buffers_to_string(ConstBufferSequence const& bs) } void -ServerHandlerImp::onRequest(Session& session) +ServerHandler::onRequest(Session& session) { // Make sure RPC is enabled on the port if (session.port().protocol.count("http") == 0 && @@ -312,7 +313,7 @@ ServerHandlerImp::onRequest(Session& session) } void -ServerHandlerImp::onWSMessage( +ServerHandler::onWSMessage( std::shared_ptr session, std::vector const& buffers) { @@ -362,14 +363,14 @@ ServerHandlerImp::onWSMessage( } void -ServerHandlerImp::onClose(Session& session, boost::system::error_code const&) +ServerHandler::onClose(Session& session, boost::system::error_code const&) { std::lock_guard lock(mutex_); --count_[session.port()]; } void -ServerHandlerImp::onStopped(Server&) +ServerHandler::onStopped(Server&) { std::lock_guard lock(mutex_); stopped_ = true; @@ -398,7 +399,7 @@ logDuration( } Json::Value -ServerHandlerImp::processSession( +ServerHandler::processSession( std::shared_ptr const& session, std::shared_ptr const& coro, Json::Value const& jv) @@ -545,7 +546,7 @@ ServerHandlerImp::processSession( // Run as a coroutine. void -ServerHandlerImp::processSession( +ServerHandler::processSession( std::shared_ptr const& session, std::shared_ptr coro) { @@ -586,7 +587,7 @@ Json::Int constexpr forbidden = -32605; Json::Int constexpr wrong_version = -32606; void -ServerHandlerImp::processRequest( +ServerHandler::processRequest( Port const& port, std::string const& request, beast::IP::Endpoint const& remoteIPAddress, @@ -1022,7 +1023,7 @@ ServerHandlerImp::processRequest( is reported, meaning the server can accept more connections. */ Handoff -ServerHandlerImp::statusResponse(http_request_type const& request) const +ServerHandler::statusResponse(http_request_type const& request) const { using namespace boost::beast::http; Handoff handoff; @@ -1252,8 +1253,14 @@ make_ServerHandler( Resource::Manager& resourceManager, CollectorManager& cm) { - return std::make_unique( - app, io_service, jobQueue, networkOPs, resourceManager, cm); + return std::make_unique( + ServerHandler::ServerHandlerCreator(), + app, + io_service, + jobQueue, + networkOPs, + resourceManager, + cm); } } // namespace ripple diff --git a/src/ripple/rpc/impl/ServerHandlerImp.h b/src/ripple/rpc/impl/ServerHandlerImp.h deleted file mode 100644 index 7c0bf9c9ae5..00000000000 --- a/src/ripple/rpc/impl/ServerHandlerImp.h +++ /dev/null @@ -1,201 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2012, 2013 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#ifndef RIPPLE_RPC_SERVERHANDLERIMP_H_INCLUDED -#define RIPPLE_RPC_SERVERHANDLERIMP_H_INCLUDED - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { - -inline bool -operator<(Port const& lhs, Port const& rhs) -{ - return lhs.name < rhs.name; -} - -class ServerHandlerImp -{ -public: - struct Setup - { - explicit Setup() = default; - - std::vector ports; - - // Memberspace - struct client_t - { - explicit client_t() = default; - - bool secure = false; - std::string ip; - std::uint16_t port = 0; - std::string user; - std::string password; - std::string admin_user; - std::string admin_password; - }; - - // Configuration when acting in client role - client_t client; - - // Configuration for the Overlay - struct overlay_t - { - explicit overlay_t() = default; - - boost::asio::ip::address ip; - std::uint16_t port = 0; - }; - - overlay_t overlay; - - void - makeContexts(); - }; - -private: - using socket_type = boost::beast::tcp_stream; - using stream_type = boost::beast::ssl_stream; - - Application& app_; - Resource::Manager& m_resourceManager; - beast::Journal m_journal; - NetworkOPs& m_networkOPs; - std::unique_ptr m_server; - Setup setup_; - JobQueue& m_jobQueue; - beast::insight::Counter rpc_requests_; - beast::insight::Event rpc_size_; - beast::insight::Event rpc_time_; - std::mutex mutex_; - std::condition_variable condition_; - bool stopped_{false}; - std::map, int> count_; - -public: - ServerHandlerImp( - Application& app, - boost::asio::io_service& io_service, - JobQueue& jobQueue, - NetworkOPs& networkOPs, - Resource::Manager& resourceManager, - CollectorManager& cm); - - ~ServerHandlerImp(); - - using Output = Json::Output; - - void - setup(Setup const& setup, beast::Journal journal); - - Setup const& - setup() const - { - return setup_; - } - - void - stop(); - - // - // Handler - // - - bool - onAccept(Session& session, boost::asio::ip::tcp::endpoint endpoint); - - Handoff - onHandoff( - Session& session, - std::unique_ptr&& bundle, - http_request_type&& request, - boost::asio::ip::tcp::endpoint const& remote_address); - - Handoff - onHandoff( - Session& session, - http_request_type&& request, - boost::asio::ip::tcp::endpoint const& remote_address) - { - return onHandoff( - session, - {}, - std::forward(request), - remote_address); - } - - void - onRequest(Session& session); - - void - onWSMessage( - std::shared_ptr session, - std::vector const& buffers); - - void - onClose(Session& session, boost::system::error_code const&); - - void - onStopped(Server&); - -private: - Json::Value - processSession( - std::shared_ptr const& session, - std::shared_ptr const& coro, - Json::Value const& jv); - - void - processSession( - std::shared_ptr const&, - std::shared_ptr coro); - - void - processRequest( - Port const& port, - std::string const& request, - beast::IP::Endpoint const& remoteIPAddress, - Output&&, - std::shared_ptr coro, - boost::string_view forwardedFor, - boost::string_view user); - - Handoff - statusResponse(http_request_type const& request) const; -}; - -} // namespace ripple - -#endif From 4111382a31c64abd259975d1d4324c23ccd5c212 Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Wed, 28 Jun 2023 16:23:12 -0400 Subject: [PATCH 185/508] Fix build references to deleted ServerHandlerImp: (#4592) * Commits 0b812cd (#4427) and 11e914f (#4516) conflict. 
The first added references to `ServerHandlerImp` in files outside of that class's organizational unit (which is technically incorrect). The second removed `ServerHandlerImp`, but was not up to date with develop. This results in the build failing. * Fixes the build by changing references to `ServerHandlerImp` to the more correct `ServerHandler`. --- src/ripple/app/main/Application.cpp | 2 +- src/ripple/app/main/Application.h | 4 ++-- src/ripple/app/misc/NetworkOPs.cpp | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/ripple/app/main/Application.cpp b/src/ripple/app/main/Application.cpp index 8ed328df440..42bf6d66c9d 100644 --- a/src/ripple/app/main/Application.cpp +++ b/src/ripple/app/main/Application.cpp @@ -602,7 +602,7 @@ class ApplicationImp : public Application, public BasicApp return *m_networkOPs; } - virtual ServerHandlerImp& + virtual ServerHandler& getServerHandler() override { assert(serverHandler_); diff --git a/src/ripple/app/main/Application.h b/src/ripple/app/main/Application.h index d2ba8f7cc75..67343852b66 100644 --- a/src/ripple/app/main/Application.h +++ b/src/ripple/app/main/Application.h @@ -89,7 +89,7 @@ class Overlay; class PathRequests; class PendingSaves; class PublicKey; -class ServerHandlerImp; +class ServerHandler; class SecretKey; class STLedgerEntry; class TimeKeeper; @@ -232,7 +232,7 @@ class Application : public beast::PropertyStream::Source getOPs() = 0; virtual OrderBookDB& getOrderBookDB() = 0; - virtual ServerHandlerImp& + virtual ServerHandler& getServerHandler() = 0; virtual TransactionMaster& getMasterTransaction() = 0; diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index 6f51f811055..79cd857b785 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -64,8 +64,8 @@ #include #include #include +#include #include -#include #include #include From d34e8be3162a5e89e268a5f053de499e9560280f Mon Sep 17 00:00:00 2001 From: Peter Chen <34582813+PeterChen13579@users.noreply.github.com> Date: Thu, 29 Jun 2023 11:41:13 -0400 Subject: [PATCH 186/508] APIv2: add error messages for account_tx (#4571) Certain inputs for the AccountTx method should return an error. In other words, an invalid request from a user or client now results in an error message. Since this can change the response from the API, it is an API breaking change. This commit maintains backward compatibility by keeping the existing behavior for existing requests. When clients specify "api_version": 2, they will be able to get the updated error messages. Update unit tests to check the error based on the API version. * Fix #4288 * Fix #4545 --- src/ripple/rpc/handlers/AccountTx.cpp | 29 +++- src/test/rpc/AccountTx_test.cpp | 237 +++++++++++++++++--------- 2 files changed, 185 insertions(+), 81 deletions(-) diff --git a/src/ripple/rpc/handlers/AccountTx.cpp b/src/ripple/rpc/handlers/AccountTx.cpp index f65657d92ea..bfbc76362a3 100644 --- a/src/ripple/rpc/handlers/AccountTx.cpp +++ b/src/ripple/rpc/handlers/AccountTx.cpp @@ -53,9 +53,23 @@ using LedgerSpecifier = RelationalDatabase::LedgerSpecifier; // parses args into a ledger specifier, or returns a Json object on error std::variant, Json::Value> -parseLedgerArgs(Json::Value const& params) +parseLedgerArgs(RPC::Context& context, Json::Value const& params) { Json::Value response; + // if ledger_index_min or max is specified, then ledger_hash or ledger_index + // should not be specified. 
Error out if it is + if (context.apiVersion > 1) + { + if ((params.isMember(jss::ledger_index_min) || + params.isMember(jss::ledger_index_max)) && + (params.isMember(jss::ledger_hash) || + params.isMember(jss::ledger_index))) + { + RPC::Status status{rpcINVALID_PARAMS, "invalidParams"}; + status.inject(response); + return response; + } + } if (params.isMember(jss::ledger_index_min) || params.isMember(jss::ledger_index_max)) { @@ -145,6 +159,17 @@ getLedgerRange( using T = std::decay_t; if constexpr (std::is_same_v) { + // if ledger_index_min or ledger_index_max is out of + // valid ledger range, error out. exclude -1 as + // it is a valid input + if (context.apiVersion > 1) + { + if ((ls.max > uValidatedMax && ls.max != -1) || + (ls.min < uValidatedMin && ls.min != 0)) + { + return rpcLGR_IDX_MALFORMED; + } + } if (ls.min > uValidatedMin) { uLedgerMin = ls.min; @@ -379,7 +404,7 @@ doAccountTxJson(RPC::JsonContext& context) args.account = *account; - auto parseRes = parseLedgerArgs(params); + auto parseRes = parseLedgerArgs(context, params); if (auto jv = std::get_if(&parseRes)) { return *jv; diff --git a/src/test/rpc/AccountTx_test.cpp b/src/test/rpc/AccountTx_test.cpp index 75147875d1a..2e09ad93b86 100644 --- a/src/test/rpc/AccountTx_test.cpp +++ b/src/test/rpc/AccountTx_test.cpp @@ -108,7 +108,7 @@ class AccountTx_test : public beast::unit_test::suite }; void - testParameters() + testParameters(unsigned int apiVersion) { using namespace test::jtx; @@ -143,104 +143,178 @@ class AccountTx_test : public beast::unit_test::suite }; Json::Value jParms; + jParms[jss::api_version] = apiVersion; - BEAST_EXPECT(isErr( - env.rpc("json", "account_tx", to_string(jParms)), - rpcINVALID_PARAMS)); - - jParms[jss::account] = "0xDEADBEEF"; - - BEAST_EXPECT(isErr( - env.rpc("json", "account_tx", to_string(jParms)), - rpcACT_MALFORMED)); - - jParms[jss::account] = A1.human(); - BEAST_EXPECT(hasTxs(env.rpc("json", "account_tx", to_string(jParms)))); - - // Ledger min/max index + if (apiVersion < 2) { - Json::Value p{jParms}; - p[jss::ledger_index_min] = -1; - p[jss::ledger_index_max] = -1; - BEAST_EXPECT(hasTxs(env.rpc("json", "account_tx", to_string(p)))); - - p[jss::ledger_index_min] = 0; - p[jss::ledger_index_max] = 100; - BEAST_EXPECT(hasTxs(env.rpc("json", "account_tx", to_string(p)))); + BEAST_EXPECT(isErr( + env.rpc("json", "account_tx", to_string(jParms)), + rpcINVALID_PARAMS)); - p[jss::ledger_index_min] = 1; - p[jss::ledger_index_max] = 2; - BEAST_EXPECT(noTxs(env.rpc("json", "account_tx", to_string(p)))); + jParms[jss::account] = "0xDEADBEEF"; - p[jss::ledger_index_min] = 2; - p[jss::ledger_index_max] = 1; BEAST_EXPECT(isErr( - env.rpc("json", "account_tx", to_string(p)), - (RPC::apiMaximumSupportedVersion == 1 ? 
rpcLGR_IDXS_INVALID - : rpcINVALID_LGR_RANGE))); - } + env.rpc("json", "account_tx", to_string(jParms)), + rpcACT_MALFORMED)); - // Ledger index min only - { - Json::Value p{jParms}; - p[jss::ledger_index_min] = -1; - BEAST_EXPECT(hasTxs(env.rpc("json", "account_tx", to_string(p)))); + jParms[jss::account] = A1.human(); + BEAST_EXPECT( + hasTxs(env.rpc("json", "account_tx", to_string(jParms)))); - p[jss::ledger_index_min] = 1; - BEAST_EXPECT(hasTxs(env.rpc("json", "account_tx", to_string(p)))); + // Ledger min/max index + { + Json::Value p{jParms}; + p[jss::ledger_index_min] = -1; + p[jss::ledger_index_max] = -1; + BEAST_EXPECT( + hasTxs(env.rpc("json", "account_tx", to_string(p)))); + + p[jss::ledger_index_min] = 0; + p[jss::ledger_index_max] = 100; + BEAST_EXPECT( + hasTxs(env.rpc("json", "account_tx", to_string(p)))); + + p[jss::ledger_index_min] = 1; + p[jss::ledger_index_max] = 2; + BEAST_EXPECT( + noTxs(env.rpc("json", "account_tx", to_string(p)))); + + p[jss::ledger_index_min] = 2; + p[jss::ledger_index_max] = 1; + BEAST_EXPECT(isErr( + env.rpc("json", "account_tx", to_string(p)), + (RPC::apiMaximumSupportedVersion == 1 + ? rpcLGR_IDXS_INVALID + : rpcINVALID_LGR_RANGE))); + } - p[jss::ledger_index_min] = env.current()->info().seq; - BEAST_EXPECT(isErr( - env.rpc("json", "account_tx", to_string(p)), - (RPC::apiMaximumSupportedVersion == 1 ? rpcLGR_IDXS_INVALID - : rpcINVALID_LGR_RANGE))); - } + // Ledger index min only + { + Json::Value p{jParms}; + p[jss::ledger_index_min] = -1; + BEAST_EXPECT( + hasTxs(env.rpc("json", "account_tx", to_string(p)))); + + p[jss::ledger_index_min] = 1; + BEAST_EXPECT( + hasTxs(env.rpc("json", "account_tx", to_string(p)))); + + p[jss::ledger_index_min] = env.current()->info().seq; + BEAST_EXPECT(isErr( + env.rpc("json", "account_tx", to_string(p)), + (RPC::apiMaximumSupportedVersion == 1 + ? 
rpcLGR_IDXS_INVALID + : rpcINVALID_LGR_RANGE))); + } - // Ledger index max only - { - Json::Value p{jParms}; - p[jss::ledger_index_max] = -1; - BEAST_EXPECT(hasTxs(env.rpc("json", "account_tx", to_string(p)))); + // Ledger index max only + { + Json::Value p{jParms}; + p[jss::ledger_index_max] = -1; + BEAST_EXPECT( + hasTxs(env.rpc("json", "account_tx", to_string(p)))); + + p[jss::ledger_index_max] = env.current()->info().seq; + BEAST_EXPECT( + hasTxs(env.rpc("json", "account_tx", to_string(p)))); + + p[jss::ledger_index_max] = 3; + BEAST_EXPECT( + hasTxs(env.rpc("json", "account_tx", to_string(p)))); + + p[jss::ledger_index_max] = env.closed()->info().seq; + BEAST_EXPECT( + hasTxs(env.rpc("json", "account_tx", to_string(p)))); + + p[jss::ledger_index_max] = env.closed()->info().seq - 1; + BEAST_EXPECT( + noTxs(env.rpc("json", "account_tx", to_string(p)))); + } - p[jss::ledger_index_max] = env.current()->info().seq; - BEAST_EXPECT(hasTxs(env.rpc("json", "account_tx", to_string(p)))); + // Ledger Sequence + { + Json::Value p{jParms}; - p[jss::ledger_index_max] = env.closed()->info().seq; - BEAST_EXPECT(hasTxs(env.rpc("json", "account_tx", to_string(p)))); + p[jss::ledger_index] = env.closed()->info().seq; + BEAST_EXPECT( + hasTxs(env.rpc("json", "account_tx", to_string(p)))); - p[jss::ledger_index_max] = env.closed()->info().seq - 1; - BEAST_EXPECT(noTxs(env.rpc("json", "account_tx", to_string(p)))); - } + p[jss::ledger_index] = env.closed()->info().seq - 1; + BEAST_EXPECT( + noTxs(env.rpc("json", "account_tx", to_string(p)))); - // Ledger Sequence - { - Json::Value p{jParms}; + p[jss::ledger_index] = env.current()->info().seq; + BEAST_EXPECT(isErr( + env.rpc("json", "account_tx", to_string(p)), + rpcLGR_NOT_VALIDATED)); - p[jss::ledger_index] = env.closed()->info().seq; - BEAST_EXPECT(hasTxs(env.rpc("json", "account_tx", to_string(p)))); + p[jss::ledger_index] = env.current()->info().seq + 1; + BEAST_EXPECT(isErr( + env.rpc("json", "account_tx", to_string(p)), + rpcLGR_NOT_FOUND)); + } - p[jss::ledger_index] = env.closed()->info().seq - 1; - BEAST_EXPECT(noTxs(env.rpc("json", "account_tx", to_string(p)))); + // Ledger Hash + { + Json::Value p{jParms}; - p[jss::ledger_index] = env.current()->info().seq; - BEAST_EXPECT(isErr( - env.rpc("json", "account_tx", to_string(p)), - rpcLGR_NOT_VALIDATED)); + p[jss::ledger_hash] = to_string(env.closed()->info().hash); + BEAST_EXPECT( + hasTxs(env.rpc("json", "account_tx", to_string(p)))); - p[jss::ledger_index] = env.current()->info().seq + 1; - BEAST_EXPECT(isErr( - env.rpc("json", "account_tx", to_string(p)), rpcLGR_NOT_FOUND)); + p[jss::ledger_hash] = + to_string(env.closed()->info().parentHash); + BEAST_EXPECT( + noTxs(env.rpc("json", "account_tx", to_string(p)))); + } } - - // Ledger Hash + else { - Json::Value p{jParms}; + // Ledger index max/min/index all specified + // ERRORS out with invalid Parenthesis + { + jParms[jss::account] = "0xDEADBEEF"; + jParms[jss::account] = A1.human(); + Json::Value p{jParms}; - p[jss::ledger_hash] = to_string(env.closed()->info().hash); - BEAST_EXPECT(hasTxs(env.rpc("json", "account_tx", to_string(p)))); + p[jss::ledger_index_max] = -1; + p[jss::ledger_index_min] = -1; + p[jss::ledger_index] = -1; - p[jss::ledger_hash] = to_string(env.closed()->info().parentHash); - BEAST_EXPECT(noTxs(env.rpc("json", "account_tx", to_string(p)))); + BEAST_EXPECT(isErr( + env.rpc("json", "account_tx", to_string(p)), + rpcINVALID_PARAMS)); + } + + // Ledger index min/max only + { + Json::Value p{jParms}; + p[jss::ledger_index_max] 
= 100; + p[jss::ledger_index_min] = 0; + BEAST_EXPECT(isErr( + env.rpc("json", "account_tx", to_string(p)), + rpcLGR_IDX_MALFORMED)); + + p[jss::ledger_index_max] = -1; + p[jss::ledger_index_min] = -1; + BEAST_EXPECT( + hasTxs(env.rpc("json", "account_tx", to_string(p)))); + + p[jss::ledger_index_min] = 2; + p[jss::ledger_index_max] = 1; + BEAST_EXPECT(isErr( + env.rpc("json", "account_tx", to_string(p)), + rpcINVALID_LGR_RANGE)); + } + + // Ledger index max only + { + Json::Value p{jParms}; + p[jss::ledger_index_max] = env.current()->info().seq; + BEAST_EXPECT(isErr( + env.rpc("json", "account_tx", to_string(p)), + rpcLGR_IDX_MALFORMED)); + } } } @@ -593,7 +667,12 @@ class AccountTx_test : public beast::unit_test::suite void run() override { - testParameters(); + for (auto testVersion = RPC::apiMinimumSupportedVersion; + testVersion <= RPC::apiBetaVersion; + ++testVersion) + { + testParameters(testVersion); + } testContents(); testAccountDelete(); } From 1c2ae10dc057d5c7c76d0df2c5a92fc753e4769a Mon Sep 17 00:00:00 2001 From: Chenna Keshava B S <21219765+ckeshava@users.noreply.github.com> Date: Thu, 29 Jun 2023 09:12:15 -0700 Subject: [PATCH 187/508] fix: Update Handler::Condition enum values #3417 (#4239) - Use powers of two to clearly indicate the bitmask - Replace bitmask with explicit if-conditions to better indicate predicates Change enum values to be powers of two (fix #3417) #4239 Implement the simplified condition evaluation removes the complex bitwise and(&) operator Implement the second proposed solution in Nik Bougalis's comment - Software does not distinguish between different Conditions (Version: 1.5) #3417 (comment) I have tested this code change by performing RPC calls with the commands server_info, server_state, peers and validation_info. These commands worked as expected. 
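On the first bullet above (using powers of two to clearly indicate the bitmask): the value of that convention is that each enumerator occupies its own bit, so flags can be combined and tested independently. A small, self-contained illustration follows; the names mirror the `1 << n` values adopted in the diff below, but the checks are generic and are not the actual rippled handler logic.

```
#include <cstdio>

// Illustrative bit-flag enum: each enumerator gets a distinct bit.
enum Condition : unsigned {
    NO_CONDITION = 0,
    NEEDS_NETWORK_CONNECTION = 1 << 0,
    NEEDS_CURRENT_LEDGER = 1 << 1,
    NEEDS_CLOSED_LEDGER = 1 << 2,
};

int
main()
{
    // A hypothetical handler that needs the network and the current ledger.
    unsigned const required = NEEDS_NETWORK_CONNECTION | NEEDS_CURRENT_LEDGER;

    // Because the bits are distinct, each requirement can be tested on its
    // own without accidentally matching a different enumerator.
    if (required & NEEDS_CURRENT_LEDGER)
        std::puts("needs a current ledger");
    if (!(required & NEEDS_CLOSED_LEDGER))
        std::puts("does not need a closed ledger");
    return 0;
}
```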
--- src/ripple/rpc/impl/Handler.h | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/ripple/rpc/impl/Handler.h b/src/ripple/rpc/impl/Handler.h index 73f2232d5cd..e2188ef51e7 100644 --- a/src/ripple/rpc/impl/Handler.h +++ b/src/ripple/rpc/impl/Handler.h @@ -39,8 +39,8 @@ namespace RPC { enum Condition { NO_CONDITION = 0, NEEDS_NETWORK_CONNECTION = 1, - NEEDS_CURRENT_LEDGER = 2 + NEEDS_NETWORK_CONNECTION, - NEEDS_CLOSED_LEDGER = 4 + NEEDS_NETWORK_CONNECTION, + NEEDS_CURRENT_LEDGER = 1 << 1, + NEEDS_CLOSED_LEDGER = 1 << 2, }; struct Handler @@ -94,20 +94,18 @@ conditionMet(Condition condition_required, T& context) } if (context.app.getOPs().isAmendmentBlocked() && - (condition_required & NEEDS_CURRENT_LEDGER || - condition_required & NEEDS_CLOSED_LEDGER)) + (condition_required != NO_CONDITION)) { return rpcAMENDMENT_BLOCKED; } if (context.app.getOPs().isUNLBlocked() && - (condition_required & NEEDS_CURRENT_LEDGER || - condition_required & NEEDS_CLOSED_LEDGER)) + (condition_required != NO_CONDITION)) { return rpcEXPIRED_VALIDATOR_LIST; } - if ((condition_required & NEEDS_NETWORK_CONNECTION) && + if ((condition_required != NO_CONDITION) && (context.netOps.getOperatingMode() < OperatingMode::SYNCING)) { JLOG(context.j.info()) << "Insufficient network mode for RPC: " @@ -119,7 +117,7 @@ conditionMet(Condition condition_required, T& context) } if (!context.app.config().standalone() && - condition_required & NEEDS_CURRENT_LEDGER) + condition_required != NO_CONDITION) { if (context.ledgerMaster.getValidatedLedgerAge() > Tuning::maxValidatedLedgerAge) @@ -143,7 +141,7 @@ conditionMet(Condition condition_required, T& context) } } - if ((condition_required & NEEDS_CLOSED_LEDGER) && + if ((condition_required != NO_CONDITION) && !context.ledgerMaster.getClosedLedger()) { if (context.apiVersion == 1) From 54afdaa101022445856c0925434e0cfb12dda2bc Mon Sep 17 00:00:00 2001 From: John Freeman Date: Thu, 29 Jun 2023 17:31:36 -0500 Subject: [PATCH 188/508] ci: cancel overridden workflows (#4597) Small quality-of-life improvements to workflows using new concurrency control features: https://docs.github.com/en/actions/using-jobs/using-concurrency At time of this commit, macOS runners are oversubscribed. This may help. 
--- .github/workflows/doxygen.yml | 5 ++++- .github/workflows/macos.yml | 5 ++++- .github/workflows/nix.yml | 7 +++++-- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/.github/workflows/doxygen.yml b/.github/workflows/doxygen.yml index db98018753d..65515ea737f 100644 --- a/.github/workflows/doxygen.yml +++ b/.github/workflows/doxygen.yml @@ -4,6 +4,9 @@ on: push: branches: - develop +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true jobs: job: @@ -18,7 +21,7 @@ jobs: echo ${PATH} | tr ':' '\n' cmake --version doxygen --version - env + env | sort - name: build run: | mkdir build diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 94151bcf7eb..888ad6f7692 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -1,5 +1,8 @@ name: macos on: [push, pull_request] +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true jobs: @@ -29,7 +32,7 @@ jobs: python --version conan --version cmake --version - env + env | sort - name: build uses: ./.github/actions/build with: diff --git a/.github/workflows/nix.yml b/.github/workflows/nix.yml index 706bdbe103b..4198369e78a 100644 --- a/.github/workflows/nix.yml +++ b/.github/workflows/nix.yml @@ -1,5 +1,8 @@ name: nix on: [push, pull_request] +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true # This workflow has two job matrixes. # They can be considered phases because the second matrix ("test") @@ -57,7 +60,7 @@ jobs: echo ${PATH} | tr ':' '\n' conan --version cmake --version - env + env | sort - name: configure Conan env: CONAN_URL: http://18.143.149.228:8081/artifactory/api/conan/conan-non-prod @@ -139,7 +142,7 @@ jobs: echo ${PATH} | tr ':' '\n' conan --version cmake --version - env + env | sort ls ~/.conan - name: checkout uses: actions/checkout@v3 From 1cb67fbd20ac22cbfb3d08747ed09506b39e6a2c Mon Sep 17 00:00:00 2001 From: Michael Legleux Date: Thu, 29 Jun 2023 20:15:11 -0700 Subject: [PATCH 189/508] fix: deb package build (#4591) The debug packages were named with the extension ".ddeb", but due to a bug in Artifactory, they need to have the ".deb" extension. Debug symbol packages with ".ddeb" extensions are not indexed, and thus are not visible in apt clients. * Fix the issue by renaming the debug packages in the build script. * Use GCC-11 and update GCC Conan profile. * This software requires GCC 11 and C++20. However, reporting mode is built with C++17. This is a quick band-aid to fix the build. Later, it will be better to remove this package-building code. For context, a Debian (deb) package contains bundled software and resources necessary for installing and managing software on a Debian-based system, including Ubuntu and derivatives. 
--- .gitignore | 1 + Builds/containers/packaging/dpkg/build_dpkg.sh | 4 ++++ Builds/containers/packaging/dpkg/debian/rules | 18 ++++++++++++++++-- 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 5e4414e8e62..87d9b3b8e00 100644 --- a/.gitignore +++ b/.gitignore @@ -108,3 +108,4 @@ pkg_out pkg CMakeUserPresets.json bld.rippled/ +.vscode diff --git a/Builds/containers/packaging/dpkg/build_dpkg.sh b/Builds/containers/packaging/dpkg/build_dpkg.sh index 3cfafe9e342..65bbb40f343 100755 --- a/Builds/containers/packaging/dpkg/build_dpkg.sh +++ b/Builds/containers/packaging/dpkg/build_dpkg.sh @@ -67,6 +67,10 @@ if [ -e rippled_${RIPPLED_DPKG_FULL_VERSION}_amd64.buildinfo ] ; then cp rippled_${RIPPLED_DPKG_FULL_VERSION}_amd64.buildinfo ${PKG_OUTDIR} fi +pushd ${PKG_OUTDIR} +for f in *.ddeb; do mv -- "$f" "${f%.ddeb}.deb"; done +popd + cat rippled_${RIPPLED_DPKG_FULL_VERSION}_amd64.changes # extract the text in the .changes file that appears between # Checksums-Sha256: ... diff --git a/Builds/containers/packaging/dpkg/debian/rules b/Builds/containers/packaging/dpkg/debian/rules index d62517d34b6..c7234c847dd 100755 --- a/Builds/containers/packaging/dpkg/debian/rules +++ b/Builds/containers/packaging/dpkg/debian/rules @@ -15,12 +15,25 @@ override_dh_systemd_start: dh_systemd_start --no-restart-on-upgrade override_dh_auto_configure: + apt install --yes gcc-11 g++-11 + update-alternatives --install \ + /usr/bin/gcc gcc /usr/bin/gcc-11 100 \ + --slave /usr/bin/g++ g++ /usr/bin/g++-11 \ + --slave /usr/bin/gcc-ar gcc-ar /usr/bin/gcc-ar-11 \ + --slave /usr/bin/gcc-nm gcc-nm /usr/bin/gcc-nm-11 \ + --slave /usr/bin/gcc-ranlib gcc-ranlib /usr/bin/gcc-ranlib-11 \ + --slave /usr/bin/gcov gcov /usr/bin/gcov-11 \ + --slave /usr/bin/gcov-tool gcov-tool /usr/bin/gcov-dump-11 \ + --slave /usr/bin/gcov-dump gcov-dump /usr/bin/gcov-tool-11 + update-alternatives --set gcc /usr/bin/gcc-11 env rm -rf bld - + conan profile update settings.compiler.cppstd=20 gcc + conan profile update settings.compiler.version=11 gcc conan export external/snappy snappy/1.1.9@ conan install . \ + --profile gcc \ --install-folder bld/rippled \ --build missing \ --build boost \ @@ -38,12 +51,14 @@ override_dh_auto_configure: -B bld/rippled conan install . \ + --profile gcc \ --install-folder bld/rippled-reporting \ --build missing \ --build boost \ --build sqlite3 \ --build libuv \ --settings build_type=Release \ + --settings compiler.cppstd=17 \ --options reporting=True cmake -DCMAKE_TOOLCHAIN_FILE:FILEPATH=build/generators/conan_toolchain.cmake \ @@ -58,7 +73,6 @@ override_dh_auto_configure: override_dh_auto_build: cmake --build bld/rippled --target rippled --target validator-keys -j${nproc} - cmake --build bld/rippled-reporting --target rippled -j${nproc} override_dh_auto_install: From f18c6dfea7870132490124e1942901a6a0cddc7e Mon Sep 17 00:00:00 2001 From: Peter Chen <34582813+PeterChen13579@users.noreply.github.com> Date: Fri, 30 Jun 2023 02:05:21 -0400 Subject: [PATCH 190/508] APIv2(account_info): handle invalid "signer_lists" value (#4585) When requesting `account_info` with an invalid `signer_lists` value, the API should return an "invalidParams" error. `signer_lists` should have a value of type boolean. If it is not a boolean, then it is invalid input. The response now indicates that. 
* This is an API breaking change, so the change is only reflected for requests containing `"api_version": 2` * Fix #4539 --- src/ripple/rpc/handlers/AccountInfo.cpp | 9 +++++++++ src/test/rpc/AccountInfo_test.cpp | 11 +++++++++++ 2 files changed, 20 insertions(+) diff --git a/src/ripple/rpc/handlers/AccountInfo.cpp b/src/ripple/rpc/handlers/AccountInfo.cpp index 3af70324bcd..13487dd2dae 100644 --- a/src/ripple/rpc/handlers/AccountInfo.cpp +++ b/src/ripple/rpc/handlers/AccountInfo.cpp @@ -125,6 +125,15 @@ doAccountInfo(RPC::JsonContext& context) } result[jss::account_flags] = std::move(acctFlags); + // The document states that signer_lists is a bool, however + // assigning any string value works. Do not allow this. + // This check is for api Version 2 onwards only + if (!params[jss::signer_lists].isBool() && context.apiVersion > 1) + { + RPC::inject_error(rpcINVALID_PARAMS, result); + return result; + } + // Return SignerList(s) if that is requested. if (params.isMember(jss::signer_lists) && params[jss::signer_lists].asBool()) diff --git a/src/test/rpc/AccountInfo_test.cpp b/src/test/rpc/AccountInfo_test.cpp index b8e479225d1..9108ac6360a 100644 --- a/src/test/rpc/AccountInfo_test.cpp +++ b/src/test/rpc/AccountInfo_test.cpp @@ -217,6 +217,10 @@ class AccountInfo_test : public beast::unit_test::suite "\"api_version\": 2, \"account\": \"" + alice.human() + "\", " + "\"signer_lists\": true }"; + auto const withSignersAsString = std::string("{ ") + + "\"api_version\": 2, \"account\": \"" + alice.human() + "\", " + + "\"signer_lists\": asdfggh }"; + // Alice has no SignerList yet. { // account_info without the "signer_lists" argument. @@ -263,6 +267,13 @@ class AccountInfo_test : public beast::unit_test::suite auto const& entry0 = signerEntries[0u][sfSignerEntry.jsonName]; BEAST_EXPECT(entry0[sfSignerWeight.jsonName] == 3); } + { + // account_info with "signer_lists" as not bool should error out + auto const info = + env.rpc("json", "account_info", withSignersAsString); + BEAST_EXPECT(info[jss::status] == "error"); + BEAST_EXPECT(info[jss::error] == "invalidParams"); + } // Give alice a big signer list Account const demon{"demon"}; From 516ffb2147e7df3914dc9b790ea16a8a2190b52b Mon Sep 17 00:00:00 2001 From: Ed Hennis Date: Sat, 1 Jul 2023 03:13:37 -0400 Subject: [PATCH 191/508] chore: update checkout action version to v3: (#4598) * Update the version of the checkout action (for GitHub Actions) in `clang-format.yml` and `levelization.yml`. * The previous version, v2, was raising deprecation warnings due to its reliance on Node.js 12. * The latest checkout action version, v3, uses Node.js 16. 
--- .github/workflows/clang-format.yml | 2 +- .github/workflows/levelization.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/clang-format.yml b/.github/workflows/clang-format.yml index 941789844b0..802748e84f0 100644 --- a/.github/workflows/clang-format.yml +++ b/.github/workflows/clang-format.yml @@ -8,7 +8,7 @@ jobs: env: CLANG_VERSION: 10 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Install clang-format run: | codename=$( lsb_release --codename --short ) diff --git a/.github/workflows/levelization.yml b/.github/workflows/levelization.yml index 3ed07488257..3128513f667 100644 --- a/.github/workflows/levelization.yml +++ b/.github/workflows/levelization.yml @@ -8,7 +8,7 @@ jobs: env: CLANG_VERSION: 10 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Check levelization run: Builds/levelization/levelization.sh - name: Check for differences From 9c50415ebea2b729f90b23e19bae53fe517553ec Mon Sep 17 00:00:00 2001 From: Mayukha Vadari Date: Sat, 1 Jul 2023 18:23:57 -0400 Subject: [PATCH 192/508] add clang-format pre-commit hook (#4599) * Add a new YAML file (.pre-commit-config.yaml) to set up pre-commit hook for clang-format * The pre-commit hook is opt-in and needs to be installed in order to run automatically * Update CONTRIBUTING.md with instructions on how to set up and use the clang-format linter Automating the process of running clang-format before committing code helps to save time by removing the need to fix formatting issues later. This is a tooling improvement and doesn't change C++ code. --- .pre-commit-config.yaml | 6 ++++++ CONTRIBUTING.md | 5 +++++ 2 files changed, 11 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000000..04893e956f0 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,6 @@ +# .pre-commit-config.yaml +repos: +- repo: https://github.com/pre-commit/mirrors-clang-format + rev: v10.0.1 + hooks: + - id: clang-format diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4a09ad4b991..8c9a81bb7b4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -121,6 +121,11 @@ this: You can format individual files in place by running `clang-format -i ...` from any directory within this project. +You can install a pre-commit hook to automatically run `clang-format` before every commit: +``` +pip3 install pre-commit +pre-commit install +``` ## Avoid From 66bfe909e6de0dd1c7be0d9c01e088d3fed7f0f3 Mon Sep 17 00:00:00 2001 From: John Freeman Date: Mon, 3 Jul 2023 09:41:12 -0500 Subject: [PATCH 193/508] build: add binary hardening compile and link flags (#4603) Enhance security during the build process: * The '-fstack-protector' flag enables stack protection for preventing buffer overflow vulnerabilities. If an attempt is made to overflow the buffer, the program will terminate, thus protecting the integrity of the stack. * The '-Wl,-z,relro,-z,now' linker flag enables Read-only Relocations (RELRO), a feature that helps harden the binary against certain types of exploits, particularly those that involve overwriting the Global Offset Table (GOT). * This flag is only set for Linux builds, due to compatibility issues with apple-clang. * The `relro` option makes certain sections of memory read-only after initialization to prevent them from being overwritten, while `now` ensures that all dynamic symbols are resolved immediately on program start, reducing the window of opportunity for attacks. 
--- Builds/CMake/RippledCompiler.cmake | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Builds/CMake/RippledCompiler.cmake b/Builds/CMake/RippledCompiler.cmake index 2fbb6004333..aa8f1d1eb76 100644 --- a/Builds/CMake/RippledCompiler.cmake +++ b/Builds/CMake/RippledCompiler.cmake @@ -13,7 +13,6 @@ link_libraries (Ripple::common) set_target_properties (common PROPERTIES INTERFACE_POSITION_INDEPENDENT_CODE ON) set(CMAKE_CXX_EXTENSIONS OFF) -target_compile_features (common INTERFACE cxx_std_17) target_compile_definitions (common INTERFACE $<$:DEBUG _DEBUG> @@ -108,6 +107,7 @@ else () -Wno-char-subscripts -Wno-format -Wno-unused-local-typedefs + -fstack-protector $<$: -Wno-unused-but-set-variable -Wno-deprecated @@ -120,6 +120,7 @@ else () target_link_libraries (common INTERFACE -rdynamic + $<$:-Wl,-z,relro,-z,now> # link to static libc/c++ iff: # * static option set and # * NOT APPLE (AppleClang does not support static libc/c++) and From 77dc63b54909df451ae6bca5704bd6a4fb120bbb Mon Sep 17 00:00:00 2001 From: Shawn Xie <35279399+shawnxie999@users.noreply.github.com> Date: Wed, 5 Jul 2023 11:46:23 -0400 Subject: [PATCH 194/508] fix: add allowClawback flag for `account_info` (#4590) * Update the `account_info` API so that the `allowClawback` flag is included in the response. * The proposed `Clawback` amendement added an `allowClawback` flag in the `AccountRoot` object. * In the API response, under `account_flags`, there is now an `allowClawback` field with a boolean (`true` or `false`) value. * For reference, the XLS-39 Clawback implementation can be found in #4553 Fix #4588 --- src/ripple/rpc/handlers/AccountInfo.cpp | 8 +++++ src/test/rpc/AccountInfo_test.cpp | 43 ++++++++++++++++++++----- 2 files changed, 43 insertions(+), 8 deletions(-) diff --git a/src/ripple/rpc/handlers/AccountInfo.cpp b/src/ripple/rpc/handlers/AccountInfo.cpp index 13487dd2dae..c8d25a3d8b7 100644 --- a/src/ripple/rpc/handlers/AccountInfo.cpp +++ b/src/ripple/rpc/handlers/AccountInfo.cpp @@ -96,6 +96,9 @@ doAccountInfo(RPC::JsonContext& context) {"disallowIncomingPayChan", lsfDisallowIncomingPayChan}, {"disallowIncomingTrustline", lsfDisallowIncomingTrustline}}}; + static constexpr std::pair + allowClawbackFlag{"allowClawback", lsfAllowClawback}; + auto const sleAccepted = ledger->read(keylet::account(accountID)); if (sleAccepted) { @@ -123,6 +126,11 @@ doAccountInfo(RPC::JsonContext& context) for (auto const& lsf : disallowIncomingFlags) acctFlags[lsf.first.data()] = sleAccepted->isFlag(lsf.second); } + + if (ledger->rules().enabled(featureClawback)) + acctFlags[allowClawbackFlag.first.data()] = + sleAccepted->isFlag(allowClawbackFlag.second); + result[jss::account_flags] = std::move(acctFlags); // The document states that signer_lists is a bool, however diff --git a/src/test/rpc/AccountInfo_test.cpp b/src/test/rpc/AccountInfo_test.cpp index 9108ac6360a..a0a819502b1 100644 --- a/src/test/rpc/AccountInfo_test.cpp +++ b/src/test/rpc/AccountInfo_test.cpp @@ -519,13 +519,16 @@ class AccountInfo_test : public beast::unit_test::suite Env env(*this, features); Account const alice{"alice"}; - env.fund(XRP(1000), alice); + Account const bob{"bob"}; + env.fund(XRP(1000), alice, bob); - auto getAccountFlag = [&env, &alice](std::string_view fName) { + auto getAccountFlag = [&env]( + std::string_view fName, + Account const& account) { auto const info = env.rpc( "json", "account_info", - R"({"account" : ")" + alice.human() + R"("})"); + R"({"account" : ")" + account.human() + R"("})"); std::optional res; if 
(info[jss::result][jss::status] == "success" && @@ -553,7 +556,7 @@ class AccountInfo_test : public beast::unit_test::suite // as expected env(fclear(alice, asf.second)); env.close(); - auto const f1 = getAccountFlag(asf.first); + auto const f1 = getAccountFlag(asf.first, alice); BEAST_EXPECT(f1.has_value()); BEAST_EXPECT(!f1.value()); @@ -561,7 +564,7 @@ class AccountInfo_test : public beast::unit_test::suite // as expected env(fset(alice, asf.second)); env.close(); - auto const f2 = getAccountFlag(asf.first); + auto const f2 = getAccountFlag(asf.first, alice); BEAST_EXPECT(f2.has_value()); BEAST_EXPECT(f2.value()); } @@ -584,7 +587,7 @@ class AccountInfo_test : public beast::unit_test::suite // as expected env(fclear(alice, asf.second)); env.close(); - auto const f1 = getAccountFlag(asf.first); + auto const f1 = getAccountFlag(asf.first, alice); BEAST_EXPECT(f1.has_value()); BEAST_EXPECT(!f1.value()); @@ -592,7 +595,7 @@ class AccountInfo_test : public beast::unit_test::suite // as expected env(fset(alice, asf.second)); env.close(); - auto const f2 = getAccountFlag(asf.first); + auto const f2 = getAccountFlag(asf.first, alice); BEAST_EXPECT(f2.has_value()); BEAST_EXPECT(f2.value()); } @@ -601,9 +604,31 @@ class AccountInfo_test : public beast::unit_test::suite { for (auto& asf : disallowIncomingFlags) { - BEAST_EXPECT(!getAccountFlag(asf.first)); + BEAST_EXPECT(!getAccountFlag(asf.first, alice)); } } + + static constexpr std::pair + allowClawbackFlag{"allowClawback", asfAllowClawback}; + + if (features[featureClawback]) + { + // must use bob's account because alice has noFreeze set + auto const f1 = getAccountFlag(allowClawbackFlag.first, bob); + BEAST_EXPECT(f1.has_value()); + BEAST_EXPECT(!f1.value()); + + // Set allowClawback + env(fset(bob, allowClawbackFlag.second)); + env.close(); + auto const f2 = getAccountFlag(allowClawbackFlag.first, bob); + BEAST_EXPECT(f2.has_value()); + BEAST_EXPECT(f2.value()); + } + else + { + BEAST_EXPECT(!getAccountFlag(allowClawbackFlag.first, bob)); + } } void @@ -618,6 +643,8 @@ class AccountInfo_test : public beast::unit_test::suite ripple::test::jtx::supported_amendments()}; testAccountFlags(allFeatures); testAccountFlags(allFeatures - featureDisallowIncoming); + testAccountFlags( + allFeatures - featureDisallowIncoming - featureClawback); } }; From c6fee28b92c968a89f8fcdbd044d64b447a35db7 Mon Sep 17 00:00:00 2001 From: Chenna Keshava B S <21219765+ckeshava@users.noreply.github.com> Date: Wed, 5 Jul 2023 20:11:19 -0700 Subject: [PATCH 195/508] refactor: change the return type of mulDiv to std::optional (#4243) - Previously, mulDiv had `std::pair` as the output type. - This is an error-prone interface as it is easy to ignore when overflow occurs. - Using a return type of `std::optional` should decrease the likelihood of ignoring overflow. - It also allows for the use of optional::value_or() as a way to explicitly recover from overflow. - Include limits.h header file preprocessing directive in order to satisfy gcc's numeric_limits incomplete_type requirement. 
Fix #3495 --------- Co-authored-by: John Freeman --- src/ripple/app/misc/NetworkOPs.cpp | 4 +-- src/ripple/app/misc/TxQ.h | 15 +++------ src/ripple/app/misc/impl/LoadFeeTrack.cpp | 4 +-- src/ripple/app/misc/impl/TxQ.cpp | 32 +++++++++++--------- src/ripple/basics/FeeUnits.h | 32 +++++++++++--------- src/ripple/basics/impl/mulDiv.cpp | 14 +++------ src/ripple/basics/mulDiv.h | 14 ++++----- src/ripple/rpc/handlers/Fee1.cpp | 1 - src/ripple/rpc/impl/TransactionSign.cpp | 4 +-- src/test/app/TxQ_test.cpp | 1 - src/test/basics/FeeUnits_test.cpp | 37 +++++++++++++++-------- src/test/basics/mulDiv_test.cpp | 18 +++++------ 12 files changed, 93 insertions(+), 83 deletions(-) diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index 79cd857b785..4d90e0622f8 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -2058,7 +2058,7 @@ NetworkOPsImp::pubServer() f.em->openLedgerFeeLevel, f.loadBaseServer, f.em->referenceFeeLevel) - .second); + .value_or(ripple::muldiv_max)); jvObj[jss::load_factor] = trunc32(loadFactor); jvObj[jss::load_factor_fee_escalation] = @@ -2506,7 +2506,7 @@ NetworkOPsImp::getServerInfo(bool human, bool admin, bool counters) escalationMetrics.openLedgerFeeLevel, loadBaseServer, escalationMetrics.referenceFeeLevel) - .second; + .value_or(ripple::muldiv_max); auto const loadFactor = std::max( safe_cast(loadFactorServer), diff --git a/src/ripple/app/misc/TxQ.h b/src/ripple/app/misc/TxQ.h index 69b6d264825..c7bd1f1d3d7 100644 --- a/src/ripple/app/misc/TxQ.h +++ b/src/ripple/app/misc/TxQ.h @@ -492,7 +492,7 @@ class TxQ @param seriesSize Total number of transactions in the series to be processed. - @return A `std::pair` as returned from @ref `mulDiv` indicating + @return A `std::pair` indicating whether the calculation result overflows. 
*/ static std::pair @@ -862,20 +862,15 @@ template XRPAmount toDrops(FeeLevel const& level, XRPAmount baseFee) { - if (auto const drops = mulDiv(level, baseFee, TxQ::baseLevel); drops.first) - return drops.second; - - return XRPAmount(STAmount::cMaxNativeN); + return mulDiv(level, baseFee, TxQ::baseLevel) + .value_or(XRPAmount(STAmount::cMaxNativeN)); } inline FeeLevel64 toFeeLevel(XRPAmount const& drops, XRPAmount const& baseFee) { - if (auto const feeLevel = mulDiv(drops, TxQ::baseLevel, baseFee); - feeLevel.first) - return feeLevel.second; - - return FeeLevel64(std::numeric_limits::max()); + return mulDiv(drops, TxQ::baseLevel, baseFee) + .value_or(FeeLevel64(std::numeric_limits::max())); } } // namespace ripple diff --git a/src/ripple/app/misc/impl/LoadFeeTrack.cpp b/src/ripple/app/misc/impl/LoadFeeTrack.cpp index 11679c9a66e..86d145c856c 100644 --- a/src/ripple/app/misc/impl/LoadFeeTrack.cpp +++ b/src/ripple/app/misc/impl/LoadFeeTrack.cpp @@ -109,9 +109,9 @@ scaleFeeLoad( auto const result = mulDiv( fee, feeFactor, safe_cast(feeTrack.getLoadBase())); - if (!result.first) + if (!result) Throw("scaleFeeLoad"); - return result.second; + return *result; } } // namespace ripple diff --git a/src/ripple/app/misc/impl/TxQ.cpp b/src/ripple/app/misc/impl/TxQ.cpp index bf278970bb8..faaca0655cf 100644 --- a/src/ripple/app/misc/impl/TxQ.cpp +++ b/src/ripple/app/misc/impl/TxQ.cpp @@ -58,12 +58,8 @@ getFeeLevelPaid(ReadView const& view, STTx const& tx) return FeeLevel64(0); } - if (std::pair const feeLevelPaid = - mulDiv(effectiveFeePaid, TxQ::baseLevel, baseFee); - feeLevelPaid.first) - return feeLevelPaid.second; - - return FeeLevel64(std::numeric_limits::max()); + return mulDiv(effectiveFeePaid, TxQ::baseLevel, baseFee) + .value_or(FeeLevel64(std::numeric_limits::max())); } static std::optional @@ -77,7 +73,8 @@ getLastLedgerSequence(STTx const& tx) static FeeLevel64 increase(FeeLevel64 level, std::uint32_t increasePercent) { - return mulDiv(level, 100 + increasePercent, 100).second; + return mulDiv(level, 100 + increasePercent, 100) + .value_or(static_cast(ripple::muldiv_max)); } ////////////////////////////////////////////////////////////////////////// @@ -114,16 +111,19 @@ TxQ::FeeMetrics::update( // upperLimit must be >= minimumTxnCount_ or std::clamp can give // unexpected results auto const upperLimit = std::max( - mulDiv(txnsExpected_, cutPct, 100).second, minimumTxnCount_); + mulDiv(txnsExpected_, cutPct, 100).value_or(ripple::muldiv_max), + minimumTxnCount_); txnsExpected_ = std::clamp( - mulDiv(size, cutPct, 100).second, minimumTxnCount_, upperLimit); + mulDiv(size, cutPct, 100).value_or(ripple::muldiv_max), + minimumTxnCount_, + upperLimit); recentTxnCounts_.clear(); } else if (size > txnsExpected_ || size > targetTxnCount_) { recentTxnCounts_.push_back( mulDiv(size, 100 + setup.normalConsensusIncreasePercent, 100) - .second); + .value_or(ripple::muldiv_max)); auto const iter = std::max_element(recentTxnCounts_.begin(), recentTxnCounts_.end()); BOOST_ASSERT(iter != recentTxnCounts_.end()); @@ -181,7 +181,8 @@ TxQ::FeeMetrics::scaleFeeLevel(Snapshot const& snapshot, OpenView const& view) { // Compute escalated fee level // Don't care about the overflow flag - return mulDiv(multiplier, current * current, target * target).second; + return mulDiv(multiplier, current * current, target * target) + .value_or(static_cast(ripple::muldiv_max)); } return baseLevel; @@ -264,7 +265,7 @@ TxQ::FeeMetrics::escalatedSeriesFeeLevel( auto const totalFeeLevel = mulDiv( multiplier, sumNlast.second - 
sumNcurrent.second, target * target); - return totalFeeLevel; + return {totalFeeLevel.has_value(), *totalFeeLevel}; } LedgerHash TxQ::MaybeTx::parentHashComp{}; @@ -1782,8 +1783,11 @@ TxQ::getTxRequiredFeeAndSeq( std::uint32_t const accountSeq = sle ? (*sle)[sfSequence] : 0; std::uint32_t const availableSeq = nextQueuableSeqImpl(sle, lock).value(); - - return {mulDiv(fee, baseFee, baseLevel).second, accountSeq, availableSeq}; + return { + mulDiv(fee, baseFee, baseLevel) + .value_or(XRPAmount(std::numeric_limits::max())), + accountSeq, + availableSeq}; } std::vector diff --git a/src/ripple/basics/FeeUnits.h b/src/ripple/basics/FeeUnits.h index c74524c7c71..c0f1afbe6c6 100644 --- a/src/ripple/basics/FeeUnits.h +++ b/src/ripple/basics/FeeUnits.h @@ -409,7 +409,7 @@ template < class Source2, class Dest, class = enable_muldiv_t> -std::pair +std::optional mulDivU(Source1 value, Dest mul, Source2 div) { // Fees can never be negative in any context. @@ -420,7 +420,7 @@ mulDivU(Source1 value, Dest mul, Source2 div) assert(value.value() >= 0); assert(mul.value() >= 0); assert(div.value() >= 0); - return {false, Dest{0}}; + return std::nullopt; } using desttype = typename Dest::value_type; @@ -428,12 +428,12 @@ mulDivU(Source1 value, Dest mul, Source2 div) // Shortcuts, since these happen a lot in the real world if (value == div) - return {true, mul}; + return mul; if (mul.value() == div.value()) { if (value.value() > max) - return {false, Dest{max}}; - return {true, Dest{static_cast(value.value())}}; + return std::nullopt; + return Dest{static_cast(value.value())}; } using namespace boost::multiprecision; @@ -447,9 +447,9 @@ mulDivU(Source1 value, Dest mul, Source2 div) auto quotient = product / div.value(); if (quotient > max) - return {false, Dest{max}}; + return std::nullopt; - return {true, Dest{static_cast(quotient)}}; + return Dest{static_cast(quotient)}; } } // namespace feeunit @@ -464,7 +464,7 @@ template < class Source2, class Dest, class = feeunit::enable_muldiv_t> -std::pair +std::optional mulDiv(Source1 value, Dest mul, Source2 div) { return feeunit::mulDivU(value, mul, div); @@ -475,7 +475,7 @@ template < class Source2, class Dest, class = feeunit::enable_muldiv_commute_t> -std::pair +std::optional mulDiv(Dest value, Source1 mul, Source2 div) { // Multiplication is commutative @@ -483,7 +483,7 @@ mulDiv(Dest value, Source1 mul, Source2 div) } template > -std::pair +std::optional mulDiv(std::uint64_t value, Dest mul, std::uint64_t div) { // Give the scalars a non-tag so the @@ -492,7 +492,7 @@ mulDiv(std::uint64_t value, Dest mul, std::uint64_t div) } template > -std::pair +std::optional mulDiv(Dest value, std::uint64_t mul, std::uint64_t div) { // Multiplication is commutative @@ -503,20 +503,24 @@ template < class Source1, class Source2, class = feeunit::enable_muldiv_sources_t> -std::pair +std::optional mulDiv(Source1 value, std::uint64_t mul, Source2 div) { // Give the scalars a dimensionless unit so the // unit-handling version gets called. 
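    // (Editorial note, added for clarity and not part of the original diff:
    // feeunit::scalar() gives the raw std::uint64_t multiplier a
    // dimensionless unit tag so that the tag-aware mulDivU overload above is
    // selected; unitresult->value() below strips that tag again, so the
    // caller still receives a plain integral result.)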
auto unitresult = feeunit::mulDivU(value, feeunit::scalar(mul), div); - return {unitresult.first, unitresult.second.value()}; + + if (!unitresult) + return std::nullopt; + + return unitresult->value(); } template < class Source1, class Source2, class = feeunit::enable_muldiv_sources_t> -std::pair +std::optional mulDiv(std::uint64_t value, Source1 mul, Source2 div) { // Multiplication is commutative diff --git a/src/ripple/basics/impl/mulDiv.cpp b/src/ripple/basics/impl/mulDiv.cpp index 20e72e0477a..6dd01c71fe7 100644 --- a/src/ripple/basics/impl/mulDiv.cpp +++ b/src/ripple/basics/impl/mulDiv.cpp @@ -17,15 +17,13 @@ */ //============================================================================== -#include #include #include -#include -#include +#include namespace ripple { -std::pair +std::optional mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div) { using namespace boost::multiprecision; @@ -35,12 +33,10 @@ mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div) result /= div; - auto constexpr limit = std::numeric_limits::max(); + if (result > ripple::muldiv_max) + return std::nullopt; - if (result > limit) - return {false, limit}; - - return {true, static_cast(result)}; + return static_cast(result); } } // namespace ripple diff --git a/src/ripple/basics/mulDiv.h b/src/ripple/basics/mulDiv.h index 30579c255ac..e338f87c819 100644 --- a/src/ripple/basics/mulDiv.h +++ b/src/ripple/basics/mulDiv.h @@ -21,9 +21,12 @@ #define RIPPLE_BASICS_MULDIV_H_INCLUDED #include +#include +#include #include namespace ripple { +auto constexpr muldiv_max = std::numeric_limits::max(); /** Return value*mul/div accurately. Computes the result of the multiplication and division in @@ -31,14 +34,11 @@ namespace ripple { Throws: None Returns: - `std::pair`: - `first` is `false` if the calculation overflows, - `true` if the calculation is safe. - `second` is the result of the calculation if - `first` is `false`, max value of `uint64_t` - if `true`. + `std::optional`: + `std::nullopt` if the calculation overflows. Otherwise, `value * mul + / div`. 
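    Usage sketch (editorial addition, not part of the original header;
    consume() and handleOverflow() are placeholder names): callers now test
    the returned optional instead of a bool flag, e.g.

        if (auto const product = mulDiv(value, mul, div))
            consume(*product);    // no overflow: *product == value * mul / div
        else
            handleOverflow();     // overflow: no usable result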
*/ -std::pair +std::optional mulDiv(std::uint64_t value, std::uint64_t mul, std::uint64_t div); } // namespace ripple diff --git a/src/ripple/rpc/handlers/Fee1.cpp b/src/ripple/rpc/handlers/Fee1.cpp index 554480f10af..89fa9c6ea07 100644 --- a/src/ripple/rpc/handlers/Fee1.cpp +++ b/src/ripple/rpc/handlers/Fee1.cpp @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include diff --git a/src/ripple/rpc/impl/TransactionSign.cpp b/src/ripple/rpc/impl/TransactionSign.cpp index c903c26f8e3..7610682fd1a 100644 --- a/src/ripple/rpc/impl/TransactionSign.cpp +++ b/src/ripple/rpc/impl/TransactionSign.cpp @@ -738,9 +738,9 @@ checkFee( auto const limit = [&]() { // Scale fee units to drops: auto const result = mulDiv(feeDefault, mult, div); - if (!result.first) + if (!result) Throw("mulDiv"); - return result.second; + return *result; }(); if (fee > limit) diff --git a/src/test/app/TxQ_test.cpp b/src/test/app/TxQ_test.cpp index 8bf359e101c..4bc0040f867 100644 --- a/src/test/app/TxQ_test.cpp +++ b/src/test/app/TxQ_test.cpp @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include diff --git a/src/test/basics/FeeUnits_test.cpp b/src/test/basics/FeeUnits_test.cpp index 85527423c58..3ded5812947 100644 --- a/src/test/basics/FeeUnits_test.cpp +++ b/src/test/basics/FeeUnits_test.cpp @@ -50,12 +50,17 @@ class feeunits_test : public beast::unit_test::suite FeeLevel32 f{10}; FeeLevel32 baseFee{100}; - auto drops = mulDiv(baseFee, x, f).second; + auto drops = mulDiv(baseFee, x, f); + BEAST_EXPECT(drops); BEAST_EXPECT(drops.value() == 1000); - BEAST_EXPECT( - (std::is_same_v)); - BEAST_EXPECT((std::is_same_v)); + BEAST_EXPECT((std::is_same_v< + std::remove_reference_t::unit_type, + feeunit::dropTag>)); + + BEAST_EXPECT((std::is_same_v< + std::remove_reference_t, + XRPAmount>)); } { XRPAmount x{100}; @@ -70,12 +75,16 @@ class feeunits_test : public beast::unit_test::suite FeeLevel64 f{10}; FeeLevel64 baseFee{100}; - auto drops = mulDiv(baseFee, x, f).second; + auto drops = mulDiv(baseFee, x, f); + BEAST_EXPECT(drops); BEAST_EXPECT(drops.value() == 1000); - BEAST_EXPECT( - (std::is_same_v)); - BEAST_EXPECT((std::is_same_v)); + BEAST_EXPECT((std::is_same_v< + std::remove_reference_t::unit_type, + feeunit::dropTag>)); + BEAST_EXPECT((std::is_same_v< + std::remove_reference_t, + XRPAmount>)); } { FeeLevel64 x{1024}; @@ -91,12 +100,16 @@ class feeunits_test : public beast::unit_test::suite XRPAmount basefee{10}; FeeLevel64 referencefee{256}; - auto drops = mulDiv(x, basefee, referencefee).second; + auto drops = mulDiv(x, basefee, referencefee); + BEAST_EXPECT(drops); BEAST_EXPECT(drops.value() == 40); - BEAST_EXPECT( - (std::is_same_v)); - BEAST_EXPECT((std::is_same_v)); + BEAST_EXPECT((std::is_same_v< + std::remove_reference_t::unit_type, + feeunit::dropTag>)); + BEAST_EXPECT((std::is_same_v< + std::remove_reference_t, + XRPAmount>)); } } diff --git a/src/test/basics/mulDiv_test.cpp b/src/test/basics/mulDiv_test.cpp index 2dfc9760608..f51b91fecf4 100644 --- a/src/test/basics/mulDiv_test.cpp +++ b/src/test/basics/mulDiv_test.cpp @@ -32,27 +32,27 @@ struct mulDiv_test : beast::unit_test::suite const std::uint64_t max32 = std::numeric_limits::max(); auto result = mulDiv(85, 20, 5); - BEAST_EXPECT(result.first && result.second == 340); + BEAST_EXPECT(result && *result == 340); result = mulDiv(20, 85, 5); - BEAST_EXPECT(result.first && result.second == 340); + BEAST_EXPECT(result && *result == 340); result = mulDiv(0, max - 1, max - 3); - BEAST_EXPECT(result.first && result.second 
== 0); + BEAST_EXPECT(result && *result == 0); result = mulDiv(max - 1, 0, max - 3); - BEAST_EXPECT(result.first && result.second == 0); + BEAST_EXPECT(result && *result == 0); result = mulDiv(max, 2, max / 2); - BEAST_EXPECT(result.first && result.second == 4); + BEAST_EXPECT(result && *result == 4); result = mulDiv(max, 1000, max / 1000); - BEAST_EXPECT(result.first && result.second == 1000000); + BEAST_EXPECT(result && *result == 1000000); result = mulDiv(max, 1000, max / 1001); - BEAST_EXPECT(result.first && result.second == 1001000); + BEAST_EXPECT(result && *result == 1001000); result = mulDiv(max32 + 1, max32 + 1, 5); - BEAST_EXPECT(result.first && result.second == 3689348814741910323); + BEAST_EXPECT(result && *result == 3689348814741910323); // Overflow result = mulDiv(max - 1, max - 2, 5); - BEAST_EXPECT(!result.first && result.second == max); + BEAST_EXPECT(!result); } }; From a45a95e5ea9f79458366ef21018778427a344b0e Mon Sep 17 00:00:00 2001 From: Arihant Kothari Date: Thu, 6 Jul 2023 14:58:53 -0400 Subject: [PATCH 196/508] APIv2(ledger_entry): return "invalidParams" when fields missing (#4552) Improve error handling for ledger_entry by returning an "invalidParams" error when one or more request fields are specified incorrectly, or one or more required fields are missing. For example, if none of of the following fields is provided, then the API should return an invalidParams error: * index, account_root, directory, offer, ripple_state, check, escrow, payment_channel, deposit_preauth, ticket Prior to this commit, the API returned an "unknownOption" error instead. Since the error was actually due to invalid parameters, rather than unknown options, this error was misleading. Since this is an API breaking change, the "invalidParams" error is only returned for requests using api_version: 2 and above. To maintain backward compatibility, the "unknownOption" error is still returned for api_version: 1. Related: #4573 Fix #4303 --- src/ripple/rpc/handlers/LedgerEntry.cpp | 7 ++++++- src/test/rpc/LedgerRPC_test.cpp | 23 +++++++++++++++++++---- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/src/ripple/rpc/handlers/LedgerEntry.cpp b/src/ripple/rpc/handlers/LedgerEntry.cpp index fff2ceac01c..19eedd395cb 100644 --- a/src/ripple/rpc/handlers/LedgerEntry.cpp +++ b/src/ripple/rpc/handlers/LedgerEntry.cpp @@ -358,7 +358,12 @@ doLedgerEntry(RPC::JsonContext& context) } } else - jvResult[jss::error] = "unknownOption"; + { + if (context.apiVersion < 2u) + jvResult[jss::error] = "unknownOption"; + else + jvResult[jss::error] = "invalidParams"; + } } if (uNodeIndex.isNonZero()) diff --git a/src/test/rpc/LedgerRPC_test.cpp b/src/test/rpc/LedgerRPC_test.cpp index 8a09f2a23b6..9c9a63005a7 100644 --- a/src/test/rpc/LedgerRPC_test.cpp +++ b/src/test/rpc/LedgerRPC_test.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include namespace ripple { @@ -1212,9 +1213,11 @@ class LedgerRPC_test : public beast::unit_test::suite } void - testLedgerEntryUnknownOption() + testLedgerEntryInvalidParams(unsigned int apiVersion) { - testcase("ledger_entry Request Unknown Option"); + testcase( + "ledger_entry Request With Invalid Parameters v" + + std::to_string(apiVersion)); using namespace test::jtx; Env env{*this}; @@ -1222,11 +1225,16 @@ class LedgerRPC_test : public beast::unit_test::suite // "features" is not an option supported by ledger_entry. 
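            // (Editorial note, not part of the commit: the assertions at the
            // end of this test are version-dependent -- api_version 1 keeps
            // returning the legacy "unknownOption" error for backward
            // compatibility, while api_version 2 and above report
            // "invalidParams", matching the doLedgerEntry() change above.)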
Json::Value jvParams; + jvParams[jss::api_version] = apiVersion; jvParams[jss::features] = ledgerHash; jvParams[jss::ledger_hash] = ledgerHash; Json::Value const jrr = env.rpc("json", "ledger_entry", to_string(jvParams))[jss::result]; - checkErrorValue(jrr, "unknownOption", ""); + + if (apiVersion < 2u) + checkErrorValue(jrr, "unknownOption", ""); + else + checkErrorValue(jrr, "invalidParams", ""); } /// @brief ledger RPC requests as a way to drive @@ -1724,11 +1732,18 @@ class LedgerRPC_test : public beast::unit_test::suite testLedgerEntryPayChan(); testLedgerEntryRippleState(); testLedgerEntryTicket(); - testLedgerEntryUnknownOption(); testLookupLedger(); testNoQueue(); testQueue(); testLedgerAccountsOption(); + + // version specific tests + for (auto testVersion = RPC::apiMinimumSupportedVersion; + testVersion <= RPC::apiBetaVersion; + ++testVersion) + { + testLedgerEntryInvalidParams(testVersion); + } } }; From f7dd37e355754e576fee4bda357cec64a35871aa Mon Sep 17 00:00:00 2001 From: Elliot Lee Date: Fri, 7 Jul 2023 14:31:09 -0700 Subject: [PATCH 197/508] docs(CONTRIBUTING): push beta releases to `release` (#4589) Sections that were rewrapped were wrapped to 72 characters, the same as the recommendation for commit messages. --- CONTRIBUTING.md | 89 ++++++++++++++++++++++++++----------------------- 1 file changed, 47 insertions(+), 42 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8c9a81bb7b4..df1d9f2a2f4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,54 +1,51 @@ The XRP Ledger has many and diverse stakeholders, and everyone deserves -a chance to contribute meaningful changes to the code that runs the XRPL. +a chance to contribute meaningful changes to the code that runs the +XRPL. # Contributing -We assume you are familiar with the general practice of [making contributions -on GitHub][1]. -This file includes only special instructions specific to this project. +We assume you are familiar with the general practice of [making +contributions on GitHub][1]. This file includes only special +instructions specific to this project. ## Before you start -All of your contributions must be developed in your personal +In general, contributions should be developed in your personal [fork](https://github.com/XRPLF/rippled/fork). -No personal branches may ever be pushed to the [main project][rippled]. -These are the only branches that may ever exist in the main project: + +The following branches exist in the main project repository: - `develop`: The latest set of unreleased features, and the most common starting point for contributions. -- `release`: The latest release candidate. +- `release`: The latest beta release or release candidate. - `master`: The latest stable release. - `gh-pages`: The documentation for this project, built by Doxygen. -The tip of each branch must be signed. -In order for GitHub to sign a squashed commit that it builds from your pull -request, -all of your commits must be signed, -and GitHub must know your verifying key. -Please walk through the excellent documentation from GitHub to set -up [signature verification][signing]. +The tip of each branch must be signed. In order for GitHub to sign a +squashed commit that it builds from your pull request, GitHub must know +your verifying key. Please set up [signature verification][signing]. 
[rippled]: https://github.com/XRPLF/rippled -[signing]: https://docs.github.com/en/authentication/managing-commit-signature-verification/about-commit-signature-verification +[signing]: + https://docs.github.com/en/authentication/managing-commit-signature-verification/about-commit-signature-verification ## Major contributions -If your contribution is a major feature or breaking change, -then you must first write an XRP Ledger Standard (XLS) describing it. -Go to [XRPL-Standards](https://github.com/XRPLF/XRPL-Standards/discussions), -choose the next available standard number, and -open a discussion with an appropriate title to propose your draft standard. +If your contribution is a major feature or breaking change, then you +must first write an XRP Ledger Standard (XLS) describing it. Go to +[XRPL-Standards](https://github.com/XRPLF/XRPL-Standards/discussions), +choose the next available standard number, and open a discussion with an +appropriate title to propose your draft standard. When you submit a pull request, please link the corresponding XLS in the -description. -An XLS still in draft status is considered a work-in-progress and open for -discussion. -Please do not submit a pull request before allowing due time for questions, -suggestions, and changes to the XLS draft. -It is the responsibility of the XLS author to update the draft to match the -final implementation when its corresponding pull request is merged. +description. An XLS still in draft status is considered a +work-in-progress and open for discussion. Please allow time for +questions, suggestions, and changes to the XLS draft. It is the +responsibility of the XLS author to update the draft to match the final +implementation when its corresponding pull request is merged, unless the +author delegates that responsibility to others. ## Before making a pull request @@ -79,13 +76,13 @@ Header includes must be [levelized](./Builds/levelization). ## Pull requests -Pull requests must target the `develop` branch.[^1] +In general, pull requests use `develop` as the base branch. -[^1]: There are exceptions to this policy for hotfixes, but no one consulting - this document will be in that situation. +(Hotfixes are an exception.) Changes to pull requests must be added as new commits. -You may **never force push a branch in a pull request** (e.g. after a rebase). +Once code reviewers have started looking at your code, please avoid +force-pushing a branch in a pull request. This preserves the ability for reviewers to filter changes since their last review. @@ -100,8 +97,9 @@ to preserve a linear history for the `develop` branch. # Style guide -This is a non-exhaustive list of recommended style guidelines. -These are not always strictly enforced and serve as a way to keep the codebase coherent rather than a set of _thou shalt not_ commandments. +This is a non-exhaustive list of recommended style guidelines. These are +not always strictly enforced and serve as a way to keep the codebase +coherent rather than a set of _thou shalt not_ commandments. ## Formatting @@ -133,20 +131,27 @@ pre-commit install 2. Proliferation of new files and classes. 3. Complex inheritance and complex OOP patterns. 4. Unmanaged memory allocation and raw pointers. -5. Macros and non-trivial templates (unless they add significant value.) -6. Lambda patterns (unless these add significant value.) -7. CPU or architecture-specific code unless there is a good reason to include it, and where it is used guard it with macros and provide explanatory comments. +5. 
Macros and non-trivial templates (unless they add significant value). +6. Lambda patterns (unless these add significant value). +7. CPU or architecture-specific code unless there is a good reason to + include it, and where it is used, guard it with macros and provide + explanatory comments. 8. Importing new libraries unless there is a very good reason to do so. ## Seek to 9. Extend functionality of existing code rather than creating new code. -10. Prefer readability over terseness where important logic is concerned. -11. Inline functions that are not used or are not likely to be used elsewhere in the codebase. -12. Use clear and self-explanatory names for functions, variables, structs and classes. -13. Use TitleCase for classes, structs and filenames, camelCase for function and variable names, lower case for namespaces and folders. -14. Provide as many comments as you feel that a competent programmer would need to understand what your code does. +10. Prefer readability over terseness where important logic is + concerned. +11. Inline functions that are not used or are not likely to be used + elsewhere in the codebase. +12. Use clear and self-explanatory names for functions, variables, + structs and classes. +13. Use TitleCase for classes, structs and filenames, camelCase for + function and variable names, lower case for namespaces and folders. +14. Provide as many comments as you feel that a competent programmer + would need to understand what your code does. # Maintainers From eeb8b41889bc990f1f42e5953a70a344a6e178cd Mon Sep 17 00:00:00 2001 From: John Freeman Date: Tue, 11 Jul 2023 16:26:15 -0500 Subject: [PATCH 198/508] Adapt to change in Conan recipe for NuDB (#4615) The recipe was updated a few days ago and the exported library target was renamed. --- CMakeLists.txt | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index da2ea1a5307..256f8dc7fc5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -100,7 +100,6 @@ target_link_libraries(ripple_libs INTERFACE ed25519::ed25519 LibArchive::LibArchive lz4::lz4 - nudb::core OpenSSL::Crypto OpenSSL::SSL Ripple::grpc_pbufs @@ -110,6 +109,16 @@ target_link_libraries(ripple_libs INTERFACE SQLite::SQLite3 ) +# Work around changes to Conan recipe for now. +if(TARGET nudb::core) + set(nudb nudb::core) +elseif(TARGET NuDB::nudb) + set(nudb NuDB::nudb) +else() + message(FATAL_ERROR "unknown nudb target") +endif() +target_link_libraries(ripple_libs INTERFACE ${nudb}) + if(reporting) find_package(cassandra-cpp-driver REQUIRED) find_package(PostgreSQL REQUIRED) From 3c9db4b69efb8f8e5382e4dfed5927ef28a303a3 Mon Sep 17 00:00:00 2001 From: Gregory Tsipenyuk Date: Wed, 12 Jul 2023 13:52:50 -0400 Subject: [PATCH 199/508] Introduce AMM support (XLS-30d): (#4294) Add AMM functionality: - InstanceCreate - Deposit - Withdraw - Governance - Auctioning - payment engine integration To support this functionality, add: - New RPC method, `amm_info`, to fetch pool and LPT balances - AMM Root Account - trust line for each IOU AMM token - trust line to track Liquidity Provider Tokens (LPT) - `ltAMM` object The `ltAMM` object tracks: - fee votes - auction slot bids - AMM tokens pair - total outstanding tokens balance - `AMMID` to AMM `RootAccountID` mapping Add new classes to facilitate AMM integration into the payment engine. `BookStep` uses these classes to infer if AMM liquidity can be consumed. The AMM formula implementation uses the new Number class added in #4192. 
IOUAmount and STAmount use Number arithmetic. Add AMM unit tests for all features. AMM requires the following amendments: - featureAMM - fixUniversalNumber - featureFlowCross Notes: - Current trading fee threshold is 1% - AMM currency is generated by: 0x03 + 152 bits of sha256{cur1, cur2} - Current max AMM Offers is 30 --------- Co-authored-by: Howard Hinnant --- Builds/CMake/RippledCore.cmake | 23 + Builds/levelization/results/ordering.txt | 2 + src/ripple/app/ledger/OrderBookDB.cpp | 18 +- src/ripple/app/misc/AMMHelpers.h | 311 ++ src/ripple/app/misc/AMMUtils.h | 98 + src/ripple/app/misc/impl/AMMHelpers.cpp | 206 + src/ripple/app/misc/impl/AMMUtils.cpp | 191 + src/ripple/app/paths/AMMContext.h | 119 + src/ripple/app/paths/AMMLiquidity.h | 148 + src/ripple/app/paths/AMMOffer.h | 149 + src/ripple/app/paths/Flow.cpp | 10 + src/ripple/app/paths/impl/AMMLiquidity.cpp | 223 + src/ripple/app/paths/impl/AMMOffer.cpp | 143 + src/ripple/app/paths/impl/BookStep.cpp | 303 +- src/ripple/app/paths/impl/PaySteps.cpp | 7 + src/ripple/app/paths/impl/Steps.h | 31 +- src/ripple/app/paths/impl/StrandFlow.h | 85 +- src/ripple/app/tx/impl/AMMBid.cpp | 353 ++ src/ripple/app/tx/impl/AMMBid.h | 86 + src/ripple/app/tx/impl/AMMCreate.cpp | 379 ++ src/ripple/app/tx/impl/AMMCreate.h | 82 + src/ripple/app/tx/impl/AMMDeposit.cpp | 837 ++++ src/ripple/app/tx/impl/AMMDeposit.h | 230 + src/ripple/app/tx/impl/AMMVote.cpp | 243 + src/ripple/app/tx/impl/AMMVote.h | 71 + src/ripple/app/tx/impl/AMMWithdraw.cpp | 813 ++++ src/ripple/app/tx/impl/AMMWithdraw.h | 230 + src/ripple/app/tx/impl/Escrow.cpp | 12 + src/ripple/app/tx/impl/Escrow.h | 3 + src/ripple/app/tx/impl/InvariantCheck.cpp | 17 +- src/ripple/app/tx/impl/Offer.h | 77 +- src/ripple/app/tx/impl/PayChan.cpp | 3 + src/ripple/app/tx/impl/Payment.cpp | 5 + src/ripple/app/tx/impl/applySteps.cpp | 55 + src/ripple/ledger/View.h | 46 +- src/ripple/ledger/impl/View.cpp | 77 +- src/ripple/net/impl/RPCCall.cpp | 1 + src/ripple/protocol/AMMCore.h | 134 + src/ripple/protocol/AmountConversions.h | 55 + src/ripple/protocol/ErrorCodes.h | 6 +- src/ripple/protocol/Feature.h | 3 +- src/ripple/protocol/Indexes.h | 7 + src/ripple/protocol/Issue.h | 18 +- src/ripple/protocol/LedgerFormats.h | 10 +- src/ripple/protocol/QualityFunction.h | 107 + src/ripple/protocol/SField.h | 25 + src/ripple/protocol/STIssue.h | 138 + src/ripple/protocol/STObject.h | 3 + src/ripple/protocol/TER.h | 7 + src/ripple/protocol/TxFlags.h | 17 + src/ripple/protocol/TxFormats.h | 15 + src/ripple/protocol/impl/AMMCore.cpp | 131 + src/ripple/protocol/impl/ErrorCodes.cpp | 1 + src/ripple/protocol/impl/Feature.cpp | 1 + src/ripple/protocol/impl/Indexes.cpp | 19 + .../protocol/impl/InnerObjectFormats.cpp | 18 + src/ripple/protocol/impl/Issue.cpp | 87 + src/ripple/protocol/impl/LedgerFormats.cpp | 14 + src/ripple/protocol/impl/QualityFunction.cpp | 59 + src/ripple/protocol/impl/SField.cpp | 28 +- src/ripple/protocol/impl/STAmount.cpp | 15 +- src/ripple/protocol/impl/STIssue.cpp | 113 + src/ripple/protocol/impl/STObject.cpp | 6 + src/ripple/protocol/impl/STParsedJSON.cpp | 12 + src/ripple/protocol/impl/STVar.cpp | 7 + src/ripple/protocol/impl/TER.cpp | 6 + src/ripple/protocol/impl/TxFormats.cpp | 58 + src/ripple/protocol/jss.h | 133 +- src/ripple/rpc/handlers/AMMInfo.cpp | 208 + src/ripple/rpc/handlers/Handlers.h | 2 + src/ripple/rpc/handlers/LedgerEntry.cpp | 33 + src/ripple/rpc/impl/Handler.cpp | 1 + src/ripple/rpc/impl/RPCHelpers.cpp | 5 +- src/test/app/AMMCalc_test.cpp | 457 ++ src/test/app/AMMExtended_test.cpp | 3909 
++++++++++++++++ src/test/app/AMM_test.cpp | 4169 +++++++++++++++++ src/test/app/CrossingLimits_test.cpp | 20 +- src/test/app/Escrow_test.cpp | 132 +- src/test/app/Flow_test.cpp | 9 +- src/test/app/Freeze_test.cpp | 27 +- src/test/app/Offer_test.cpp | 41 +- src/test/app/Path_test.cpp | 97 +- src/test/app/PayChan_test.cpp | 98 +- src/test/app/PayStrand_test.cpp | 32 +- src/test/app/TheoreticalQuality_test.cpp | 3 + src/test/app/TrustAndBalance_test.cpp | 18 +- src/test/jtx/AMM.h | 355 ++ src/test/jtx/AMMTest.h | 156 + src/test/jtx/TestHelpers.h | 412 ++ src/test/jtx/impl/AMM.cpp | 727 +++ src/test/jtx/impl/AMMTest.cpp | 278 ++ src/test/jtx/impl/TestHelpers.cpp | 388 ++ src/test/jtx/impl/paths.cpp | 8 +- src/test/jtx/impl/pay.cpp | 11 +- src/test/jtx/paths.h | 3 + src/test/jtx/pay.h | 2 + src/test/rpc/AMMInfo_test.cpp | 177 + src/test/rpc/AccountOffers_test.cpp | 8 +- src/test/rpc/LedgerData_test.cpp | 8 +- src/test/rpc/NoRipple_test.cpp | 7 +- 100 files changed, 18092 insertions(+), 649 deletions(-) create mode 100644 src/ripple/app/misc/AMMHelpers.h create mode 100644 src/ripple/app/misc/AMMUtils.h create mode 100644 src/ripple/app/misc/impl/AMMHelpers.cpp create mode 100644 src/ripple/app/misc/impl/AMMUtils.cpp create mode 100644 src/ripple/app/paths/AMMContext.h create mode 100644 src/ripple/app/paths/AMMLiquidity.h create mode 100644 src/ripple/app/paths/AMMOffer.h create mode 100644 src/ripple/app/paths/impl/AMMLiquidity.cpp create mode 100644 src/ripple/app/paths/impl/AMMOffer.cpp create mode 100644 src/ripple/app/tx/impl/AMMBid.cpp create mode 100644 src/ripple/app/tx/impl/AMMBid.h create mode 100644 src/ripple/app/tx/impl/AMMCreate.cpp create mode 100644 src/ripple/app/tx/impl/AMMCreate.h create mode 100644 src/ripple/app/tx/impl/AMMDeposit.cpp create mode 100644 src/ripple/app/tx/impl/AMMDeposit.h create mode 100644 src/ripple/app/tx/impl/AMMVote.cpp create mode 100644 src/ripple/app/tx/impl/AMMVote.h create mode 100644 src/ripple/app/tx/impl/AMMWithdraw.cpp create mode 100644 src/ripple/app/tx/impl/AMMWithdraw.h create mode 100644 src/ripple/protocol/AMMCore.h create mode 100644 src/ripple/protocol/QualityFunction.h create mode 100644 src/ripple/protocol/STIssue.h create mode 100644 src/ripple/protocol/impl/AMMCore.cpp create mode 100644 src/ripple/protocol/impl/QualityFunction.cpp create mode 100644 src/ripple/protocol/impl/STIssue.cpp create mode 100644 src/ripple/rpc/handlers/AMMInfo.cpp create mode 100644 src/test/app/AMMCalc_test.cpp create mode 100644 src/test/app/AMMExtended_test.cpp create mode 100644 src/test/app/AMM_test.cpp create mode 100644 src/test/jtx/AMM.h create mode 100644 src/test/jtx/AMMTest.h create mode 100644 src/test/jtx/TestHelpers.h create mode 100644 src/test/jtx/impl/AMM.cpp create mode 100644 src/test/jtx/impl/AMMTest.cpp create mode 100644 src/test/jtx/impl/TestHelpers.cpp create mode 100644 src/test/rpc/AMMInfo_test.cpp diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 53a5e61a7b7..b676c5ff5e9 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -74,6 +74,7 @@ target_sources (xrpl_core PRIVATE subdir: protocol #]===============================] src/ripple/protocol/impl/AccountID.cpp + src/ripple/protocol/impl/AMMCore.cpp src/ripple/protocol/impl/Book.cpp src/ripple/protocol/impl/BuildInfo.cpp src/ripple/protocol/impl/ErrorCodes.cpp @@ -81,10 +82,12 @@ target_sources (xrpl_core PRIVATE src/ripple/protocol/impl/Indexes.cpp src/ripple/protocol/impl/InnerObjectFormats.cpp 
src/ripple/protocol/impl/Issue.cpp + src/ripple/protocol/impl/STIssue.cpp src/ripple/protocol/impl/Keylet.cpp src/ripple/protocol/impl/LedgerFormats.cpp src/ripple/protocol/impl/PublicKey.cpp src/ripple/protocol/impl/Quality.cpp + src/ripple/protocol/impl/QualityFunction.cpp src/ripple/protocol/impl/Rate2.cpp src/ripple/protocol/impl/Rules.cpp src/ripple/protocol/impl/SField.cpp @@ -223,6 +226,7 @@ install ( install ( FILES src/ripple/protocol/AccountID.h + src/ripple/protocol/AMMCore.h src/ripple/protocol/AmountConversions.h src/ripple/protocol/Book.h src/ripple/protocol/BuildInfo.h @@ -239,12 +243,14 @@ install ( src/ripple/protocol/Protocol.h src/ripple/protocol/PublicKey.h src/ripple/protocol/Quality.h + src/ripple/protocol/QualityFunction.h src/ripple/protocol/Rate.h src/ripple/protocol/Rules.h src/ripple/protocol/SField.h src/ripple/protocol/SOTemplate.h src/ripple/protocol/STAccount.h src/ripple/protocol/STAmount.h + src/ripple/protocol/STIssue.h src/ripple/protocol/STArray.h src/ripple/protocol/STBase.h src/ripple/protocol/STBitString.h @@ -423,6 +429,8 @@ target_sources (rippled PRIVATE src/ripple/app/reporting/ReportingETL.cpp src/ripple/app/reporting/ETLSource.cpp src/ripple/app/reporting/P2pProxy.cpp + src/ripple/app/misc/impl/AMMHelpers.cpp + src/ripple/app/misc/impl/AMMUtils.cpp src/ripple/app/misc/CanonicalTXSet.cpp src/ripple/app/misc/FeeVoteImpl.cpp src/ripple/app/misc/HashRouter.cpp @@ -448,6 +456,8 @@ target_sources (rippled PRIVATE src/ripple/app/paths/RippleCalc.cpp src/ripple/app/paths/RippleLineCache.cpp src/ripple/app/paths/TrustLine.cpp + src/ripple/app/paths/impl/AMMLiquidity.cpp + src/ripple/app/paths/impl/AMMOffer.cpp src/ripple/app/paths/impl/BookStep.cpp src/ripple/app/paths/impl/DirectStep.cpp src/ripple/app/paths/impl/PaySteps.cpp @@ -464,6 +474,11 @@ target_sources (rippled PRIVATE src/ripple/app/rdb/impl/UnitaryShard.cpp src/ripple/app/rdb/impl/Vacuum.cpp src/ripple/app/rdb/impl/Wallet.cpp + src/ripple/app/tx/impl/AMMBid.cpp + src/ripple/app/tx/impl/AMMCreate.cpp + src/ripple/app/tx/impl/AMMDeposit.cpp + src/ripple/app/tx/impl/AMMVote.cpp + src/ripple/app/tx/impl/AMMWithdraw.cpp src/ripple/app/tx/impl/ApplyContext.cpp src/ripple/app/tx/impl/BookTip.cpp src/ripple/app/tx/impl/CancelCheck.cpp @@ -629,6 +644,7 @@ target_sources (rippled PRIVATE src/ripple/rpc/handlers/AccountObjects.cpp src/ripple/rpc/handlers/AccountOffers.cpp src/ripple/rpc/handlers/AccountTx.cpp + src/ripple/rpc/handlers/AMMInfo.cpp src/ripple/rpc/handlers/BlackList.cpp src/ripple/rpc/handlers/BookOffers.cpp src/ripple/rpc/handlers/CanDelete.cpp @@ -735,6 +751,9 @@ if (tests) src/test/app/AccountDelete_test.cpp src/test/app/AccountTxPaging_test.cpp src/test/app/AmendmentTable_test.cpp + src/test/app/AMM_test.cpp + src/test/app/AMMCalc_test.cpp + src/test/app/AMMExtended_test.cpp src/test/app/Check_test.cpp src/test/app/Clawback_test.cpp src/test/app/CrossingLimits_test.cpp @@ -877,9 +896,12 @@ if (tests) src/test/jtx/Env_test.cpp src/test/jtx/WSClient_test.cpp src/test/jtx/impl/Account.cpp + src/test/jtx/impl/AMM.cpp + src/test/jtx/impl/AMMTest.cpp src/test/jtx/impl/Env.cpp src/test/jtx/impl/JSONRPCClient.cpp src/test/jtx/impl/ManualTimeKeeper.cpp + src/test/jtx/impl/TestHelpers.cpp src/test/jtx/impl/WSClient.cpp src/test/jtx/impl/acctdelete.cpp src/test/jtx/impl/account_txn_id.cpp @@ -995,6 +1017,7 @@ if (tests) src/test/rpc/AccountSet_test.cpp src/test/rpc/AccountTx_test.cpp src/test/rpc/AmendmentBlocked_test.cpp + src/test/rpc/AMMInfo_test.cpp src/test/rpc/Book_test.cpp 
src/test/rpc/DepositAuthorized_test.cpp src/test/rpc/DeliveredAmount_test.cpp diff --git a/Builds/levelization/results/ordering.txt b/Builds/levelization/results/ordering.txt index 401040fc2d7..79dcdd3cc0f 100644 --- a/Builds/levelization/results/ordering.txt +++ b/Builds/levelization/results/ordering.txt @@ -142,6 +142,8 @@ test.jtx > ripple.json test.jtx > ripple.ledger test.jtx > ripple.net test.jtx > ripple.protocol +test.jtx > ripple.resource +test.jtx > ripple.rpc test.jtx > ripple.server test.ledger > ripple.app test.ledger > ripple.basics diff --git a/src/ripple/app/ledger/OrderBookDB.cpp b/src/ripple/app/ledger/OrderBookDB.cpp index 343e7f6269a..45262c4d8a9 100644 --- a/src/ripple/app/ledger/OrderBookDB.cpp +++ b/src/ripple/app/ledger/OrderBookDB.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -93,7 +94,7 @@ OrderBookDB::update(std::shared_ptr const& ledger) JLOG(j_.debug()) << "Beginning update (" << ledger->seq() << ")"; - // walk through the entire ledger looking for orderbook entries + // walk through the entire ledger looking for orderbook/AMM entries int cnt = 0; try @@ -126,6 +127,21 @@ OrderBookDB::update(std::shared_ptr const& ledger) ++cnt; } + else if (sle->getType() == ltAMM) + { + auto const issue1 = (*sle)[sfAsset]; + auto const issue2 = (*sle)[sfAsset2]; + auto addBook = [&](Issue const& in, Issue const& out) { + allBooks[in].insert(out); + + if (isXRP(out)) + xrpBooks.insert(in); + + ++cnt; + }; + addBook(issue1, issue2); + addBook(issue2, issue1); + } } } catch (SHAMapMissingNode const& mn) diff --git a/src/ripple/app/misc/AMMHelpers.h b/src/ripple/app/misc/AMMHelpers.h new file mode 100644 index 00000000000..24c25922800 --- /dev/null +++ b/src/ripple/app/misc/AMMHelpers.h @@ -0,0 +1,311 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_APP_MISC_AMMHELPERS_H_INCLUDED +#define RIPPLE_APP_MISC_AMMHELPERS_H_INCLUDED + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace ripple { + +/** Calculate LP Tokens given AMM pool reserves. + * @param asset1 AMM one side of the pool reserve + * @param asset2 AMM another side of the pool reserve + * @return LP Tokens as IOU + */ +STAmount +ammLPTokens( + STAmount const& asset1, + STAmount const& asset2, + Issue const& lptIssue); + +/** Calculate LP Tokens given asset's deposit amount. 
+ * @param asset1Balance current AMM asset1 balance + * @param asset1Deposit requested asset1 deposit amount + * @param lptAMMBalance AMM LPT balance + * @param tfee trading fee in basis points + * @return tokens + */ +STAmount +lpTokensIn( + STAmount const& asset1Balance, + STAmount const& asset1Deposit, + STAmount const& lptAMMBalance, + std::uint16_t tfee); + +/** Calculate asset deposit given LP Tokens. + * @param asset1Balance current AMM asset1 balance + * @param lpTokens LP Tokens + * @param lptAMMBalance AMM LPT balance + * @param tfee trading fee in basis points + * @return + */ +STAmount +ammAssetIn( + STAmount const& asset1Balance, + STAmount const& lptAMMBalance, + STAmount const& lpTokens, + std::uint16_t tfee); + +/** Calculate LP Tokens given asset's withdraw amount. Return 0 + * if can't calculate. + * @param asset1Balance current AMM asset1 balance + * @param asset1Withdraw requested asset1 withdraw amount + * @param lptAMMBalance AMM LPT balance + * @param tfee trading fee in basis points + * @return tokens out amount + */ +STAmount +lpTokensOut( + STAmount const& asset1Balance, + STAmount const& asset1Withdraw, + STAmount const& lptAMMBalance, + std::uint16_t tfee); + +/** Calculate asset withdrawal by tokens + * @param assetBalance balance of the asset being withdrawn + * @param lptAMMBalance total AMM Tokens balance + * @param lpTokens LP Tokens balance + * @param tfee trading fee in basis points + * @return calculated asset amount + */ +STAmount +withdrawByTokens( + STAmount const& assetBalance, + STAmount const& lptAMMBalance, + STAmount const& lpTokens, + std::uint16_t tfee); + +/** Check if the relative distance between the qualities + * is within the requested distance. + * @param calcQuality calculated quality + * @param reqQuality requested quality + * @param dist requested relative distance + * @return true if within dist, false otherwise + */ +inline bool +withinRelativeDistance( + Quality const& calcQuality, + Quality const& reqQuality, + Number const& dist) +{ + if (calcQuality == reqQuality) + return true; + auto const [min, max] = std::minmax(calcQuality, reqQuality); + // Relative distance is (max - min)/max. Can't use basic operations + // on Quality. Have to use Quality::rate() instead, which + // is inverse of quality: (1/max.rate - 1/min.rate)/(1/max.rate) + return ((min.rate() - max.rate()) / min.rate()) < dist; +} + +/** Check if the relative distance between the amounts + * is within the requested distance. + * @param calc calculated amount + * @param req requested amount + * @param dist requested relative distance + * @return true if within dist, false otherwise + */ +// clang-format off +template + requires( + std::is_same_v || std::is_same_v || + std::is_same_v) +bool +withinRelativeDistance(Amt const& calc, Amt const& req, Number const& dist) +{ + if (calc == req) + return true; + auto const [min, max] = std::minmax(calc, req); + return ((max - min) / max) < dist; +} +// clang-format on + +/** Finds takerPays (i) and takerGets (o) such that given pool composition + * poolGets(I) and poolPays(O): (O - o) / (I + i) = quality. + * Where takerGets is calculated as the swapAssetIn (see below). + * The above equation produces the quadratic equation: + * i^2*(1-fee) + i*I*(2-fee) + I^2 - I*O/quality, + * which is solved for i, and o is found with swapAssetIn(). 
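 * (Editorial derivation note, not part of the original comment: the
 * quadratic follows from substituting o = O - I*O / (I + i*(1-fee)), i.e.
 * swapAssetIn(), into (O - o) / (I + i) = quality and clearing
 * denominators, which gives
 *     i^2*(1-fee) + i*I*(2-fee) + I^2 - I*O/quality = 0,
 * matching the coefficients a, b and c computed in the function body.)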
+ * @param pool AMM pool balances + * @param quality requested quality + * @param tfee trading fee in basis points + * @return seated in/out amounts if the quality can be changed + */ +template +std::optional> +changeSpotPriceQuality( + TAmounts const& pool, + Quality const& quality, + std::uint16_t tfee) +{ + auto const f = feeMult(tfee); // 1 - fee + auto const& a = f; + auto const b = pool.in * (1 + f); + Number const c = pool.in * pool.in - pool.in * pool.out * quality.rate(); + if (auto const res = b * b - 4 * a * c; res < 0) + return std::nullopt; + else if (auto const nTakerPaysPropose = (-b + root2(res)) / (2 * a); + nTakerPaysPropose > 0) + { + auto const nTakerPays = [&]() { + // The fee might make the AMM offer quality less than CLOB quality. + // Therefore, AMM offer has to satisfy this constraint: o / i >= q. + // Substituting o with swapAssetIn() gives: + // i <= O / q - I / (1 - fee). + auto const nTakerPaysConstraint = + pool.out * quality.rate() - pool.in / f; + if (nTakerPaysPropose > nTakerPaysConstraint) + return nTakerPaysConstraint; + return nTakerPaysPropose; + }(); + if (nTakerPays <= 0) + return std::nullopt; + auto const takerPays = toAmount( + getIssue(pool.in), nTakerPays, Number::rounding_mode::upward); + // should not fail + if (auto const amounts = + TAmounts{ + takerPays, swapAssetIn(pool, takerPays, tfee)}; + Quality{amounts} < quality && + !withinRelativeDistance(Quality{amounts}, quality, Number(1, -7))) + Throw("changeSpotPriceQuality failed"); + else + return amounts; + } + return std::nullopt; +} + +/** AMM pool invariant - the product (A * B) after swap in/out has to remain + * at least the same: (A + in) * (B - out) >= A * B + * XRP round-off may result in a smaller product after swap in/out. + * To address this: + * - if on swapIn the out is XRP then the amount is round-off + * downward, making the product slightly larger since out + * value is reduced. + * - if on swapOut the in is XRP then the amount is round-off + * upward, making the product slightly larger since in + * value is increased. + */ + +/** Swap assetIn into the pool and swap out a proportional amount + * of the other asset. Implements AMM Swap in. + * @see [XLS30d:AMM + * Swap](https://github.com/XRPLF/XRPL-Standards/discussions/78) + * @param pool current AMM pool balances + * @param assetIn amount to swap in + * @param tfee trading fee in basis points + * @return + */ +template +TOut +swapAssetIn( + TAmounts const& pool, + TIn const& assetIn, + std::uint16_t tfee) +{ + return toAmount( + getIssue(pool.out), + pool.out - (pool.in * pool.out) / (pool.in + assetIn * feeMult(tfee)), + Number::rounding_mode::downward); +} + +/** Swap assetOut out of the pool and swap in a proportional amount + * of the other asset. Implements AMM Swap out. + * @see [XLS30d:AMM + * Swap](https://github.com/XRPLF/XRPL-Standards/discussions/78) + * @param pool current AMM pool balances + * @param assetOut amount to swap out + * @param tfee trading fee in basis points + * @return + */ +template +TIn +swapAssetOut( + TAmounts const& pool, + TOut const& assetOut, + std::uint16_t tfee) +{ + return toAmount( + getIssue(pool.in), + ((pool.in * pool.out) / (pool.out - assetOut) - pool.in) / + feeMult(tfee), + Number::rounding_mode::upward); +} + +/** Return square of n. + */ +Number +square(Number const& n); + +/** Adjust LP tokens to deposit/withdraw. + * Amount type keeps 16 digits. 
Maintaining the LP balance by adding deposited + * tokens or subtracting withdrawn LP tokens from LP balance results in + * losing precision in LP balance. I.e. the resulting LP balance + * is less than the actual sum of LP tokens. To adjust for this, subtract + * old tokens balance from the new one for deposit or vice versa for withdraw + * to cancel out the precision loss. + * @param lptAMMBalance LPT AMM Balance + * @param lpTokens LP tokens to deposit or withdraw + * @param isDeposit true if deposit, false if withdraw + */ +STAmount +adjustLPTokens( + STAmount const& lptAMMBalance, + STAmount const& lpTokens, + bool isDeposit); + +/** Calls adjustLPTokens() and adjusts deposit or withdraw amounts if + * the adjusted LP tokens are less than the provided LP tokens. + * @param amountBalance asset1 pool balance + * @param amount asset1 to deposit or withdraw + * @param amount2 asset2 to deposit or withdraw + * @param lptAMMBalance LPT AMM Balance + * @param lpTokens LP tokens to deposit or withdraw + * @param tfee trading fee in basis points + * @param isDeposit true if deposit, false if withdraw + * @return + */ +std::tuple, STAmount> +adjustAmountsByLPTokens( + STAmount const& amountBalance, + STAmount const& amount, + std::optional const& amount2, + STAmount const& lptAMMBalance, + STAmount const& lpTokens, + std::uint16_t tfee, + bool isDeposit); + +/** Positive solution for quadratic equation: + * x = (-b + sqrt(b**2 + 4*a*c))/(2*a) + */ +Number +solveQuadraticEq(Number const& a, Number const& b, Number const& c); + +} // namespace ripple + +#endif // RIPPLE_APP_MISC_AMMHELPERS_H_INCLUDED diff --git a/src/ripple/app/misc/AMMUtils.h b/src/ripple/app/misc/AMMUtils.h new file mode 100644 index 00000000000..1718df496b8 --- /dev/null +++ b/src/ripple/app/misc/AMMUtils.h @@ -0,0 +1,98 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== +#ifndef RIPPLE_APP_MISC_AMMUTILS_H_INLCUDED +#define RIPPLE_APP_MISC_AMMUTILS_H_INLCUDED + +#include +#include +#include +#include +#include +#include + +namespace ripple { + +class ReadView; +class ApplyView; +class Sandbox; +class NetClock; + +/** Get AMM pool balances. + */ +std::pair +ammPoolHolds( + ReadView const& view, + AccountID const& ammAccountID, + Issue const& issue1, + Issue const& issue2, + FreezeHandling freezeHandling, + beast::Journal const j); + +/** Get AMM pool and LP token balances. If both optIssue are + * provided then they are used as the AMM token pair issues. + * Otherwise the missing issues are fetched from ammSle. 
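 * (Editorial note, not part of the original header: on success the returned
 * tuple is ordered as {balance of the first resolved issue, balance of the
 * second resolved issue, outstanding LPToken balance}; when only one
 * optIssue is supplied, that issue's balance comes first.)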
+ */ +Expected, TER> +ammHolds( + ReadView const& view, + SLE const& ammSle, + std::optional const& optIssue1, + std::optional const& optIssue2, + FreezeHandling freezeHandling, + beast::Journal const j); + +/** Get the balance of LP tokens. + */ +STAmount +ammLPHolds( + ReadView const& view, + Currency const& cur1, + Currency const& cur2, + AccountID const& ammAccount, + AccountID const& lpAccount, + beast::Journal const j); + +STAmount +ammLPHolds( + ReadView const& view, + SLE const& ammSle, + AccountID const& lpAccount, + beast::Journal const j); + +/** Get AMM trading fee for the given account. The fee is discounted + * if the account is the auction slot owner or one of the slot's authorized + * accounts. + */ +std::uint16_t +getTradingFee( + ReadView const& view, + SLE const& ammSle, + AccountID const& account); + +/** Returns total amount held by AMM for the given token. + */ +STAmount +ammAccountHolds( + ReadView const& view, + AccountID const& ammAccountID, + Issue const& issue); + +} // namespace ripple + +#endif // RIPPLE_APP_MISC_AMMUTILS_H_INLCUDED diff --git a/src/ripple/app/misc/impl/AMMHelpers.cpp b/src/ripple/app/misc/impl/AMMHelpers.cpp new file mode 100644 index 00000000000..736743eaaf7 --- /dev/null +++ b/src/ripple/app/misc/impl/AMMHelpers.cpp @@ -0,0 +1,206 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include + +namespace ripple { + +STAmount +ammLPTokens( + STAmount const& asset1, + STAmount const& asset2, + Issue const& lptIssue) +{ + auto const tokens = root2(asset1 * asset2); + return toSTAmount(lptIssue, tokens); +} + +/* + * Equation 3: + * t = T * [(b/B - (sqrt(f2**2 - b/(B*f1)) - f2)) / + * (1 + sqrt(f2**2 - b/(B*f1)) - f2)] + * where f1 = 1 - tfee, f2 = (1 - tfee/2)/f1 + */ +STAmount +lpTokensIn( + STAmount const& asset1Balance, + STAmount const& asset1Deposit, + STAmount const& lptAMMBalance, + std::uint16_t tfee) +{ + auto const f1 = feeMult(tfee); + auto const f2 = feeMultHalf(tfee) / f1; + Number const r = asset1Deposit / asset1Balance; + auto const c = root2(f2 * f2 + r / f1) - f2; + auto const t = lptAMMBalance * (r - c) / (1 + c); + return toSTAmount(lptAMMBalance.issue(), t); +} + +/* Equation 4 solves equation 3 for b: + * Let f1 = 1 - tfee, f2 = (1 - tfee/2)/f1, t1 = t/T, t2 = 1 + t1, R = b/B + * then + * t1 = [R - sqrt(f2**2 + R/f1) + f2] / [1 + sqrt(f2**2 + R/f1] - f2] => + * sqrt(f2**2 + R/f1)*(t1 + 1) = R + f2 + t1*f2 - t1 => + * sqrt(f2**2 + R/f1)*t2 = R + t2*f2 - t1 => + * sqrt(f2**2 + R/f1) = R/t2 + f2 - t1/t2, let d = f2 - t1/t2 => + * sqrt(f2**2 + R/f1) = R/t2 + d => + * f2**2 + R/f1 = (R/t2)**2 +2*d*R/t2 + d**2 => + * (R/t2)**2 + R*(2*d/t2 - 1/f1) + d**2 - f2**2 = 0 + */ +STAmount +ammAssetIn( + STAmount const& asset1Balance, + STAmount const& lptAMMBalance, + STAmount const& lpTokens, + std::uint16_t tfee) +{ + auto const f1 = feeMult(tfee); + auto const f2 = feeMultHalf(tfee) / f1; + auto const t1 = lpTokens / lptAMMBalance; + auto const t2 = 1 + t1; + auto const d = f2 - t1 / t2; + auto const a = 1 / (t2 * t2); + auto const b = 2 * d / t2 - 1 / f1; + auto const c = d * d - f2 * f2; + return toSTAmount( + asset1Balance.issue(), asset1Balance * solveQuadraticEq(a, b, c)); +} + +/* Equation 7: + * t = T * (c - sqrt(c**2 - 4*R))/2 + * where R = b/B, c = R*fee + 2 - fee + */ +STAmount +lpTokensOut( + STAmount const& asset1Balance, + STAmount const& asset1Withdraw, + STAmount const& lptAMMBalance, + std::uint16_t tfee) +{ + Number const fr = asset1Withdraw / asset1Balance; + auto const f1 = getFee(tfee); + auto const c = fr * f1 + 2 - f1; + auto const t = lptAMMBalance * (c - root2(c * c - 4 * fr)) / 2; + return toSTAmount(lptAMMBalance.issue(), t); +} + +/* Equation 8 solves equation 7 for b: + * c - 2*t/T = sqrt(c**2 - 4*R) => + * c**2 - 4*c*t/T + 4*t**2/T**2 = c**2 - 4*R => + * -4*c*t/T + 4*t**2/T**2 = -4*R => + * -c*t/T + t**2/T**2 = -R -=> + * substitute c = R*f + 2 - f => + * -(t/T)*(R*f + 2 - f) + (t/T)**2 = -R, let t1 = t/T => + * -t1*R*f -2*t1 +t1*f +t1**2 = -R => + * R = (t1**2 + t1*(f - 2)) / (t1*f - 1) + */ +STAmount +withdrawByTokens( + STAmount const& assetBalance, + STAmount const& lptAMMBalance, + STAmount const& lpTokens, + std::uint16_t tfee) +{ + auto const f = getFee(tfee); + Number const t1 = lpTokens / lptAMMBalance; + auto const b = assetBalance * (t1 * t1 - t1 * (2 - f)) / (t1 * f - 1); + return toSTAmount(assetBalance.issue(), b); +} + +Number +square(Number const& n) +{ + return n * n; +} + +STAmount +adjustLPTokens( + STAmount const& lptAMMBalance, + STAmount const& lpTokens, + bool isDeposit) +{ + // Force rounding downward to ensure adjusted tokens are less or equal + // to requested tokens. 
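    // (Editorial illustration, not part of the original source: the amount
    // type keeps 16 significant digits, so when lptAMMBalance is much larger
    // than lpTokens the sum lptAMMBalance + lpTokens silently drops the
    // low-order digits of lpTokens.  Evaluating
    //     (lptAMMBalance + lpTokens) - lptAMMBalance
    // for a deposit -- or the mirrored expression below for a withdrawal --
    // therefore yields the token amount the LP balance can actually absorb,
    // which is what this function returns.)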
+ saveNumberRoundMode rm(Number::setround(Number::rounding_mode::downward)); + if (isDeposit) + return (lptAMMBalance + lpTokens) - lptAMMBalance; + return (lpTokens - lptAMMBalance) + lptAMMBalance; +} + +std::tuple, STAmount> +adjustAmountsByLPTokens( + STAmount const& amountBalance, + STAmount const& amount, + std::optional const& amount2, + STAmount const& lptAMMBalance, + STAmount const& lpTokens, + std::uint16_t tfee, + bool isDeposit) +{ + auto const lpTokensActual = + adjustLPTokens(lptAMMBalance, lpTokens, isDeposit); + + if (lpTokensActual == beast::zero) + { + auto const amount2Opt = + amount2 ? std::make_optional(STAmount{}) : std::nullopt; + return std::make_tuple(STAmount{}, amount2Opt, lpTokensActual); + } + + if (lpTokensActual < lpTokens) + { + // Equal trade + if (amount2) + { + Number const fr = lpTokensActual / lpTokens; + auto const amountActual = toSTAmount(amount.issue(), fr * amount); + auto const amount2Actual = + toSTAmount(amount2->issue(), fr * *amount2); + return std::make_tuple( + amountActual < amount ? amountActual : amount, + amount2Actual < amount2 ? amount2Actual : amount2, + lpTokensActual); + } + + // Single trade + auto const amountActual = [&]() { + if (isDeposit) + return ammAssetIn( + amountBalance, lptAMMBalance, lpTokensActual, tfee); + else + return withdrawByTokens( + amountBalance, lptAMMBalance, lpTokens, tfee); + }(); + return amountActual < amount + ? std::make_tuple(amountActual, std::nullopt, lpTokensActual) + : std::make_tuple(amount, std::nullopt, lpTokensActual); + } + + assert(lpTokensActual == lpTokens); + + return {amount, amount2, lpTokensActual}; +} + +Number +solveQuadraticEq(Number const& a, Number const& b, Number const& c) +{ + return (-b + root2(b * b - 4 * a * c)) / (2 * a); +} + +} // namespace ripple diff --git a/src/ripple/app/misc/impl/AMMUtils.cpp b/src/ripple/app/misc/impl/AMMUtils.cpp new file mode 100644 index 00000000000..7156c77f21a --- /dev/null +++ b/src/ripple/app/misc/impl/AMMUtils.cpp @@ -0,0 +1,191 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== +#include +#include +#include +#include +#include +#include + +namespace ripple { + +std::pair +ammPoolHolds( + ReadView const& view, + AccountID const& ammAccountID, + Issue const& issue1, + Issue const& issue2, + FreezeHandling freezeHandling, + beast::Journal const j) +{ + auto const assetInBalance = + accountHolds(view, ammAccountID, issue1, freezeHandling, j); + auto const assetOutBalance = + accountHolds(view, ammAccountID, issue2, freezeHandling, j); + return std::make_pair(assetInBalance, assetOutBalance); +} + +Expected, TER> +ammHolds( + ReadView const& view, + SLE const& ammSle, + std::optional const& optIssue1, + std::optional const& optIssue2, + FreezeHandling freezeHandling, + beast::Journal const j) +{ + auto const issues = [&]() -> std::optional> { + auto const issue1 = ammSle[sfAsset]; + auto const issue2 = ammSle[sfAsset2]; + if (optIssue1 && optIssue2) + { + if (invalidAMMAssetPair( + *optIssue1, + *optIssue2, + std::make_optional(std::make_pair(issue1, issue2)))) + { + JLOG(j.debug()) << "ammHolds: Invalid optIssue1 or optIssue2 " + << *optIssue1 << " " << *optIssue2; + return std::nullopt; + } + return std::make_optional(std::make_pair(*optIssue1, *optIssue2)); + } + auto const singleIssue = + [&issue1, &issue2, &j]( + Issue checkIssue, + const char* label) -> std::optional> { + if (checkIssue == issue1) + return std::make_optional(std::make_pair(issue1, issue2)); + else if (checkIssue == issue2) + return std::make_optional(std::make_pair(issue2, issue1)); + JLOG(j.debug()) + << "ammHolds: Invalid " << label << " " << checkIssue; + return std::nullopt; + }; + if (optIssue1) + { + return singleIssue(*optIssue1, "optIssue1"); + } + else if (optIssue2) + { + return singleIssue(*optIssue2, "optIssue2"); + } + return std::make_optional(std::make_pair(issue1, issue2)); + }(); + if (!issues) + return Unexpected(tecAMM_INVALID_TOKENS); + auto const [asset1, asset2] = ammPoolHolds( + view, + ammSle.getAccountID(sfAccount), + issues->first, + issues->second, + freezeHandling, + j); + return std::make_tuple(asset1, asset2, ammSle[sfLPTokenBalance]); +} + +STAmount +ammLPHolds( + ReadView const& view, + Currency const& cur1, + Currency const& cur2, + AccountID const& ammAccount, + AccountID const& lpAccount, + beast::Journal const j) +{ + return accountHolds( + view, + lpAccount, + ammLPTCurrency(cur1, cur2), + ammAccount, + FreezeHandling::fhZERO_IF_FROZEN, + j); +} + +STAmount +ammLPHolds( + ReadView const& view, + SLE const& ammSle, + AccountID const& lpAccount, + beast::Journal const j) +{ + return ammLPHolds( + view, + ammSle[sfAsset].currency, + ammSle[sfAsset2].currency, + ammSle[sfAccount], + lpAccount, + j); +} + +std::uint16_t +getTradingFee(ReadView const& view, SLE const& ammSle, AccountID const& account) +{ + using namespace std::chrono; + if (ammSle.isFieldPresent(sfAuctionSlot)) + { + auto const& auctionSlot = + static_cast(ammSle.peekAtField(sfAuctionSlot)); + // Not expired + if (auto const expiration = auctionSlot[~sfExpiration]; + duration_cast( + view.info().parentCloseTime.time_since_epoch()) + .count() < expiration) + { + if (auctionSlot[~sfAccount] == account) + return auctionSlot[sfDiscountedFee]; + if (auctionSlot.isFieldPresent(sfAuthAccounts)) + { + for (auto const& acct : + auctionSlot.getFieldArray(sfAuthAccounts)) + if (acct[~sfAccount] == account) + return auctionSlot[sfDiscountedFee]; + } + } + } + return ammSle[sfTradingFee]; +} + +STAmount +ammAccountHolds( + 
ReadView const& view, + AccountID const& ammAccountID, + Issue const& issue) +{ + if (isXRP(issue)) + { + if (auto const sle = view.read(keylet::account(ammAccountID))) + return (*sle)[sfBalance]; + } + else if (auto const sle = view.read( + keylet::line(ammAccountID, issue.account, issue.currency)); + sle && + !isFrozen(view, ammAccountID, issue.currency, issue.account)) + { + auto amount = (*sle)[sfBalance]; + if (ammAccountID > issue.account) + amount.negate(); + amount.setIssuer(issue.account); + return amount; + } + + return STAmount{issue}; +} + +} // namespace ripple diff --git a/src/ripple/app/paths/AMMContext.h b/src/ripple/app/paths/AMMContext.h new file mode 100644 index 00000000000..06835189bb7 --- /dev/null +++ b/src/ripple/app/paths/AMMContext.h @@ -0,0 +1,119 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_APP_PATHS_AMMCONTEXT_H_INCLUDED +#define RIPPLE_APP_PATHS_AMMCONTEXT_H_INCLUDED + +#include + +#include + +namespace ripple { + +/** Maintains AMM info per overall payment engine execution and + * individual iteration. + * Only one instance of this class is created in Flow.cpp::flow(). + * The reference is percolated through calls to AMMLiquidity class, + * which handles AMM offer generation. + */ +class AMMContext +{ +public: + // Restrict number of AMM offers. If this restriction is removed + // then need to restrict in some other way because AMM offers are + // not counted in the BookStep offer counter. + constexpr static std::uint8_t MaxIterations = 30; + +private: + // Tx account owner is required to get the AMM trading fee in BookStep + AccountID account_; + // true if payment has multiple paths + bool multiPath_{false}; + // Is true if AMM offer is consumed during a payment engine iteration. 
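+    // Set by setAMMUsed() when the synthetic AMM offer is consumed; update()
+    // then counts at most one AMM iteration per payment engine pass, and
+    // clear() drops the flag again at the start of an iteration, since a
+    // strand execution may fail.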
+ bool ammUsed_{false}; + // Counter of payment engine iterations with consumed AMM + std::uint16_t ammIters_{0}; + +public: + AMMContext(AccountID const& account, bool multiPath) + : account_(account), multiPath_(multiPath) + { + } + ~AMMContext() = default; + AMMContext(AMMContext const&) = delete; + AMMContext& + operator=(AMMContext const&) = delete; + + bool + multiPath() const + { + return multiPath_; + } + + void + setMultiPath(bool fs) + { + multiPath_ = fs; + } + + void + setAMMUsed() + { + ammUsed_ = true; + } + + void + update() + { + if (ammUsed_) + ++ammIters_; + ammUsed_ = false; + } + + bool + maxItersReached() const + { + return ammIters_ >= MaxIterations; + } + + std::uint16_t + curIters() const + { + return ammIters_; + } + + AccountID + account() const + { + return account_; + } + + /** Strand execution may fail. Reset the flag at the start + * of each payment engine iteration. + */ + void + clear() + { + ammUsed_ = false; + } +}; + +} // namespace ripple + +#endif // RIPPLE_APP_PATHS_AMMCONTEXT_H_INCLUDED diff --git a/src/ripple/app/paths/AMMLiquidity.h b/src/ripple/app/paths/AMMLiquidity.h new file mode 100644 index 00000000000..7757bd4684d --- /dev/null +++ b/src/ripple/app/paths/AMMLiquidity.h @@ -0,0 +1,148 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_APP_TX_AMMLIQUIDITY_H_INCLUDED +#define RIPPLE_APP_TX_AMMLIQUIDITY_H_INCLUDED + +#include "ripple/app/misc/AMMHelpers.h" +#include "ripple/app/misc/AMMUtils.h" +#include "ripple/app/paths/AMMContext.h" +#include "ripple/basics/Log.h" +#include "ripple/ledger/ReadView.h" +#include "ripple/ledger/View.h" +#include "ripple/protocol/Quality.h" +#include "ripple/protocol/STLedgerEntry.h" + +namespace ripple { + +template +class AMMOffer; + +/** AMMLiquidity class provides AMM offers to BookStep class. + * The offers are generated in two ways. If there are multiple + * paths specified to the payment transaction then the offers + * are generated based on the Fibonacci sequence with + * a limited number of payment engine iterations consuming AMM offers. + * These offers behave the same way as CLOB offers in that if + * there is a limiting step, then the offers are adjusted + * based on their quality. + * If there is only one path specified in the payment transaction + * then the offers are generated based on the competing CLOB offer + * quality. In this case the offer's size is set in such a way + * that the new AMM's pool spot price quality is equal to the CLOB's + * offer quality. 
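+ * As a rough, purely illustrative example: with multiple paths and initial
+ * pool balances of 10,000 in / 10,000 out, the first generated offer draws
+ * about 0.025% of the input-side pool (InitialFibSeqPct below), and later
+ * offers scale that seed by 1, 2, 3, 5, 8, ... for up to
+ * AMMContext::MaxIterations offer-consuming iterations.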
+ */ +template +class AMMLiquidity +{ +private: + inline static const Number InitialFibSeqPct = Number(5) / 20000; + AMMContext& ammContext_; + AccountID const ammAccountID_; + std::uint32_t const tradingFee_; + Issue const issueIn_; + Issue const issueOut_; + // Initial AMM pool balances + TAmounts const initialBalances_; + beast::Journal const j_; + +public: + AMMLiquidity( + ReadView const& view, + AccountID const& ammAccountID, + std::uint32_t tradingFee, + Issue const& in, + Issue const& out, + AMMContext& ammContext, + beast::Journal j); + ~AMMLiquidity() = default; + AMMLiquidity(AMMLiquidity const&) = delete; + AMMLiquidity& + operator=(AMMLiquidity const&) = delete; + + /** Generate AMM offer. Returns nullopt if clobQuality is provided + * and it is better than AMM offer quality. Otherwise returns AMM offer. + * If clobQuality is provided then AMM offer size is set based on the + * quality. + */ + std::optional> + getOffer(ReadView const& view, std::optional const& clobQuality) + const; + + AccountID const& + ammAccount() const + { + return ammAccountID_; + } + + bool + multiPath() const + { + return ammContext_.multiPath(); + } + + std::uint32_t + tradingFee() const + { + return tradingFee_; + } + + AMMContext& + context() const + { + return ammContext_; + } + + Issue const& + issueIn() const + { + return issueIn_; + } + + Issue const& + issueOut() const + { + return issueOut_; + } + +private: + /** Fetches current AMM balances. + */ + TAmounts + fetchBalances(ReadView const& view) const; + + /** Generate AMM offers with the offer size based on Fibonacci sequence. + * The sequence corresponds to the payment engine iterations with AMM + * liquidity. Iterations that don't consume AMM offers don't count. + * The number of iterations with AMM offers is limited. + * If the generated offer exceeds the pool balance then the function + * throws overflow exception. + */ + TAmounts + generateFibSeqOffer(TAmounts const& balances) const; + + /** Generate max offer + */ + AMMOffer + maxOffer(TAmounts const& balances) const; +}; + +} // namespace ripple + +#endif // RIPPLE_APP_TX_AMMLIQUIDITY_H_INCLUDED diff --git a/src/ripple/app/paths/AMMOffer.h b/src/ripple/app/paths/AMMOffer.h new file mode 100644 index 00000000000..10e6017dd96 --- /dev/null +++ b/src/ripple/app/paths/AMMOffer.h @@ -0,0 +1,149 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#ifndef RIPPLE_APP_AMMOFFER_H_INCLUDED +#define RIPPLE_APP_AMMOFFER_H_INCLUDED + +#include +#include +#include +#include + +namespace ripple { + +template +class AMMLiquidity; +class QualityFunction; + +/** Represents synthetic AMM offer in BookStep. AMMOffer mirrors TOffer + * methods for use in generic BookStep methods. AMMOffer amounts + * are changed indirectly in BookStep limiting steps. + */ +template +class AMMOffer +{ +private: + AMMLiquidity const& ammLiquidity_; + // Initial offer amounts. It is fibonacci seq generated for multi-path. + // If the offer size is set based on the competing CLOB offer then + // the AMM offer size is such that if the offer is consumed then + // the updated AMM pool SP quality is going to be equal to competing + // CLOB offer quality. If there is no competing CLOB offer then + // the initial size is set to in=cMax[Native,Value],balances.out. + // While this is not a "real" offer it simulates the case of + // the swap out of the entire side of the pool, in which case + // the swap in amount is infinite. + TAmounts const amounts_; + // If seated then current pool balances. Used in one-path limiting steps + // to swap in/out. + std::optional> const balances_; + // The Spot Price quality if balances != amounts + // else the amounts quality + Quality const quality_; + // AMM offer can be consumed once at a given iteration + bool consumed_; + +public: + AMMOffer( + AMMLiquidity const& ammLiquidity, + TAmounts const& amounts, + std::optional> const& balances, + Quality const& quality); + + Quality + quality() const noexcept + { + return quality_; + } + + Issue const& + issueIn() const; + + Issue const& + issueOut() const; + + AccountID const& + owner() const; + + std::optional + key() const + { + return std::nullopt; + } + + TAmounts const& + amount() const; + + void + consume(ApplyView& view, TAmounts const& consumed); + + bool + fully_consumed() const + { + return consumed_; + } + + /** Limit out of the provided offer. If one-path then swapOut + * using current balances. If multi-path then ceil_out using + * current quality. + */ + TAmounts + limitOut( + TAmounts const& offrAmt, + TOut const& limit, + bool fixReducedOffers, + bool roundUp) const; + + /** Limit in of the provided offer. If one-path then swapIn + * using current balances. If multi-path then ceil_in using + * current quality. + */ + TAmounts + limitIn(TAmounts const& offrAmt, TIn const& limit) const; + + QualityFunction + getQualityFunc() const; + + /** Send funds without incurring the transfer fee + */ + template + static TER + send(Args&&... 
args) + { + return accountSend(std::forward(args)..., WaiveTransferFee::Yes); + } + + bool + isFunded() const + { + // AMM offer is fully funded by the pool + return true; + } + + static std::pair + adjustRates(std::uint32_t ofrInRate, std::uint32_t ofrOutRate) + { + // AMM doesn't pay transfer fee on Payment tx + return {ofrInRate, QUALITY_ONE}; + } +}; + +} // namespace ripple + +#endif // RIPPLE_APP_AMMOFFER_H_INCLUDED diff --git a/src/ripple/app/paths/Flow.cpp b/src/ripple/app/paths/Flow.cpp index f177cfc1116..3d060fdc6bd 100644 --- a/src/ripple/app/paths/Flow.cpp +++ b/src/ripple/app/paths/Flow.cpp @@ -17,6 +17,7 @@ */ //============================================================================== +#include #include #include #include @@ -84,6 +85,8 @@ flow( if (sendMax) sendMaxIssue = sendMax->issue(); + AMMContext ammContext(src, false); + // convert the paths to a collection of strands. Each strand is the // collection of account->account steps and book steps that may be used in // this payment. @@ -98,6 +101,7 @@ flow( defaultPaths, ownerPaysTransferFee, offerCrossing, + ammContext, j); if (toStrandsTer != tesSUCCESS) @@ -107,6 +111,8 @@ flow( return result; } + ammContext.setMultiPath(strands.size() > 1); + if (j.trace()) { j.trace() << "\nsrc: " << src << "\ndst: " << dst @@ -145,6 +151,7 @@ flow( limitQuality, sendMax, j, + ammContext, flowDebugInfo)); } @@ -163,6 +170,7 @@ flow( limitQuality, sendMax, j, + ammContext, flowDebugInfo)); } @@ -181,6 +189,7 @@ flow( limitQuality, sendMax, j, + ammContext, flowDebugInfo)); } @@ -198,6 +207,7 @@ flow( limitQuality, sendMax, j, + ammContext, flowDebugInfo)); } diff --git a/src/ripple/app/paths/impl/AMMLiquidity.cpp b/src/ripple/app/paths/impl/AMMLiquidity.cpp new file mode 100644 index 00000000000..3f22ebacec5 --- /dev/null +++ b/src/ripple/app/paths/impl/AMMLiquidity.cpp @@ -0,0 +1,223 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== +#include + +#include + +namespace ripple { + +template +AMMLiquidity::AMMLiquidity( + ReadView const& view, + AccountID const& ammAccountID, + std::uint32_t tradingFee, + Issue const& in, + Issue const& out, + AMMContext& ammContext, + beast::Journal j) + : ammContext_(ammContext) + , ammAccountID_(ammAccountID) + , tradingFee_(tradingFee) + , issueIn_(in) + , issueOut_(out) + , initialBalances_{fetchBalances(view)} + , j_(j) +{ +} + +template +TAmounts +AMMLiquidity::fetchBalances(ReadView const& view) const +{ + auto const assetIn = ammAccountHolds(view, ammAccountID_, issueIn_); + auto const assetOut = ammAccountHolds(view, ammAccountID_, issueOut_); + // This should not happen. + if (assetIn < beast::zero || assetOut < beast::zero) + Throw("AMMLiquidity: invalid balances"); + + return TAmounts{get(assetIn), get(assetOut)}; +} + +template +TAmounts +AMMLiquidity::generateFibSeqOffer( + TAmounts const& balances) const +{ + TAmounts cur{}; + + cur.in = toAmount( + getIssue(balances.in), + InitialFibSeqPct * initialBalances_.in, + Number::rounding_mode::upward); + cur.out = swapAssetIn(initialBalances_, cur.in, tradingFee_); + + if (ammContext_.curIters() == 0) + return cur; + + // clang-format off + constexpr std::uint32_t fib[AMMContext::MaxIterations] = { + 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, + 1597, 2584, 4181, 6765, 10946, 17711, 28657, 46368, 75025, 121393, + 196418, 317811, 514229, 832040, 1346269}; + // clang-format on + + assert(!ammContext_.maxItersReached()); + + cur.out = toAmount( + getIssue(balances.out), + cur.out * fib[ammContext_.curIters() - 1], + Number::rounding_mode::downward); + // swapAssetOut() returns negative in this case + if (cur.out >= balances.out) + Throw( + "AMMLiquidity: generateFibSeqOffer exceeds the balance"); + + cur.in = swapAssetOut(balances, cur.out, tradingFee_); + + return cur; +} + +template +constexpr T +maxAmount() +{ + if constexpr (std::is_same_v) + return XRPAmount(STAmount::cMaxNative); + else if constexpr (std::is_same_v) + return IOUAmount(STAmount::cMaxValue / 2, STAmount::cMaxOffset); + else if constexpr (std::is_same_v) + return STAmount(STAmount::cMaxValue / 2, STAmount::cMaxOffset); +} + +template +AMMOffer +AMMLiquidity::maxOffer(TAmounts const& balances) const +{ + return AMMOffer( + *this, + {maxAmount(), + swapAssetIn(balances, maxAmount(), tradingFee_)}, + balances, + Quality{balances}); +} + +template +std::optional> +AMMLiquidity::getOffer( + ReadView const& view, + std::optional const& clobQuality) const +{ + // Can't generate more offers if multi-path. + if (ammContext_.maxItersReached()) + return std::nullopt; + + auto const balances = fetchBalances(view); + + // Frozen accounts + if (balances.in == beast::zero || balances.out == beast::zero) + { + JLOG(j_.debug()) << "AMMLiquidity::getOffer, frozen accounts"; + return std::nullopt; + } + + JLOG(j_.trace()) << "AMMLiquidity::getOffer balances " + << to_string(initialBalances_.in) << " " + << to_string(initialBalances_.out) << " new balances " + << to_string(balances.in) << " " + << to_string(balances.out); + + // Can't generate AMM with a better quality than CLOB's + // quality if AMM's Spot Price quality is less than CLOB quality or is + // within a threshold. + // Spot price quality (SPQ) is calculated within some precision threshold. 
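+    // (Here the threshold is a relative distance of 1e-7 between SPQ and
+    // clobQuality, checked via withinRelativeDistance() below.)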
+ // On the next iteration, after SPQ is changed, the new SPQ might be close + // to the requested clobQuality but not exactly and potentially SPQ may keep + // on approaching clobQuality for many iterations. Checking for the quality + // threshold prevents this scenario. + if (auto const spotPriceQ = Quality{balances}; clobQuality && + (spotPriceQ <= clobQuality || + withinRelativeDistance(spotPriceQ, *clobQuality, Number(1, -7)))) + { + JLOG(j_.trace()) << "AMMLiquidity::getOffer, higher clob quality"; + return std::nullopt; + } + + auto offer = [&]() -> std::optional> { + try + { + if (ammContext_.multiPath()) + { + auto const amounts = generateFibSeqOffer(balances); + if (clobQuality && Quality{amounts} < clobQuality) + return std::nullopt; + return AMMOffer( + *this, amounts, std::nullopt, Quality{amounts}); + } + else if (!clobQuality) + { + // If there is no CLOB to compare against, return the largest + // amount, which doesn't overflow. The size is going to be + // changed in BookStep per either deliver amount limit, or + // sendmax, or available output or input funds. + return maxOffer(balances); + } + else if ( + auto const amounts = + changeSpotPriceQuality(balances, *clobQuality, tradingFee_)) + { + return AMMOffer( + *this, *amounts, balances, Quality{*amounts}); + } + } + catch (std::overflow_error const& e) + { + JLOG(j_.error()) << "AMMLiquidity::getOffer overflow " << e.what(); + return maxOffer(balances); + } + catch (std::exception const& e) + { + JLOG(j_.error()) << "AMMLiquidity::getOffer exception " << e.what(); + } + return std::nullopt; + }(); + + if (offer) + { + if (offer->amount().in > beast::zero && + offer->amount().out > beast::zero) + { + JLOG(j_.trace()) + << "AMMLiquidity::getOffer, created " + << to_string(offer->amount().in) << "/" << issueIn_ << " " + << to_string(offer->amount().out) << "/" << issueOut_; + return offer; + } + + JLOG(j_.error()) << "AMMLiquidity::getOffer, failed"; + } + + return std::nullopt; +} + +template class AMMLiquidity; +template class AMMLiquidity; +template class AMMLiquidity; +template class AMMLiquidity; + +} // namespace ripple diff --git a/src/ripple/app/paths/impl/AMMOffer.cpp b/src/ripple/app/paths/impl/AMMOffer.cpp new file mode 100644 index 00000000000..10b75b78565 --- /dev/null +++ b/src/ripple/app/paths/impl/AMMOffer.cpp @@ -0,0 +1,143 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2023 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//==============================================================================/ +#include + +#include +#include + +namespace ripple { + +template +AMMOffer::AMMOffer( + AMMLiquidity const& ammLiquidity, + TAmounts const& amounts, + std::optional> const& balances, + Quality const& quality) + : ammLiquidity_(ammLiquidity) + , amounts_(amounts) + , balances_(balances) + , quality_(quality) + , consumed_(false) +{ +} + +template +Issue const& +AMMOffer::issueIn() const +{ + return ammLiquidity_.issueIn(); +} + +template +Issue const& +AMMOffer::issueOut() const +{ + return ammLiquidity_.issueOut(); +} + +template +AccountID const& +AMMOffer::owner() const +{ + return ammLiquidity_.ammAccount(); +} + +template +TAmounts const& +AMMOffer::amount() const +{ + return amounts_; +} + +template +void +AMMOffer::consume( + ApplyView& view, + TAmounts const& consumed) +{ + // Consumed offer must be less or equal to the original + if (consumed.in > amounts_.in || consumed.out > amounts_.out) + Throw("Invalid consumed AMM offer."); + // AMM pool is updated when the amounts are transferred + // in BookStep::consumeOffer(). + + consumed_ = true; + + // Let the context know AMM offer is consumed + ammLiquidity_.context().setAMMUsed(); +} + +template +TAmounts +AMMOffer::limitOut( + TAmounts const& offrAmt, + TOut const& limit, + bool fixReducedOffers, + bool roundUp) const +{ + // Change the offer size proportionally to the original offer quality + // to keep the strands quality order unchanged. The taker pays slightly + // more for the offer in this case, which results in a slightly higher + // pool product than the original pool product. I.e. if the original + // pool is poolPays, poolGets and the offer is assetIn, assetOut then + // poolPays * poolGets < (poolPays - assetOut) * (poolGets + assetIn) + if (ammLiquidity_.multiPath()) + { + if (fixReducedOffers) + // It turns out that the ceil_out implementation has some slop in + // it. ceil_out_strict removes that slop. But removing that slop + // affects transaction outcomes, so the change must be made using + // an amendment. + return quality().ceil_out_strict(offrAmt, limit, roundUp); + return quality().ceil_out(offrAmt, limit); + } + // Change the offer size according to the conservation function. The offer + // quality is increased in this case, but it doesn't matter since there is + // only one path. + return {swapAssetOut(*balances_, limit, ammLiquidity_.tradingFee()), limit}; +} + +template +TAmounts +AMMOffer::limitIn( + TAmounts const& offrAmt, + TIn const& limit) const +{ + // See the comments above in limitOut(). 
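+    // Illustrative sketch with hypothetical numbers: for a single path with
+    // pool balances of 1000 in / 1000 out, a zero trading fee and an input
+    // limit of 100, the branch below returns {100, swapAssetIn(balances,
+    // 100, 0)} = {100, ~90.9}, i.e. out = poolOut * in / (poolIn + in), so
+    // the pool product poolIn * poolOut never decreases (and grows once a
+    // nonzero fee is charged).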
+ if (ammLiquidity_.multiPath()) + return quality().ceil_in(offrAmt, limit); + return {limit, swapAssetIn(*balances_, limit, ammLiquidity_.tradingFee())}; +} + +template +QualityFunction +AMMOffer::getQualityFunc() const +{ + if (ammLiquidity_.multiPath()) + return QualityFunction{quality(), QualityFunction::CLOBLikeTag{}}; + return QualityFunction{ + *balances_, ammLiquidity_.tradingFee(), QualityFunction::AMMTag{}}; +} + +template class AMMOffer; +template class AMMOffer; +template class AMMOffer; +template class AMMOffer; + +} // namespace ripple diff --git a/src/ripple/app/paths/impl/BookStep.cpp b/src/ripple/app/paths/impl/BookStep.cpp index 555d90fac8c..e82acbde817 100644 --- a/src/ripple/app/paths/impl/BookStep.cpp +++ b/src/ripple/app/paths/impl/BookStep.cpp @@ -17,7 +17,9 @@ */ //============================================================================== -#include +#include +#include +#include #include #include #include @@ -42,6 +44,8 @@ template class BookStep : public StepImp> { protected: + enum class OfferType { AMM, CLOB }; + uint32_t const maxOffersToConsume_; Book book_; AccountID strandSrc_; @@ -59,6 +63,10 @@ class BookStep : public StepImp> be partially consumed multiple times during a payment. */ std::uint32_t offersUsed_ = 0; + // If set, AMM liquidity might be available + // if AMM offer quality is better than CLOB offer + // quality or there is no CLOB offer. + std::optional> ammLiquidity_; beast::Journal const j_; struct Cache @@ -91,6 +99,15 @@ class BookStep : public StepImp> , ownerPaysTransferFee_(ctx.ownerPaysTransferFee) , j_(ctx.j) { + if (auto const ammSle = ctx.view.read(keylet::amm(in, out))) + ammLiquidity_.emplace( + ctx.view, + (*ammSle)[sfAccount], + getTradingFee(ctx.view, *ammSle, ctx.ammContext.account()), + in, + out, + ctx.ammContext, + ctx.j); } Book const& @@ -132,6 +149,9 @@ class BookStep : public StepImp> qualityUpperBound(ReadView const& v, DebtDirection prevStepDir) const override; + std::pair, DebtDirection> + getQualityFunc(ReadView const& v, DebtDirection prevStepDir) const override; + std::uint32_t offersUsed() const override; @@ -205,13 +225,36 @@ class BookStep : public StepImp> DebtDirection prevStepDebtDir, Callback& callback) const; + // Offer is either TOffer or AMMOffer + template