diff --git a/.travis.yml b/.travis.yml
index 311311567e9..744965c596c 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -408,7 +408,8 @@ cache:
     - $CACHE_DIR

 before_install:
-  - if [ "$(uname)" = "Darwin" ] ; then export NUM_PROCESSORS=$(sysctl -n hw.physicalcpu); else export NUM_PROCESSORS=$(nproc); fi
+  # NUM_PROCESSORS was set to 1 due to problems in parallel launch of unit tests on Mac platform
+  - if [ "$(uname)" = "Darwin" ] ; then export NUM_PROCESSORS=1; else export NUM_PROCESSORS=$(nproc); fi
   - echo "NUM PROC is ${NUM_PROCESSORS}"
   - if [ "$(uname)" = "Linux" ] ; then docker pull ${DOCKER_IMAGE}; fi
   - if [ "${MATRIX_EVAL}" != "" ] ; then eval "${MATRIX_EVAL}"; fi
diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake
index 6382cfe5990..5445b092540 100644
--- a/Builds/CMake/RippledCore.cmake
+++ b/Builds/CMake/RippledCore.cmake
@@ -844,6 +844,7 @@ target_sources (rippled PRIVATE
   #]===============================]
     src/test/nodestore/Backend_test.cpp
     src/test/nodestore/Basics_test.cpp
+    src/test/nodestore/DatabaseShard_test.cpp
     src/test/nodestore/Database_test.cpp
     src/test/nodestore/Timing_test.cpp
     src/test/nodestore/import_test.cpp
diff --git a/Builds/CMake/deps/Findlibarchive_pc.cmake b/Builds/CMake/deps/Findlibarchive_pc.cmake
index 421fed86e3c..8f248b28704 100644
--- a/Builds/CMake/deps/Findlibarchive_pc.cmake
+++ b/Builds/CMake/deps/Findlibarchive_pc.cmake
@@ -1,5 +1,5 @@
 find_package (PkgConfig REQUIRED)
-pkg_search_module (libarchive_PC QUIET libarchive>=3.3.3)
+pkg_search_module (libarchive_PC QUIET libarchive>=3.4.3)

 if(static)
   set(LIBARCHIVE_LIB libarchive.a)
diff --git a/Builds/CMake/deps/Findlz4.cmake b/Builds/CMake/deps/Findlz4.cmake
index 412b5a857ea..835f5989dfa 100644
--- a/Builds/CMake/deps/Findlz4.cmake
+++ b/Builds/CMake/deps/Findlz4.cmake
@@ -1,6 +1,6 @@
 find_package (PkgConfig)
 if (PKG_CONFIG_FOUND)
-  pkg_search_module (lz4_PC QUIET liblz4>=1.8)
+  pkg_search_module (lz4_PC QUIET liblz4>=1.9)
 endif ()

 if(static)
diff --git a/Builds/CMake/deps/Libarchive.cmake b/Builds/CMake/deps/Libarchive.cmake
index db2716f64ee..760f6403afd 100644
--- a/Builds/CMake/deps/Libarchive.cmake
+++ b/Builds/CMake/deps/Libarchive.cmake
@@ -29,7 +29,7 @@ if (NOT local_libarchive)
     endif ()
   else ()
     ## now try searching using the minimal find module that cmake provides
-    find_package(LibArchive 3.3.3 QUIET)
+    find_package(LibArchive 3.4.3 QUIET)
     if (LibArchive_FOUND)
       if (static)
         # find module doesn't find static libs currently, so we re-search
@@ -70,7 +70,7 @@ if (local_libarchive)
   ExternalProject_Add (libarchive
     PREFIX ${nih_cache_path}
     GIT_REPOSITORY https://github.com/libarchive/libarchive.git
-    GIT_TAG v3.3.3
+    GIT_TAG v3.4.3
     CMAKE_ARGS
       # passing the compiler seems to be needed for windows CI, sadly
       -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
diff --git a/Builds/CMake/deps/Lz4.cmake b/Builds/CMake/deps/Lz4.cmake
index cc3101f1cfc..ce42f2dcffb 100644
--- a/Builds/CMake/deps/Lz4.cmake
+++ b/Builds/CMake/deps/Lz4.cmake
@@ -21,7 +21,7 @@ else()
   ExternalProject_Add (lz4
     PREFIX ${nih_cache_path}
     GIT_REPOSITORY https://github.com/lz4/lz4.git
-    GIT_TAG v1.8.2
+    GIT_TAG v1.9.2
     SOURCE_SUBDIR contrib/cmake_unofficial
     CMAKE_ARGS
       -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
diff --git a/Builds/containers/gitlab-ci/pkgbuild.yml b/Builds/containers/gitlab-ci/pkgbuild.yml
index 2ab10852df3..701e6ba2a01 100644
--- a/Builds/containers/gitlab-ci/pkgbuild.yml
+++ b/Builds/containers/gitlab-ci/pkgbuild.yml
@@ -282,7 +282,7 @@ tag_bld_images:
   variables:
     docker_driver: overlay2
   image:
-    name:
docker:latest + name: docker:19.03.8 services: # workaround for TLS issues - consider going back # back to unversioned `dind` when issues are resolved diff --git a/src/ripple/nodestore/impl/DatabaseShardImp.cpp b/src/ripple/nodestore/impl/DatabaseShardImp.cpp index 8f555649992..d9a771d66d0 100644 --- a/src/ripple/nodestore/impl/DatabaseShardImp.cpp +++ b/src/ripple/nodestore/impl/DatabaseShardImp.cpp @@ -135,13 +135,12 @@ DatabaseShardImp::init() std::make_unique(app_, *this, shardIndex, j_)}; if (!shard->open(scheduler_, *ctx_)) { - if (!shard->isLegacy()) - return false; - - // Remove legacy shard + // Remove corrupted or legacy shard shard->removeOnDestroy(); JLOG(j_.warn()) - << "shard " << shardIndex << " removed, legacy shard"; + << "shard " << shardIndex << " removed, " + << (shard->isLegacy() ? "legacy" : "corrupted") + << " shard"; continue; } @@ -276,11 +275,11 @@ DatabaseShardImp::prepareShard(std::uint32_t shardIndex) // is greater or equal to the current shard. auto seqCheck = [&](std::uint32_t seq) { // seq will be greater than zero if valid - if (seq > earliestLedgerSeq() && shardIndex >= seqToShardIndex(seq)) + if (seq >= earliestLedgerSeq() && shardIndex >= seqToShardIndex(seq)) return fail("has an invalid index"); return true; }; - if (!seqCheck(app_.getLedgerMaster().getValidLedgerIndex()) || + if (!seqCheck(app_.getLedgerMaster().getValidLedgerIndex() + 1) || !seqCheck(app_.getLedgerMaster().getCurrentLedgerIndex())) { return false; @@ -1100,6 +1099,9 @@ DatabaseShardImp::initConfig(std::lock_guard&) ledgersPerShard_ = get(section, "ledgers_per_shard"); if (ledgersPerShard_ == 0 || ledgersPerShard_ % 256 != 0) return fail("'ledgers_per_shard' must be a multiple of 256"); + + earliestShardIndex_ = seqToShardIndex(earliestLedgerSeq()); + avgShardFileSz_ = ledgersPerShard_ * kilobytes(192); } // NuDB is the default and only supported permanent storage backend diff --git a/src/ripple/nodestore/impl/DatabaseShardImp.h b/src/ripple/nodestore/impl/DatabaseShardImp.h index 85aec3642d1..425353d404a 100644 --- a/src/ripple/nodestore/impl/DatabaseShardImp.h +++ b/src/ripple/nodestore/impl/DatabaseShardImp.h @@ -234,7 +234,7 @@ class DatabaseShardImp : public DatabaseShard std::uint32_t ledgersPerShard_ = ledgersPerShardDefault; // The earliest shard index - std::uint32_t const earliestShardIndex_; + std::uint32_t earliestShardIndex_; // Average storage space required by a shard (in bytes) std::uint64_t avgShardFileSz_; diff --git a/src/ripple/nodestore/impl/codec.h b/src/ripple/nodestore/impl/codec.h index 15d5f909266..21c68ff5843 100644 --- a/src/ripple/nodestore/impl/codec.h +++ b/src/ripple/nodestore/impl/codec.h @@ -51,13 +51,12 @@ lz4_decompress(void const* in, std::size_t in_size, BufferFactory&& bf) Throw("lz4 decompress: n == 0"); void* const out = bf(result.second); result.first = out; - if (LZ4_decompress_fast( + if (LZ4_decompress_safe( reinterpret_cast(in) + n, reinterpret_cast(out), - result.second) + - n != - in_size) - Throw("lz4 decompress: LZ4_decompress_fast"); + in_size - n, + result.second) != result.second) + Throw("lz4 decompress: LZ4_decompress_safe"); return result; } diff --git a/src/ripple/overlay/impl/OverlayImpl.cpp b/src/ripple/overlay/impl/OverlayImpl.cpp index 44912c9ce25..68e5fa3ce66 100644 --- a/src/ripple/overlay/impl/OverlayImpl.cpp +++ b/src/ripple/overlay/impl/OverlayImpl.cpp @@ -554,14 +554,9 @@ OverlayImpl::onPrepare() for (auto const& addr : addresses) { if (addr.port() == 0) - { - Throw( - "Port not specified for " - "address:" + - 
addr.to_string()); - } - - ips.push_back(to_string(addr)); + ips.push_back(to_string(addr.at_port(DEFAULT_PEER_PORT))); + else + ips.push_back(to_string(addr)); } std::string const base("config: "); @@ -577,8 +572,19 @@ OverlayImpl::onPrepare() [this]( std::string const& name, std::vector const& addresses) { - if (!addresses.empty()) - m_peerFinder->addFixedPeer(name, addresses); + std::vector ips; + ips.reserve(addresses.size()); + + for (auto& addr : addresses) + { + if (addr.port() == 0) + ips.emplace_back(addr.address(), DEFAULT_PEER_PORT); + else + ips.emplace_back(addr); + } + + if (!ips.empty()) + m_peerFinder->addFixedPeer(name, ips); }); } } @@ -1075,11 +1081,98 @@ OverlayImpl::processValidatorList( return true; } +bool +OverlayImpl::processHealth(http_request_type const& req, Handoff& handoff) +{ + if (req.target() != "/health") + return false; + boost::beast::http::response msg; + msg.version(req.version()); + msg.insert("Server", BuildInfo::getFullVersionString()); + msg.insert("Content-Type", "application/json"); + msg.insert("Connection", "close"); + + auto info = getServerInfo(); + + int last_validated_ledger_age = std::numeric_limits::max(); + if (info.isMember("validated_ledger")) + last_validated_ledger_age = info["validated_ledger"]["age"].asInt(); + bool amendment_blocked = false; + if (info.isMember("amendment_blocked")) + amendment_blocked = true; + int number_peers = info["peers"].asInt(); + std::string server_state = info["server_state"].asString(); + auto load_factor = info["load_factor"].asDouble(); + + enum { healthy, warning, critical }; + int health = healthy; + auto set_health = [&health](int state) { + if (health < state) + health = state; + }; + + if (last_validated_ledger_age >= 7) + { + msg.body()[jss::info]["validated_ledger"] = last_validated_ledger_age; + if (last_validated_ledger_age < 20) + set_health(warning); + else + set_health(critical); + } + + if (amendment_blocked) + { + msg.body()[jss::info]["amendment_blocked"] = true; + set_health(critical); + } + + if (number_peers <= 7) + { + msg.body()[jss::info]["peers"] = number_peers; + if (number_peers != 0) + set_health(warning); + else + set_health(critical); + } + + if (!(server_state == "full" || server_state == "validating" || + server_state == "proposing")) + { + msg.body()[jss::info]["server_state"] = server_state; + if (server_state == "syncing" || server_state == "tracking" || + server_state == "connected") + { + set_health(warning); + } + else + set_health(critical); + } + + if (load_factor > 100) + { + msg.body()[jss::info]["load_factor"] = load_factor; + if (load_factor < 1000) + set_health(warning); + else + set_health(critical); + } + + if (health != critical) + msg.result(boost::beast::http::status::ok); + else + msg.result(boost::beast::http::status::service_unavailable); + + msg.prepare_payload(); + handoff.response = std::make_shared(msg); + return true; +} + bool OverlayImpl::processRequest(http_request_type const& req, Handoff& handoff) { // Take advantage of || short-circuiting - return processCrawl(req, handoff) || processValidatorList(req, handoff); + return processCrawl(req, handoff) || processValidatorList(req, handoff) || + processHealth(req, handoff); } Overlay::PeerSequence diff --git a/src/ripple/overlay/impl/OverlayImpl.h b/src/ripple/overlay/impl/OverlayImpl.h index b36004d51fb..099b90556db 100644 --- a/src/ripple/overlay/impl/OverlayImpl.h +++ b/src/ripple/overlay/impl/OverlayImpl.h @@ -396,6 +396,14 @@ class OverlayImpl : public Overlay bool 
processValidatorList(http_request_type const& req, Handoff& handoff); + /** Handles health requests. Health returns information about the + health of the node. + + @return true if the request was handled. + */ + bool + processHealth(http_request_type const& req, Handoff& handoff); + /** Handles non-peer protocol requests. @return true if the request was handled. diff --git a/src/ripple/protocol/SystemParameters.h b/src/ripple/protocol/SystemParameters.h index dcadaab247c..a74155a6a32 100644 --- a/src/ripple/protocol/SystemParameters.h +++ b/src/ripple/protocol/SystemParameters.h @@ -61,4 +61,7 @@ static std::uint32_t constexpr XRP_LEDGER_EARLIEST_SEQ{32570}; } // namespace ripple +/** Default peer port (IANA registered) */ +inline std::uint16_t constexpr DEFAULT_PEER_PORT{2459}; + #endif diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index d484e67c0b0..9cf30d2945c 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.6.0-b6" +char const* const versionString = "1.6.0-b7" // clang-format on #if defined(DEBUG) || defined(SANITIZER) diff --git a/src/ripple/rpc/handlers/Connect.cpp b/src/ripple/rpc/handlers/Connect.cpp index 4c6cddbc817..5863fe5e4d8 100644 --- a/src/ripple/rpc/handlers/Connect.cpp +++ b/src/ripple/rpc/handlers/Connect.cpp @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -53,7 +54,7 @@ doConnect(RPC::JsonContext& context) if (context.params.isMember(jss::port)) iPort = context.params[jss::port].asInt(); else - iPort = 6561; + iPort = DEFAULT_PEER_PORT; auto ip = beast::IP::Endpoint::from_string(context.params[jss::ip].asString()); diff --git a/src/test/app/AccountTxPaging_test.cpp b/src/test/app/AccountTxPaging_test.cpp index ec2c66809d1..0169c5609c3 100644 --- a/src/test/app/AccountTxPaging_test.cpp +++ b/src/test/app/AccountTxPaging_test.cpp @@ -1871,9 +1871,7 @@ class AccountTxPaging_test : public beast::unit_test::suite if (!BEAST_EXPECT(status.error_code() == 0)) return; - if (!BEAST_EXPECT( - res.transactions().size() == - std::extent::value)) + if (!BEAST_EXPECT(res.transactions().size() == std::size(txCheck))) return; for (int i = 0; i < res.transactions().size(); ++i) { diff --git a/src/test/core/SociDB_test.cpp b/src/test/core/SociDB_test.cpp index 68cbd2a83bb..42c939ed07b 100644 --- a/src/test/core/SociDB_test.cpp +++ b/src/test/core/SociDB_test.cpp @@ -321,11 +321,8 @@ class SociDB_test final : public TestSuite LedgerSeq BIGINT UNSIGNED \ );", "CREATE INDEX SeqLedger ON Ledgers(LedgerSeq);"}; - int dbInitCount = std::extent::value; - for (int i = 0; i < dbInitCount; ++i) - { - s << dbInit[i]; - } + for (auto const c : dbInit) + s << c; char lh[65]; memset(lh, 'a', 64); lh[64] = '\0'; diff --git a/src/test/jtx/Env.h b/src/test/jtx/Env.h index 77a096cf30d..f06cfbf7a9c 100644 --- a/src/test/jtx/Env.h +++ b/src/test/jtx/Env.h @@ -325,8 +325,10 @@ class Env The Application network time is set to the close time of the resulting ledger. + + @return true if no error, false if error */ - void + bool close( NetClock::time_point closeTime, boost::optional consensusDelay = @@ -336,25 +338,29 @@ class Env The time is calculated as the duration from the previous ledger closing time. 
+ + @return true if no error, false if error */ template - void + bool close(std::chrono::duration const& elapsed) { // VFALCO Is this the correct time? - close(now() + elapsed); + return close(now() + elapsed); } /** Close and advance the ledger. The time is calculated as five seconds from the previous ledger closing time. + + @return true if no error, false if error */ - void + bool close() { // VFALCO Is this the correct time? - close(std::chrono::seconds(5)); + return close(std::chrono::seconds(5)); } /** Turn on JSON tracing. diff --git a/src/test/jtx/impl/Env.cpp b/src/test/jtx/impl/Env.cpp index 6d4f4f61fb7..a9b7c3430ff 100644 --- a/src/test/jtx/impl/Env.cpp +++ b/src/test/jtx/impl/Env.cpp @@ -107,13 +107,14 @@ Env::closed() return app().getLedgerMaster().getClosedLedger(); } -void +bool Env::close( NetClock::time_point closeTime, boost::optional consensusDelay) { // Round up to next distinguishable value using namespace std::chrono_literals; + bool res = true; closeTime += closed()->info().closeTimeResolution - 1s; timeKeeper().set(closeTime); // Go through the rpc interface unless we need to simulate @@ -122,10 +123,17 @@ Env::close( app().getOPs().acceptLedger(consensusDelay); else { - rpc("ledger_accept"); - // VFALCO No error check? + auto resp = rpc("ledger_accept"); + if (resp["result"]["status"] != std::string("success")) + { + JLOG(journal.error()) + << "Env::close() failed: " << resp["result"]["status"] + << std::endl; + res = false; + } } timeKeeper().set(closed()->info().closeTime); + return res; } void diff --git a/src/test/nodestore/DatabaseShard_test.cpp b/src/test/nodestore/DatabaseShard_test.cpp new file mode 100644 index 00000000000..7e0b746cb62 --- /dev/null +++ b/src/test/nodestore/DatabaseShard_test.cpp @@ -0,0 +1,1019 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { +namespace NodeStore { + +// Tests DatabaseShard class +// +class DatabaseShard_test : public TestBase +{ + static constexpr std::uint32_t maxSizeGb = 10; + static constexpr std::uint32_t ledgersPerShard = 256; + static constexpr std::uint32_t earliestSeq = ledgersPerShard + 1; + static constexpr std::uint32_t dataSizeMax = 4; + static constexpr std::uint32_t iniAmount = 1000000; + static constexpr std::uint32_t nTestShards = 4; + static constexpr std::chrono::seconds shardStoreTimeout = + std::chrono::seconds(60); + test::SuiteJournal journal_; + beast::temp_dir defNodeDir; + + struct TestData + { + /* ring used to generate pseudo-random sequence */ + beast::xor_shift_engine rng_; + /* number of shards to generate */ + int nShards_; + /* vector of accounts used to send test transactions */ + std::vector accounts_; + /* nAccounts_[i] is the number of these accounts existed before i-th + * ledger */ + std::vector nAccounts_; + /* payAccounts_[i][j] = {from, to} is the pair which consists of two + * number of acoounts: source and destinations, which participate in + * j-th payment on i-th ledger */ + std::vector>> payAccounts_; + /* xrpAmount_[i] is the amount for all payments on i-th ledger */ + std::vector xrpAmount_; + /* ledgers_[i] is the i-th ledger which contains the above described + * accounts and payments */ + std::vector> ledgers_; + + TestData( + std::uint64_t const seedValue, + int dataSize = dataSizeMax, + int nShards = 1) + : rng_(seedValue), nShards_(nShards) + { + std::uint32_t n = 0; + std::uint32_t nLedgers = ledgersPerShard * nShards; + + nAccounts_.reserve(nLedgers); + payAccounts_.reserve(nLedgers); + xrpAmount_.reserve(nLedgers); + + for (std::uint32_t i = 0; i < nLedgers; ++i) + { + int p; + if (n >= 2) + p = rand_int(rng_, 2 * dataSize); + else + p = 0; + + std::vector> pay; + pay.reserve(p); + + for (int j = 0; j < p; ++j) + { + int from, to; + do + { + from = rand_int(rng_, n - 1); + to = rand_int(rng_, n - 1); + } while (from == to); + + pay.push_back(std::make_pair(from, to)); + } + + n += !rand_int(rng_, nLedgers / dataSize); + + if (n > accounts_.size()) + { + char str[9]; + for (int j = 0; j < 8; ++j) + str[j] = 'a' + rand_int(rng_, 'z' - 'a'); + str[8] = 0; + accounts_.emplace_back(str); + } + + nAccounts_.push_back(n); + payAccounts_.push_back(std::move(pay)); + xrpAmount_.push_back(rand_int(rng_, 90) + 10); + } + } + + bool + isNewAccounts(int seq) + { + return nAccounts_[seq] > (seq ? 
nAccounts_[seq - 1] : 0); + } + + void + makeLedgerData(test::jtx::Env& env_, std::uint32_t seq) + { + using namespace test::jtx; + + if (isNewAccounts(seq)) + env_.fund(XRP(iniAmount), accounts_[nAccounts_[seq] - 1]); + + for (std::uint32_t i = 0; i < payAccounts_[seq].size(); ++i) + { + env_( + pay(accounts_[payAccounts_[seq][i].first], + accounts_[payAccounts_[seq][i].second], + XRP(xrpAmount_[seq]))); + } + } + + bool + makeLedgers(test::jtx::Env& env_) + { + for (std::uint32_t i = 3; i <= ledgersPerShard; ++i) + { + if (!env_.close()) + return false; + std::shared_ptr ledger = + env_.app().getLedgerMaster().getClosedLedger(); + if (ledger->info().seq != i) + return false; + } + + for (std::uint32_t i = 0; i < ledgersPerShard * nShards_; ++i) + { + makeLedgerData(env_, i); + if (!env_.close()) + return false; + std::shared_ptr ledger = + env_.app().getLedgerMaster().getClosedLedger(); + if (ledger->info().seq != i + ledgersPerShard + 1) + return false; + ledgers_.push_back(ledger); + } + + return true; + } + }; + + void + testLedgerData( + TestData& data, + std::shared_ptr ledger, + std::uint32_t seq) + { + using namespace test::jtx; + + auto rootCount{0}; + auto accCount{0}; + auto sothCount{0}; + for (auto const& sles : ledger->sles) + { + if (sles->getType() == ltACCOUNT_ROOT) + { + int sq = sles->getFieldU32(sfSequence); + int reqsq = -1; + const auto id = sles->getAccountID(sfAccount); + + for (int i = 0; i < data.accounts_.size(); ++i) + { + if (id == data.accounts_[i].id()) + { + reqsq = ledgersPerShard + 1; + for (int j = 0; j <= seq; ++j) + if (data.nAccounts_[j] > i + 1 || + (data.nAccounts_[j] == i + 1 && + !data.isNewAccounts(j))) + { + for (int k = 0; k < data.payAccounts_[j].size(); + ++k) + if (data.payAccounts_[j][k].first == i) + reqsq++; + } + else + reqsq++; + ++accCount; + break; + } + } + if (reqsq == -1) + { + reqsq = data.nAccounts_[seq] + 1; + ++rootCount; + } + BEAST_EXPECT(sq == reqsq); + } + else + ++sothCount; + } + BEAST_EXPECT(rootCount == 1); + BEAST_EXPECT(accCount == data.nAccounts_[seq]); + BEAST_EXPECT(sothCount == 3); + + auto iniCount{0}; + auto setCount{0}; + auto payCount{0}; + auto tothCount{0}; + for (auto const& tx : ledger->txs) + { + if (tx.first->getTxnType() == ttPAYMENT) + { + std::int64_t xrpAmount = + tx.first->getFieldAmount(sfAmount).xrp().decimalXRP(); + if (xrpAmount == iniAmount) + ++iniCount; + else + { + ++payCount; + BEAST_EXPECT(xrpAmount == data.xrpAmount_[seq]); + } + } + else if (tx.first->getTxnType() == ttACCOUNT_SET) + ++setCount; + else + ++tothCount; + } + int newacc = data.isNewAccounts(seq) ? 1 : 0; + BEAST_EXPECT(iniCount == newacc); + BEAST_EXPECT(setCount == newacc); + BEAST_EXPECT(payCount == data.payAccounts_[seq].size()); + BEAST_EXPECT(tothCount == !seq); + } + + bool + saveLedger( + Database& db, + Ledger const& ledger, + std::shared_ptr const& next = {}) + { + // Store header + { + Serializer s(128); + s.add32(HashPrefix::ledgerMaster); + addRaw(ledger.info(), s); + db.store( + hotLEDGER, + std::move(s.modData()), + ledger.info().hash, + ledger.info().seq); + } + + // Store the state map + auto visitAcc = [&](SHAMapAbstractNode& node) { + Serializer s; + node.addRaw(s, snfPREFIX); + db.store( + node.getType() == SHAMapAbstractNode::TNType::tnINNER + ? 
hotUNKNOWN + : hotACCOUNT_NODE, + std::move(s.modData()), + node.getNodeHash().as_uint256(), + ledger.info().seq); + return true; + }; + + if (ledger.stateMap().getHash().isNonZero()) + { + if (!ledger.stateMap().isValid()) + return false; + if (next && next->info().parentHash == ledger.info().hash) + { + auto have = next->stateMap().snapShot(false); + ledger.stateMap().snapShot(false)->visitDifferences( + &(*have), visitAcc); + } + else + ledger.stateMap().snapShot(false)->visitNodes(visitAcc); + } + + // Store the transaction map + auto visitTx = [&](SHAMapAbstractNode& node) { + Serializer s; + node.addRaw(s, snfPREFIX); + db.store( + node.getType() == SHAMapAbstractNode::TNType::tnINNER + ? hotUNKNOWN + : hotTRANSACTION_NODE, + std::move(s.modData()), + node.getNodeHash().as_uint256(), + ledger.info().seq); + return true; + }; + + if (ledger.info().txHash.isNonZero()) + { + if (!ledger.txMap().isValid()) + return false; + ledger.txMap().snapShot(false)->visitNodes(visitTx); + } + + return true; + } + + void + checkLedger(TestData& data, DatabaseShard& db, Ledger const& ledger) + { + auto fetched = db.fetchLedger(ledger.info().hash, ledger.info().seq); + if (!BEAST_EXPECT(fetched)) + return; + + testLedgerData(data, fetched, ledger.info().seq - ledgersPerShard - 1); + + // verify the metadata/header info by serializing to json + BEAST_EXPECT( + getJson( + LedgerFill{ledger, LedgerFill::full | LedgerFill::expand}) == + getJson( + LedgerFill{*fetched, LedgerFill::full | LedgerFill::expand})); + + BEAST_EXPECT( + getJson( + LedgerFill{ledger, LedgerFill::full | LedgerFill::binary}) == + getJson( + LedgerFill{*fetched, LedgerFill::full | LedgerFill::binary})); + + // walk shamap and validate each node + auto fcompAcc = [&](SHAMapAbstractNode& node) -> bool { + Serializer s; + node.addRaw(s, snfPREFIX); + auto nSrc{NodeObject::createObject( + node.getType() == SHAMapAbstractNode::TNType::tnINNER + ? hotUNKNOWN + : hotACCOUNT_NODE, + std::move(s.modData()), + node.getNodeHash().as_uint256())}; + if (!BEAST_EXPECT(nSrc)) + return false; + + auto nDst = + db.fetch(node.getNodeHash().as_uint256(), ledger.info().seq); + if (!BEAST_EXPECT(nDst)) + return false; + + BEAST_EXPECT(isSame(nSrc, nDst)); + + return true; + }; + if (ledger.stateMap().getHash().isNonZero()) + ledger.stateMap().snapShot(false)->visitNodes(fcompAcc); + + auto fcompTx = [&](SHAMapAbstractNode& node) -> bool { + Serializer s; + node.addRaw(s, snfPREFIX); + auto nSrc{NodeObject::createObject( + node.getType() == SHAMapAbstractNode::TNType::tnINNER + ? 
hotUNKNOWN + : hotTRANSACTION_NODE, + std::move(s.modData()), + node.getNodeHash().as_uint256())}; + if (!BEAST_EXPECT(nSrc)) + return false; + + auto nDst = + db.fetch(node.getNodeHash().as_uint256(), ledger.info().seq); + if (!BEAST_EXPECT(nDst)) + return false; + + BEAST_EXPECT(isSame(nSrc, nDst)); + + return true; + }; + if (ledger.info().txHash.isNonZero()) + ledger.txMap().snapShot(false)->visitNodes(fcompTx); + } + + std::string + bitmask2Rangeset(std::uint64_t bitmask) + { + std::string set; + if (!bitmask) + return set; + bool empty = true; + + for (std::uint32_t i = 0; i < 64 && bitmask; i++) + { + if (bitmask & (1ll << i)) + { + if (!empty) + set += ","; + set += std::to_string(i); + empty = false; + } + } + + RangeSet rs; + from_string(rs, set); + return to_string(rs); + } + + std::unique_ptr + testConfig( + std::string const& testName, + std::string const& backendType, + std::string const& shardDir, + std::string const& nodeDir = std::string()) + { + using namespace test::jtx; + + if (testName != "") + { + std::string caseName = + "DatabaseShard " + testName + " with backend " + backendType; + testcase(caseName); + } + + return envconfig([&](std::unique_ptr cfg) { + cfg->overwrite(ConfigSection::shardDatabase(), "type", backendType); + cfg->overwrite(ConfigSection::shardDatabase(), "path", shardDir); + cfg->overwrite( + ConfigSection::shardDatabase(), + "max_size_gb", + std::to_string(maxSizeGb)); + cfg->overwrite( + ConfigSection::shardDatabase(), + "ledgers_per_shard", + std::to_string(ledgersPerShard)); + cfg->overwrite( + ConfigSection::shardDatabase(), + "earliest_seq", + std::to_string(earliestSeq)); + cfg->overwrite(ConfigSection::nodeDatabase(), "type", backendType); + cfg->overwrite( + ConfigSection::nodeDatabase(), + "max_size_gb", + std::to_string(maxSizeGb)); + cfg->overwrite( + ConfigSection::nodeDatabase(), + "earliest_seq", + std::to_string(earliestSeq)); + if (nodeDir.empty()) + cfg->overwrite( + ConfigSection::nodeDatabase(), "path", defNodeDir.path()); + else + cfg->overwrite(ConfigSection::nodeDatabase(), "path", nodeDir); + return cfg; + }); + } + + std::optional + waitShard( + DatabaseShard& db, + int shardNumber, + std::chrono::seconds timeout = shardStoreTimeout) + { + RangeSet rs; + auto start = std::chrono::system_clock::now(); + auto end = start + timeout; + while (!from_string(rs, db.getCompleteShards()) || + !boost::icl::contains(rs, shardNumber)) + { + if (!BEAST_EXPECT(std::chrono::system_clock::now() < end)) + return {}; + std::this_thread::yield(); + } + + return shardNumber; + } + + std::optional + createShard(TestData& data, DatabaseShard& db, int maxShardNumber) + { + int shardNumber = -1; + + for (std::uint32_t i = 0; i < ledgersPerShard; ++i) + { + auto ind = db.prepareLedger((maxShardNumber + 1) * ledgersPerShard); + if (!BEAST_EXPECT(ind != boost::none)) + return {}; + shardNumber = db.seqToShardIndex(*ind); + int arrInd = *ind - ledgersPerShard - 1; + BEAST_EXPECT( + arrInd >= 0 && arrInd < maxShardNumber * ledgersPerShard); + BEAST_EXPECT(saveLedger(db, *data.ledgers_[arrInd])); + if (arrInd % ledgersPerShard == (ledgersPerShard - 1)) + { + uint256 const finalKey_{0}; + Serializer s; + s.add32(Shard::version); + s.add32(db.firstLedgerSeq(shardNumber)); + s.add32(db.lastLedgerSeq(shardNumber)); + s.addRaw(data.ledgers_[arrInd]->info().hash.data(), 256 / 8); + db.store(hotUNKNOWN, std::move(s.modData()), finalKey_, *ind); + } + db.setStored(data.ledgers_[arrInd]); + } + + return waitShard(db, shardNumber); + } + + void + 
testStandalone(std::string const& backendType) + { + using namespace test::jtx; + + beast::temp_dir shardDir; + Env env{*this, testConfig("standalone", backendType, shardDir.path())}; + DummyScheduler scheduler; + RootStoppable parent("TestRootStoppable"); + + std::unique_ptr db = + make_ShardStore(env.app(), parent, scheduler, 2, journal_); + + BEAST_EXPECT(db); + BEAST_EXPECT(db->ledgersPerShard() == db->ledgersPerShardDefault); + BEAST_EXPECT(db->init()); + BEAST_EXPECT(db->ledgersPerShard() == ledgersPerShard); + BEAST_EXPECT(db->seqToShardIndex(ledgersPerShard + 1) == 1); + BEAST_EXPECT(db->seqToShardIndex(2 * ledgersPerShard) == 1); + BEAST_EXPECT(db->seqToShardIndex(2 * ledgersPerShard + 1) == 2); + BEAST_EXPECT( + db->earliestShardIndex() == (earliestSeq - 1) / ledgersPerShard); + BEAST_EXPECT(db->firstLedgerSeq(1) == ledgersPerShard + 1); + BEAST_EXPECT(db->lastLedgerSeq(1) == 2 * ledgersPerShard); + BEAST_EXPECT(db->getRootDir().string() == shardDir.path()); + } + + void + testCreateShard( + std::string const& backendType, + std::uint64_t const seedValue) + { + using namespace test::jtx; + + beast::temp_dir shardDir; + Env env{*this, testConfig("createShard", backendType, shardDir.path())}; + DatabaseShard* db = env.app().getShardStore(); + BEAST_EXPECT(db); + + TestData data(seedValue); + if (!BEAST_EXPECT(data.makeLedgers(env))) + return; + + if (!createShard(data, *db, 1)) + return; + + for (std::uint32_t i = 0; i < ledgersPerShard; ++i) + checkLedger(data, *db, *data.ledgers_[i]); + } + + void + testReopenDatabase( + std::string const& backendType, + std::uint64_t const seedValue) + { + using namespace test::jtx; + + beast::temp_dir shardDir; + { + Env env{ + *this, + testConfig("reopenDatabase", backendType, shardDir.path())}; + DatabaseShard* db = env.app().getShardStore(); + BEAST_EXPECT(db); + + TestData data(seedValue, 4, 2); + if (!BEAST_EXPECT(data.makeLedgers(env))) + return; + + for (std::uint32_t i = 0; i < 2; ++i) + if (!createShard(data, *db, 2)) + return; + } + { + Env env{*this, testConfig("", backendType, shardDir.path())}; + DatabaseShard* db = env.app().getShardStore(); + BEAST_EXPECT(db); + + TestData data(seedValue, 4, 2); + if (!BEAST_EXPECT(data.makeLedgers(env))) + return; + + for (std::uint32_t i = 1; i <= 2; ++i) + waitShard(*db, i); + + for (std::uint32_t i = 0; i < 2 * ledgersPerShard; ++i) + checkLedger(data, *db, *data.ledgers_[i]); + } + } + + void + testGetCompleteShards( + std::string const& backendType, + std::uint64_t const seedValue) + { + using namespace test::jtx; + + beast::temp_dir shardDir; + Env env{ + *this, + testConfig("getCompleteShards", backendType, shardDir.path())}; + DatabaseShard* db = env.app().getShardStore(); + BEAST_EXPECT(db); + + TestData data(seedValue, 2, nTestShards); + if (!BEAST_EXPECT(data.makeLedgers(env))) + return; + + BEAST_EXPECT(db->getCompleteShards() == ""); + + std::uint64_t bitMask = 0; + + for (std::uint32_t i = 0; i < nTestShards; ++i) + { + auto n = createShard(data, *db, nTestShards); + if (!BEAST_EXPECT(n && *n >= 1 && *n <= nTestShards)) + return; + bitMask |= 1ll << *n; + BEAST_EXPECT(db->getCompleteShards() == bitmask2Rangeset(bitMask)); + } + } + + void + testPrepareShard( + std::string const& backendType, + std::uint64_t const seedValue) + { + using namespace test::jtx; + + beast::temp_dir shardDir; + Env env{ + *this, testConfig("prepareShard", backendType, shardDir.path())}; + DatabaseShard* db = env.app().getShardStore(); + BEAST_EXPECT(db); + + TestData data(seedValue, 1, nTestShards); + if 
(!BEAST_EXPECT(data.makeLedgers(env))) + return; + + std::uint64_t bitMask = 0; + BEAST_EXPECT(db->getPreShards() == ""); + + for (std::uint32_t i = 0; i < nTestShards * 2; ++i) + { + std::uint32_t n = rand_int(data.rng_, nTestShards - 1) + 1; + if (bitMask & (1ll << n)) + { + db->removePreShard(n); + bitMask &= ~(1ll << n); + } + else + { + db->prepareShard(n); + bitMask |= 1ll << n; + } + BEAST_EXPECT(db->getPreShards() == bitmask2Rangeset(bitMask)); + } + + // test illegal cases + // adding shards with too large number + db->prepareShard(0); + BEAST_EXPECT(db->getPreShards() == bitmask2Rangeset(bitMask)); + db->prepareShard(nTestShards + 1); + BEAST_EXPECT(db->getPreShards() == bitmask2Rangeset(bitMask)); + db->prepareShard(nTestShards + 2); + BEAST_EXPECT(db->getPreShards() == bitmask2Rangeset(bitMask)); + + // create shards which are not prepared for import + BEAST_EXPECT(db->getCompleteShards() == ""); + + std::uint64_t bitMask2 = 0; + + for (std::uint32_t i = 0; i < nTestShards; ++i) + { + auto n = createShard(data, *db, nTestShards); + if (!BEAST_EXPECT(n && *n >= 1 && *n <= nTestShards)) + return; + bitMask2 |= 1ll << *n; + BEAST_EXPECT(db->getPreShards() == bitmask2Rangeset(bitMask)); + BEAST_EXPECT(db->getCompleteShards() == bitmask2Rangeset(bitMask2)); + BEAST_EXPECT((bitMask & bitMask2) == 0); + if ((bitMask | bitMask2) == ((1ll << nTestShards) - 1) << 1) + break; + } + + // try to create another shard + BEAST_EXPECT( + db->prepareLedger((nTestShards + 1) * ledgersPerShard) == + boost::none); + } + + void + testImportShard( + std::string const& backendType, + std::uint64_t const seedValue) + { + using namespace test::jtx; + + beast::temp_dir importDir; + TestData data(seedValue, 2); + + { + Env env{ + *this, + testConfig("importShard", backendType, importDir.path())}; + DatabaseShard* db = env.app().getShardStore(); + BEAST_EXPECT(db); + + if (!BEAST_EXPECT(data.makeLedgers(env))) + return; + + if (!createShard(data, *db, 1)) + return; + + for (std::uint32_t i = 0; i < ledgersPerShard; ++i) + checkLedger(data, *db, *data.ledgers_[i]); + + data.ledgers_.clear(); + } + + boost::filesystem::path importPath(importDir.path()); + importPath /= "1"; + + { + beast::temp_dir shardDir; + Env env{*this, testConfig("", backendType, shardDir.path())}; + DatabaseShard* db = env.app().getShardStore(); + BEAST_EXPECT(db); + + if (!BEAST_EXPECT(data.makeLedgers(env))) + return; + + db->prepareShard(1); + BEAST_EXPECT(db->getPreShards() == bitmask2Rangeset(2)); + if (!BEAST_EXPECT(db->importShard(1, importPath))) + return; + BEAST_EXPECT(db->getPreShards() == ""); + + auto n = waitShard(*db, 1); + if (!BEAST_EXPECT(n && *n == 1)) + return; + + for (std::uint32_t i = 0; i < ledgersPerShard; ++i) + checkLedger(data, *db, *data.ledgers_[i]); + } + } + + void + testCorruptedDatabase( + std::string const& backendType, + std::uint64_t const seedValue) + { + using namespace test::jtx; + + beast::temp_dir shardDir; + { + TestData data(seedValue, 4, 2); + { + Env env{ + *this, + testConfig( + "corruptedDatabase", backendType, shardDir.path())}; + DatabaseShard* db = env.app().getShardStore(); + BEAST_EXPECT(db); + + if (!BEAST_EXPECT(data.makeLedgers(env))) + return; + + for (std::uint32_t i = 0; i < 2; ++i) + if (!BEAST_EXPECT(createShard(data, *db, 2))) + return; + } + + boost::filesystem::path path = shardDir.path(); + path /= std::string("2"); + path /= backendType + ".dat"; + + FILE* f = fopen(path.string().c_str(), "r+b"); + if (!BEAST_EXPECT(f)) + return; + char buf[256]; + beast::rngfill(buf, 
sizeof(buf), data.rng_); + BEAST_EXPECT(fwrite(buf, 1, 256, f) == 256); + fclose(f); + } + { + Env env{*this, testConfig("", backendType, shardDir.path())}; + DatabaseShard* db = env.app().getShardStore(); + BEAST_EXPECT(db); + + TestData data(seedValue, 4, 2); + if (!BEAST_EXPECT(data.makeLedgers(env))) + return; + + for (std::uint32_t i = 1; i <= 1; ++i) + waitShard(*db, i); + + BEAST_EXPECT(db->getCompleteShards() == bitmask2Rangeset(0x2)); + + for (std::uint32_t i = 0; i < 1 * ledgersPerShard; ++i) + checkLedger(data, *db, *data.ledgers_[i]); + } + } + + void + testIllegalFinalKey( + std::string const& backendType, + std::uint64_t const seedValue) + { + using namespace test::jtx; + + for (int i = 0; i < 5; ++i) + { + beast::temp_dir shardDir; + { + Env env{ + *this, + testConfig( + (i == 0 ? "illegalFinalKey" : ""), + backendType, + shardDir.path())}; + DatabaseShard* db = env.app().getShardStore(); + BEAST_EXPECT(db); + + TestData data(seedValue + i, 2); + if (!BEAST_EXPECT(data.makeLedgers(env))) + return; + + int shardNumber = -1; + for (std::uint32_t j = 0; j < ledgersPerShard; ++j) + { + auto ind = db->prepareLedger(2 * ledgersPerShard); + if (!BEAST_EXPECT(ind != boost::none)) + return; + shardNumber = db->seqToShardIndex(*ind); + int arrInd = *ind - ledgersPerShard - 1; + BEAST_EXPECT(arrInd >= 0 && arrInd < ledgersPerShard); + BEAST_EXPECT(saveLedger(*db, *data.ledgers_[arrInd])); + if (arrInd % ledgersPerShard == (ledgersPerShard - 1)) + { + uint256 const finalKey_{0}; + Serializer s; + s.add32(Shard::version + (i == 0)); + s.add32(db->firstLedgerSeq(shardNumber) + (i == 1)); + s.add32(db->lastLedgerSeq(shardNumber) - (i == 3)); + s.addRaw( + data.ledgers_[arrInd - (i == 4)] + ->info() + .hash.data(), + 256 / 8); + db->store( + hotUNKNOWN, + std::move(s.modData()), + finalKey_, + *ind); + } + db->setStored(data.ledgers_[arrInd]); + } + + if (i == 2) + waitShard(*db, shardNumber); + else + { + boost::filesystem::path path(shardDir.path()); + path /= "1"; + boost::system::error_code ec; + auto start = std::chrono::system_clock::now(); + auto end = start + shardStoreTimeout; + while (std::chrono::system_clock::now() < end && + boost::filesystem::exists(path, ec)) + { + std::this_thread::yield(); + } + } + + BEAST_EXPECT( + db->getCompleteShards() == + bitmask2Rangeset(i == 2 ? 2 : 0)); + } + + { + Env env{*this, testConfig("", backendType, shardDir.path())}; + DatabaseShard* db = env.app().getShardStore(); + BEAST_EXPECT(db); + + TestData data(seedValue + i, 2); + if (!BEAST_EXPECT(data.makeLedgers(env))) + return; + + if (i == 2) + waitShard(*db, 1); + + BEAST_EXPECT( + db->getCompleteShards() == + bitmask2Rangeset(i == 2 ? 
2 : 0)); + + if (i == 2) + { + for (std::uint32_t j = 0; j < ledgersPerShard; ++j) + checkLedger(data, *db, *data.ledgers_[j]); + } + } + } + } + + void + testImport(std::string const& backendType, std::uint64_t const seedValue) + { + using namespace test::jtx; + + beast::temp_dir shardDir; + { + beast::temp_dir nodeDir; + Env env{ + *this, + testConfig( + "import", backendType, shardDir.path(), nodeDir.path())}; + DatabaseShard* db = env.app().getShardStore(); + Database& ndb = env.app().getNodeStore(); + BEAST_EXPECT(db); + + TestData data(seedValue, 4, 2); + if (!BEAST_EXPECT(data.makeLedgers(env))) + return; + + for (std::uint32_t i = 0; i < 2 * ledgersPerShard; ++i) + BEAST_EXPECT(saveLedger(ndb, *data.ledgers_[i])); + + BEAST_EXPECT(db->getCompleteShards() == bitmask2Rangeset(0)); + db->import(ndb); + for (std::uint32_t i = 1; i <= 2; ++i) + waitShard(*db, i); + BEAST_EXPECT(db->getCompleteShards() == bitmask2Rangeset(0x6)); + } + { + Env env{*this, testConfig("", backendType, shardDir.path())}; + DatabaseShard* db = env.app().getShardStore(); + BEAST_EXPECT(db); + + TestData data(seedValue, 4, 2); + if (!BEAST_EXPECT(data.makeLedgers(env))) + return; + + for (std::uint32_t i = 1; i <= 2; ++i) + waitShard(*db, i); + + BEAST_EXPECT(db->getCompleteShards() == bitmask2Rangeset(0x6)); + + for (std::uint32_t i = 0; i < 2 * ledgersPerShard; ++i) + checkLedger(data, *db, *data.ledgers_[i]); + } + } + + void + testAll(std::string const& backendType) + { + std::uint64_t const seedValue = 51; + testStandalone(backendType); + testCreateShard(backendType, seedValue); + testReopenDatabase(backendType, seedValue + 5); + testGetCompleteShards(backendType, seedValue + 10); + testPrepareShard(backendType, seedValue + 20); + testImportShard(backendType, seedValue + 30); + testCorruptedDatabase(backendType, seedValue + 40); + testIllegalFinalKey(backendType, seedValue + 50); + testImport(backendType, seedValue + 60); + } + +public: + DatabaseShard_test() : journal_("DatabaseShard_test", *this) + { + } + + void + run() override + { + testAll("nudb"); + +#if RIPPLE_ROCKSDB_AVAILABLE +// testAll ("rocksdb"); +#endif + +#if RIPPLE_ENABLE_SQLITE_BACKEND_TESTS + testAll("sqlite"); +#endif + } +}; + +BEAST_DEFINE_TESTSUITE(DatabaseShard, NodeStore, ripple); + +} // namespace NodeStore +} // namespace ripple diff --git a/src/test/rpc/AccountTx_test.cpp b/src/test/rpc/AccountTx_test.cpp index 030fce99e10..534dd8235f8 100644 --- a/src/test/rpc/AccountTx_test.cpp +++ b/src/test/rpc/AccountTx_test.cpp @@ -445,12 +445,9 @@ class AccountTx_test : public beast::unit_test::suite // clang-format on BEAST_EXPECT( - std::extent::value == - result[jss::result][jss::transactions].size()); + std::size(sanity) == result[jss::result][jss::transactions].size()); - for (unsigned int index{0}; - index < std::extent::value; - ++index) + for (unsigned int index{0}; index < std::size(sanity); ++index) { checkSanity(txs[index], sanity[index]); } @@ -531,14 +528,14 @@ class AccountTx_test : public beast::unit_test::suite // The first two transactions listed in sanity haven't happened yet. 
constexpr unsigned int beckyDeletedOffest = 2; BEAST_EXPECT( - std::extent::value == + std::size(sanity) == result[jss::result][jss::transactions].size() + beckyDeletedOffest); Json::Value const& txs{result[jss::result][jss::transactions]}; for (unsigned int index = beckyDeletedOffest; - index < std::extent::value; + index < std::size(sanity); ++index) { checkSanity(txs[index - beckyDeletedOffest], sanity[index]); @@ -576,14 +573,11 @@ class AccountTx_test : public beast::unit_test::suite BEAST_EXPECT(result[jss::result][jss::transactions].isArray()); BEAST_EXPECT( - std::extent::value == - result[jss::result][jss::transactions].size()); + std::size(sanity) == result[jss::result][jss::transactions].size()); Json::Value const& txs{result[jss::result][jss::transactions]}; - for (unsigned int index = 0; - index < std::extent::value; - ++index) + for (unsigned int index = 0; index < std::size(sanity); ++index) { checkSanity(txs[index], sanity[index]); } diff --git a/src/test/rpc/ShardArchiveHandler_test.cpp b/src/test/rpc/ShardArchiveHandler_test.cpp index 497acf3d1c5..a25fb70c8f2 100644 --- a/src/test/rpc/ShardArchiveHandler_test.cpp +++ b/src/test/rpc/ShardArchiveHandler_test.cpp @@ -167,6 +167,9 @@ class ShardArchiveHandler_test : public beast::unit_test::suite section.set("path", tempDir.path()); section.set("max_size_gb", "100"); section.set("ledgers_per_shard", "256"); + section.set("earliest_seq", "257"); + auto& sectionNode = c->section(ConfigSection::nodeDatabase()); + sectionNode.set("earliest_seq", "257"); c->setupControl(true, true, true); jtx::Env env(*this, std::move(c)); @@ -262,6 +265,9 @@ class ShardArchiveHandler_test : public beast::unit_test::suite section.set("path", tempDir.path()); section.set("max_size_gb", "100"); section.set("ledgers_per_shard", "256"); + section.set("earliest_seq", "257"); + auto& sectionNode = c->section(ConfigSection::nodeDatabase()); + sectionNode.set("earliest_seq", "257"); c->setupControl(true, true, true); jtx::Env env(*this, std::move(c)); @@ -358,6 +364,9 @@ class ShardArchiveHandler_test : public beast::unit_test::suite section.set("ledgers_per_shard", "256"); section.set("shard_verification_retry_interval", "1"); section.set("shard_verification_max_attempts", "10000"); + section.set("earliest_seq", "257"); + auto& sectionNode = c->section(ConfigSection::nodeDatabase()); + sectionNode.set("earliest_seq", "257"); c->setupControl(true, true, true); jtx::Env env(*this, std::move(c));
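
Note on the src/ripple/nodestore/impl/codec.h hunk above: unlike LZ4_decompress_fast, LZ4_decompress_safe takes the compressed input size, bounds-checks both buffers, and returns the number of bytes written to the output (negative on error), which is why the new check compares the return value against the expected uncompressed size (result.second). Below is a minimal standalone sketch of that return-value convention; it is not part of this patch and not rippled code, and the payload string and error handling are made up for illustration.

    // Standalone sketch of the LZ4_decompress_safe contract relied on by codec.h.
    #include <lz4.h>

    #include <iostream>
    #include <string>
    #include <vector>

    int main()
    {
        // Made-up payload purely for illustration.
        std::string const original =
            "example payload example payload example payload";

        // Worst-case compressed size for this input.
        std::vector<char> compressed(
            LZ4_compressBound(static_cast<int>(original.size())));
        int const compressedSize = LZ4_compress_default(
            original.data(),
            compressed.data(),
            static_cast<int>(original.size()),
            static_cast<int>(compressed.size()));
        if (compressedSize <= 0)
            return 1;  // compression failed

        // LZ4_decompress_safe bounds-checks both buffers and returns the
        // number of decompressed bytes, or a negative value on bad input.
        std::vector<char> decompressed(original.size());
        int const decompressedSize = LZ4_decompress_safe(
            compressed.data(),
            decompressed.data(),
            compressedSize,
            static_cast<int>(decompressed.size()));

        // Same success criterion as the new check in lz4_decompress(): the
        // return value must equal the expected uncompressed size.
        if (decompressedSize != static_cast<int>(original.size()))
            return 1;  // treat as corrupt; codec.h throws in this case

        std::cout << std::string(decompressed.data(), decompressedSize) << "\n";
        return 0;
    }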