From 1e349e2a7b9674769fd7ca9d2a13fdc5626110d4 Mon Sep 17 00:00:00 2001 From: Brad Chase Date: Fri, 15 Dec 2017 12:28:29 -0500 Subject: [PATCH] [FOLD] Require increasing full validation sequence numbers: With the preferred ledger by branch strategy for determining the proper ledger, we can now leverage partial validations to detect when validators recognize they are on the wrong branch and recover. In some cases, this could mean issuing a partial validation for an earlier sequenced ledger. However, for full validations, we do not want a case where a validator validates two different ledgers with the same sequence number. This changeset prevents validators from issuing such validations as part of consensus, and also prevents receiving validations that violate this invariant. --- src/ripple/app/consensus/RCLConsensus.cpp | 10 ++++++- src/ripple/app/consensus/RCLConsensus.h | 3 ++ src/ripple/app/consensus/RCLValidations.cpp | 6 ++++ src/ripple/consensus/Validations.h | 31 +++++++++++++++++---- src/test/consensus/Validations_test.cpp | 27 ++++++------------ src/test/csf/Peer.h | 20 ++++++++++--- 6 files changed, 67 insertions(+), 30 deletions(-) diff --git a/src/ripple/app/consensus/RCLConsensus.cpp b/src/ripple/app/consensus/RCLConsensus.cpp index 6092774a97b..2a14bc1b470 100644 --- a/src/ripple/app/consensus/RCLConsensus.cpp +++ b/src/ripple/app/consensus/RCLConsensus.cpp @@ -831,9 +831,14 @@ RCLConsensus::Adaptor::validate(RCLCxLedger const& ledger, bool proposing) validationTime = lastValidationTime_ + 1s; lastValidationTime_ = validationTime; + // Can only fully validate later sequenced ledgers + const bool isFull = proposing && ledger.seq() > largestFullValidationSeq_; + largestFullValidationSeq_ = + std::max(largestFullValidationSeq_, ledger.seq()); + // Build validation auto v = std::make_shared( - ledger.id(), validationTime, valPublic_, proposing); + ledger.id(), validationTime, valPublic_, isFull); v->setFieldU32(sfLedgerSequence, ledger.seq()); // Add our load 
fee to the validation @@ -948,6 +953,9 @@ RCLConsensus::Adaptor::preStartRound(RCLCxLedger const & prevLgr) prevLgr.seq() >= app_.getMaxDisallowedLedger() && !app_.getOPs().isAmendmentBlocked(); + largestFullValidationSeq_ = + std::max(largestFullValidationSeq_, app_.getMaxDisallowedLedger()); + const bool synced = app_.getOPs().getOperatingMode() == NetworkOPs::omFULL; if (validating_) diff --git a/src/ripple/app/consensus/RCLConsensus.h b/src/ripple/app/consensus/RCLConsensus.h index c200589b070..5dcb3fcc284 100644 --- a/src/ripple/app/consensus/RCLConsensus.h +++ b/src/ripple/app/consensus/RCLConsensus.h @@ -69,6 +69,9 @@ class RCLConsensus // The timestamp of the last validation we used NetClock::time_point lastValidationTime_; + // Largest sequence number fully validated + LedgerIndex largestFullValidationSeq_ = 0; + // These members are queried via public accesors and are atomic for // thread safety. std::atomic validating_{false}; diff --git a/src/ripple/app/consensus/RCLValidations.cpp b/src/ripple/app/consensus/RCLValidations.cpp index 9cb4d73b367..98e4132de8b 100644 --- a/src/ripple/app/consensus/RCLValidations.cpp +++ b/src/ripple/app/consensus/RCLValidations.cpp @@ -211,6 +211,12 @@ handleNewValidation(Application& app, if(j.debug()) dmp(j.debug(), to_string(outcome)); + if(outcome == ValStatus::badFullSeq && j.warn()) + { + auto const seq = val->getFieldU32(sfLedgerSequence); + dmp(j.warn(), " already fully validated sequence past " + to_string(seq)); + } + if (val->isTrusted() && outcome == ValStatus::current) { app.getLedgerMaster().checkAccept( diff --git a/src/ripple/consensus/Validations.h b/src/ripple/consensus/Validations.h index 2e27a2eca53..76cabf7e548 100644 --- a/src/ripple/consensus/Validations.h +++ b/src/ripple/consensus/Validations.h @@ -142,7 +142,9 @@ enum class ValStatus { /// Already had this exact same validation repeat, /// Not current or was older than current from this node - stale + stale, + /// A validation was marked full but 
it violates increasing seq requirement + badFullSeq }; inline std::string @@ -156,6 +158,8 @@ to_string(ValStatus m) return "repeat"; case ValStatus::stale: return "stale"; + case ValStatus::badFullSeq: + return "badFullSeq"; default: return "unknown"; } @@ -263,8 +267,8 @@ class Validations // Manages concurrent access to current_ and byLedger_ Mutex mutex_; - //! For the most recent validation, we also want to store the ID - //! of the ledger it replaces + // For the most recent validation, we also want to store the ID + // of the ledger it replaces struct ValidationAndPrevID { ValidationAndPrevID(Validation const& v) : val{v}, prevLedgerID{0} @@ -275,10 +279,13 @@ class Validations ID prevLedgerID; }; - //! The latest validation from each node + // Validations from currently listed and trusted nodes (partial and full) hash_map current_; - //! Recent validations from nodes, indexed by ledger identifier + // Sequence of the largest full validation received from each node + hash_map largestFullValidation_; + + // Validations from listed nodes, indexed by ledger id (partial and full) beast::aged_unordered_map< ID, hash_map, @@ -286,7 +293,7 @@ class Validations beast::uhash<>> byLedger_; - //! 
Parameters to determine validation staleness + // Parameters to determine validation staleness ValidationParms const parms_; // Adaptor instance @@ -415,6 +422,18 @@ class Validations { ScopedLock lock{mutex_}; + // Ensure full validations are for increasing sequence numbers + if (val.full() && val.seq() != Seq{0}) + { + auto const ins = largestFullValidation_.emplace(key, val.seq()); + if (!ins.second) + { + if (val.seq() <= ins.first->second) + return ValStatus::badFullSeq; + ins.first->second = val.seq(); + } + } + // This validation is a repeat if we already have // one with the same id for this key auto const ret = byLedger_[val.ledgerID()].emplace(key, val); diff --git a/src/test/consensus/Validations_test.cpp b/src/test/consensus/Validations_test.cpp index f5dbf900d88..6d71d88c10f 100644 --- a/src/test/consensus/Validations_test.cpp +++ b/src/test/consensus/Validations_test.cpp @@ -307,7 +307,7 @@ class Validations_test : public beast::unit_test::suite BEAST_EXPECT(ValStatus::current == harness.add(v)); // Re-adding is repeat - BEAST_EXPECT(ValStatus::repeat == harness.add(v)); + BEAST_EXPECT(ValStatus::badFullSeq == harness.add(v)); } { @@ -340,31 +340,27 @@ class Validations_test : public beast::unit_test::suite BEAST_EXPECT(harness.vals().getNodesAfter(Ledger::ID{2}) == 0); BEAST_EXPECT( - ValStatus::stale == + ValStatus::badFullSeq == harness.add(a.validate(Ledger::Seq{2}, Ledger::ID{20}))); - BEAST_EXPECT(harness.vals().numTrustedForLedger(Ledger::ID{2}) == 1); - // THIS FAILS pending filtering on increasing seq #'s - BEAST_EXPECT(harness.vals().numTrustedForLedger(Ledger::ID{20}) == 0); + BEAST_EXPECT( + harness.vals().numTrustedForLedger(Ledger::ID{2}) == 1); + BEAST_EXPECT( + harness.vals().numTrustedForLedger(Ledger::ID{20}) == 0); } { // Processing validations out of order should ignore the older harness.clock().advance(2s); - auto const val3 = a.validate(Ledger::Seq{3}, Ledger::ID{3}); + auto const val3 = a.validate(Ledger::Seq{4}, Ledger::ID{4}); 
harness.clock().advance(4s); - auto const val4 = a.validate(Ledger::Seq{4}, Ledger::ID{4}); + auto const val4 = a.validate(Ledger::Seq{3}, Ledger::ID{3}); BEAST_EXPECT(ValStatus::current == harness.add(val4)); BEAST_EXPECT(ValStatus::stale == harness.add(val3)); - // re-issued should not be added - auto const val4reissue = - a.validate(Ledger::Seq{4}, Ledger::ID{44}); - - BEAST_EXPECT(ValStatus::stale == harness.add(val4reissue)); } { // Process validations out of order with shifted times @@ -381,13 +377,6 @@ class Validations_test : public beast::unit_test::suite ValStatus::stale == harness.add( a.validate(Ledger::Seq{9}, Ledger::ID{9}, -1s, -1s))); - - // Process a validation that has an "earlier" seq but later sign - // time - BEAST_EXPECT( - ValStatus::current == - harness.add( - a.validate(Ledger::Seq{7}, Ledger::ID{7}, 1s, 1s))); } { // Test stale on arrival validations diff --git a/src/test/csf/Peer.h b/src/test/csf/Peer.h index 1e7e7e5b164..afbf3d27cda 100644 --- a/src/test/csf/Peer.h +++ b/src/test/csf/Peer.h @@ -236,6 +236,9 @@ struct Peer //! Whether to simulate running as validator or a tracking node bool runAsValidator = true; + //! 
Sequence number of largest full validation thus far + Ledger::Seq largestFullValidation{0}; + //TODO: Consider removing these two, they are only a convenience for tests // Number of proposers in the prior round std::size_t prevProposers = 0; @@ -537,7 +540,10 @@ struct Peer ConsensusMode const& mode, Json::Value&& consensusJson) { - schedule(delays.ledgerAccept, [&]() { + schedule(delays.ledgerAccept, [=]() { + const bool proposing = mode == ConsensusMode::proposing; + const bool consensusFail = result.state == ConsensusState::MovedOn; + TxSet const acceptedTxs = injectTxs(prevLedger, result.set); Ledger const newLedger = oracle.accept( prevLedger, @@ -562,16 +568,22 @@ struct Peer bool const isCompatible = newLedger.isAncestor(fullyValidatedLedger); - if (runAsValidator && isCompatible) + if (runAsValidator && isCompatible && !consensusFail) { + // Can only send one fully validated ledger per seq + bool isFull = + proposing && newLedger.seq() > largestFullValidation; + largestFullValidation = + std::max(largestFullValidation, newLedger.seq()); + Validation v{newLedger.id(), newLedger.seq(), now(), now(), key, id, - false}; - // share is not trusted + isFull}; + // share the new validation; it is trusted by the receiver share(v); // we trust ourselves addTrustedValidation(v);