Skip to content

Commit

Permalink
[FOLD] Require increasing full validation sequence numbers:
Browse files Browse the repository at this point in the history
With the preferred ledger by branch strategy for determining the proper
ledger, we can now leverage partial validations to detect when
validators recognize they are on the wrong branch and recover. In some
cases, this could mean issuing a partial validation for an earlier
sequenced ledger.

However, for full validations, we do not want a case where a validator
validates two different ledgers with the same sequence number. This
changeset prevents validators from issuing such validations as part of
consensus, and also prevents receiving validations that violate this
invariant.
  • Loading branch information
bachase committed Dec 15, 2017
1 parent 38603d4 commit 1e349e2
Show file tree
Hide file tree
Showing 6 changed files with 67 additions and 30 deletions.
10 changes: 9 additions & 1 deletion src/ripple/app/consensus/RCLConsensus.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -831,9 +831,14 @@ RCLConsensus::Adaptor::validate(RCLCxLedger const& ledger, bool proposing)
validationTime = lastValidationTime_ + 1s;
lastValidationTime_ = validationTime;

// Can only fully validate later sequenced ledgers
const bool isFull = proposing && ledger.seq() > largestFullValidationSeq_;
largestFullValidationSeq_ =
std::max(largestFullValidationSeq_, ledger.seq());

// Build validation
auto v = std::make_shared<STValidation>(
ledger.id(), validationTime, valPublic_, proposing);
ledger.id(), validationTime, valPublic_, isFull);
v->setFieldU32(sfLedgerSequence, ledger.seq());

// Add our load fee to the validation
Expand Down Expand Up @@ -948,6 +953,9 @@ RCLConsensus::Adaptor::preStartRound(RCLCxLedger const & prevLgr)
prevLgr.seq() >= app_.getMaxDisallowedLedger() &&
!app_.getOPs().isAmendmentBlocked();

largestFullValidationSeq_ =
std::max(largestFullValidationSeq_, app_.getMaxDisallowedLedger());

const bool synced = app_.getOPs().getOperatingMode() == NetworkOPs::omFULL;

if (validating_)
Expand Down
3 changes: 3 additions & 0 deletions src/ripple/app/consensus/RCLConsensus.h
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,9 @@ class RCLConsensus
// The timestamp of the last validation we used
NetClock::time_point lastValidationTime_;

// Largest sequence number fully validated
LedgerIndex largestFullValidationSeq_ = 0;

// These members are queried via public accessors and are atomic for
// thread safety.
std::atomic<bool> validating_{false};
Expand Down
6 changes: 6 additions & 0 deletions src/ripple/app/consensus/RCLValidations.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -211,6 +211,12 @@ handleNewValidation(Application& app,
if(j.debug())
dmp(j.debug(), to_string(outcome));

if(outcome == ValStatus::badFullSeq && j.warn())
{
auto const seq = val->getFieldU32(sfLedgerSequence);
dmp(j.warn(), " already fully validated sequence past " + to_string(seq));
}

if (val->isTrusted() && outcome == ValStatus::current)
{
app.getLedgerMaster().checkAccept(
Expand Down
31 changes: 25 additions & 6 deletions src/ripple/consensus/Validations.h
Original file line number Diff line number Diff line change
Expand Up @@ -142,7 +142,9 @@ enum class ValStatus {
/// Already had this exact same validation
repeat,
/// Not current or was older than current from this node
stale
stale,
/// A validation was marked full but it violates increasing seq requirement
badFullSeq
};

inline std::string
Expand All @@ -156,6 +158,8 @@ to_string(ValStatus m)
return "repeat";
case ValStatus::stale:
return "stale";
case ValStatus::badFullSeq:
return "badFullSeq";
default:
return "unknown";
}
Expand Down Expand Up @@ -263,8 +267,8 @@ class Validations
// Manages concurrent access to current_ and byLedger_
Mutex mutex_;

//! For the most recent validation, we also want to store the ID
//! of the ledger it replaces
// For the most recent validation, we also want to store the ID
// of the ledger it replaces
struct ValidationAndPrevID
{
ValidationAndPrevID(Validation const& v) : val{v}, prevLedgerID{0}
Expand All @@ -275,18 +279,21 @@ class Validations
ID prevLedgerID;
};

//! The latest validation from each node
// Validations from currently listed and trusted nodes (partial and full)
hash_map<NodeKey, ValidationAndPrevID> current_;

//! Recent validations from nodes, indexed by ledger identifier
// Sequence of the largest full validation received from each node
hash_map<NodeKey, Seq> largestFullValidation_;

// Validations from listed nodes, indexed by ledger id (partial and full)
beast::aged_unordered_map<
ID,
hash_map<NodeKey, Validation>,
std::chrono::steady_clock,
beast::uhash<>>
byLedger_;

//! Parameters to determine validation staleness
// Parameters to determine validation staleness
ValidationParms const parms_;

// Adaptor instance
Expand Down Expand Up @@ -415,6 +422,18 @@ class Validations
{
ScopedLock lock{mutex_};

// Ensure full validations are for increasing sequence numbers
if (val.full() && val.seq() != Seq{0})
{
auto const ins = largestFullValidation_.emplace(key, val.seq());
if (!ins.second)
{
if (val.seq() <= ins.first->second)
return ValStatus::badFullSeq;
ins.first->second = val.seq();
}
}

// This validation is a repeat if we already have
// one with the same id for this key
auto const ret = byLedger_[val.ledgerID()].emplace(key, val);
Expand Down
27 changes: 8 additions & 19 deletions src/test/consensus/Validations_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -307,7 +307,7 @@ class Validations_test : public beast::unit_test::suite
BEAST_EXPECT(ValStatus::current == harness.add(v));

// Re-adding is repeat
BEAST_EXPECT(ValStatus::repeat == harness.add(v));
BEAST_EXPECT(ValStatus::badFullSeq == harness.add(v));
}

{
Expand Down Expand Up @@ -340,31 +340,27 @@ class Validations_test : public beast::unit_test::suite
BEAST_EXPECT(harness.vals().getNodesAfter(Ledger::ID{2}) == 0);

BEAST_EXPECT(
ValStatus::stale ==
ValStatus::badFullSeq ==
harness.add(a.validate(Ledger::Seq{2}, Ledger::ID{20})));

BEAST_EXPECT(harness.vals().numTrustedForLedger(Ledger::ID{2}) == 1);
// THIS FAILS pending filtering on increasing seq #'s
BEAST_EXPECT(harness.vals().numTrustedForLedger(Ledger::ID{20}) == 0);
BEAST_EXPECT(
harness.vals().numTrustedForLedger(Ledger::ID{2}) == 1);
BEAST_EXPECT(
harness.vals().numTrustedForLedger(Ledger::ID{20}) == 0);
}

{
// Processing validations out of order should ignore the older
harness.clock().advance(2s);
auto const val3 = a.validate(Ledger::Seq{3}, Ledger::ID{3});
auto const val3 = a.validate(Ledger::Seq{4}, Ledger::ID{4});

harness.clock().advance(4s);
auto const val4 = a.validate(Ledger::Seq{4}, Ledger::ID{4});
auto const val4 = a.validate(Ledger::Seq{3}, Ledger::ID{3});

BEAST_EXPECT(ValStatus::current == harness.add(val4));

BEAST_EXPECT(ValStatus::stale == harness.add(val3));

// re-issued should not be added
auto const val4reissue =
a.validate(Ledger::Seq{4}, Ledger::ID{44});

BEAST_EXPECT(ValStatus::stale == harness.add(val4reissue));
}
{
// Process validations out of order with shifted times
Expand All @@ -381,13 +377,6 @@ class Validations_test : public beast::unit_test::suite
ValStatus::stale ==
harness.add(
a.validate(Ledger::Seq{9}, Ledger::ID{9}, -1s, -1s)));

// Process a validation that has an "earlier" seq but later sign
// time
BEAST_EXPECT(
ValStatus::current ==
harness.add(
a.validate(Ledger::Seq{7}, Ledger::ID{7}, 1s, 1s)));
}
{
// Test stale on arrival validations
Expand Down
20 changes: 16 additions & 4 deletions src/test/csf/Peer.h
Original file line number Diff line number Diff line change
Expand Up @@ -236,6 +236,9 @@ struct Peer
//! Whether to simulate running as validator or a tracking node
bool runAsValidator = true;

//! Sequence number of largest full validation thus far
Ledger::Seq largestFullValidation{0};

//TODO: Consider removing these two, they are only a convenience for tests
// Number of proposers in the prior round
std::size_t prevProposers = 0;
Expand Down Expand Up @@ -537,7 +540,10 @@ struct Peer
ConsensusMode const& mode,
Json::Value&& consensusJson)
{
schedule(delays.ledgerAccept, [&]() {
schedule(delays.ledgerAccept, [=]() {
const bool proposing = mode == ConsensusMode::proposing;
const bool consensusFail = result.state == ConsensusState::MovedOn;

TxSet const acceptedTxs = injectTxs(prevLedger, result.set);
Ledger const newLedger = oracle.accept(
prevLedger,
Expand All @@ -562,16 +568,22 @@ struct Peer
bool const isCompatible =
newLedger.isAncestor(fullyValidatedLedger);

if (runAsValidator && isCompatible)
if (runAsValidator && isCompatible && !consensusFail)
{
// Can only send one fully validated ledger per seq
bool isFull =
proposing && newLedger.seq() > largestFullValidation;
largestFullValidation =
std::max(largestFullValidation, newLedger.seq());

Validation v{newLedger.id(),
newLedger.seq(),
now(),
now(),
key,
id,
false};
// share is not trusted
isFull};
// share the new validation; it is trusted by the receiver
share(v);
// we trust ourselves
addTrustedValidation(v);
Expand Down

0 comments on commit 1e349e2

Please sign in to comment.