Merge branch 'ef-tests-electra' of https://github.com/sigp/lighthouse into electra-devnet-1
realbigsean committed Jun 25, 2024
2 parents 0168124 + 7d243f9 commit eb8a2c2
Showing 41 changed files with 507 additions and 287 deletions.
1 change: 1 addition & 0 deletions Cargo.lock


36 changes: 7 additions & 29 deletions beacon_node/beacon_chain/src/attestation_verification.rs
@@ -61,10 +61,9 @@ use std::borrow::Cow;
use strum::AsRefStr;
use tree_hash::TreeHash;
use types::{
Attestation, AttestationRef, BeaconCommittee,
BeaconStateError::{self, NoCommitteeFound},
ChainSpec, CommitteeIndex, Epoch, EthSpec, ForkName, Hash256, IndexedAttestation,
SelectionProof, SignedAggregateAndProof, Slot, SubnetId,
Attestation, AttestationRef, BeaconCommittee, BeaconStateError::NoCommitteeFound, ChainSpec,
CommitteeIndex, Epoch, EthSpec, Hash256, IndexedAttestation, SelectionProof,
SignedAggregateAndProof, Slot, SubnetId,
};

pub use batch::{batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations};
@@ -266,30 +265,9 @@ pub enum Error {
BeaconChainError(BeaconChainError),
}

// TODO(electra) the error conversion changes here are to get a test case to pass
// this could easily be cleaned up
impl From<BeaconChainError> for Error {
fn from(e: BeaconChainError) -> Self {
match &e {
BeaconChainError::BeaconStateError(beacon_state_error) => {
if let BeaconStateError::AggregatorNotInCommittee { aggregator_index } =
beacon_state_error
{
Self::AggregatorNotInCommittee {
aggregator_index: *aggregator_index,
}
} else if let BeaconStateError::InvalidSelectionProof { aggregator_index } =
beacon_state_error
{
Self::InvalidSelectionProof {
aggregator_index: *aggregator_index,
}
} else {
Error::BeaconChainError(e)
}
}
_ => Error::BeaconChainError(e),
}
Self::BeaconChainError(e)
}
}

@@ -1169,7 +1147,7 @@ pub fn verify_propagation_slot_range<S: SlotClock, E: EthSpec>(

let current_fork =
spec.fork_name_at_slot::<E>(slot_clock.now().ok_or(BeaconChainError::UnableToReadSlot)?);
let earliest_permissible_slot = if current_fork < ForkName::Deneb {
let earliest_permissible_slot = if !current_fork.deneb_enabled() {
one_epoch_prior
// EIP-7045
} else {
@@ -1414,11 +1392,11 @@ pub fn obtain_indexed_attestation_and_committees_per_slot<T: BeaconChainTypes>(
/// Runs the `map_fn` with the committee and committee count per slot for the given `attestation`.
///
/// This function exists in this odd "map" pattern because efficiently obtaining the committees for
/// an attestations slot can be complex. It might involve reading straight from the
/// an attestation's slot can be complex. It might involve reading straight from the
/// `beacon_chain.shuffling_cache` or it might involve reading it from a state from the DB. Due to
/// the complexities of `RwLock`s on the shuffling cache, a simple `Cow` isn't suitable here.
///
/// If the committees for an `attestation`'s slot isn't found in the `shuffling_cache`, we will read a state
/// If the committees for an `attestation`'s slot aren't found in the `shuffling_cache`, we will read a state
/// from disk and then update the `shuffling_cache`.
///
/// Committees are sorted by ascending index order 0..committees_per_slot
12 changes: 10 additions & 2 deletions beacon_node/beacon_chain/tests/block_verification.rs
@@ -2,7 +2,9 @@

use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock};
use beacon_chain::{
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
test_utils::{
test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
},
AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, ExecutionPendingBlock,
};
use beacon_chain::{
@@ -1210,8 +1212,14 @@ async fn block_gossip_verification() {
#[tokio::test]
async fn verify_block_for_gossip_slashing_detection() {
let slasher_dir = tempdir().unwrap();
let spec = Arc::new(test_spec::<E>());
let slasher = Arc::new(
Slasher::open(SlasherConfig::new(slasher_dir.path().into()), test_logger()).unwrap(),
Slasher::open(
SlasherConfig::new(slasher_dir.path().into()),
spec,
test_logger(),
)
.unwrap(),
);

let inner_slasher = slasher.clone();
4 changes: 2 additions & 2 deletions beacon_node/operation_pool/src/attestation.rs
@@ -184,8 +184,8 @@ pub fn earliest_attestation_validators<E: EthSpec>(
// Bitfield of validators whose attestations are new/fresh.
let mut new_validators = match attestation.indexed {
CompactIndexedAttestation::Base(indexed_att) => indexed_att.aggregation_bits.clone(),
// TODO(electra) per the comments above, this code path is obsolete post altair fork, so maybe we should just return an empty bitlist here?
CompactIndexedAttestation::Electra(_) => todo!(),
// This code path is obsolete post altair fork, so we just return an empty bitlist here.
CompactIndexedAttestation::Electra(_) => return BitList::with_capacity(0).unwrap(),
};

let state_attestations = if attestation.checkpoint.target_epoch == state.current_epoch() {
24 changes: 11 additions & 13 deletions beacon_node/operation_pool/src/attestation_storage.rs
@@ -165,22 +165,22 @@ impl<E: EthSpec> CompactIndexedAttestation<E> {
CompactIndexedAttestation::Electra(this),
CompactIndexedAttestation::Electra(other),
) => this.should_aggregate(other),
// TODO(electra) is a mix of electra and base compact indexed attestations an edge case we need to deal with?
_ => false,
}
}

pub fn aggregate(&mut self, other: &Self) -> Option<()> {
/// Returns `true` if aggregated, otherwise `false`.
pub fn aggregate(&mut self, other: &Self) -> bool {
match (self, other) {
(CompactIndexedAttestation::Base(this), CompactIndexedAttestation::Base(other)) => {
this.aggregate(other)
this.aggregate(other);
true
}
(
CompactIndexedAttestation::Electra(this),
CompactIndexedAttestation::Electra(other),
) => this.aggregate_same_committee(other),
// TODO(electra) is a mix of electra and base compact indexed attestations an edge case we need to deal with?
_ => None,
_ => false,
}
}
}
@@ -192,7 +192,7 @@ impl<E: EthSpec> CompactIndexedAttestationBase<E> {
.is_zero()
}

pub fn aggregate(&mut self, other: &Self) -> Option<()> {
pub fn aggregate(&mut self, other: &Self) {
self.attesting_indices = self
.attesting_indices
.drain(..)
@@ -201,8 +201,6 @@ impl<E: EthSpec> CompactIndexedAttestationBase<E> {
.collect();
self.aggregation_bits = self.aggregation_bits.union(&other.aggregation_bits);
self.signature.add_assign_aggregate(&other.signature);

Some(())
}
}

@@ -216,9 +214,10 @@ impl<E: EthSpec> CompactIndexedAttestationElectra<E> {
.is_zero()
}

pub fn aggregate_same_committee(&mut self, other: &Self) -> Option<()> {
/// Returns `true` if aggregated, otherwise `false`.
pub fn aggregate_same_committee(&mut self, other: &Self) -> bool {
if self.committee_bits != other.committee_bits {
return None;
return false;
}
self.aggregation_bits = self.aggregation_bits.union(&other.aggregation_bits);
self.attesting_indices = self
@@ -228,7 +227,7 @@ impl<E: EthSpec> CompactIndexedAttestationElectra<E> {
.dedup()
.collect();
self.signature.add_assign_aggregate(&other.signature);
Some(())
true
}

pub fn aggregate_with_disjoint_committees(&mut self, other: &Self) -> Option<()> {
@@ -318,8 +317,7 @@ impl<E: EthSpec> AttestationMap<E> {

for existing_attestation in attestations.iter_mut() {
if existing_attestation.should_aggregate(&indexed) {
existing_attestation.aggregate(&indexed);
aggregated = true;
aggregated = existing_attestation.aggregate(&indexed);
} else if *existing_attestation == indexed {
aggregated = true;
}
8 changes: 4 additions & 4 deletions beacon_node/operation_pool/src/lib.rs
@@ -39,7 +39,7 @@ use std::ptr;
use types::{
sync_aggregate::Error as SyncAggregateError, typenum::Unsigned, AbstractExecPayload,
Attestation, AttestationData, AttesterSlashing, BeaconState, BeaconStateError, ChainSpec,
Epoch, EthSpec, ForkName, ProposerSlashing, SignedBeaconBlock, SignedBlsToExecutionChange,
Epoch, EthSpec, ProposerSlashing, SignedBeaconBlock, SignedBlsToExecutionChange,
SignedVoluntaryExit, Slot, SyncAggregate, SyncCommitteeContribution, Validator,
};

Expand Down Expand Up @@ -316,10 +316,10 @@ impl<E: EthSpec> OperationPool<E> {
)
.inspect(|_| num_curr_valid += 1);

let curr_epoch_limit = if fork_name < ForkName::Electra {
E::MaxAttestations::to_usize()
} else {
let curr_epoch_limit = if fork_name.electra_enabled() {
E::MaxAttestationsElectra::to_usize()
} else {
E::MaxAttestations::to_usize()
};
let prev_epoch_limit = if let BeaconState::Base(base_state) = state {
std::cmp::min(
10 changes: 7 additions & 3 deletions beacon_node/src/lib.rs
@@ -80,7 +80,7 @@ impl<E: EthSpec> ProductionBeaconNode<E> {

let builder = ClientBuilder::new(context.eth_spec_instance.clone())
.runtime_context(context)
.chain_spec(spec)
.chain_spec(spec.clone())
.beacon_processor(client_config.beacon_processor.clone())
.http_api_config(client_config.http_api.clone())
.disk_store(
@@ -113,8 +113,12 @@ impl<E: EthSpec> ProductionBeaconNode<E> {
_ => {}
}
let slasher = Arc::new(
Slasher::open(slasher_config, log.new(slog::o!("service" => "slasher")))
.map_err(|e| format!("Slasher open error: {:?}", e))?,
Slasher::open(
slasher_config,
Arc::new(spec),
log.new(slog::o!("service" => "slasher")),
)
.map_err(|e| format!("Slasher open error: {:?}", e))?,
);
builder.slasher(slasher)
} else {
15 changes: 13 additions & 2 deletions book/src/checkpoint-sync.md
@@ -146,8 +146,19 @@ For more information on historic state storage see the
To manually specify a checkpoint, use the following flags:

* `--checkpoint-state`: accepts an SSZ-encoded `BeaconState` blob
* `--checkpoint-block`: accepts an SSZ-encoded `SignedBeaconBlock` blob
* `--checkpoint-state`: accepts an SSZ-encoded `BeaconState` file
* `--checkpoint-block`: accepts an SSZ-encoded `SignedBeaconBlock` file
* `--checkpoint-blobs`: accepts an SSZ-encoded `Blobs` file

The commands are as follows:

```bash
curl -H "Accept: application/octet-stream" "http://localhost:5052/eth/v2/debug/beacon/states/$SLOT" > state.ssz
curl -H "Accept: application/octet-stream" "http://localhost:5052/eth/v2/beacon/blocks/$SLOT" > block.ssz
curl -H "Accept: application/octet-stream" "http://localhost:5052/eth/v1/beacon/blob_sidecars/$SLOT" > blobs.ssz
```

where `$SLOT` is the slot number. It can also be specified as `head` or `finalized`.
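
The downloaded files can then be supplied when starting the beacon node. A minimal sketch (the file paths are illustrative, and your usual network and execution flags still apply):

```bash
lighthouse bn \
  --checkpoint-state ./state.ssz \
  --checkpoint-block ./block.ssz \
  --checkpoint-blobs ./blobs.ssz
```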

_Both_ the state and block must be provided and the state **must** match the block. The
state may be from the same slot as the block (unadvanced), or advanced to an epoch boundary,
1 change: 1 addition & 0 deletions book/src/database-migrations.md
@@ -16,6 +16,7 @@ validator client or the slasher**.

| Lighthouse version | Release date | Schema version | Downgrade available? |
|--------------------|--------------|----------------|----------------------|
| v5.2.0 | Jun 2024 | v19 | yes before Deneb |
| v5.1.0 | Mar 2024 | v19 | yes before Deneb |
| v5.0.0 | Feb 2024 | v19 | yes before Deneb |
| v4.6.0 | Dec 2023 | v19 | yes before Deneb |
42 changes: 33 additions & 9 deletions book/src/faq.md
@@ -15,6 +15,7 @@
- [My beacon node logs `WARN Error signalling fork choice waiter`, what should I do?](#bn-fork-choice)
- [My beacon node logs `ERRO Aggregate attestation queue full`, what should I do?](#bn-queue-full)
- [My beacon node logs `WARN Failed to finalize deposit cache`, what should I do?](#bn-deposit-cache)
- [My beacon node logs `WARN Could not verify blob sidecar for gossip`, what does it mean?](#bn-blob)

## [Validator](#validator-1)

@@ -214,6 +215,16 @@ This suggests that the computer resources are being overwhelmed. It could be due

This is a known [bug](https://github.com/sigp/lighthouse/issues/3707) that will resolve by itself.

### <a name="bn-blob"></a> My beacon node logs `WARN Could not verify blob sidecar for gossip`, what does it mean?

An example of the full log is shown below:

```text
Jun 07 23:05:12.170 WARN Could not verify blob sidecar for gossip. Ignoring the blob sidecar, commitment: 0xaa97…6f54, index: 1, root: 0x93b8…c47c, slot: 9248017, error: PastFinalizedSlot { blob_slot: Slot(9248017), finalized_slot: Slot(9248032) }, module: network::network_beacon_processor::gossip_methods:720
```

The `PastFinalizedSlot` error indicates that, by the time the node received the blob, its slot had already been finalized. This is usually caused by a peer sending an old blob. The log will stop once Lighthouse eventually drops that peer.
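
To check the finalized checkpoint your node is at when this warning appears, you can query the standard beacon API (a sketch; it assumes the HTTP API is enabled on the default port and `jq` is installed):

```bash
# Finalized checkpoint of the head state (multiply the epoch by 32 to get the finalized slot)
curl -s "http://localhost:5052/eth/v1/beacon/states/head/finality_checkpoints" | jq '.data.finalized'
```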

## Validator

### <a name="vc-activation"></a> Why does it take so long for a validator to be activated?
@@ -327,13 +338,24 @@ The first thing is to ensure both consensus and execution clients are synced wit

You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations).

Another cause for missing attestations is delays during block processing. When this happens, the debug logs will show (debug logs can be found under `$datadir/beacon/logs`):
Another cause of missed attestations is the block arriving late, or delays during block processing.

An example of the log (debug logs can be found under `$datadir/beacon/logs`):

```text
DEBG Delayed head block set_as_head_delay: Some(93.579425ms), imported_delay: Some(1.460405278s), observed_delay: Some(2.540811921s), block_delay: 4.094796624s, slot: 6837344, proposer_index: 211108, block_root: 0x2c52231c0a5a117401f5231585de8aa5dd963bc7cbc00c544e681342eedd1700, service: beacon
Delayed head block, set_as_head_time_ms: 27, imported_time_ms: 168, attestable_delay_ms: 4209, available_delay_ms: 4186, execution_time_ms: 201, blob_delay_ms: 3815, observed_delay_ms: 3984, total_delay_ms: 4381, slot: 1886014, proposer_index: 733, block_root: 0xa7390baac88d50f1cbb5ad81691915f6402385a12521a670bbbd4cd5f8bf3934, service: beacon, module: beacon_chain::canonical_head:1441
```

The fields to look for are `imported_delay > 1s` and `observed_delay < 3s`. The `imported_delay` is how long the node took to process the block. The `imported_delay` of larger than 1 second suggests that there is slowness in processing the block. It could be due to high CPU usage, high I/O disk usage or the clients are doing some background maintenance processes. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). The `observed_delay` of less than 3 seconds means that the block is not arriving late from the block proposer. Combining the above, this implies that the validator should have been able to attest to the block, but failed due to slowness in the node processing the block.
The field to look for is `attestable_delay`, which defines when a block is ready for the validator to attest to it. If `attestable_delay` is greater than 4s, the attestation window has already passed and the attestation will fail. In the above example, the delay is mostly caused by the block being observed late by the node, as shown in `observed_delay`. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block). Ideally, `observed_delay` should be less than 3 seconds. In this example, the validator failed to attest to the block because the block arrived late.

Another example of the log:

```text
DEBG Delayed head block, set_as_head_time_ms: 22, imported_time_ms: 312, attestable_delay_ms: 7052, available_delay_ms: 6874, execution_time_ms: 4694, blob_delay_ms: 2159, observed_delay_ms: 2179, total_delay_ms: 7209, slot: 1885922, proposer_index: 606896, block_root: 0x9966df24d24e722d7133068186f0caa098428696e9f441ac416d0aca70cc0a23, service: beacon, module: beacon_chain::canonical_head:1441
```

In this example, the `execution_time_ms` is 4694 ms. The `execution_time_ms` measures how long it took to execute the block. An `execution_time_ms` larger than 1 second suggests slowness in processing the block, which could be due to high CPU usage, high disk I/O, or the clients performing background maintenance.
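
To locate these entries yourself, the debug log can be searched directly. A sketch (the log file name is an assumption and may differ depending on your `--logfile` settings):

```bash
# Show the most recent delayed-head entries from the debug log
grep "Delayed head block" "$datadir/beacon/logs/beacon.log" | tail -n 5
```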

### <a name="vc-head-vote"></a> Sometimes I miss the attestation head vote, resulting in penalty. Is this normal?

@@ -514,21 +536,23 @@ If you would still like to subscribe to all subnets, you can use the flag `subsc

### <a name="net-quic"></a> How to know how many of my peers are connected via QUIC?

With `--metrics` enabled in the beacon node, you can find the number of peers connected via QUIC using:
With `--metrics` enabled in the beacon node, the [Grafana Network dashboard](https://github.com/sigp/lighthouse-metrics/blob/master/dashboards/Network.json) displays the peers connected by transport type, which shows the number of peers connected via QUIC.

Alternatively, you can find the number of peers connected via QUIC manually using:

```bash
curl -s "http://localhost:5054/metrics" | grep libp2p_quic_peers
curl -s "http://localhost:5054/metrics" | grep 'transport="quic"'
```

A response example is:

```text
# HELP libp2p_quic_peers Count of libp2p peers currently connected via QUIC
# TYPE libp2p_quic_peers gauge
libp2p_quic_peers 4
libp2p_peers_multi{direction="inbound",transport="quic"} 27
libp2p_peers_multi{direction="none",transport="quic"} 0
libp2p_peers_multi{direction="outbound",transport="quic"} 9
```

which shows that there are 4 peers connected via QUIC.
which shows that there are a total of 36 peers connected via QUIC.
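
If you only want the total, the per-direction gauges can be summed, for example:

```bash
# Sum the QUIC peer counts across directions
curl -s "http://localhost:5054/metrics" | grep '^libp2p_peers_multi.*transport="quic"' | awk '{sum += $NF} END {print sum}'
```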

## Miscellaneous

4 changes: 2 additions & 2 deletions book/src/slasher.md
@@ -114,13 +114,13 @@ changed after initialization.

* Flag: `--slasher-max-db-size GIGABYTES`
* Argument: maximum size of the database in gigabytes
* Default: 256 GB
* Default: 512 GB

Both database backends LMDB and MDBX place a hard limit on the size of the database
file. You can use the `--slasher-max-db-size` flag to set this limit. It can be adjusted after
initialization if the limit is reached.

By default the limit is set to accommodate the default history length and around 600K validators (with about 30% headroom) but
By default the limit is set to accommodate the default history length and around 1 million validators, but
you can set it lower if running with a reduced history length. The space required scales
approximately linearly in validator count and history length, i.e. if you halve either you can halve
the space required.
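
For example, a sketch of running with half the default history length and a correspondingly smaller database cap (the values shown are illustrative):

```bash
lighthouse bn --slasher --slasher-history-length 2048 --slasher-max-db-size 256
```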
4 changes: 2 additions & 2 deletions book/src/slashing-protection.md
@@ -75,7 +75,7 @@ Once you have the slashing protection database from your existing client, you ca
using this command:

```bash
lighthouse account validator slashing-protection import <my_interchange.json>
lighthouse account validator slashing-protection import filename.json
```

When importing an interchange file, you still need to import the validator keystores themselves
@@ -86,7 +86,7 @@ separately, using the instructions for [import validator keys](./mainnet-validat
You can export Lighthouse's database for use with another client with this command:

```bash
lighthouse account validator slashing-protection export <lighthouse_interchange.json>
lighthouse account validator slashing-protection export filename.json
```

The validator client needs to be stopped in order to export, to guarantee that the data exported is