diff --git a/Makefile b/Makefile index e3d889bfeab..19988fc2c64 100644 --- a/Makefile +++ b/Makefile @@ -215,7 +215,8 @@ lint: -A clippy::upper-case-acronyms \ -A clippy::vec-init-then-push \ -A clippy::question-mark \ - -A clippy::uninlined-format-args + -A clippy::uninlined-format-args \ + -A clippy::enum_variant_names # Lints the code using Clippy and automatically fix some simple compiler warnings. lint-fix: diff --git a/account_manager/src/validator/slashing_protection.rs b/account_manager/src/validator/slashing_protection.rs index c6d81275a5e..0a98a452b8b 100644 --- a/account_manager/src/validator/slashing_protection.rs +++ b/account_manager/src/validator/slashing_protection.rs @@ -16,7 +16,6 @@ pub const EXPORT_CMD: &str = "export"; pub const IMPORT_FILE_ARG: &str = "IMPORT-FILE"; pub const EXPORT_FILE_ARG: &str = "EXPORT-FILE"; -pub const MINIFY_FLAG: &str = "minify"; pub const PUBKEYS_FLAG: &str = "pubkeys"; pub fn cli_app<'a, 'b>() -> App<'a, 'b> { @@ -31,16 +30,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .value_name("FILE") .help("The slashing protection interchange file to import (.json)"), ) - .arg( - Arg::with_name(MINIFY_FLAG) - .long(MINIFY_FLAG) - .takes_value(true) - .possible_values(&["false", "true"]) - .help( - "Deprecated: Lighthouse no longer requires minification on import \ - because it always minifies", - ), - ), ) .subcommand( App::new(EXPORT_CMD) @@ -61,17 +50,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { comma-separated. All known keys will be exported if omitted", ), ) - .arg( - Arg::with_name(MINIFY_FLAG) - .long(MINIFY_FLAG) - .takes_value(true) - .default_value("false") - .possible_values(&["false", "true"]) - .help( - "Minify the output file. This will make it smaller and faster to \ - import, but not faster to generate.", - ), - ), ) } @@ -92,7 +70,6 @@ pub fn cli_run( match matches.subcommand() { (IMPORT_CMD, Some(matches)) => { let import_filename: PathBuf = clap_utils::parse_required(matches, IMPORT_FILE_ARG)?; - let minify: Option = clap_utils::parse_optional(matches, MINIFY_FLAG)?; let import_file = File::open(&import_filename).map_err(|e| { format!( "Unable to open import file at {}: {:?}", @@ -102,23 +79,10 @@ pub fn cli_run( })?; eprint!("Loading JSON file into memory & deserializing"); - let mut interchange = Interchange::from_json_reader(&import_file) + let interchange = Interchange::from_json_reader(&import_file) .map_err(|e| format!("Error parsing file for import: {:?}", e))?; eprintln!(" [done]."); - if let Some(minify) = minify { - eprintln!( - "WARNING: --minify flag is deprecated and will be removed in a future release" - ); - if minify { - eprint!("Minifying input file for faster loading"); - interchange = interchange - .minify() - .map_err(|e| format!("Minification failed: {:?}", e))?; - eprintln!(" [done]."); - } - } - let slashing_protection_database = SlashingDatabase::open_or_create(&slashing_protection_db_path).map_err(|e| { format!( @@ -206,7 +170,6 @@ pub fn cli_run( } (EXPORT_CMD, Some(matches)) => { let export_filename: PathBuf = clap_utils::parse_required(matches, EXPORT_FILE_ARG)?; - let minify: bool = clap_utils::parse_required(matches, MINIFY_FLAG)?; let selected_pubkeys = if let Some(pubkeys) = clap_utils::parse_optional::(matches, PUBKEYS_FLAG)? 
@@ -237,17 +200,10 @@ pub fn cli_run( ) })?; - let mut interchange = slashing_protection_database + let interchange = slashing_protection_database .export_interchange_info(genesis_validators_root, selected_pubkeys.as_deref()) .map_err(|e| format!("Error during export: {:?}", e))?; - if minify { - eprintln!("Minifying output file"); - interchange = interchange - .minify() - .map_err(|e| format!("Unable to minify output: {:?}", e))?; - } - let output_file = File::create(export_filename) .map_err(|e| format!("Error creating output file: {:?}", e))?; diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index 992c7a479e0..abd676d7389 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -5,7 +5,9 @@ use participation_cache::ParticipationCache; use safe_arith::SafeArith; use serde_utils::quoted_u64::Quoted; use slog::debug; -use state_processing::per_epoch_processing::altair::process_inactivity_updates; +use state_processing::per_epoch_processing::altair::{ + process_inactivity_updates, process_justification_and_finalization, +}; use state_processing::{ common::altair::BaseRewardPerIncrement, per_epoch_processing::altair::{participation_cache, rewards_and_penalties::get_flag_weight}, @@ -27,6 +29,7 @@ use state_processing::per_epoch_processing::base::rewards_and_penalties::{ }; use state_processing::per_epoch_processing::base::validator_statuses::InclusionInfo; use state_processing::per_epoch_processing::base::{ + process_justification_and_finalization as process_justification_and_finalization_base, TotalBalances, ValidatorStatus, ValidatorStatuses, }; @@ -67,6 +70,13 @@ impl BeaconChain { let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; validator_statuses.process_attestations(&state)?; + process_justification_and_finalization_base( + &state, + &validator_statuses.total_balances, + spec, + )? + .apply_changes_to_state(&mut state); + let ideal_rewards = self.compute_ideal_rewards_base(&state, &validator_statuses.total_balances)?; @@ -125,6 +135,8 @@ impl BeaconChain { // Calculate ideal_rewards let participation_cache = ParticipationCache::new(&state, spec)?; + process_justification_and_finalization(&state, &participation_cache)? + .apply_changes_to_state(&mut state); process_inactivity_updates(&mut state, &participation_cache, spec)?; let previous_epoch = state.previous_epoch(); diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f2378b4f9ed..168bbfca496 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -18,7 +18,7 @@ use crate::block_verification::{ use crate::block_verification_types::{ AsBlock, AvailableExecutedBlock, BlockImportData, ExecutedBlock, RpcBlock, }; -pub use crate::canonical_head::{CanonicalHead, CanonicalHeadRwLock}; +pub use crate::canonical_head::CanonicalHead; use crate::chain_config::ChainConfig; use crate::data_availability_checker::{ Availability, AvailabilityCheckError, AvailableBlock, DataAvailabilityChecker, @@ -453,7 +453,7 @@ pub struct BeaconChain { /// A cache of eth1 deposit data at epoch boundaries for deposit finalization pub eth1_finalization_cache: TimeoutRwLock, /// Caches the beacon block proposer shuffling for a given epoch and shuffling key root. - pub beacon_proposer_cache: Mutex, + pub beacon_proposer_cache: Arc>, /// Caches a map of `validator_index -> validator_pubkey`. 
pub(crate) validator_pubkey_cache: TimeoutRwLock>, /// A cache used when producing attestations. diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index eae71bd63ea..b51592caddb 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -25,7 +25,7 @@ const CACHE_SIZE: usize = 16; /// This value is fairly unimportant, it's used to avoid heap allocations. The result of it being /// incorrect is non-substantial from a consensus perspective (and probably also from a /// performance perspective). -const TYPICAL_SLOTS_PER_EPOCH: usize = 32; +pub const TYPICAL_SLOTS_PER_EPOCH: usize = 32; /// For some given slot, this contains the proposer index (`index`) and the `fork` that should be /// used to verify their signature. diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 7f1a596ec3c..65cf7a728bc 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -583,29 +583,33 @@ pub fn signature_verify_chain_segment( &chain.spec, )?; + // unzip chain segment and verify kzg in bulk + let (roots, blocks): (Vec<_>, Vec<_>) = chain_segment.into_iter().unzip(); + let maybe_available_blocks = chain + .data_availability_checker + .verify_kzg_for_rpc_blocks(blocks)?; + // zip it back up + let mut signature_verified_blocks = roots + .into_iter() + .zip(maybe_available_blocks) + .map(|(block_root, maybe_available_block)| { + let consensus_context = ConsensusContext::new(maybe_available_block.slot()) + .set_current_block_root(block_root); + SignatureVerifiedBlock { + block: maybe_available_block, + block_root, + parent: None, + consensus_context, + } + }) + .collect::>(); + + // verify signatures let pubkey_cache = get_validator_pubkey_cache(chain)?; let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); - - let mut signature_verified_blocks = Vec::with_capacity(chain_segment.len()); - - for (block_root, block) in &chain_segment { - let mut consensus_context = - ConsensusContext::new(block.slot()).set_current_block_root(*block_root); - - signature_verifier.include_all_signatures(block.as_block(), &mut consensus_context)?; - - let maybe_available_block = chain - .data_availability_checker - .check_rpc_block_availability(block.clone())?; - - // Save the block and its consensus context. The context will have had its proposer index - // and attesting indices filled in, which can be used to accelerate later block processing. 
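The refactored `signature_verify_chain_segment` above follows an unzip/batch/zip shape: the `(root, block)` pairs are split apart, the KZG proofs for the whole segment are checked in one call, the pairs are reassembled into `SignatureVerifiedBlock`s, and only then is every signature folded into a single batch verifier. A minimal sketch of that shape, with hypothetical stand-in types (`Block`, `Verified` and `batch_verify` are illustrative, not Lighthouse APIs):

```rust
/// Hypothetical stand-ins for Lighthouse's `RpcBlock` and `SignatureVerifiedBlock`.
struct Block {
    slot: u64,
}
struct Verified {
    root: [u8; 32],
    block: Block,
}

/// Verify a segment by batching the expensive check over every block at once,
/// instead of paying its fixed cost once per block.
fn verify_segment(
    segment: Vec<([u8; 32], Block)>,
    batch_verify: impl Fn(&[Block]) -> Result<(), String>,
) -> Result<Vec<Verified>, String> {
    // Unzip the (root, block) pairs so the blocks can be verified in bulk...
    let (roots, blocks): (Vec<_>, Vec<_>) = segment.into_iter().unzip();
    batch_verify(&blocks)?;
    // ...then zip the roots back up with the now-verified blocks.
    Ok(roots
        .into_iter()
        .zip(blocks)
        .map(|(root, block)| Verified { root, block })
        .collect())
}
```

The real code additionally threads a `ConsensusContext` through each block so that later processing stages can reuse the proposer index and attesting indices, as the removed comment below notes.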
- signature_verified_blocks.push(SignatureVerifiedBlock { - block: maybe_available_block, - block_root: *block_root, - parent: None, - consensus_context, - }); + for svb in &mut signature_verified_blocks { + signature_verifier + .include_all_signatures(svb.block.as_block(), &mut svb.consensus_context)?; } if signature_verifier.verify().is_err() { @@ -1159,10 +1163,7 @@ impl IntoExecutionPendingBlock for Arc IntoExecutionPendingBlock for RpcBlock .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?; let maybe_available = chain .data_availability_checker - .check_rpc_block_availability(self.clone()) + .verify_kzg_for_rpc_block(self.clone()) .map_err(|e| { BlockSlashInfo::SignatureNotChecked( self.signed_block_header(), diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index d236e94f939..9cd853ba8c5 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -45,6 +45,13 @@ impl RpcBlock { RpcBlockInner::BlockAndBlobs(block, _) => block, } } + + pub fn blobs(&self) -> Option<&BlobSidecarList> { + match &self.block { + RpcBlockInner::Block(_) => None, + RpcBlockInner::BlockAndBlobs(_, blobs) => Some(blobs), + } + } } /// Note: This variant is intentionally private because we want to safely construct the diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index fd8a3f04606..bffb23aeb7e 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1,4 +1,5 @@ use crate::beacon_chain::{CanonicalHead, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY}; +use crate::beacon_proposer_cache::BeaconProposerCache; use crate::data_availability_checker::DataAvailabilityChecker; use crate::eth1_chain::{CachingEth1Backend, SszEth1}; use crate::eth1_finalization_cache::Eth1FinalizationCache; @@ -10,7 +11,7 @@ use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; use crate::snapshot_cache::{SnapshotCache, DEFAULT_SNAPSHOT_CACHE_SIZE}; use crate::timeout_rw_lock::TimeoutRwLock; -use crate::validator_monitor::ValidatorMonitor; +use crate::validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::ChainConfig; use crate::{ @@ -23,10 +24,10 @@ use fork_choice::{ForkChoice, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; use kzg::{Kzg, TrustedSetup}; use operation_pool::{OperationPool, PersistedOperationPool}; -use parking_lot::RwLock; +use parking_lot::{Mutex, RwLock}; use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use slasher::Slasher; -use slog::{crit, debug, error, info, Logger}; +use slog::{crit, debug, error, info, o, Logger}; use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::per_slot_processing; use std::marker::PhantomData; @@ -35,8 +36,8 @@ use std::time::Duration; use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp}; use task_executor::{ShutdownReason, TaskExecutor}; use types::{ - BeaconBlock, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, Graffiti, Hash256, - PublicKeyBytes, Signature, SignedBeaconBlock, Slot, + BeaconBlock, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, Graffiti, Hash256, Signature, + SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. 
It has no user-facing @@ -93,12 +94,12 @@ pub struct BeaconChainBuilder { log: Option, graffiti: Graffiti, slasher: Option>>, - validator_monitor: Option>, // Pending I/O batch that is constructed during building and should be executed atomically // alongside `PersistedBeaconChain` storage when `BeaconChainBuilder::build` is called. pending_io_batch: Vec, trusted_setup: Option, task_executor: Option, + validator_monitor_config: Option, } impl @@ -135,10 +136,10 @@ where log: None, graffiti: Graffiti::default(), slasher: None, - validator_monitor: None, pending_io_batch: vec![], trusted_setup: None, task_executor: None, + validator_monitor_config: None, } } @@ -623,19 +624,8 @@ where /// Register some validators for additional monitoring. /// /// `validators` is a comma-separated string of 0x-formatted BLS pubkeys. - pub fn monitor_validators( - mut self, - auto_register: bool, - validators: Vec, - individual_metrics_threshold: usize, - log: Logger, - ) -> Self { - self.validator_monitor = Some(ValidatorMonitor::new( - validators, - auto_register, - individual_metrics_threshold, - log.clone(), - )); + pub fn validator_monitor_config(mut self, config: ValidatorMonitorConfig) -> Self { + self.validator_monitor_config = Some(config); self } @@ -671,11 +661,16 @@ where let genesis_state_root = self .genesis_state_root .ok_or("Cannot build without a genesis state root")?; - let mut validator_monitor = self - .validator_monitor - .ok_or("Cannot build without a validator monitor")?; + let validator_monitor_config = self.validator_monitor_config.unwrap_or_default(); let head_tracker = Arc::new(self.head_tracker.unwrap_or_default()); + let beacon_proposer_cache: Arc> = <_>::default(); + let mut validator_monitor = ValidatorMonitor::new( + validator_monitor_config, + beacon_proposer_cache.clone(), + log.new(o!("service" => "val_mon")), + ); + let current_slot = if slot_clock .is_prior_to_genesis() .ok_or("Unable to read slot clock")? @@ -911,7 +906,7 @@ where log.clone(), )), eth1_finalization_cache: TimeoutRwLock::new(Eth1FinalizationCache::new(log.clone())), - beacon_proposer_cache: <_>::default(), + beacon_proposer_cache, block_times_cache: <_>::default(), pre_finalization_block_cache: <_>::default(), validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache), @@ -1097,7 +1092,6 @@ fn descriptive_db_error(item: &str, error: &StoreError) -> String { mod test { use super::*; use crate::test_utils::EphemeralHarnessType; - use crate::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; use ethereum_hashing::hash; use genesis::{ generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH, @@ -1155,12 +1149,6 @@ mod test { .testing_slot_clock(Duration::from_secs(1)) .expect("should configure testing slot clock") .shutdown_sender(shutdown_tx) - .monitor_validators( - true, - vec![], - DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, - log.clone(), - ) .build() .expect("should build"); diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index a8d077a6d04..ad328077d0e 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -240,9 +240,12 @@ impl DataAvailabilityChecker { .put_pending_executed_block(executed_block) } - /// Checks if a block is available, returns a `MaybeAvailableBlock` that may include the fully - /// available block. 
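The renamed `verify_kzg_for_rpc_block` and its new batch sibling `verify_kzg_for_rpc_blocks` (below) make the KZG step explicit: the batch form flattens the blobs of every block in the segment into one list so the proof check runs once rather than once per block. A rough sketch of that collect-then-verify-once shape, assuming hypothetical `Blob` and `verify_blob_batch` stand-ins for the blob sidecar list and `verify_kzg_for_blob_list`:

```rust
struct Blob; // hypothetical stand-in for a blob sidecar

/// Stand-in for `verify_kzg_for_blob_list`: one batched proof check is much
/// cheaper than verifying each blob's KZG proof individually.
fn verify_blob_batch(_blobs: &[&Blob]) -> Result<(), String> {
    Ok(())
}

/// Verify the blobs of many (slot, blobs) pairs with a single batched call.
fn verify_kzg_for_blocks(blocks: &[(u64, Option<Vec<Blob>>)]) -> Result<(), String> {
    // Gather every blob from every block into a single flat list...
    let all_blobs: Vec<&Blob> = blocks
        .iter()
        .filter_map(|(_slot, blobs)| blobs.as_deref())
        .flatten()
        .collect();
    // ...and verify them all at once, skipping the work entirely for
    // segments that carry no blobs.
    if !all_blobs.is_empty() {
        verify_blob_batch(&all_blobs)?;
    }
    Ok(())
}
```

In the real checker each block is then classified as `Available` or `AvailabilityPending` depending on whether the blobs it requires were present; per the `WARNING` doc comments, the function trusts the caller to have already fetched all required blobs.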
- pub fn check_rpc_block_availability( + /// Verifies kzg commitments for an `RpcBlock`, returning a `MaybeAvailableBlock` that may + /// include the fully available block. + /// + /// WARNING: This function assumes all required blobs are already present; it does NOT + /// check if there are any missing blobs. + pub fn verify_kzg_for_rpc_block( &self, block: RpcBlock<T::EthSpec>, ) -> Result<MaybeAvailableBlock<T::EthSpec>, AvailabilityCheckError> { @@ -279,6 +282,68 @@ impl DataAvailabilityChecker { } } + /// Checks if a vector of blocks is available. Returns a vector of `MaybeAvailableBlock`s. + /// This is more efficient than calling `verify_kzg_for_rpc_block` in a loop, as it performs + /// all kzg verification at once. + /// + /// WARNING: This function assumes all required blobs are already present; it does NOT + /// check if there are any missing blobs. + pub fn verify_kzg_for_rpc_blocks( + &self, + blocks: Vec<RpcBlock<T::EthSpec>>, + ) -> Result<Vec<MaybeAvailableBlock<T::EthSpec>>, AvailabilityCheckError> { + let mut results = Vec::with_capacity(blocks.len()); + let all_blobs: BlobSidecarList<T::EthSpec> = blocks + .iter() + .filter(|block| self.blobs_required_for_block(block.as_block())) + // this clone is cheap as it's cloning an Arc + .filter_map(|block| block.blobs().cloned()) + .flatten() + .collect::<Vec<_>>() + .into(); + + // verify kzg for all blobs at once + if !all_blobs.is_empty() { + let kzg = self + .kzg + .as_ref() + .ok_or(AvailabilityCheckError::KzgNotInitialized)?; + verify_kzg_for_blob_list(&all_blobs, kzg)?; + } + + for block in blocks { + let (block_root, block, blobs) = block.deconstruct(); + match blobs { + None => { + if self.blobs_required_for_block(&block) { + results.push(MaybeAvailableBlock::AvailabilityPending { block_root, block }) + } else { + results.push(MaybeAvailableBlock::Available(AvailableBlock { + block_root, + block, + blobs: None, + })) + } + } + Some(blob_list) => { + let verified_blobs = if self.blobs_required_for_block(&block) { + Some(blob_list) + } else { + None + }; + // already verified kzg for all blobs + results.push(MaybeAvailableBlock::Available(AvailableBlock { + block_root, + block, + blobs: verified_blobs, + })) + } + } + } + + Ok(results) + } + /// Determines the blob requirements for a block. If the block is pre-deneb, no blobs are required. /// If the block's epoch is prior to the data availability boundary, no blobs are required. fn blobs_required_for_block(&self, block: &SignedBeaconBlock<T::EthSpec>) -> bool { diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index e2eef45d250..6033293b825 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -771,12 +771,13 @@ mod test { ) -> Arc<HotColdDB<E, LevelDB<E>, LevelDB<E>>> { let hot_path = db_path.path().join("hot_db"); let cold_path = db_path.path().join("cold_db"); + let blobs_path = db_path.path().join("blobs_db"); let config = StoreConfig::default(); HotColdDB::open( &hot_path, &cold_path, - None, + &blobs_path, |_, _, _| Ok(()), config, spec, diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index a23bcdc0b55..0fe68ba19ea 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1033,6 +1033,11 @@ lazy_static!
{ "beacon_aggregated_attestation_subsets_total", "Count of new aggregated attestations that are subsets of already known aggregates" ); + pub static ref VALIDATOR_MONITOR_MISSED_BLOCKS_TOTAL: Result = try_create_int_counter_vec( + "validator_monitor_missed_blocks_total", + "Number of non-finalized blocks missed", + &["validator"] + ); /* * Kzg related metrics diff --git a/beacon_node/beacon_chain/src/observed_blob_sidecars.rs b/beacon_node/beacon_chain/src/observed_blob_sidecars.rs index 6c2f07ff593..f16f38bad55 100644 --- a/beacon_node/beacon_chain/src/observed_blob_sidecars.rs +++ b/beacon_node/beacon_chain/src/observed_blob_sidecars.rs @@ -87,7 +87,7 @@ impl ObservedBlobSidecars { Ok(()) } - /// Prune all values earlier than the given slot. + /// Prune `blob_sidecar` observations for slots less than or equal to the given slot. pub fn prune(&mut self, finalized_slot: Slot) { if finalized_slot == 0 { return; diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 88b5682505d..e42ee20c48d 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,20 +1,14 @@ //! Utilities for managing database schema changes. -mod migration_schema_v12; -mod migration_schema_v13; -mod migration_schema_v14; -mod migration_schema_v15; -mod migration_schema_v16; mod migration_schema_v17; mod migration_schema_v18; -use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY}; -use crate::eth1_chain::SszEth1; +use crate::beacon_chain::BeaconChainTypes; use crate::types::ChainSpec; -use slog::{warn, Logger}; +use slog::Logger; use std::sync::Arc; use store::hot_cold_store::{HotColdDB, HotColdDBError}; use store::metadata::{SchemaVersion, CURRENT_SCHEMA_VERSION}; -use store::{Error as StoreError, StoreItem}; +use store::Error as StoreError; /// Migrate the database from one schema version to another, applying all requisite mutations. #[allow(clippy::only_used_in_recursion)] // spec is not used but likely to be used in future @@ -57,92 +51,8 @@ pub fn migrate_schema( } // - // Migrations from before SchemaVersion(11) are deprecated. + // Migrations from before SchemaVersion(16) are deprecated. // - - // Upgrade from v11 to v12 to store richer metadata in the attestation op pool. - (SchemaVersion(11), SchemaVersion(12)) => { - let ops = migration_schema_v12::upgrade_to_v12::(db.clone(), log)?; - db.store_schema_version_atomically(to, ops) - } - // Downgrade from v12 to v11 to drop richer metadata from the attestation op pool. - (SchemaVersion(12), SchemaVersion(11)) => { - let ops = migration_schema_v12::downgrade_from_v12::(db.clone(), log)?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(12), SchemaVersion(13)) => { - let mut ops = vec![]; - if let Some(persisted_eth1_v1) = db.get_item::(Ð1_CACHE_DB_KEY)? { - let upgraded_eth1_cache = - match migration_schema_v13::update_eth1_cache(persisted_eth1_v1) { - Ok(upgraded_eth1) => upgraded_eth1, - Err(e) => { - warn!(log, "Failed to deserialize SszEth1CacheV1"; "error" => ?e); - warn!(log, "Reinitializing eth1 cache"); - migration_schema_v13::reinitialized_eth1_cache_v13( - deposit_contract_deploy_block, - ) - } - }; - ops.push(upgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - (SchemaVersion(13), SchemaVersion(12)) => { - let mut ops = vec![]; - if let Some(persisted_eth1_v13) = db.get_item::(Ð1_CACHE_DB_KEY)? 
{ - let downgraded_eth1_cache = match migration_schema_v13::downgrade_eth1_cache( - persisted_eth1_v13, - ) { - Ok(Some(downgraded_eth1)) => downgraded_eth1, - Ok(None) => { - warn!(log, "Unable to downgrade eth1 cache from newer version: reinitializing eth1 cache"); - migration_schema_v13::reinitialized_eth1_cache_v1( - deposit_contract_deploy_block, - ) - } - Err(e) => { - warn!(log, "Unable to downgrade eth1 cache from newer version: failed to deserialize SszEth1CacheV13"; "error" => ?e); - warn!(log, "Reinitializing eth1 cache"); - migration_schema_v13::reinitialized_eth1_cache_v1( - deposit_contract_deploy_block, - ) - } - }; - ops.push(downgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY)); - } - - db.store_schema_version_atomically(to, ops)?; - - Ok(()) - } - (SchemaVersion(13), SchemaVersion(14)) => { - let ops = migration_schema_v14::upgrade_to_v14::(db.clone(), log)?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(14), SchemaVersion(13)) => { - let ops = migration_schema_v14::downgrade_from_v14::(db.clone(), log)?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(14), SchemaVersion(15)) => { - let ops = migration_schema_v15::upgrade_to_v15::(db.clone(), log)?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(15), SchemaVersion(14)) => { - let ops = migration_schema_v15::downgrade_from_v15::(db.clone(), log)?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(15), SchemaVersion(16)) => { - let ops = migration_schema_v16::upgrade_to_v16::(db.clone(), log)?; - db.store_schema_version_atomically(to, ops) - } - (SchemaVersion(16), SchemaVersion(15)) => { - let ops = migration_schema_v16::downgrade_from_v16::(db.clone(), log)?; - db.store_schema_version_atomically(to, ops) - } (SchemaVersion(16), SchemaVersion(17)) => { let ops = migration_schema_v17::upgrade_to_v17::(db.clone(), log)?; db.store_schema_version_atomically(to, ops) diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs deleted file mode 100644 index ab267796e14..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs +++ /dev/null @@ -1,222 +0,0 @@ -use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}; -use crate::persisted_fork_choice::PersistedForkChoiceV11; -use operation_pool::{PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV5}; -use slog::{debug, info, Logger}; -use state_processing::{ - common::get_indexed_attestation, per_block_processing::is_valid_indexed_attestation, - VerifyOperation, VerifySignatures, -}; -use std::sync::Arc; -use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; - -pub fn upgrade_to_v12( - db: Arc>, - log: Logger, -) -> Result, Error> { - let spec = db.get_chain_spec(); - - // Load a V5 op pool and transform it to V12. - let Some(PersistedOperationPoolV5 { - attestations_v5, - sync_contributions, - attester_slashings_v5, - proposer_slashings_v5, - voluntary_exits_v5, - }) = db.get_item(&OP_POOL_DB_KEY)? - else { - debug!(log, "Nothing to do, no operation pool stored"); - return Ok(vec![]); - }; - - // Load the persisted fork choice so we can grab the state of the justified block and use - // it to verify the stored attestations, slashings and exits. - let fork_choice = db - .get_item::(&FORK_CHOICE_DB_KEY)? 
- .ok_or_else(|| Error::SchemaMigrationError("fork choice missing from database".into()))?; - let justified_block_root = fork_choice - .fork_choice_store - .unrealized_justified_checkpoint - .root; - let justified_block = db - .get_blinded_block(&justified_block_root)? - .ok_or_else(|| { - Error::SchemaMigrationError(format!( - "unrealized justified block missing for migration: {justified_block_root:?}", - )) - })?; - let justified_state_root = justified_block.state_root(); - let mut state = db - .get_state(&justified_state_root, Some(justified_block.slot()))? - .ok_or_else(|| { - Error::SchemaMigrationError(format!( - "justified state missing for migration: {justified_state_root:?}" - )) - })?; - state.build_all_committee_caches(spec).map_err(|e| { - Error::SchemaMigrationError(format!("unable to build committee caches: {e:?}")) - })?; - - // Re-verify attestations while adding attesting indices. - let attestations = attestations_v5 - .into_iter() - .flat_map(|(_, attestations)| attestations) - .filter_map(|attestation| { - let res = state - .get_beacon_committee(attestation.data.slot, attestation.data.index) - .map_err(Into::into) - .and_then(|committee| get_indexed_attestation(committee.committee, &attestation)) - .and_then(|indexed_attestation| { - is_valid_indexed_attestation( - &state, - &indexed_attestation, - VerifySignatures::True, - spec, - )?; - Ok(indexed_attestation) - }); - - match res { - Ok(indexed) => Some((attestation, indexed.attesting_indices.into())), - Err(e) => { - debug!( - log, - "Dropping attestation on migration"; - "err" => ?e, - "head_block" => ?attestation.data.beacon_block_root, - ); - None - } - } - }) - .collect::>(); - - let attester_slashings = attester_slashings_v5 - .iter() - .filter_map(|(slashing, _)| { - slashing - .clone() - .validate(&state, spec) - .map_err(|e| { - debug!( - log, - "Dropping attester slashing on migration"; - "err" => ?e, - "slashing" => ?slashing, - ); - }) - .ok() - }) - .collect::>(); - - let proposer_slashings = proposer_slashings_v5 - .iter() - .filter_map(|slashing| { - slashing - .clone() - .validate(&state, spec) - .map_err(|e| { - debug!( - log, - "Dropping proposer slashing on migration"; - "err" => ?e, - "slashing" => ?slashing, - ); - }) - .ok() - }) - .collect::>(); - - let voluntary_exits = voluntary_exits_v5 - .iter() - .filter_map(|exit| { - exit.clone() - .validate(&state, spec) - .map_err(|e| { - debug!( - log, - "Dropping voluntary exit on migration"; - "err" => ?e, - "exit" => ?exit, - ); - }) - .ok() - }) - .collect::>(); - - debug!( - log, - "Migrated op pool"; - "attestations" => attestations.len(), - "attester_slashings" => attester_slashings.len(), - "proposer_slashings" => proposer_slashings.len(), - "voluntary_exits" => voluntary_exits.len() - ); - - let v12 = PersistedOperationPool::V12(PersistedOperationPoolV12 { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - }); - Ok(vec![v12.as_kv_store_op(OP_POOL_DB_KEY)]) -} - -pub fn downgrade_from_v12( - db: Arc>, - log: Logger, -) -> Result, Error> { - // Load a V12 op pool and transform it to V5. - let Some(PersistedOperationPoolV12:: { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - }) = db.get_item(&OP_POOL_DB_KEY)? 
- else { - debug!(log, "Nothing to do, no operation pool stored"); - return Ok(vec![]); - }; - - info!( - log, - "Dropping attestations from pool"; - "count" => attestations.len(), - ); - - let attester_slashings_v5 = attester_slashings - .into_iter() - .filter_map(|slashing| { - let fork_version = slashing.first_fork_verified_against()?; - Some((slashing.into_inner(), fork_version)) - }) - .collect::>(); - - let proposer_slashings_v5 = proposer_slashings - .into_iter() - .map(|slashing| slashing.into_inner()) - .collect::>(); - - let voluntary_exits_v5 = voluntary_exits - .into_iter() - .map(|exit| exit.into_inner()) - .collect::>(); - - info!( - log, - "Migrated slashings and exits"; - "attester_slashings" => attester_slashings_v5.len(), - "proposer_slashings" => proposer_slashings_v5.len(), - "voluntary_exits" => voluntary_exits_v5.len(), - ); - - let v5 = PersistedOperationPoolV5 { - attestations_v5: vec![], - sync_contributions, - attester_slashings_v5, - proposer_slashings_v5, - voluntary_exits_v5, - }; - Ok(vec![v5.as_kv_store_op(OP_POOL_DB_KEY)]) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v13.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v13.rs deleted file mode 100644 index d4ac9746032..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v13.rs +++ /dev/null @@ -1,150 +0,0 @@ -use crate::eth1_chain::SszEth1; -use eth1::{BlockCache, SszDepositCacheV1, SszDepositCacheV13, SszEth1CacheV1, SszEth1CacheV13}; -use ssz::{Decode, Encode}; -use state_processing::common::DepositDataTree; -use store::Error; -use types::DEPOSIT_TREE_DEPTH; - -pub fn update_eth1_cache(persisted_eth1_v1: SszEth1) -> Result { - if persisted_eth1_v1.use_dummy_backend { - // backend_bytes is empty when using dummy backend - return Ok(persisted_eth1_v1); - } - - let SszEth1 { - use_dummy_backend, - backend_bytes, - } = persisted_eth1_v1; - - let ssz_eth1_cache_v1 = SszEth1CacheV1::from_ssz_bytes(&backend_bytes)?; - let SszEth1CacheV1 { - block_cache, - deposit_cache: deposit_cache_v1, - last_processed_block, - } = ssz_eth1_cache_v1; - - let SszDepositCacheV1 { - logs, - leaves, - deposit_contract_deploy_block, - deposit_roots, - } = deposit_cache_v1; - - let deposit_cache_v13 = SszDepositCacheV13 { - logs, - leaves, - deposit_contract_deploy_block, - finalized_deposit_count: 0, - finalized_block_height: deposit_contract_deploy_block.saturating_sub(1), - deposit_tree_snapshot: None, - deposit_roots, - }; - - let ssz_eth1_cache_v13 = SszEth1CacheV13 { - block_cache, - deposit_cache: deposit_cache_v13, - last_processed_block, - }; - - let persisted_eth1_v13 = SszEth1 { - use_dummy_backend, - backend_bytes: ssz_eth1_cache_v13.as_ssz_bytes(), - }; - - Ok(persisted_eth1_v13) -} - -pub fn downgrade_eth1_cache(persisted_eth1_v13: SszEth1) -> Result, Error> { - if persisted_eth1_v13.use_dummy_backend { - // backend_bytes is empty when using dummy backend - return Ok(Some(persisted_eth1_v13)); - } - - let SszEth1 { - use_dummy_backend, - backend_bytes, - } = persisted_eth1_v13; - - let ssz_eth1_cache_v13 = SszEth1CacheV13::from_ssz_bytes(&backend_bytes)?; - let SszEth1CacheV13 { - block_cache, - deposit_cache: deposit_cache_v13, - last_processed_block, - } = ssz_eth1_cache_v13; - - let SszDepositCacheV13 { - logs, - leaves, - deposit_contract_deploy_block, - finalized_deposit_count, - finalized_block_height: _, - deposit_tree_snapshot, - deposit_roots, - } = deposit_cache_v13; - - if finalized_deposit_count == 0 && deposit_tree_snapshot.is_none() { 
- // This tree was never finalized and can be directly downgraded to v1 without re-initializing - let deposit_cache_v1 = SszDepositCacheV1 { - logs, - leaves, - deposit_contract_deploy_block, - deposit_roots, - }; - let ssz_eth1_cache_v1 = SszEth1CacheV1 { - block_cache, - deposit_cache: deposit_cache_v1, - last_processed_block, - }; - return Ok(Some(SszEth1 { - use_dummy_backend, - backend_bytes: ssz_eth1_cache_v1.as_ssz_bytes(), - })); - } - // deposit cache was finalized; can't downgrade - Ok(None) -} - -pub fn reinitialized_eth1_cache_v13(deposit_contract_deploy_block: u64) -> SszEth1 { - let empty_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); - let deposit_cache_v13 = SszDepositCacheV13 { - logs: vec![], - leaves: vec![], - deposit_contract_deploy_block, - finalized_deposit_count: 0, - finalized_block_height: deposit_contract_deploy_block.saturating_sub(1), - deposit_tree_snapshot: empty_tree.get_snapshot(), - deposit_roots: vec![empty_tree.root()], - }; - - let ssz_eth1_cache_v13 = SszEth1CacheV13 { - block_cache: BlockCache::default(), - deposit_cache: deposit_cache_v13, - last_processed_block: None, - }; - - SszEth1 { - use_dummy_backend: false, - backend_bytes: ssz_eth1_cache_v13.as_ssz_bytes(), - } -} - -pub fn reinitialized_eth1_cache_v1(deposit_contract_deploy_block: u64) -> SszEth1 { - let empty_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); - let deposit_cache_v1 = SszDepositCacheV1 { - logs: vec![], - leaves: vec![], - deposit_contract_deploy_block, - deposit_roots: vec![empty_tree.root()], - }; - - let ssz_eth1_cache_v1 = SszEth1CacheV1 { - block_cache: BlockCache::default(), - deposit_cache: deposit_cache_v1, - last_processed_block: None, - }; - - SszEth1 { - use_dummy_backend: false, - backend_bytes: ssz_eth1_cache_v1.as_ssz_bytes(), - } -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs deleted file mode 100644 index 52a990dc6e5..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs +++ /dev/null @@ -1,118 +0,0 @@ -use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; -use operation_pool::{ - PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14, -}; -use slog::{debug, error, info, Logger}; -use slot_clock::SlotClock; -use std::sync::Arc; -use std::time::Duration; -use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; -use types::{EthSpec, Hash256, Slot}; - -/// The slot clock isn't usually available before the database is initialized, so we construct a -/// temporary slot clock by reading the genesis state. It should always exist if the database is -/// initialized at a prior schema version, however we still handle the lack of genesis state -/// gracefully. -fn get_slot_clock( - db: &HotColdDB, - log: &Logger, -) -> Result, Error> { - let spec = db.get_chain_spec(); - let Some(genesis_block) = db.get_blinded_block(&Hash256::zero())? else { - error!(log, "Missing genesis block"); - return Ok(None); - }; - let Some(genesis_state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? 
else { - error!(log, "Missing genesis state"; "state_root" => ?genesis_block.state_root()); - return Ok(None); - }; - Ok(Some(T::SlotClock::new( - spec.genesis_slot, - Duration::from_secs(genesis_state.genesis_time()), - Duration::from_secs(spec.seconds_per_slot), - ))) -} - -pub fn upgrade_to_v14( - db: Arc>, - log: Logger, -) -> Result, Error> { - // Load a V12 op pool and transform it to V14. - let Some(PersistedOperationPoolV12:: { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - }) = db.get_item(&OP_POOL_DB_KEY)? - else { - debug!(log, "Nothing to do, no operation pool stored"); - return Ok(vec![]); - }; - - // initialize with empty vector - let bls_to_execution_changes = vec![]; - let v14 = PersistedOperationPool::V14(PersistedOperationPoolV14 { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - }); - Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)]) -} - -pub fn downgrade_from_v14( - db: Arc>, - log: Logger, -) -> Result, Error> { - // We cannot downgrade from V14 once the Capella fork has been reached because there will - // be HistoricalSummaries stored in the database instead of HistoricalRoots and prior versions - // of Lighthouse can't handle that. - if let Some(capella_fork_epoch) = db.get_chain_spec().capella_fork_epoch { - let current_epoch = get_slot_clock::(&db, &log)? - .and_then(|clock| clock.now()) - .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) - .ok_or(Error::SlotClockUnavailableForMigration)?; - - if current_epoch >= capella_fork_epoch { - error!( - log, - "Capella already active: v14+ is mandatory"; - "current_epoch" => current_epoch, - "capella_fork_epoch" => capella_fork_epoch, - ); - return Err(Error::UnableToDowngrade); - } - } - - // Load a V14 op pool and transform it to V12. - let Some(PersistedOperationPoolV14:: { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - }) = db.get_item(&OP_POOL_DB_KEY)? - else { - debug!(log, "Nothing to do, no operation pool stored"); - return Ok(vec![]); - }; - - info!( - log, - "Dropping bls_to_execution_changes from pool"; - "count" => bls_to_execution_changes.len(), - ); - - let v12 = PersistedOperationPoolV12 { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - }; - Ok(vec![v12.as_kv_store_op(OP_POOL_DB_KEY)]) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs deleted file mode 100644 index 0eb2c5fa3fc..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs +++ /dev/null @@ -1,74 +0,0 @@ -use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY}; -use operation_pool::{ - PersistedOperationPool, PersistedOperationPoolV14, PersistedOperationPoolV15, -}; -use slog::{debug, info, Logger}; -use std::sync::Arc; -use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; - -pub fn upgrade_to_v15( - db: Arc>, - log: Logger, -) -> Result, Error> { - // Load a V14 op pool and transform it to V15. - let Some(PersistedOperationPoolV14:: { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - }) = db.get_item(&OP_POOL_DB_KEY)? 
- else { - debug!(log, "Nothing to do, no operation pool stored"); - return Ok(vec![]); - }; - - let v15 = PersistedOperationPool::V15(PersistedOperationPoolV15 { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - // Initialize with empty set - capella_bls_change_broadcast_indices: <_>::default(), - }); - Ok(vec![v15.as_kv_store_op(OP_POOL_DB_KEY)]) -} - -pub fn downgrade_from_v15( - db: Arc>, - log: Logger, -) -> Result, Error> { - // Load a V15 op pool and transform it to V14. - let Some(PersistedOperationPoolV15:: { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - capella_bls_change_broadcast_indices, - }) = db.get_item(&OP_POOL_DB_KEY)? - else { - debug!(log, "Nothing to do, no operation pool stored"); - return Ok(vec![]); - }; - - info!( - log, - "Forgetting address changes for Capella broadcast"; - "count" => capella_bls_change_broadcast_indices.len(), - ); - - let v14 = PersistedOperationPoolV14 { - attestations, - sync_contributions, - attester_slashings, - proposer_slashings, - voluntary_exits, - bls_to_execution_changes, - }; - Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)]) -} diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v16.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v16.rs deleted file mode 100644 index 230573b0288..00000000000 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v16.rs +++ /dev/null @@ -1,46 +0,0 @@ -use crate::beacon_chain::{BeaconChainTypes, FORK_CHOICE_DB_KEY}; -use crate::persisted_fork_choice::PersistedForkChoiceV11; -use slog::{debug, Logger}; -use std::sync::Arc; -use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem}; - -pub fn upgrade_to_v16( - db: Arc>, - log: Logger, -) -> Result, Error> { - drop_balances_cache::(db, log) -} - -pub fn downgrade_from_v16( - db: Arc>, - log: Logger, -) -> Result, Error> { - drop_balances_cache::(db, log) -} - -/// Drop the balances cache from the fork choice store. -/// -/// There aren't any type-level changes in this schema migration, however the -/// way that we compute the `JustifiedBalances` has changed due to: -/// https://github.com/sigp/lighthouse/pull/3962 -pub fn drop_balances_cache( - db: Arc>, - log: Logger, -) -> Result, Error> { - let mut persisted_fork_choice = db - .get_item::(&FORK_CHOICE_DB_KEY)? - .ok_or_else(|| Error::SchemaMigrationError("fork choice missing from database".into()))?; - - debug!( - log, - "Dropping fork choice balances cache"; - "item_count" => persisted_fork_choice.fork_choice_store.balances_cache.items.len() - ); - - // Drop all items in the balances cache. 
- persisted_fork_choice.fork_choice_store.balances_cache = <_>::default(); - - let kv_op = persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY); - - Ok(vec![kv_op]) -} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 333318f52f2..23af0c81261 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -6,7 +6,7 @@ pub use crate::{ beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, sync_committee_verification::Error as SyncCommitteeError, - validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, + validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}, BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification, }; use crate::{ @@ -181,6 +181,7 @@ pub struct Builder { execution_layer: Option>, mock_execution_layer: Option>, testing_slot_clock: Option, + validator_monitor_config: Option, runtime: TestRuntime, log: Logger, } @@ -316,6 +317,7 @@ where execution_layer: None, mock_execution_layer: None, testing_slot_clock: None, + validator_monitor_config: None, runtime, log, } @@ -388,6 +390,14 @@ where self } + pub fn validator_monitor_config( + mut self, + validator_monitor_config: ValidatorMonitorConfig, + ) -> Self { + self.validator_monitor_config = Some(validator_monitor_config); + self + } + /// Purposefully replace the `store_mutator`. pub fn override_store_mutator(mut self, mutator: BoxedMutator) -> Self { assert!(self.store_mutator.is_some(), "store mutator not set"); @@ -494,11 +504,13 @@ where let validator_keypairs = self .validator_keypairs .expect("cannot build without validator keypairs"); - let chain_config = self.chain_config.unwrap_or_default(); let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) .map_err(|e| format!("Unable to read trusted setup file: {}", e)) .unwrap(); + let validator_monitor_config = self.validator_monitor_config.unwrap_or_default(); + + let chain_config = self.chain_config.unwrap_or_default(); let mut builder = BeaconChainBuilder::new(self.eth_spec_instance) .logger(log.clone()) .custom_spec(spec) @@ -518,7 +530,7 @@ where log.clone(), 5, ))) - .monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log) + .validator_monitor_config(validator_monitor_config) .trusted_setup(trusted_setup); builder = if let Some(mutator) = self.initial_mutator { diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index 396aac71b07..8cea9c07693 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -2,10 +2,14 @@ //! //! This component should not affect consensus. 
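The missed-block tracking added to the validator monitor below relies on a property of `BeaconState`: the block-roots vector repeats the previous root across skipped slots, so `block_root(slot) == block_root(slot - 1)` means no block was applied at `slot` and its scheduled proposer missed. A simplified sketch of that scan, where `root_at` and `proposer_at` are hypothetical stand-ins for `BeaconState::get_block_root` and a proposer-cache lookup:

```rust
use std::collections::HashSet;

/// Mirrors the new `MissedBlock` key: entries dedup on (slot, parent root, proposer).
#[derive(PartialEq, Eq, Hash)]
struct MissedBlock {
    slot: u64,
    parent_root: [u8; 32],
    validator_index: u64,
}

/// Scan `start..=end` and record a miss wherever two consecutive slots resolve
/// to the same block root.
fn find_missed_blocks(
    start: u64,
    end: u64,
    root_at: impl Fn(u64) -> [u8; 32],
    proposer_at: impl Fn(u64) -> u64,
) -> HashSet<MissedBlock> {
    let mut missed = HashSet::new();
    for slot in (start + 1)..=end {
        // An unchanged root means no block was applied at `slot`, so the
        // proposer scheduled for `slot` missed their block.
        if root_at(slot) == root_at(slot - 1) {
            missed.insert(MissedBlock {
                slot,
                parent_root: root_at(slot - 1),
                validator_index: proposer_at(slot),
            });
        }
    }
    missed
}
```

The real implementation also trails the head by `MISSED_BLOCK_LAG_SLOTS` so that blocks still propagating are not flagged, and prunes entries once they fall more than `MISSED_BLOCK_LOOKBACK_EPOCHS` behind finalization.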
+use crate::beacon_proposer_cache::{BeaconProposerCache, TYPICAL_SLOTS_PER_EPOCH}; use crate::metrics; -use parking_lot::RwLock; -use slog::{crit, debug, info, Logger}; +use itertools::Itertools; +use parking_lot::{Mutex, RwLock}; +use serde::{Deserialize, Serialize}; +use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; +use smallvec::SmallVec; use state_processing::per_epoch_processing::{ errors::EpochProcessingError, EpochProcessingSummary, }; @@ -14,6 +18,7 @@ use std::convert::TryFrom; use std::io; use std::marker::PhantomData; use std::str::Utf8Error; +use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::AbstractExecPayload; use types::{ @@ -35,7 +40,34 @@ pub const HISTORIC_EPOCHS: usize = 10; /// Once the validator monitor reaches this number of validators it will stop /// tracking their metrics/logging individually in an effort to reduce /// Prometheus cardinality and log volume. -pub const DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD: usize = 64; +const DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD: usize = 64; + +/// Lag slots used in detecting missed blocks for the monitored validators. +pub const MISSED_BLOCK_LAG_SLOTS: usize = 4; + +/// The number of epochs to look back when determining if a validator has missed a block. +/// This value is used with the beacon_proposer_cache; setting it to anything higher than 1 is +/// likely to be problematic because the beacon_proposer_cache is only populated for the current +/// and the previous epoch. +pub const MISSED_BLOCK_LOOKBACK_EPOCHS: u64 = 1; + +/// Initial configuration values for the `ValidatorMonitor`. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct ValidatorMonitorConfig { + pub auto_register: bool, + pub validators: Vec<PublicKeyBytes>, + pub individual_tracking_threshold: usize, +} + +impl Default for ValidatorMonitorConfig { + fn default() -> Self { + Self { + auto_register: false, + validators: vec![], + individual_tracking_threshold: DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, + } + } +} #[derive(Debug)] pub enum Error { @@ -323,6 +355,13 @@ impl MonitoredValidator { } } +#[derive(PartialEq, Hash, Eq)] +struct MissedBlock { + slot: Slot, + parent_root: Hash256, + validator_index: u64, +} + /// Holds a collection of `MonitoredValidator` and is notified about a variety of events on the P2P /// network, HTTP API and `BeaconChain`. /// @@ -343,26 +382,37 @@ pub struct ValidatorMonitor { /// large validator counts causing infeasibly high cardinality for /// Prometheus and high log volumes.
individual_tracking_threshold: usize, + /// A set of the (non-finalized) missed blocks, keyed by slot, parent root and validator + /// index (as in `state.validators`). + missed_blocks: HashSet<MissedBlock>, + /// A cache of beacon proposers, shared with the beacon chain. + beacon_proposer_cache: Arc<Mutex<BeaconProposerCache>>, log: Logger, _phantom: PhantomData<T>, } impl<T: EthSpec> ValidatorMonitor<T> { pub fn new( - pubkeys: Vec<PublicKeyBytes>, - auto_register: bool, - individual_tracking_threshold: usize, + config: ValidatorMonitorConfig, + beacon_proposer_cache: Arc<Mutex<BeaconProposerCache>>, log: Logger, ) -> Self { + let ValidatorMonitorConfig { + auto_register, + validators, + individual_tracking_threshold, + } = config; + let mut s = Self { validators: <_>::default(), indices: <_>::default(), auto_register, individual_tracking_threshold, + missed_blocks: <_>::default(), + beacon_proposer_cache, log, _phantom: PhantomData, }; - for pubkey in pubkeys { + for pubkey in validators { s.add_validator_pubkey(pubkey) } s @@ -411,6 +461,9 @@ impl ValidatorMonitor { self.indices.insert(i, validator.pubkey); }); + // Add missed non-finalized blocks for the monitored validators. + self.add_validators_missed_blocks(state); + // Update metrics for individual validators. for monitored_validator in self.validators.values() { if let Some(i) = monitored_validator.index { @@ -489,6 +542,116 @@ impl ValidatorMonitor { } } } + + // Prune missed blocks that are more than MISSED_BLOCK_LOOKBACK_EPOCHS prior to the last finalized epoch. + let finalized_epoch = state.finalized_checkpoint().epoch; + self.missed_blocks.retain(|missed_block| { + let epoch = missed_block.slot.epoch(T::slots_per_epoch()); + epoch + Epoch::new(MISSED_BLOCK_LOOKBACK_EPOCHS) >= finalized_epoch + }); + } + + /// Add missed non-finalized blocks for the monitored validators. + fn add_validators_missed_blocks(&mut self, state: &BeaconState<T>) { + // Define range variables. + let current_slot = state.slot(); + let current_epoch = current_slot.epoch(T::slots_per_epoch()); + // start_slot needs to be coherent with what can be retrieved from the beacon_proposer_cache. + let start_slot = current_epoch.start_slot(T::slots_per_epoch()) + - Slot::new(MISSED_BLOCK_LOOKBACK_EPOCHS * T::slots_per_epoch()); + + let end_slot = current_slot.saturating_sub(MISSED_BLOCK_LAG_SLOTS).as_u64(); + + // List of proposers per epoch from the beacon_proposer_cache. + let mut proposers_per_epoch: Option<SmallVec<[usize; TYPICAL_SLOTS_PER_EPOCH]>> = None; + + for (prev_slot, slot) in (start_slot.as_u64()..=end_slot) + .map(Slot::new) + .tuple_windows() + { + // The condition for a missed block is block_root(slot) == block_root(slot - 1), + // where the proposer who missed the block is the proposer scheduled for `slot`. + if let (Ok(block_root), Ok(prev_block_root)) = + (state.get_block_root(slot), state.get_block_root(prev_slot)) + { + // Found a missed block. + if block_root == prev_block_root { + let slot_epoch = slot.epoch(T::slots_per_epoch()); + let prev_slot_epoch = prev_slot.epoch(T::slots_per_epoch()); + + if let Ok(shuffling_decision_block) = + state.proposer_shuffling_decision_root_at_epoch(slot_epoch, *block_root) + { + // Only update the cached list if it needs to be initialised or if + // `slot` has crossed into a new epoch. + if proposers_per_epoch.is_none() || slot_epoch != prev_slot_epoch { + proposers_per_epoch = self.get_proposers_by_epoch_from_cache( + slot_epoch, + shuffling_decision_block, + ); + } + + // Only add missed blocks for the proposer if it's in the list of monitored validators. + let slot_in_epoch = slot % T::slots_per_epoch(); + if let Some(proposer_index) = proposers_per_epoch + .as_deref() + .and_then(|proposers| proposers.get(slot_in_epoch.as_usize())) + { + let i = *proposer_index as u64; + if let Some(pub_key) = self.indices.get(&i) { + if let Some(validator) = self.validators.get(pub_key) { + let missed_block = MissedBlock { + slot, + parent_root: *prev_block_root, + validator_index: i, + }; + // Increment the missed block counter for the validator only if the + // block doesn't already exist in the hashset. + if self.missed_blocks.insert(missed_block) { + self.aggregatable_metric(&validator.id, |label| { + metrics::inc_counter_vec( + &metrics::VALIDATOR_MONITOR_MISSED_BLOCKS_TOTAL, + &[label], + ); + }); + error!( + self.log, + "Validator missed a block"; + "index" => i, + "slot" => slot, + "parent block root" => ?prev_block_root, + ); + } + } + } else { + warn!( + self.log, + "Missing validator index"; + "info" => "potential inconsistency in the validator monitor", + "index" => i, + ) + } + } else { + debug!( + self.log, + "Could not get proposers from cache"; + "epoch" => ?slot_epoch + ); + } + } + } + } + } + } + + fn get_proposers_by_epoch_from_cache( + &mut self, + epoch: Epoch, + shuffling_decision_block: Hash256, + ) -> Option<SmallVec<[usize; TYPICAL_SLOTS_PER_EPOCH]>> { + let mut cache = self.beacon_proposer_cache.lock(); + cache + .get_epoch::<T>(shuffling_decision_block, epoch) + .cloned() } /// Run `func` with the `TOTAL_LABEL` and optionally the @@ -822,6 +985,17 @@ impl ValidatorMonitor { } } + pub fn get_monitored_validator_missed_block_count(&self, validator_index: u64) -> u64 { + self.missed_blocks + .iter() + .filter(|missed_block| missed_block.validator_index == validator_index) + .count() as u64 + } + + pub fn get_beacon_proposer_cache(&self) -> Arc<Mutex<BeaconProposerCache>> { + self.beacon_proposer_cache.clone() + } + /// If `self.auto_register == true`, add the `validator_index` to `self.monitored_validators`. /// Otherwise, do nothing.
pub fn auto_register_local_validator(&mut self, validator_index: u64) { diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index a8ad75304b4..fdc37b55296 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -140,7 +140,7 @@ async fn produces_attestations() { available_block, ) = chain .data_availability_checker - .check_rpc_block_availability(rpc_block) + .verify_kzg_for_rpc_block(rpc_block) .unwrap() else { panic!("block should be available") @@ -218,7 +218,7 @@ async fn early_attester_cache_old_request() { harness .chain .data_availability_checker - .check_rpc_block_availability(rpc_block) + .verify_kzg_for_rpc_block(rpc_block) .unwrap() else { panic!("block should be available") diff --git a/beacon_node/beacon_chain/tests/main.rs b/beacon_node/beacon_chain/tests/main.rs index 332f6a48298..e0564e1510b 100644 --- a/beacon_node/beacon_chain/tests/main.rs +++ b/beacon_node/beacon_chain/tests/main.rs @@ -10,3 +10,4 @@ mod rewards; mod store_tests; mod sync_committee_verification; mod tests; +mod validator_monitor; diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index f4af490710d..f6cf40a3962 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -29,12 +29,13 @@ fn get_store(db_path: &TempDir) -> Arc { let spec = test_spec::(); let hot_path = db_path.path().join("hot_db"); let cold_path = db_path.path().join("cold_db"); + let blobs_path = db_path.path().join("blobs_db"); let config = StoreConfig::default(); let log = NullLoggerBuilder.build().expect("logger should build"); HotColdDB::open( &hot_path, &cold_path, - None, + &blobs_path, |_, _, _| Ok(()), config, spec, diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index 7c8f01cf55c..a78463ef5d7 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -219,6 +219,156 @@ async fn test_verify_attestation_rewards_base_inactivity_leak() { assert_eq!(expected_balances, balances); } +#[tokio::test] +async fn test_verify_attestation_rewards_base_inactivity_leak_justification_epoch() { + let spec = E::default_spec(); + let harness = get_harness(spec.clone()); + + let half = VALIDATOR_COUNT / 2; + let half_validators: Vec = (0..half).collect(); + // target epoch is the epoch where the chain enters inactivity leak + let mut target_epoch = &spec.min_epochs_to_inactivity_penalty + 2; + + // advance until beginning of epoch N + 2 + harness + .extend_chain( + (E::slots_per_epoch() * (target_epoch + 1)) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(half_validators.clone()), + ) + .await; + + // advance to create first justification epoch and get initial balances + harness.extend_slots(E::slots_per_epoch() as usize).await; + target_epoch += 1; + let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + + //assert previous_justified_checkpoint matches 0 as we were in inactivity leak from beginning + assert_eq!( + 0, + harness + .get_current_state() + .previous_justified_checkpoint() + .epoch + .as_u64() + ); + + // extend slots to beginning of epoch N + 1 + harness.extend_slots(E::slots_per_epoch() as usize).await; + + //assert target epoch and previous_justified_checkpoint match + assert_eq!( + target_epoch, + 
harness + .get_current_state() + .previous_justified_checkpoint() + .epoch + .as_u64() + ); + + // compute reward deltas for all validators in epoch N + let StandardAttestationRewards { + ideal_rewards, + total_rewards, + } = harness + .chain + .compute_attestation_rewards(Epoch::new(target_epoch), vec![]) + .unwrap(); + + // assert we successfully get ideal rewards for justified epoch out of inactivity leak + assert!(ideal_rewards + .iter() + .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0)); + + // apply attestation rewards to initial balances + let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); + + // verify expected balances against actual balances + let balances: Vec = harness.get_current_state().balances().clone().into(); + assert_eq!(expected_balances, balances); +} + +#[tokio::test] +async fn test_verify_attestation_rewards_altair() { + let spec = ForkName::Altair.make_genesis_spec(E::default_spec()); + let harness = get_harness(spec.clone()); + let target_epoch = 0; + + // advance until epoch N + 1 and get initial balances + harness + .extend_slots((E::slots_per_epoch() * (target_epoch + 1)) as usize) + .await; + let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + + // advance until epoch N + 2 and build proposal rewards map + let mut proposal_rewards_map: HashMap = HashMap::new(); + let mut sync_committee_rewards_map: HashMap = HashMap::new(); + for _ in 0..E::slots_per_epoch() { + let state = harness.get_current_state(); + let slot = state.slot() + Slot::new(1); + + // calculate beacon block rewards / penalties + let ((signed_block, _maybe_blob_sidecars), mut state) = + harness.make_block_return_pre_state(state, slot).await; + let beacon_block_reward = harness + .chain + .compute_beacon_block_reward( + signed_block.message(), + signed_block.canonical_root(), + &mut state, + ) + .unwrap(); + + let total_proposer_reward = proposal_rewards_map + .get(&beacon_block_reward.proposer_index) + .unwrap_or(&0u64) + + beacon_block_reward.total; + + proposal_rewards_map.insert(beacon_block_reward.proposer_index, total_proposer_reward); + + // calculate sync committee rewards / penalties + let reward_payload = harness + .chain + .compute_sync_committee_rewards(signed_block.message(), &mut state) + .unwrap(); + + reward_payload.iter().for_each(|reward| { + let mut amount = *sync_committee_rewards_map + .get(&reward.validator_index) + .unwrap_or(&0); + amount += reward.reward; + sync_committee_rewards_map.insert(reward.validator_index, amount); + }); + + harness.extend_slots(1).await; + } + + // compute reward deltas for all validators in epoch N + let StandardAttestationRewards { + ideal_rewards, + total_rewards, + } = harness + .chain + .compute_attestation_rewards(Epoch::new(target_epoch), vec![]) + .unwrap(); + + // assert ideal rewards are greater than 0 + assert!(ideal_rewards + .iter() + .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0)); + + // apply attestation, proposal, and sync committee rewards and penalties to initial balances + let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); + let expected_balances = apply_beacon_block_rewards(&proposal_rewards_map, expected_balances); + let expected_balances = + apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances); + + // verify expected balances against actual balances + let balances: Vec = harness.get_current_state().balances().clone().into(); + + 
assert_eq!(expected_balances, balances); +} + #[tokio::test] async fn test_verify_attestation_rewards_altair_inactivity_leak() { let spec = ForkName::Altair.make_genesis_spec(E::default_spec()); @@ -313,6 +463,115 @@ async fn test_verify_attestation_rewards_altair_inactivity_leak() { assert_eq!(expected_balances, balances); } +#[tokio::test] +async fn test_verify_attestation_rewards_altair_inactivity_leak_justification_epoch() { + let spec = ForkName::Altair.make_genesis_spec(E::default_spec()); + let harness = get_harness(spec.clone()); + + let half = VALIDATOR_COUNT / 2; + let half_validators: Vec = (0..half).collect(); + // target epoch is the epoch where the chain enters inactivity leak + 1 + let mut target_epoch = &spec.min_epochs_to_inactivity_penalty + 2; + + // advance until beginning of epoch N + 1 + harness + .extend_slots_some_validators( + (E::slots_per_epoch() * (target_epoch + 1)) as usize, + half_validators.clone(), + ) + .await; + + let validator_inactivity_score = harness + .get_current_state() + .get_inactivity_score(VALIDATOR_COUNT - 1) + .unwrap(); + + //assert to ensure we are in inactivity leak + assert_eq!(4, validator_inactivity_score); + + // advance for first justification epoch and get balances + harness.extend_slots(E::slots_per_epoch() as usize).await; + target_epoch += 1; + let initial_balances: Vec = harness.get_current_state().balances().clone().into(); + + // advance until epoch N + 2 and build proposal rewards map + let mut proposal_rewards_map: HashMap = HashMap::new(); + let mut sync_committee_rewards_map: HashMap = HashMap::new(); + for _ in 0..E::slots_per_epoch() { + let state = harness.get_current_state(); + let slot = state.slot() + Slot::new(1); + + // calculate beacon block rewards / penalties + let ((signed_block, _maybe_blob_sidecars), mut state) = + harness.make_block_return_pre_state(state, slot).await; + let beacon_block_reward = harness + .chain + .compute_beacon_block_reward( + signed_block.message(), + signed_block.canonical_root(), + &mut state, + ) + .unwrap(); + + let total_proposer_reward = proposal_rewards_map + .get(&beacon_block_reward.proposer_index) + .unwrap_or(&0u64) + + beacon_block_reward.total; + + proposal_rewards_map.insert(beacon_block_reward.proposer_index, total_proposer_reward); + + // calculate sync committee rewards / penalties + let reward_payload = harness + .chain + .compute_sync_committee_rewards(signed_block.message(), &mut state) + .unwrap(); + + reward_payload.iter().for_each(|reward| { + let mut amount = *sync_committee_rewards_map + .get(&reward.validator_index) + .unwrap_or(&0); + amount += reward.reward; + sync_committee_rewards_map.insert(reward.validator_index, amount); + }); + + harness.extend_slots(1).await; + } + + //assert target epoch and previous_justified_checkpoint match + assert_eq!( + target_epoch, + harness + .get_current_state() + .previous_justified_checkpoint() + .epoch + .as_u64() + ); + + // compute reward deltas for all validators in epoch N + let StandardAttestationRewards { + ideal_rewards, + total_rewards, + } = harness + .chain + .compute_attestation_rewards(Epoch::new(target_epoch), vec![]) + .unwrap(); + + // assert ideal rewards are greater than 0 + assert!(ideal_rewards + .iter() + .all(|reward| reward.head > 0 && reward.target > 0 && reward.source > 0)); + + // apply attestation, proposal, and sync committee rewards and penalties to initial balances + let expected_balances = apply_attestation_rewards(&initial_balances, total_rewards); + let expected_balances = 
apply_beacon_block_rewards(&proposal_rewards_map, expected_balances); + let expected_balances = + apply_sync_committee_rewards(&sync_committee_rewards_map, expected_balances); + + // verify expected balances against actual balances + let balances: Vec = harness.get_current_state().balances().clone().into(); + assert_eq!(expected_balances, balances); +} + #[tokio::test] async fn test_verify_attestation_rewards_base_subset_only() { let harness = get_harness(E::default_spec()); diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 59a4c1decca..9f7199cf3cf 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -8,7 +8,6 @@ use beacon_chain::test_utils::{ mock_execution_layer_from_parts, test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, }; -use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; use beacon_chain::{ data_availability_checker::MaybeAvailableBlock, historical_blocks::HistoricalBlockError, migrate::MigratorConfig, BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot, @@ -32,7 +31,7 @@ use store::{ chunked_vector::{chunk_key, Field}, get_key_for_col, iter::{BlockRootsIterator, StateRootsIterator}, - DBColumn, HotColdDB, KeyValueStore, KeyValueStoreOp, LevelDB, StoreConfig, + BlobInfo, DBColumn, HotColdDB, KeyValueStore, KeyValueStoreOp, LevelDB, StoreConfig, }; use tempfile::{tempdir, TempDir}; use tokio::time::sleep; @@ -63,12 +62,13 @@ fn get_store_generic( ) -> Arc, LevelDB>> { let hot_path = db_path.path().join("hot_db"); let cold_path = db_path.path().join("cold_db"); + let blobs_path = db_path.path().join("blobs_db"); let log = test_logger(); HotColdDB::open( &hot_path, &cold_path, - None, + &blobs_path, |_, _, _| Ok(()), config, spec, @@ -2358,6 +2358,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { Duration::from_secs(seconds_per_slot), ); slot_clock.set_slot(harness.get_current_slot().as_u64()); + let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec) .store(store.clone()) .custom_spec(test_spec::()) @@ -2376,7 +2377,6 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { 1, ))) .execution_layer(Some(mock.el)) - .monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log) .trusted_setup(trusted_setup) .build() .expect("should build"); @@ -2465,10 +2465,10 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { if let MaybeAvailableBlock::Available(block) = harness .chain .data_availability_checker - .check_rpc_block_availability( + .verify_kzg_for_rpc_block( RpcBlock::new(Some(block_root), Arc::new(full_block), Some(blobs)).unwrap(), ) - .expect("should check availability") + .expect("should verify kzg") { available_blocks.push(block); } @@ -2967,10 +2967,8 @@ async fn schema_downgrade_to_min_version() { // Can't downgrade beyond V18 once Deneb is reached, for simplicity don't test that // at all if Deneb is enabled. SchemaVersion(18) - } else if harness.spec.capella_fork_epoch.is_some() { - SchemaVersion(14) } else { - SchemaVersion(11) + SchemaVersion(16) }; // Save the slot clock so that the new harness doesn't revert in time. @@ -3279,6 +3277,40 @@ async fn deneb_prune_blobs_margin_test(margin: u64) { check_blob_existence(&harness, oldest_blob_slot, harness.head_slot(), true); } +/// Check that a database with `blobs_db=false` can be upgraded to `blobs_db=true` before Deneb. 
+#[tokio::test] +async fn change_to_separate_blobs_db_before_deneb() { + let db_path = tempdir().unwrap(); + let store = get_store(&db_path); + + // Only run this test on forks prior to Deneb. If the blobs database already has blobs, we can't + // move it. + if store.get_chain_spec().deneb_fork_epoch.is_some() { + return; + } + + let init_blob_info = store.get_blob_info(); + assert!( + init_blob_info.blobs_db, + "separate blobs DB should be the default" + ); + + // Change to `blobs_db=false` to emulate legacy Deneb DB. + let legacy_blob_info = BlobInfo { + blobs_db: false, + ..init_blob_info + }; + store + .compare_and_set_blob_info_with_write(init_blob_info.clone(), legacy_blob_info.clone()) + .unwrap(); + assert_eq!(store.get_blob_info(), legacy_blob_info); + + // Re-open the DB and check that `blobs_db` gets changed back to true. + drop(store); + let store = get_store(&db_path); + assert_eq!(store.get_blob_info(), init_blob_info); +} + /// Check that there are blob sidecars (or not) at every slot in the range. fn check_blob_existence( harness: &TestHarness, diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs new file mode 100644 index 00000000000..5bc6b758c2d --- /dev/null +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -0,0 +1,299 @@ +use lazy_static::lazy_static; + +use beacon_chain::test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, +}; +use beacon_chain::validator_monitor::{ValidatorMonitorConfig, MISSED_BLOCK_LAG_SLOTS}; +use types::{Epoch, EthSpec, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; + +// Should ideally be divisible by 3. +pub const VALIDATOR_COUNT: usize = 48; + +lazy_static! { + /// A cached set of keys. 
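+ /// Generating deterministic keypairs is relatively slow, so they are built once and shared by every test in this file.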
+ static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); +} + +type E = MainnetEthSpec; + +fn get_harness( + validator_count: usize, + validator_indexes_to_monitor: Vec, +) -> BeaconChainHarness> { + let harness = BeaconChainHarness::builder(MainnetEthSpec) + .default_spec() + .keypairs(KEYPAIRS[0..validator_count].to_vec()) + .fresh_ephemeral_store() + .mock_execution_layer() + .validator_monitor_config(ValidatorMonitorConfig { + validators: validator_indexes_to_monitor + .iter() + .map(|i| PublicKeyBytes::from(KEYPAIRS[*i].pk.clone())) + .collect(), + ..<_>::default() + }) + .build(); + + harness.advance_slot(); + + harness +} + +#[tokio::test] +async fn produces_missed_blocks() { + let validator_count = 16; + + let slots_per_epoch = E::slots_per_epoch(); + + let nb_epoch_to_simulate = Epoch::new(2); + + // Generate 63 slots (2 epochs * 32 slots per epoch - 1) + let initial_blocks = slots_per_epoch * nb_epoch_to_simulate.as_u64() - 1; + + // The validator index of the validator that is 'supposed' to miss a block + let mut validator_index_to_monitor = 1; + + // 1st scenario // + // + // Missed block happens when slot and prev_slot are in the same epoch + let harness1 = get_harness(validator_count, vec![validator_index_to_monitor]); + harness1 + .extend_chain( + initial_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let mut _state = &mut harness1.get_current_state(); + let mut epoch = _state.current_epoch(); + + // We have a total of 63 slots and we want slot 57 to be a missed block + // and this is slot=25 in epoch=1 + let mut idx = initial_blocks - 6; + let mut slot = Slot::new(idx); + let mut slot_in_epoch = slot % slots_per_epoch; + let mut prev_slot = Slot::new(idx - 1); + let mut duplicate_block_root = *_state.block_roots().get(idx as usize).unwrap(); + let mut validator_indexes = _state.get_beacon_proposer_indices(&harness1.spec).unwrap(); + let mut validator_index = validator_indexes[slot_in_epoch.as_usize()]; + let mut proposer_shuffling_decision_root = _state + .proposer_shuffling_decision_root(duplicate_block_root) + .unwrap(); + + let beacon_proposer_cache = harness1 + .chain + .validator_monitor + .read() + .get_beacon_proposer_cache(); + + // Let's fill the cache with the proposers for the current epoch + // and push the duplicate_block_root to the block_roots vector + assert_eq!( + beacon_proposer_cache.lock().insert( + epoch, + proposer_shuffling_decision_root, + validator_indexes.into_iter().collect::>(), + _state.fork() + ), + Ok(()) + ); + + // Modify the block root of the previous slot to be the same as the block root of the current slot + // in order to simulate a missed block + assert_eq!( + _state.set_block_root(prev_slot, duplicate_block_root), + Ok(()) + ); + + { + // Let's validate the state which will call the function responsible for + // adding the missed blocks to the validator monitor + let mut validator_monitor = harness1.chain.validator_monitor.write(); + validator_monitor.process_valid_state(nb_epoch_to_simulate, _state); + + // We should have one entry in the missed blocks map + assert_eq!( + validator_monitor.get_monitored_validator_missed_block_count(validator_index as u64), + 1 + ); + } + + // 2nd scenario // + // + // Missed block happens when slot and prev_slot are not in the same epoch + // making sure that the cache reloads when the epoch changes + // in that scenario the slot that missed a block is the first slot of the epoch + 
validator_index_to_monitor = 7; + // We are adding other validators to monitor, as those will miss a block depending on + // the fork name specified when running the test (the proposer shuffling differs per fork, cf. seed) + let validator_index_to_monitor_altair = 2; + // Same as above but for the merge upgrade + let validator_index_to_monitor_merge = 4; + // Same as above but for the capella upgrade + let validator_index_to_monitor_capella = 11; + // Same as above but for the deneb upgrade + let validator_index_to_monitor_deneb = 3; + let harness2 = get_harness( + validator_count, + vec![ + validator_index_to_monitor, + validator_index_to_monitor_altair, + validator_index_to_monitor_merge, + validator_index_to_monitor_capella, + validator_index_to_monitor_deneb, + ], + ); + let advance_slot_by = 9; + harness2 + .extend_chain( + (initial_blocks + advance_slot_by) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let mut _state2 = &mut harness2.get_current_state(); + epoch = _state2.current_epoch(); + + // We have a total of 72 slots and we want slot 64 to be the missed block, + // which is slot=0 in epoch=2 + idx = initial_blocks + (advance_slot_by) - 8; + slot = Slot::new(idx); + prev_slot = Slot::new(idx - 1); + slot_in_epoch = slot % slots_per_epoch; + duplicate_block_root = *_state2.block_roots().get(idx as usize).unwrap(); + validator_indexes = _state2.get_beacon_proposer_indices(&harness2.spec).unwrap(); + validator_index = validator_indexes[slot_in_epoch.as_usize()]; + + let beacon_proposer_cache = harness2 + .chain + .validator_monitor + .read() + .get_beacon_proposer_cache(); + + // Let's fill the cache with the proposers for the current epoch + // and push the duplicate_block_root to the block_roots vector + assert_eq!( + beacon_proposer_cache.lock().insert( + epoch, + duplicate_block_root, + validator_indexes.into_iter().collect::<Vec<usize>>(), + _state2.fork() + ), + Ok(()) + ); + + assert_eq!( + _state2.set_block_root(prev_slot, duplicate_block_root), + Ok(()) + ); + + { + // Let's validate the state which will call the function responsible for + // adding the missed blocks to the validator monitor + let mut validator_monitor2 = harness2.chain.validator_monitor.write(); + validator_monitor2.process_valid_state(epoch, _state2); + // We should have one entry in the missed blocks map + assert_eq!( + validator_monitor2.get_monitored_validator_missed_block_count(validator_index as u64), + 1 + ); + + // 3rd scenario // + // + // A missed block happens but the validator is not monitored + // it should not be flagged as a missed block + idx = initial_blocks + (advance_slot_by) - 7; + slot = Slot::new(idx); + prev_slot = Slot::new(idx - 1); + slot_in_epoch = slot % slots_per_epoch; + duplicate_block_root = *_state2.block_roots().get(idx as usize).unwrap(); + validator_indexes = _state2.get_beacon_proposer_indices(&harness2.spec).unwrap(); + let not_monitored_validator_index = validator_indexes[slot_in_epoch.as_usize()]; + + assert_eq!( + _state2.set_block_root(prev_slot, duplicate_block_root), + Ok(()) + ); + + // Let's validate the state which will call the function responsible for + // adding the missed blocks to the validator monitor + validator_monitor2.process_valid_state(epoch, _state2); + + // We shouldn't have any entry in the missed blocks map + assert_ne!(validator_index, not_monitored_validator_index); + assert_eq!( + validator_monitor2 + .get_monitored_validator_missed_block_count(not_monitored_validator_index
as u64), + 0 + ); + } + + // 4th scenario // + // + // A missed block happens within MISSED_BLOCK_LAG_SLOTS of state.slot, + // so it shouldn't be flagged as a missed block yet + let harness3 = get_harness(validator_count, vec![validator_index_to_monitor]); + harness3 + .extend_chain( + slots_per_epoch as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let mut _state3 = &mut harness3.get_current_state(); + epoch = _state3.current_epoch(); + + // We have a total of 32 slots and we want slot 30 to be a missed block + // and this is slot=30 in epoch=0 + idx = slots_per_epoch - MISSED_BLOCK_LAG_SLOTS as u64 + 2; + slot = Slot::new(idx); + slot_in_epoch = slot % slots_per_epoch; + prev_slot = Slot::new(idx - 1); + duplicate_block_root = *_state3.block_roots().get(idx as usize).unwrap(); + validator_indexes = _state3.get_beacon_proposer_indices(&harness3.spec).unwrap(); + validator_index = validator_indexes[slot_in_epoch.as_usize()]; + proposer_shuffling_decision_root = _state3 + .proposer_shuffling_decision_root_at_epoch(epoch, duplicate_block_root) + .unwrap(); + + let beacon_proposer_cache = harness3 + .chain + .validator_monitor + .read() + .get_beacon_proposer_cache(); + + // Let's fill the cache with the proposers for the current epoch + // and push the duplicate_block_root to the block_roots vector + assert_eq!( + beacon_proposer_cache.lock().insert( + epoch, + proposer_shuffling_decision_root, + validator_indexes.into_iter().collect::<Vec<usize>>(), + _state3.fork() + ), + Ok(()) + ); + + // Modify the block root of the previous slot to be the same as the block root of the current slot + // in order to simulate a missed block + assert_eq!( + _state3.set_block_root(prev_slot, duplicate_block_root), + Ok(()) + ); + + { + // Let's validate the state which will call the function responsible for + // adding the missed blocks to the validator monitor + let mut validator_monitor3 = harness3.chain.validator_monitor.write(); + validator_monitor3.process_valid_state(epoch, _state3); + + // We shouldn't have any entries in the missed blocks map + assert_eq!( + validator_monitor3.get_monitored_validator_missed_block_count(validator_index as u64), + 0 + ); + } +} diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index d1184cf75de..cedf347b9a8 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -191,15 +191,7 @@ where .graffiti(graffiti) .event_handler(event_handler) .execution_layer(execution_layer) - .monitor_validators( - config.validator_monitor_auto, - config.validator_monitor_pubkeys.clone(), - config.validator_monitor_individual_tracking_threshold, - runtime_context - .service_context("val_mon".to_string()) - .log() - .clone(), - ); + .validator_monitor_config(config.validator_monitor.clone()); let builder = if let Some(slasher) = self.slasher.clone() { builder.slasher(slasher) @@ -909,7 +901,7 @@ where mut self, hot_path: &Path, cold_path: &Path, - blobs_path: Option<PathBuf>, + blobs_path: &Path, config: StoreConfig, log: Logger, ) -> Result<Self, String> { diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 8b47d0fc622..20afdb948bb 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,4 +1,4 @@ -use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; +use beacon_chain::validator_monitor::ValidatorMonitorConfig; use beacon_chain::TrustedSetup; use beacon_processor::BeaconProcessorConfig; use directory::DEFAULT_ROOT_DIR; @@ -9,9 +9,12
@@ use serde::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; use std::time::Duration; -use types::{Graffiti, PublicKeyBytes}; +use types::Graffiti; + /// Default directory name for the freezer database under the top-level data dir. const DEFAULT_FREEZER_DB_DIR: &str = "freezer_db"; +/// Default directory name for the blobs database under the top-level data dir. +const DEFAULT_BLOBS_DB_DIR: &str = "blobs_db"; /// Defines how the client should initialize the `BeaconChain` and other components. #[derive(Debug, Clone, Serialize, Deserialize, Default)] @@ -56,15 +59,7 @@ pub struct Config { pub sync_eth1_chain: bool, /// Graffiti to be inserted everytime we create a block. pub graffiti: Graffiti, - /// When true, automatically monitor validators using the HTTP API. - pub validator_monitor_auto: bool, - /// A list of validator pubkeys to monitor. - pub validator_monitor_pubkeys: Vec, - /// Once the number of monitored validators goes above this threshold, we - /// will stop tracking metrics on a per-validator basis. This prevents large - /// validator counts causing infeasibly high cardinailty for Prometheus and - /// high log volumes. - pub validator_monitor_individual_tracking_threshold: usize, + pub validator_monitor: ValidatorMonitorConfig, #[serde(skip)] /// The `genesis` field is not serialized or deserialized by `serde` to ensure it is defined /// via the CLI at runtime, instead of from a configuration file saved to disk. @@ -107,9 +102,7 @@ impl Default for Config { http_metrics: <_>::default(), monitoring_api: None, slasher: None, - validator_monitor_auto: false, - validator_monitor_pubkeys: vec![], - validator_monitor_individual_tracking_threshold: DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, + validator_monitor: <_>::default(), logger_config: LoggerConfig::default(), beacon_processor: <_>::default(), genesis_state_url: <_>::default(), @@ -156,12 +149,19 @@ impl Config { .unwrap_or_else(|| self.default_freezer_db_path()) } + /// Fetch default path to use for the blobs database. + fn default_blobs_db_path(&self) -> PathBuf { + self.get_data_dir().join(DEFAULT_BLOBS_DB_DIR) + } + /// Returns the path to which the client may initialize the on-disk blobs database. /// /// Will attempt to use the user-supplied path from e.g. the CLI, or will default /// to None. - pub fn get_blobs_db_path(&self) -> Option { - self.blobs_db_path.clone() + pub fn get_blobs_db_path(&self) -> PathBuf { + self.blobs_db_path + .clone() + .unwrap_or_else(|| self.default_blobs_db_path()) } /// Get the freezer DB path, creating it if necessary. @@ -170,11 +170,8 @@ impl Config { } /// Get the blobs DB path, creating it if necessary. - pub fn create_blobs_db_path(&self) -> Result, String> { - match self.get_blobs_db_path() { - Some(blobs_db_path) => Ok(Some(ensure_dir_exists(blobs_db_path)?)), - None => Ok(None), - } + pub fn create_blobs_db_path(&self) -> Result { + ensure_dir_exists(self.get_blobs_db_path()) } /// Returns the "modern" path to the data_dir. 
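For context, the blobs database path now behaves like the freezer DB path: an explicitly configured path wins, and otherwise the client derives a default directory under the data dir, so callers no longer deal with an `Option`. A minimal, self-contained sketch of that defaulting logic (the `Config` here is a simplified stand-in, with a plain `data_dir` field in place of the real struct's `get_data_dir()` helper):

use std::path::PathBuf;

/// Default directory name for the blobs database, matching the constant added above.
const DEFAULT_BLOBS_DB_DIR: &str = "blobs_db";

struct Config {
    /// Stand-in for the real config's data directory resolution.
    data_dir: PathBuf,
    /// An explicit path, e.g. supplied on the CLI.
    blobs_db_path: Option<PathBuf>,
}

impl Config {
    /// An explicit user-supplied path takes precedence; otherwise fall back to
    /// `<data_dir>/blobs_db`. Note the return type is `PathBuf`, not `Option<PathBuf>`.
    fn get_blobs_db_path(&self) -> PathBuf {
        self.blobs_db_path
            .clone()
            .unwrap_or_else(|| self.data_dir.join(DEFAULT_BLOBS_DB_DIR))
    }
}

fn main() {
    let cfg = Config {
        data_dir: PathBuf::from("/tmp/lighthouse/beacon"),
        blobs_db_path: None,
    };
    // With no explicit path configured, the default is derived from the data dir.
    assert_eq!(
        cfg.get_blobs_db_path(),
        PathBuf::from("/tmp/lighthouse/beacon/blobs_db")
    );
}

Making the path always resolvable is what lets `HotColdDB::open` and the client builder take `&Path` instead of `Option<PathBuf>` in the hunks above.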
diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 3f75d0042de..3993e442ad7 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -93,21 +93,15 @@ impl TryFrom> for ProvenancedPayload) -> Result { let block_proposal_contents = match value { BuilderBid::Merge(builder_bid) => BlockProposalContents::Payload { - payload: ExecutionPayloadHeader::Merge(builder_bid.header) - .try_into() - .map_err(|_| Error::InvalidPayloadConversion)?, + payload: ExecutionPayloadHeader::Merge(builder_bid.header).into(), block_value: builder_bid.value, }, BuilderBid::Capella(builder_bid) => BlockProposalContents::Payload { - payload: ExecutionPayloadHeader::Capella(builder_bid.header) - .try_into() - .map_err(|_| Error::InvalidPayloadConversion)?, + payload: ExecutionPayloadHeader::Capella(builder_bid.header).into(), block_value: builder_bid.value, }, BuilderBid::Deneb(builder_bid) => BlockProposalContents::PayloadAndBlobs { - payload: ExecutionPayloadHeader::Deneb(builder_bid.header) - .try_into() - .map_err(|_| Error::InvalidPayloadConversion)?, + payload: ExecutionPayloadHeader::Deneb(builder_bid.header).into(), block_value: builder_bid.value, kzg_commitments: builder_bid.blinded_blobs_bundle.commitments, blobs: BlobItems::::try_from_blob_roots( diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index cf0faf655a3..713ebb670c3 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -655,14 +655,17 @@ pub fn load_test_blobs_bundle() -> Result<(KzgCommitment, KzgProof, Ok(( commitments - .get(0) + .first() .cloned() .ok_or("commitment missing in test bundle")?, proofs - .get(0) + .first() .cloned() .ok_or("proof missing in test bundle")?, - blobs.get(0).cloned().ok_or("blob missing in test bundle")?, + blobs + .first() + .cloned() + .ok_or("blob missing in test bundle")?, )) } diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index b7134e37c4a..fdba9f4741c 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -1,4 +1,4 @@ -pub use crate::{common::genesis_deposits, interop::interop_genesis_state}; +pub use crate::common::genesis_deposits; pub use eth1::Config as Eth1Config; use eth1::{DepositLog, Eth1Block, Service as Eth1Service}; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 309db204ae2..a6f9d9ffcec 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -569,12 +569,12 @@ pub fn serve( chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { let (root, execution_optimistic, finalized) = state_id.root(&chain)?; - Ok(root) - .map(api_types::RootData::from) - .map(api_types::GenericResponse::from) - .map(|resp| { - resp.add_execution_optimistic_finalized(execution_optimistic, finalized) - }) + Ok(api_types::GenericResponse::from(api_types::RootData::from( + root, + ))) + .map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }, ); @@ -1940,8 +1940,8 @@ pub fn serve( .naive_aggregation_pool .read() .iter() - .cloned() - .filter(|att| query_filter(&att.data)), + .filter(|&att| query_filter(&att.data)) + .cloned(), ); 
Ok(api_types::GenericResponse::from(attestations)) }) @@ -2318,11 +2318,9 @@ pub fn serve( task_spawner.blocking_json_task(Priority::P1, move || { let (rewards, execution_optimistic, finalized) = standard_block_rewards::compute_beacon_block_rewards(chain, block_id)?; - Ok(rewards) - .map(api_types::GenericResponse::from) - .map(|resp| { - resp.add_execution_optimistic_finalized(execution_optimistic, finalized) - }) + Ok(api_types::GenericResponse::from(rewards)).map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }, ); @@ -2435,8 +2433,7 @@ pub fn serve( let execution_optimistic = chain.is_optimistic_or_invalid_head().unwrap_or_default(); - Ok(attestation_rewards) - .map(api_types::GenericResponse::from) + Ok(api_types::GenericResponse::from(attestation_rewards)) .map(|resp| resp.add_execution_optimistic(execution_optimistic)) }) }, @@ -2462,11 +2459,9 @@ pub fn serve( chain, block_id, validators, log, )?; - Ok(rewards) - .map(api_types::GenericResponse::from) - .map(|resp| { - resp.add_execution_optimistic_finalized(execution_optimistic, finalized) - }) + Ok(api_types::GenericResponse::from(rewards)).map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }, ); diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 708df39b4d6..c31dd9b1faa 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -97,12 +97,12 @@ fn try_proposer_duties_from_cache( let head = chain.canonical_head.cached_head(); let head_block = &head.snapshot.beacon_block; let head_block_root = head.head_block_root(); + let head_epoch = head_block.slot().epoch(T::EthSpec::slots_per_epoch()); let head_decision_root = head .snapshot .beacon_state .proposer_shuffling_decision_root(head_block_root) .map_err(warp_utils::reject::beacon_state_error)?; - let head_epoch = head_block.slot().epoch(T::EthSpec::slots_per_epoch()); let execution_optimistic = chain .is_optimistic_or_invalid_head_block(head_block) .map_err(warp_utils::reject::beacon_chain_error)?; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index d532859c798..c0aa4fe58b7 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -38,6 +38,8 @@ use types::{ MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, Slot, }; +use eth2::types::ForkVersionedBeaconBlockType::{Blinded, Full}; + type E = MainnetEthSpec; const SECONDS_PER_SLOT: u64 = 12; @@ -2604,6 +2606,98 @@ impl ApiTester { self } + pub async fn test_block_production_v3_ssz(self) -> Self { + let fork = self.chain.canonical_head.cached_head().head_fork(); + let genesis_validators_root = self.chain.genesis_validators_root; + + for _ in 0..E::slots_per_epoch() * 3 { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let proposer_pubkey_bytes = self + .client + .get_validator_duties_proposer(epoch) + .await + .unwrap() + .data + .into_iter() + .find(|duty| duty.slot == slot) + .map(|duty| duty.pubkey) + .unwrap(); + let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap(); + + let sk = self + .validator_keypairs() + .iter() + .find(|kp| kp.pk == proposer_pubkey) + .map(|kp| kp.sk.clone()) + .unwrap(); + + let randao_reveal = { + let domain = self.chain.spec.get_domain( + epoch, + Domain::Randao, + &fork, + genesis_validators_root, + ); + let message = epoch.signing_root(domain); + sk.sign(message).into() 
+ }; + + let (fork_version_response_bytes, is_blinded_payload) = self + .client + .get_validator_blocks_v3_ssz::(slot, &randao_reveal, None) + .await + .unwrap(); + + if is_blinded_payload { + let block_contents = >>::from_ssz_bytes( + &fork_version_response_bytes.unwrap(), + &self.chain.spec, + ) + .expect("block contents bytes can be decoded"); + + let signed_block_contents = + block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + + self.client + .post_beacon_blocks_ssz(&signed_block_contents) + .await + .unwrap(); + + // This converts the generic `Payload` to a concrete type for comparison. + let signed_block = signed_block_contents.deconstruct().0; + let head_block = SignedBeaconBlock::from(signed_block.clone()); + assert_eq!(head_block, signed_block); + + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } else { + let block_contents = >>::from_ssz_bytes( + &fork_version_response_bytes.unwrap(), + &self.chain.spec, + ) + .expect("block contents bytes can be decoded"); + + let signed_block_contents = + block_contents.sign(&sk, &fork, genesis_validators_root, &self.chain.spec); + + self.client + .post_beacon_blocks_ssz(&signed_block_contents) + .await + .unwrap(); + + assert_eq!( + self.chain.head_beacon_block().as_ref(), + signed_block_contents.signed_block() + ); + + self.chain.slot_clock.set_slot(slot.as_u64() + 1); + } + } + + self + } + pub async fn test_block_production_no_verify_randao(self) -> Self { for _ in 0..E::slots_per_epoch() { let slot = self.chain.slot().unwrap(); @@ -3360,6 +3454,36 @@ impl ApiTester { (proposer_index, randao_reveal) } + pub async fn test_payload_v3_respects_registration(self) -> Self { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload_type = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None) + .await + .unwrap(); + + let payload: BlindedPayload = match payload_type { + Blinded(payload) => payload + .data + .block() + .body() + .execution_payload() + .unwrap() + .into(), + Full(_) => panic!("Expecting a blinded payload"), + }; + + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!(payload.fee_recipient(), expected_fee_recipient); + assert_eq!(payload.gas_limit(), 11_111_111); + + self + } + pub async fn test_payload_respects_registration(self) -> Self { let slot = self.chain.slot().unwrap(); let epoch = self.chain.epoch().unwrap(); @@ -3434,6 +3558,42 @@ impl ApiTester { self } + pub async fn test_payload_v3_accepts_mutated_gas_limit(self) -> Self { + // Mutate gas limit. 
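+ // The mock builder applies this operation to its next bid, so the blinded payload fetched below should advertise the mutated 30M gas limit.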
+ self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::GasLimit(30_000_000)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload_type = self + .client + .get_validator_blocks_v3::<E>(slot, &randao_reveal, None) + .await + .unwrap(); + + let payload: BlindedPayload<E> = match payload_type { + Blinded(payload) => payload + .data + .block() + .body() + .execution_payload() + .unwrap() + .into(), + Full(_) => panic!("Expecting a blinded payload"), + }; + + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!(payload.fee_recipient(), expected_fee_recipient); + assert_eq!(payload.gas_limit(), 30_000_000); + + self + } + pub async fn test_payload_accepts_changed_fee_recipient(self) -> Self { + let test_fee_recipient = "0x4242424242424242424242424242424242424242" + .parse::
() @@ -3475,6 +3635,44 @@ impl ApiTester { self } + pub async fn test_payload_v3_accepts_changed_fee_recipient(self) -> Self { + let test_fee_recipient = "0x4242424242424242424242424242424242424242" + .parse::
() + .unwrap(); + + // Mutate fee recipient. + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::FeeRecipient(test_fee_recipient)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload_type = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None) + .await + .unwrap(); + + let payload: BlindedPayload = match payload_type { + Blinded(payload) => payload + .data + .block() + .body() + .execution_payload() + .unwrap() + .into(), + Full(_) => panic!("Expecting a blinded payload"), + }; + + assert_eq!(payload.fee_recipient(), test_fee_recipient); + + self + } + pub async fn test_payload_rejects_invalid_parent_hash(self) -> Self { let invalid_parent_hash = "0x4242424242424242424242424242424242424242424242424242424242424242" @@ -3524,6 +3722,52 @@ impl ApiTester { self } + pub async fn test_payload_v3_rejects_invalid_parent_hash(self) -> Self { + let invalid_parent_hash = + "0x4242424242424242424242424242424242424242424242424242424242424242" + .parse::() + .unwrap(); + + // Mutate parent hash. + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::ParentHash(invalid_parent_hash)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let expected_parent_hash = self + .chain + .head_snapshot() + .beacon_state + .latest_execution_payload_header() + .unwrap() + .block_hash(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload_type = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None) + .await + .unwrap(); + + let payload: FullPayload = match payload_type { + Full(payload) => payload + .data + .block() + .body() + .execution_payload() + .unwrap() + .into(), + Blinded(_) => panic!("Expecting a blinded payload"), + }; + + assert_eq!(payload.parent_hash(), expected_parent_hash); + + self + } + pub async fn test_payload_rejects_invalid_prev_randao(self) -> Self { let invalid_prev_randao = "0x4242424242424242424242424242424242424242424242424242424242424242" @@ -3571,6 +3815,50 @@ impl ApiTester { self } + pub async fn test_payload_v3_rejects_invalid_prev_randao(self) -> Self { + let invalid_prev_randao = + "0x4242424242424242424242424242424242424242424242424242424242424242" + .parse::() + .unwrap(); + + // Mutate prev randao. + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::PrevRandao(invalid_prev_randao)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let expected_prev_randao = self + .chain + .canonical_head + .cached_head() + .head_random() + .unwrap(); + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload_type = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None) + .await + .unwrap(); + + let payload: FullPayload = match payload_type { + Full(payload) => payload + .data + .block() + .body() + .execution_payload() + .unwrap() + .into(), + Blinded(_) => panic!("Expecting a full payload"), + }; + + assert_eq!(payload.prev_randao(), expected_prev_randao); + + self + } + pub async fn test_payload_rejects_invalid_block_number(self) -> Self { let invalid_block_number = 2; @@ -3618,6 +3906,50 @@ impl ApiTester { self } + pub async fn test_payload_v3_rejects_invalid_block_number(self) -> Self { + let invalid_block_number = 2; + + // Mutate block number. 
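+ // A block number that doesn't follow on from the head should invalidate the builder's bid, so the v3 endpoint is expected to fall back to a full local payload.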
+ self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::BlockNumber(invalid_block_number)); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let expected_block_number = self + .chain + .head_snapshot() + .beacon_state + .latest_execution_payload_header() + .unwrap() + .block_number() + + 1; + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload_type = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None) + .await + .unwrap(); + + let payload: FullPayload = match payload_type { + Full(payload) => payload + .data + .block() + .body() + .execution_payload() + .unwrap() + .into(), + Blinded(_) => panic!("Expecting a full payload"), + }; + + assert_eq!(payload.block_number(), expected_block_number); + + self + } + pub async fn test_payload_rejects_invalid_timestamp(self) -> Self { let invalid_timestamp = 2; @@ -3664,13 +3996,56 @@ impl ApiTester { self } - pub async fn test_payload_rejects_invalid_signature(self) -> Self { - self.mock_builder.as_ref().unwrap().invalid_signatures(); + pub async fn test_payload_v3_rejects_invalid_timestamp(self) -> Self { + let invalid_timestamp = 2; + + // Mutate timestamp. + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::Timestamp(invalid_timestamp)); let slot = self.chain.slot().unwrap(); let epoch = self.chain.epoch().unwrap(); - - let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + let min_expected_timestamp = self + .chain + .head_snapshot() + .beacon_state + .latest_execution_payload_header() + .unwrap() + .timestamp(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload_type = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None) + .await + .unwrap(); + + let payload: FullPayload = match payload_type { + Full(payload) => payload + .data + .block() + .body() + .execution_payload() + .unwrap() + .into(), + Blinded(_) => panic!("Expecting a blinded payload"), + }; + + assert!(payload.timestamp() > min_expected_timestamp); + + self + } + + pub async fn test_payload_rejects_invalid_signature(self) -> Self { + self.mock_builder.as_ref().unwrap().invalid_signatures(); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; let payload: BlindedPayload = self .client @@ -3695,6 +4070,28 @@ impl ApiTester { self } + pub async fn test_payload_v3_rejects_invalid_signature(self) -> Self { + self.mock_builder.as_ref().unwrap().invalid_signatures(); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload_type = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None) + .await + .unwrap(); + + match payload_type { + Full(_) => (), + Blinded(_) => panic!("Expecting a full payload"), + }; + + self + } + pub async fn test_builder_chain_health_skips(self) -> Self { let slot = self.chain.slot().unwrap(); @@ -3733,6 +4130,35 @@ impl ApiTester { self } + pub async fn test_builder_v3_chain_health_skips(self) -> Self { + let slot = self.chain.slot().unwrap(); + + // Since we are proposing this slot, start the count from the previous slot. + let prev_slot = slot - Slot::new(1); + let head_slot = self.chain.canonical_head.cached_head().head_slot(); + let epoch = self.chain.epoch().unwrap(); + + // Inclusive here to make sure we advance one slot past the threshold. 
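+ // Once the number of skipped slots exceeds `builder_fallback_skips`, the chain is considered unhealthy and a full local payload is expected instead of a builder bid.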
+ for _ in (prev_slot - head_slot).as_usize()..=self.chain.config.builder_fallback_skips { + self.harness.advance_slot(); + } + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload_type = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None) + .await + .unwrap(); + + match payload_type { + Full(_) => (), + Blinded(_) => panic!("Expecting a full payload"), + }; + + self + } + pub async fn test_builder_chain_health_skips_per_epoch(self) -> Self { // Fill an epoch with `builder_fallback_skips_per_epoch` skip slots. for i in 0..E::slots_per_epoch() { @@ -3808,6 +4234,61 @@ impl ApiTester { self } + pub async fn test_builder_v3_chain_health_skips_per_epoch(self) -> Self { + // Fill an epoch with `builder_fallback_skips_per_epoch` skip slots. + for i in 0..E::slots_per_epoch() { + if i == 0 || i as usize > self.chain.config.builder_fallback_skips_per_epoch { + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + } + self.harness.advance_slot(); + } + + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload_type = self + .client + .get_validator_blocks_v3::(next_slot, &randao_reveal, None) + .await + .unwrap(); + + match payload_type { + Blinded(_) => (), + Full(_) => panic!("Expecting a blinded payload"), + }; + + // Without proposing, advance into the next slot, this should make us cross the threshold + // number of skips, causing us to use the fallback. + self.harness.advance_slot(); + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload_type = self + .client + .get_validator_blocks_v3::(next_slot, &randao_reveal, None) + .await + .unwrap(); + + match payload_type { + Full(_) => (), + Blinded(_) => panic!("Expecting a full payload"), + }; + + self + } + pub async fn test_builder_chain_health_epochs_since_finalization(self) -> Self { let skips = E::slots_per_epoch() * self.chain.config.builder_fallback_epochs_since_finalization as u64; @@ -3898,6 +4379,76 @@ impl ApiTester { self } + pub async fn test_builder_v3_chain_health_epochs_since_finalization(self) -> Self { + let skips = E::slots_per_epoch() + * self.chain.config.builder_fallback_epochs_since_finalization as u64; + + for _ in 0..skips { + self.harness.advance_slot(); + } + + // Fill the next epoch with blocks, should be enough to justify, not finalize. + for _ in 0..E::slots_per_epoch() { + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness.advance_slot(); + } + + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload_type = self + .client + .get_validator_blocks_v3::(next_slot, &randao_reveal, None) + .await + .unwrap(); + + match payload_type { + Full(_) => (), + Blinded(_) => panic!("Expecting a full payload"), + }; + + // Fill another epoch with blocks, should be enough to finalize. (Sneaky plus 1 because this + // scenario starts at an epoch boundary). 
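+ // Exactly `builder_fallback_skips_per_epoch` skips should still yield a blinded payload; one further skip (later in this test) should trigger the full-payload fallback.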
+ for _ in 0..E::slots_per_epoch() + 1 { + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness.advance_slot(); + } + + let next_slot = self.chain.slot().unwrap(); + + let (_, randao_reveal) = self + .get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch())) + .await; + + let payload_type = self + .client + .get_validator_blocks_v3::(next_slot, &randao_reveal, None) + .await + .unwrap(); + + match payload_type { + Blinded(_) => (), + Full(_) => panic!("Expecting a blinded payload"), + }; + + self + } + pub async fn test_builder_chain_health_optimistic_head(self) -> Self { // Make sure the next payload verification will return optimistic before advancing the chain. self.harness.mock_execution_layer.as_ref().map(|el| { @@ -3945,6 +4496,49 @@ impl ApiTester { self } + pub async fn test_builder_v3_chain_health_optimistic_head(self) -> Self { + // Make sure the next payload verification will return optimistic before advancing the chain. + self.harness.mock_execution_layer.as_ref().map(|el| { + el.server.all_payloads_syncing(true); + el + }); + self.harness + .extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + self.harness.advance_slot(); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload_type = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None) + .await + .unwrap(); + + let payload: FullPayload = match payload_type { + Full(payload) => payload + .data + .block() + .body() + .execution_payload() + .unwrap() + .into(), + Blinded(_) => panic!("Expecting a full payload"), + }; + + let expected_fee_recipient = Address::from_low_u64_be(proposer_index as u64); + assert_eq!(payload.fee_recipient(), expected_fee_recipient); + + self + } + pub async fn test_payload_rejects_inadequate_builder_threshold(self) -> Self { // Mutate value. self.mock_builder @@ -3982,6 +4576,34 @@ impl ApiTester { self } + pub async fn test_payload_v3_rejects_inadequate_builder_threshold(self) -> Self { + // Mutate value. + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::Value(Uint256::from( + DEFAULT_BUILDER_THRESHOLD_WEI - 1, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload_type = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None) + .await + .unwrap(); + + match payload_type { + Full(_) => (), + Blinded(_) => panic!("Expecting a full payload"), + }; + + self + } + pub async fn test_builder_payload_chosen_when_more_profitable(self) -> Self { // Mutate value. self.mock_builder @@ -4019,6 +4641,34 @@ impl ApiTester { self } + pub async fn test_builder_payload_v3_chosen_when_more_profitable(self) -> Self { + // Mutate value. 
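+ // Bid one wei above the local payload's value, so the more profitable builder (blinded) payload should be chosen.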
+ self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload_type = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None) + .await + .unwrap(); + + match payload_type { + Blinded(_) => (), + Full(_) => panic!("Expecting a blinded payload"), + }; + + self + } + pub async fn test_local_payload_chosen_when_equally_profitable(self) -> Self { // Mutate value. self.mock_builder @@ -4056,6 +4706,34 @@ impl ApiTester { self } + pub async fn test_local_payload_v3_chosen_when_equally_profitable(self) -> Self { + // Mutate value. + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload_type = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None) + .await + .unwrap(); + + match payload_type { + Full(_) => (), + Blinded(_) => panic!("Expecting a full payload"), + }; + + self + } + pub async fn test_local_payload_chosen_when_more_profitable(self) -> Self { // Mutate value. self.mock_builder @@ -4093,6 +4771,34 @@ impl ApiTester { self } + pub async fn test_local_payload_v3_chosen_when_more_profitable(self) -> Self { + // Mutate value. + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI - 1, + ))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload_type = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None) + .await + .unwrap(); + + match payload_type { + Full(_) => (), + Blinded(_) => panic!("Expecting a full payload"), + }; + + self + } + pub async fn test_builder_works_post_capella(self) -> Self { // Ensure builder payload is chosen self.mock_builder @@ -4142,26 +4848,22 @@ impl ApiTester { let epoch = self.chain.epoch().unwrap(); let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; - let block_contents = self + let payload_type = self .client - .get_validator_blinded_blocks::>(slot, &randao_reveal, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None) .await - .unwrap() - .data; - let (block, maybe_sidecars) = block_contents.deconstruct(); + .unwrap(); + + let block_contents = match payload_type { + Blinded(payload) => payload.data, + Full(_) => panic!("Expecting a blinded payload"), + }; + + let (_, maybe_sidecars) = block_contents.deconstruct(); // Response should contain blob sidecars assert!(maybe_sidecars.is_some()); - // The builder's payload should've been chosen, so this cache should not be populated - let payload: BlindedPayload = block.body().execution_payload().unwrap().into(); - assert!(self - .chain - .execution_layer - .as_ref() - .unwrap() - .get_payload_by_root(&payload.tree_hash_root()) - .is_none()); self } @@ -4206,6 +4908,38 @@ impl ApiTester { self } + pub async fn test_lighthouse_rejects_invalid_withdrawals_root_v3(self) -> Self { + // Ensure builder payload *would be* chosen + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::Value(Uint256::from( + DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI + 1, + ))); 
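+ // With the bid outbidding the local payload, only the invalid withdrawals root set below should force the fallback to a full payload.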
+ // Set withdrawals root to something invalid + self.mock_builder + .as_ref() + .unwrap() + .add_operation(Operation::WithdrawalsRoot(Hash256::repeat_byte(0x42))); + + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + + let payload_type = self + .client + .get_validator_blocks_v3::(slot, &randao_reveal, None) + .await + .unwrap(); + + match payload_type { + Full(_) => (), + Blinded(_) => panic!("Expecting a full payload"), + }; + + self + } + #[cfg(target_os = "linux")] pub async fn test_get_lighthouse_health(self) -> Self { self.client.get_lighthouse_health().await.unwrap(); @@ -5053,6 +5787,20 @@ async fn block_production_ssz_with_skip_slots() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn block_production_ssz_v3() { + ApiTester::new().await.test_block_production_v3_ssz().await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn block_production_v3_ssz_with_skip_slots() { + ApiTester::new() + .await + .skip_slots(E::slots_per_epoch() * 2) + .test_block_production_v3_ssz() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn blinded_block_production_full_payload_premerge() { ApiTester::new() @@ -5236,6 +5984,14 @@ async fn post_validator_register_valid() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_valid_v3() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_respects_registration() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn post_validator_register_gas_limit_mutation() { ApiTester::new_mev_tester() @@ -5244,6 +6000,14 @@ async fn post_validator_register_gas_limit_mutation() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_gas_limit_mutation_v3() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_accepts_mutated_gas_limit() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn post_validator_register_fee_recipient_mutation() { ApiTester::new_mev_tester() @@ -5252,6 +6016,14 @@ async fn post_validator_register_fee_recipient_mutation() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn post_validator_register_fee_recipient_mutation_v3() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_accepts_changed_fee_recipient() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_blinded_block_invalid_parent_hash() { ApiTester::new_mev_tester() @@ -5260,6 +6032,14 @@ async fn get_blinded_block_invalid_parent_hash() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_full_block_invalid_parent_hash_v3() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_rejects_invalid_parent_hash() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_blinded_block_invalid_prev_randao() { ApiTester::new_mev_tester() @@ -5268,6 +6048,14 @@ async fn get_blinded_block_invalid_prev_randao() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_full_block_invalid_prev_randao_v3() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_rejects_invalid_prev_randao() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_blinded_block_invalid_block_number() { ApiTester::new_mev_tester() @@ -5276,6 +6064,14 @@ 
async fn get_blinded_block_invalid_block_number() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_full_block_invalid_block_number_v3() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_rejects_invalid_block_number() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_blinded_block_invalid_timestamp() { ApiTester::new_mev_tester() @@ -5284,6 +6080,14 @@ async fn get_blinded_block_invalid_timestamp() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_full_block_invalid_timestamp_v3() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_rejects_invalid_timestamp() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_blinded_block_invalid_signature() { ApiTester::new_mev_tester() @@ -5292,6 +6096,14 @@ async fn get_blinded_block_invalid_signature() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_full_block_invalid_signature_v3() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_rejects_invalid_signature() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn builder_chain_health_skips() { ApiTester::new_mev_tester() @@ -5300,6 +6112,14 @@ async fn builder_chain_health_skips() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_skips_v3() { + ApiTester::new_mev_tester() + .await + .test_builder_v3_chain_health_skips() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn builder_chain_health_skips_per_epoch() { ApiTester::new_mev_tester() @@ -5308,6 +6128,14 @@ async fn builder_chain_health_skips_per_epoch() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_skips_per_epoch_v3() { + ApiTester::new_mev_tester() + .await + .test_builder_v3_chain_health_skips_per_epoch() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn builder_chain_health_epochs_since_finalization() { ApiTester::new_mev_tester() @@ -5316,6 +6144,14 @@ async fn builder_chain_health_epochs_since_finalization() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_epochs_since_finalization_v3() { + ApiTester::new_mev_tester() + .await + .test_builder_v3_chain_health_epochs_since_finalization() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn builder_chain_health_optimistic_head() { ApiTester::new_mev_tester() @@ -5324,6 +6160,14 @@ async fn builder_chain_health_optimistic_head() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_chain_health_optimistic_head_v3() { + ApiTester::new_mev_tester() + .await + .test_builder_v3_chain_health_optimistic_head() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn builder_inadequate_builder_threshold() { ApiTester::new_mev_tester() @@ -5332,6 +6176,14 @@ async fn builder_inadequate_builder_threshold() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_inadequate_builder_threshold_v3() { + ApiTester::new_mev_tester() + .await + .test_payload_v3_rejects_inadequate_builder_threshold() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn builder_payload_chosen_by_profit() { ApiTester::new_mev_tester_no_builder_threshold() @@ -5344,6 +6196,18 @@ async fn 
builder_payload_chosen_by_profit() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn builder_payload_chosen_by_profit_v3() { + ApiTester::new_mev_tester_no_builder_threshold() + .await + .test_builder_payload_v3_chosen_when_more_profitable() + .await + .test_local_payload_v3_chosen_when_equally_profitable() + .await + .test_local_payload_v3_chosen_when_more_profitable() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn builder_works_post_capella() { let mut config = ApiTesterConfig { @@ -5382,6 +6246,8 @@ async fn builder_works_post_deneb() { .test_post_validator_register_validator() .await .test_builder_works_post_deneb() + .await + .test_lighthouse_rejects_invalid_withdrawals_root_v3() .await; } diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index 785206b757b..e6e06caa841 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -4,8 +4,6 @@ use lighthouse_metrics::TextEncoder; use lighthouse_network::prometheus_client::encoding::text::encode; use malloc_utils::scrape_allocator_metrics; -pub use lighthouse_metrics::*; - pub fn gather_prometheus_metrics( ctx: &Context, ) -> std::result::Result { diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 8eacabb4d0d..0ec7e2ab7a0 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -1,6 +1,6 @@ //! Helper functions and an extension trait for Ethereum 2 ENRs. -pub use discv5::enr::{self, CombinedKey, EnrBuilder}; +pub use discv5::enr::{CombinedKey, EnrBuilder}; use super::enr_ext::CombinedKeyExt; use super::ENR_FILENAME; diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index 7157a627213..a6bf3ffecce 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -1489,7 +1489,7 @@ mod tests { assert!(the_best.is_some()); // Consistency check let best_peers = pdb.best_peers_by_status(PeerInfo::is_connected); - assert_eq!(the_best.unwrap(), best_peers.get(0).unwrap().0); + assert_eq!(the_best.unwrap(), best_peers.first().unwrap().0); } #[test] diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index f1b76674aec..6e83cfc86de 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -378,7 +378,7 @@ fn handle_error( Ok(None) } } - _ => Err(err).map_err(RPCError::from), + _ => Err(RPCError::from(err)), } } diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 0e6c76e222b..f2ca3428097 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -224,7 +224,7 @@ impl NetworkBeaconProcessor { request_id: PeerRequestId, request: BlobsByRootRequest, ) { - let Some(requested_root) = request.blob_ids.get(0).map(|id| id.block_root) else { + let Some(requested_root) = request.blob_ids.first().map(|id| id.block_root) else { // No blob ids requested. 
return; }; diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 7d6bde63449..d76ce5aadde 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -340,7 +340,7 @@ impl NetworkBeaconProcessor { ); } Ok(AvailabilityProcessingStatus::MissingComponents(_, _)) => { - warn!( + debug!( self.log, "Missing components over rpc"; "block_hash" => %block_root, @@ -560,14 +560,10 @@ impl NetworkBeaconProcessor { downloaded_blocks: Vec>, ) -> (usize, Result<(), ChainSegmentFailed>) { let total_blocks = downloaded_blocks.len(); - let available_blocks = match downloaded_blocks - .into_iter() - .map(|block| { - self.chain - .data_availability_checker - .check_rpc_block_availability(block) - }) - .collect::, _>>() + let available_blocks = match self + .chain + .data_availability_checker + .verify_kzg_for_rpc_blocks(downloaded_blocks) { Ok(blocks) => blocks .into_iter() diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 3b8c89a442e..769775a62c8 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -2,7 +2,6 @@ use super::*; use beacon_chain::{ builder::{BeaconChainBuilder, Witness}, eth1_chain::CachingEth1Backend, - validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, BeaconChain, }; use futures::prelude::*; @@ -76,7 +75,6 @@ impl TestBeaconChain { Duration::from_millis(SLOT_DURATION_MILLIS), )) .shutdown_sender(shutdown_tx) - .monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log) .build() .expect("should build"), ); diff --git a/beacon_node/network/src/sync/backfill_sync/mod.rs b/beacon_node/network/src/sync/backfill_sync/mod.rs index e5808469767..0d7e7c16c36 100644 --- a/beacon_node/network/src/sync/backfill_sync/mod.rs +++ b/beacon_node/network/src/sync/backfill_sync/mod.rs @@ -929,7 +929,7 @@ impl BackFillSync { .collect::>(); // Sort peers prioritizing unrelated peers with less active requests. 
priorized_peers.sort_unstable(); - priorized_peers.get(0).map(|&(_, _, peer)| peer) + priorized_peers.first().map(|&(_, _, peer)| peer) }; if let Some(peer) = new_peer { diff --git a/beacon_node/network/src/sync/block_lookups/mod.rs b/beacon_node/network/src/sync/block_lookups/mod.rs index d13bb8cb88d..c5732069a00 100644 --- a/beacon_node/network/src/sync/block_lookups/mod.rs +++ b/beacon_node/network/src/sync/block_lookups/mod.rs @@ -786,7 +786,7 @@ impl BlockLookups { self.log, "Block component processed for lookup"; "response_type" => ?R::response_type(), - "result" => ?result, + "block_root" => ?root, ); match result { diff --git a/beacon_node/network/src/sync/block_lookups/tests.rs b/beacon_node/network/src/sync/block_lookups/tests.rs index bd1e72ee18d..551f1fdae65 100644 --- a/beacon_node/network/src/sync/block_lookups/tests.rs +++ b/beacon_node/network/src/sync/block_lookups/tests.rs @@ -1635,7 +1635,7 @@ mod deneb_only { self } fn invalidate_blobs_too_many(mut self) -> Self { - let first_blob = self.blobs.get(0).expect("blob").clone(); + let first_blob = self.blobs.first().expect("blob").clone(); self.blobs.push(first_blob); self } diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 1e5bf09b3aa..5a77340e3b5 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -885,7 +885,7 @@ impl SyncingChain { .collect::>(); // Sort peers prioritizing unrelated peers with less active requests. priorized_peers.sort_unstable(); - priorized_peers.get(0).map(|&(_, _, peer)| peer) + priorized_peers.first().map(|&(_, _, peer)| peer) }; if let Some(peer) = new_peer { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 7b9cd757a5b..d76f2f375f4 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -388,12 +388,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { address of this server (e.g., http://localhost:5052).") .takes_value(true), ) - .arg( - Arg::with_name("http-disable-legacy-spec") - .long("http-disable-legacy-spec") - .requires("enable_http") - .hidden(true) - ) .arg( Arg::with_name("http-spec-fork") .long("http-spec-fork") @@ -569,24 +563,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .help("If present, uses an eth1 backend that generates static dummy data.\ Identical to the method used at the 2019 Canada interop.") ) - .arg( - Arg::with_name("eth1-endpoint") - .long("eth1-endpoint") - .value_name("HTTP-ENDPOINT") - .help("Deprecated. Use --eth1-endpoints.") - .takes_value(true) - ) - .arg( - Arg::with_name("eth1-endpoints") - .long("eth1-endpoints") - .value_name("HTTP-ENDPOINTS") - .conflicts_with("eth1-endpoint") - .help("One http endpoint for a web3 connection to an execution node. \ - Note: This flag is now only useful for testing, use `--execution-endpoint` \ - flag to connect to an execution node on mainnet and testnets. - Defaults to http://127.0.0.1:8545.") - .takes_value(true) - ) .arg( Arg::with_name("eth1-purge-cache") .long("eth1-purge-cache") @@ -649,14 +625,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { /* * Execution Layer Integration */ - .arg( - Arg::with_name("merge") - .long("merge") - .help("Deprecated. 
The feature activates automatically when --execution-endpoint \ - is supplied.") - .takes_value(false) - .hidden(true) - ) .arg( Arg::with_name("execution-endpoint") .long("execution-endpoint") @@ -1200,22 +1168,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .requires("builder") .takes_value(true) ) - .arg( - Arg::with_name("count-unrealized") - .long("count-unrealized") - .hidden(true) - .help("This flag is deprecated and has no effect.") - .takes_value(true) - .default_value("true") - ) - .arg( - Arg::with_name("count-unrealized-full") - .long("count-unrealized-full") - .hidden(true) - .help("This flag is deprecated and has no effect.") - .takes_value(true) - .default_value("false") - ) .arg( Arg::with_name("reset-payload-statuses") .long("reset-payload-statuses") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 94b94677a5f..609626ae88a 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -120,13 +120,6 @@ pub fn get_config( client_config.http_api.allow_origin = Some(allow_origin.to_string()); } - if cli_args.is_present("http-disable-legacy-spec") { - warn!( - log, - "The flag --http-disable-legacy-spec is deprecated and will be removed" - ); - } - if let Some(fork_name) = clap_utils::parse_optional(cli_args, "http-spec-fork")? { client_config.http_api.spec_fork_name = Some(fork_name); } @@ -240,25 +233,6 @@ pub fn get_config( client_config.sync_eth1_chain = true; } - // Defines the URL to reach the eth1 node. - if let Some(endpoint) = cli_args.value_of("eth1-endpoint") { - warn!( - log, - "The --eth1-endpoint flag is deprecated"; - "msg" => "please use --eth1-endpoints instead" - ); - client_config.sync_eth1_chain = true; - - let endpoint = SensitiveUrl::parse(endpoint) - .map_err(|e| format!("eth1-endpoint was an invalid URL: {:?}", e))?; - client_config.eth1.endpoint = Eth1Endpoint::NoAuth(endpoint); - } else if let Some(endpoint) = cli_args.value_of("eth1-endpoints") { - client_config.sync_eth1_chain = true; - let endpoint = SensitiveUrl::parse(endpoint) - .map_err(|e| format!("eth1-endpoints contains an invalid URL {:?}", e))?; - client_config.eth1.endpoint = Eth1Endpoint::NoAuth(endpoint); - } - if let Some(val) = cli_args.value_of("eth1-blocks-per-log-query") { client_config.eth1.blocks_per_log_query = val .parse() @@ -275,20 +249,6 @@ pub fn get_config( client_config.eth1.cache_follow_distance = Some(follow_distance); } - if cli_args.is_present("merge") { - if cli_args.is_present("execution-endpoint") { - warn!( - log, - "The --merge flag is deprecated"; - "info" => "the --execution-endpoint flag automatically enables this feature" - ) - } else { - return Err("The --merge flag is deprecated. \ - Supply a value to --execution-endpoint instead." - .into()); - } - } - if let Some(endpoints) = cli_args.value_of("execution-endpoint") { let mut el_config = execution_layer::Config::default(); @@ -364,16 +324,6 @@ pub fn get_config( clap_utils::parse_required(cli_args, "execution-timeout-multiplier")?; el_config.execution_timeout_multiplier = Some(execution_timeout_multiplier); - // If `--execution-endpoint` is provided, we should ignore any `--eth1-endpoints` values and - // use `--execution-endpoint` instead. Also, log a deprecation warning. - if cli_args.is_present("eth1-endpoints") || cli_args.is_present("eth1-endpoint") { - warn!( - log, - "Ignoring --eth1-endpoints flag"; - "info" => "the value for --execution-endpoint will be used instead. 
\ - --eth1-endpoints has been deprecated for post-merge configurations" - ); - } client_config.eth1.endpoint = Eth1Endpoint::Auth { endpoint: execution_endpoint, jwt_path: secret_file, @@ -719,7 +669,7 @@ pub fn get_config( } if cli_args.is_present("validator-monitor-auto") { - client_config.validator_monitor_auto = true; + client_config.validator_monitor.auto_register = true; } if let Some(pubkeys) = cli_args.value_of("validator-monitor-pubkeys") { @@ -729,7 +679,8 @@ pub fn get_config( .collect::, _>>() .map_err(|e| format!("Invalid --validator-monitor-pubkeys value: {:?}", e))?; client_config - .validator_monitor_pubkeys + .validator_monitor + .validators .extend_from_slice(&pubkeys); } @@ -747,14 +698,17 @@ pub fn get_config( .collect::, _>>() .map_err(|e| format!("Invalid --validator-monitor-file contents: {:?}", e))?; client_config - .validator_monitor_pubkeys + .validator_monitor + .validators .extend_from_slice(&pubkeys); } if let Some(count) = clap_utils::parse_optional(cli_args, "validator-monitor-individual-tracking-threshold")? { - client_config.validator_monitor_individual_tracking_threshold = count; + client_config + .validator_monitor + .individual_tracking_threshold = count; } if cli_args.is_present("disable-lock-timeouts") { @@ -812,22 +766,6 @@ pub fn get_config( client_config.chain.fork_choice_before_proposal_timeout_ms = timeout; } - if !clap_utils::parse_required::(cli_args, "count-unrealized")? { - warn!( - log, - "The flag --count-unrealized is deprecated and will be removed"; - "info" => "any use of the flag will have no effect" - ); - } - - if clap_utils::parse_required::(cli_args, "count-unrealized-full")? { - warn!( - log, - "The flag --count-unrealized-full is deprecated and will be removed"; - "info" => "setting it to `true` has no effect" - ); - } - client_config.chain.always_reset_payload_statuses = cli_args.is_present("reset-payload-statuses"); @@ -850,7 +788,7 @@ pub fn get_config( // Graphical user interface config. if cli_args.is_present("gui") { client_config.http_api.enabled = true; - client_config.validator_monitor_auto = true; + client_config.validator_monitor.auto_register = true; } // Optimistic finalized sync. diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 085a2a78241..cf6d627c304 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -89,7 +89,7 @@ impl ProductionBeaconNode { .disk_store( &db_path, &freezer_db_path, - blobs_db_path, + &blobs_db_path, store_config, log.clone(), )?; diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 6e2a2ae583e..43e14c30970 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -35,7 +35,7 @@ use state_processing::{ use std::cmp::min; use std::convert::TryInto; use std::marker::PhantomData; -use std::path::{Path, PathBuf}; +use std::path::Path; use std::sync::Arc; use std::time::Duration; use types::blob_sidecar::BlobSidecarList; @@ -61,7 +61,7 @@ pub struct HotColdDB, Cold: ItemStore> { /// Cold database containing compact historical data. pub cold_db: Cold, /// Database containing blobs. If None, store falls back to use `cold_db`. - pub blobs_db: Option, + pub blobs_db: Cold, /// Hot database containing duplicated but quick-to-access recent data. /// /// The hot database also contains all blocks. 
@@ -138,7 +138,6 @@ pub enum HotColdDBError { MissingExecutionPayload(Hash256), MissingFullBlockExecutionPayloadPruned(Hash256, Slot), MissingAnchorInfo, - MissingPathToBlobsDatabase, BlobsPreviouslyInDefaultStore, HotStateSummaryError(BeaconStateError), RestorePointDecodeError(ssz::DecodeError), @@ -178,7 +177,7 @@ impl HotColdDB, MemoryStore> { anchor_info: RwLock::new(None), blob_info: RwLock::new(BlobInfo::default()), cold_db: MemoryStore::open(), - blobs_db: Some(MemoryStore::open()), + blobs_db: MemoryStore::open(), hot_db: MemoryStore::open(), block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), @@ -202,7 +201,7 @@ impl HotColdDB, LevelDB> { pub fn open( hot_path: &Path, cold_path: &Path, - blobs_db_path: Option, + blobs_db_path: &Path, migrate_schema: impl FnOnce(Arc, SchemaVersion, SchemaVersion) -> Result<(), Error>, config: StoreConfig, spec: ChainSpec, @@ -215,7 +214,7 @@ impl HotColdDB, LevelDB> { anchor_info: RwLock::new(None), blob_info: RwLock::new(BlobInfo::default()), cold_db: LevelDB::open(cold_path)?, - blobs_db: None, + blobs_db: LevelDB::open(blobs_db_path)?, hot_db: LevelDB::open(hot_path)?, block_cache: Mutex::new(BlockCache::new(config.block_cache_size)), state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)), @@ -271,37 +270,29 @@ impl HotColdDB, LevelDB> { Some(blob_info) => { // If the oldest block slot is already set do not allow the blob DB path to be // changed (require manual migration). - if blob_info.oldest_blob_slot.is_some() { - if blobs_db_path.is_some() && !blob_info.blobs_db { - return Err(HotColdDBError::BlobsPreviouslyInDefaultStore.into()); - } else if blobs_db_path.is_none() && blob_info.blobs_db { - return Err(HotColdDBError::MissingPathToBlobsDatabase.into()); - } + if blob_info.oldest_blob_slot.is_some() && !blob_info.blobs_db { + return Err(HotColdDBError::BlobsPreviouslyInDefaultStore.into()); } // Set the oldest blob slot to the Deneb fork slot if it is not yet set. + // Always initialize `blobs_db` to true; we no longer support storing blobs + // in the freezer DB, because the UX for relocating the DB is strictly worse. let oldest_blob_slot = blob_info.oldest_blob_slot.or(deneb_fork_slot); BlobInfo { oldest_blob_slot, - blobs_db: blobs_db_path.is_some(), + blobs_db: true, } } // First start. None => BlobInfo { // Set the oldest blob slot to the Deneb fork slot if it is not yet set. oldest_blob_slot: deneb_fork_slot, - blobs_db: blobs_db_path.is_some(), + blobs_db: true, }, }; - if new_blob_info.blobs_db { - if let Some(path) = &blobs_db_path { - db.blobs_db = Some(LevelDB::open(path.as_path())?); - } - } db.compare_and_set_blob_info_with_write(<_>::default(), new_blob_info.clone())?; info!( db.log, "Blob DB initialized"; - "separate_db" => new_blob_info.blobs_db, "path" => ?blobs_db_path, "oldest_blob_slot" => ?new_blob_info.oldest_blob_slot, ); @@ -575,8 +566,8 @@ impl, Cold: ItemStore> HotColdDB /// Check if the blobs for a block exists on disk. pub fn blobs_exist(&self, block_root: &Hash256) -> Result { - let blobs_db = self.blobs_db.as_ref().unwrap_or(&self.cold_db); - blobs_db.key_exists(DBColumn::BeaconBlob.into(), block_root.as_bytes()) + self.blobs_db + .key_exists(DBColumn::BeaconBlob.into(), block_root.as_bytes()) } /// Determine whether a block exists in the database.
@@ -592,13 +583,12 @@ impl, Cold: ItemStore> HotColdDB .key_delete(DBColumn::BeaconBlock.into(), block_root.as_bytes())?; self.hot_db .key_delete(DBColumn::ExecPayload.into(), block_root.as_bytes())?; - let blobs_db = self.blobs_db.as_ref().unwrap_or(&self.cold_db); - blobs_db.key_delete(DBColumn::BeaconBlob.into(), block_root.as_bytes()) + self.blobs_db + .key_delete(DBColumn::BeaconBlob.into(), block_root.as_bytes()) } pub fn put_blobs(&self, block_root: &Hash256, blobs: BlobSidecarList) -> Result<(), Error> { - let blobs_db = self.blobs_db.as_ref().unwrap_or(&self.cold_db); - blobs_db.put_bytes( + self.blobs_db.put_bytes( DBColumn::BeaconBlob.into(), block_root.as_bytes(), &blobs.as_ssz_bytes(), @@ -988,9 +978,9 @@ impl, Cold: ItemStore> HotColdDB let mut guard = self.block_cache.lock(); let blob_cache_ops = blobs_ops.clone(); - let blobs_db = self.blobs_db.as_ref().unwrap_or(&self.cold_db); // Try to execute blobs store ops. - blobs_db.do_atomically(self.convert_to_kv_batch(blobs_ops)?)?; + self.blobs_db + .do_atomically(self.convert_to_kv_batch(blobs_ops)?)?; let hot_db_cache_ops = hot_db_ops.clone(); // Try to execute hot db store ops. @@ -1018,7 +1008,8 @@ impl, Cold: ItemStore> HotColdDB }; *op = reverse_op; } - blobs_db.do_atomically(self.convert_to_kv_batch(blob_cache_ops)?)?; + self.blobs_db + .do_atomically(self.convert_to_kv_batch(blob_cache_ops)?)?; return Err(e); } @@ -1436,15 +1427,16 @@ impl, Cold: ItemStore> HotColdDB /// Fetch blobs for a given block from the store. pub fn get_blobs(&self, block_root: &Hash256) -> Result>, Error> { - let blobs_db = self.blobs_db.as_ref().unwrap_or(&self.cold_db); - // Check the cache. if let Some(blobs) = self.block_cache.lock().get_blobs(block_root) { metrics::inc_counter(&metrics::BEACON_BLOBS_CACHE_HIT_COUNT); return Ok(Some(blobs.clone())); } - match blobs_db.get_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes())? { + match self + .blobs_db + .get_bytes(DBColumn::BeaconBlob.into(), block_root.as_bytes())? + { Some(ref blobs_bytes) => { let blobs = BlobSidecarList::from_ssz_bytes(blobs_bytes)?; self.block_cache @@ -1640,7 +1632,7 @@ impl, Cold: ItemStore> HotColdDB }); let blob_info = BlobInfo { oldest_blob_slot, - blobs_db: self.blobs_db.is_some(), + blobs_db: true, }; self.compare_and_set_blob_info(self.get_blob_info(), blob_info) } diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 124c9a2f586..6fef74d7ff0 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -135,7 +135,7 @@ pub struct BlobInfo { /// If the `oldest_blob_slot` is `None` then this means that the Deneb fork epoch is not yet /// known. pub oldest_blob_slot: Option, - /// A separate blobs database is in use. + /// A separate blobs database is in use (deprecated, always `true`). pub blobs_db: bool, } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index c59e59abf5a..043c0f197e5 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -272,6 +272,31 @@ impl BeaconNodeHttpClient { } } + /// Perform a HTTP GET request using an 'accept' header, returning `None` on a 404 error. 
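+ /// + /// A sketch of intended usage (`client`, `url` and `timeout` are illustrative stand-ins, not items introduced by this change): + /// ```ignore + /// let (opt_bytes, opt_headers) = client + /// .get_bytes_response_with_response_headers(url, Accept::Ssz, timeout) + /// .await?; + /// ```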
+ pub async fn get_bytes_response_with_response_headers<U: IntoUrl>( + &self, + url: U, + accept_header: Accept, + timeout: Duration, + ) -> Result<(Option<Vec<u8>>, Option<HeaderMap>), Error> { + let opt_response = self + .get_response(url, |b| b.accept(accept_header).timeout(timeout)) + .await + .optional()?; + + match opt_response { + Some(resp) => { + let response_headers = resp.headers().clone(); + Ok(( + Some(resp.bytes().await?.into_iter().collect::<Vec<_>>()), + Some(response_headers), + )) + } + None => Ok((None, None)), + } + } + /// Perform a HTTP POST request. async fn post(&self, url: U, body: &T) -> Result<(), Error> { self.post_generic(url, body, None).await?; @@ -1684,30 +1709,14 @@ impl BeaconNodeHttpClient { Ok(path) } - /// `GET v3/validator/blocks/{slot}` - pub async fn get_validator_blocks_v3( - &self, - slot: Slot, - randao_reveal: &SignatureBytes, - graffiti: Option<&Graffiti>, - ) -> Result, Error> { - self.get_validator_blocks_v3_modular( - slot, - randao_reveal, - graffiti, - SkipRandaoVerification::No, - ) - .await - } - - /// `GET v3/validator/blocks/{slot}` - pub async fn get_validator_blocks_v3_modular( + /// Returns the `GET v3/validator/blocks/{slot}` URL path. + pub async fn get_validator_blocks_v3_path( &self, slot: Slot, randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, skip_randao_verification: SkipRandaoVerification, - ) -> Result, Error> { + ) -> Result<Url, Error> { let mut path = self.eth_path(V3)?; path.path_segments_mut() @@ -1729,6 +1738,42 @@ impl BeaconNodeHttpClient { .append_pair("skip_randao_verification", ""); } + Ok(path) + } + + /// `GET v3/validator/blocks/{slot}` + pub async fn get_validator_blocks_v3( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + ) -> Result, Error> { + self.get_validator_blocks_v3_modular( + slot, + randao_reveal, + graffiti, + SkipRandaoVerification::No, + ) + .await + } + + /// `GET v3/validator/blocks/{slot}` + pub async fn get_validator_blocks_v3_modular( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + skip_randao_verification: SkipRandaoVerification, + ) -> Result, Error> { + let path = self + .get_validator_blocks_v3_path::( + slot, + randao_reveal, + graffiti, + skip_randao_verification, + ) + .await?; + let response = self.get_response(path, |b| b).await?; let is_blinded_payload = response @@ -1750,6 +1795,58 @@ impl BeaconNodeHttpClient { } } + /// `GET v3/validator/blocks/{slot}` in ssz format + pub async fn get_validator_blocks_v3_ssz( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + ) -> Result<(Option<Vec<u8>>, bool), Error> { + self.get_validator_blocks_v3_modular_ssz::( + slot, + randao_reveal, + graffiti, + SkipRandaoVerification::No, + ) + .await + } + + /// `GET v3/validator/blocks/{slot}` in ssz format + pub async fn get_validator_blocks_v3_modular_ssz( + &self, + slot: Slot, + randao_reveal: &SignatureBytes, + graffiti: Option<&Graffiti>, + skip_randao_verification: SkipRandaoVerification, + ) -> Result<(Option<Vec<u8>>, bool), Error> { + let path = self + .get_validator_blocks_v3_path::( + slot, + randao_reveal, + graffiti, + skip_randao_verification, + ) + .await?; + + let (response_content, response_headers) = self + .get_bytes_response_with_response_headers( + path, + Accept::Ssz, + self.timeouts.get_validator_block_ssz, + ) + .await?; + + let is_blinded_payload = match response_headers { + Some(headers) => headers + .get(EXECUTION_PAYLOAD_BLINDED_HEADER) + .map(|value| value.to_str().unwrap_or_default().to_lowercase() == "true") + .unwrap_or(false), + None => false, + }; + + Ok((response_content, is_blinded_payload)) + } + /// `GET v2/validator/blocks/{slot}` in ssz format pub async fn get_validator_blocks_ssz>( &self, diff --git a/common/slot_clock/src/system_time_slot_clock.rs b/common/slot_clock/src/system_time_slot_clock.rs index c54646fbc6d..770132064ef 100644 --- a/common/slot_clock/src/system_time_slot_clock.rs +++ b/common/slot_clock/src/system_time_slot_clock.rs @@ -2,8 +2,6 @@ use super::{ManualSlotClock, SlotClock}; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use types::Slot; -pub use std::time::SystemTimeError; - /// Determines the present slot based upon the present system time. #[derive(Clone)] pub struct SystemTimeSlotClock { diff --git a/consensus/merkle_proof/src/lib.rs b/consensus/merkle_proof/src/lib.rs index dc3de71cefd..595de86e862 100644 --- a/consensus/merkle_proof/src/lib.rs +++ b/consensus/merkle_proof/src/lib.rs @@ -250,7 +250,7 @@ impl MerkleTree { if deposit_count == (0x1 << level) { return Ok(MerkleTree::Finalized( *finalized_branch - .get(0) + .first() .ok_or(MerkleTreeError::PleaseNotifyTheDevs)?, )); } diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 18b9866f35d..e2e25f24b82 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -626,6 +626,25 @@ impl BeaconState { cache.get_all_beacon_committees() } + /// Returns the block root which decided the proposer shuffling for the given `epoch`. This root + /// can be used to key that proposer shuffling. + /// + /// ## Notes + /// + /// The `block_root` must be equal to the latest block applied to `self`. + pub fn proposer_shuffling_decision_root_at_epoch( + &self, + epoch: Epoch, + block_root: Hash256, + ) -> Result { + let decision_slot = self.proposer_shuffling_decision_slot(epoch); + if self.slot() <= decision_slot { + Ok(block_root) + } else { + self.get_block_root(decision_slot).map(|root| *root) + } + } + /// Returns the block root which decided the proposer shuffling for the current epoch. This root /// can be used to key this proposer shuffling. /// @@ -634,7 +653,7 @@ impl BeaconState { /// The `block_root` covers the one-off scenario where the genesis block decides its own /// shuffling. It should be set to the latest block applied to `self` or the genesis block root. pub fn proposer_shuffling_decision_root(&self, block_root: Hash256) -> Result { - let decision_slot = self.proposer_shuffling_decision_slot(); + let decision_slot = self.proposer_shuffling_decision_slot(self.current_epoch()); if self.slot() == decision_slot { Ok(block_root) } else { @@ -643,11 +662,9 @@ } /// Returns the slot at which the proposer shuffling was decided. The block root at this slot - /// can be used to key the proposer shuffling for the current epoch. - fn proposer_shuffling_decision_slot(&self) -> Slot { - self.current_epoch() - .start_slot(T::slots_per_epoch()) - .saturating_sub(1_u64) + /// can be used to key the proposer shuffling for the given epoch. + fn proposer_shuffling_decision_slot(&self, epoch: Epoch) -> Slot { + epoch.start_slot(T::slots_per_epoch()).saturating_sub(1_u64) } /// Returns the block root which decided the attester shuffling for the given `relative_epoch`.
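To make the new epoch-parameterised decision logic concrete, here is a small standalone sketch (not part of the diff): it mirrors the private `proposer_shuffling_decision_slot` helper above, assumes only the `types` crate's `Epoch`/`Slot` arithmetic, and walks through the `<=` comparison that lets callers key shufflings for future epochs.

```rust
use types::{Epoch, Slot};

// Mirrors the helper above: the proposer shuffling for `epoch` is decided by
// the block at the last slot before that epoch starts.
fn decision_slot(epoch: Epoch, slots_per_epoch: u64) -> Slot {
    epoch.start_slot(slots_per_epoch).saturating_sub(1_u64)
}

fn main() {
    let slots_per_epoch = 32; // mainnet preset
    let state_slot = Slot::new(65); // a state in epoch 2

    // Looking ahead to epoch 3: the decision slot is 95 and 65 <= 95, so the
    // shuffling is decided by the latest block applied to the state and the
    // caller's `block_root` is returned unchanged.
    assert_eq!(decision_slot(Epoch::new(3), slots_per_epoch), Slot::new(95));

    // For the current epoch 2: the decision slot is 63 < 65, so the root is
    // read out of the state via `get_block_root(63)` instead.
    assert_eq!(decision_slot(Epoch::new(2), slots_per_epoch), Slot::new(63));
    assert!(state_slot > Slot::new(63));
}
```

The `<=` (rather than the old `==`) is what makes the future-epoch case work: any epoch whose decision slot has not yet been reached is keyed by the latest block applied to the state.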
diff --git a/consensus/types/src/sync_aggregator_selection_data.rs b/consensus/types/src/sync_aggregator_selection_data.rs index 2b60d01b8ee..3da130bb068 100644 --- a/consensus/types/src/sync_aggregator_selection_data.rs +++ b/consensus/types/src/sync_aggregator_selection_data.rs @@ -25,11 +25,11 @@ pub struct SyncAggregatorSelectionData { pub subcommittee_index: u64, } +impl SignedRoot for SyncAggregatorSelectionData {} + #[cfg(test)] mod tests { use super::*; ssz_and_tree_hash_tests!(SyncAggregatorSelectionData); } - -impl SignedRoot for SyncAggregatorSelectionData {} diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index 93654b8dd50..95af4d63821 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -210,7 +210,7 @@ pub fn display_db_version( HotColdDB::, LevelDB>::open( &hot_path, &cold_path, - blobs_path, + &blobs_path, |_, from, _| { version = from; Ok(()) @@ -288,7 +288,7 @@ pub fn inspect_db( let db = HotColdDB::, LevelDB>::open( &hot_path, &cold_path, - blobs_path, + &blobs_path, |_, _, _| Ok(()), client_config.store, spec, @@ -410,7 +410,7 @@ pub fn migrate_db( let db = HotColdDB::, LevelDB>::open( &hot_path, &cold_path, - blobs_path, + &blobs_path, |_, db_initial_version, _| { from = db_initial_version; Ok(()) @@ -450,7 +450,7 @@ pub fn prune_payloads( let db = HotColdDB::, LevelDB>::open( &hot_path, &cold_path, - blobs_path, + &blobs_path, |_, _, _| Ok(()), client_config.store, spec.clone(), @@ -476,7 +476,7 @@ pub fn prune_blobs( let db = HotColdDB::, LevelDB>::open( &hot_path, &cold_path, - blobs_path, + &blobs_path, |_, _, _| Ok(()), client_config.store, spec.clone(), @@ -512,7 +512,7 @@ pub fn prune_states( let db = HotColdDB::, LevelDB>::open( &hot_path, &cold_path, - blobs_path, + &blobs_path, |_, _, _| Ok(()), client_config.store, spec.clone(), diff --git a/lighthouse/src/main.rs b/lighthouse/src/main.rs index f98af96176f..edf661abab3 100644 --- a/lighthouse/src/main.rs +++ b/lighthouse/src/main.rs @@ -11,7 +11,7 @@ use ethereum_hashing::have_sha_extensions; use futures::TryFutureExt; use lighthouse_version::VERSION; use malloc_utils::configure_memory_allocator; -use slog::{crit, info, warn}; +use slog::{crit, info}; use std::path::PathBuf; use std::process::exit; use task_executor::ShutdownReason; @@ -81,16 +81,6 @@ fn main() { cfg!(feature = "gnosis"), ).as_str() ) - .arg( - Arg::with_name("spec") - .short("s") - .long("spec") - .value_name("DEPRECATED") - .help("This flag is deprecated, it will be disallowed in a future release. This \ - value is now derived from the --network or --testnet-dir flags.") - .takes_value(true) - .global(true) - ) .arg( Arg::with_name("env_log") .short("l") @@ -549,16 +539,9 @@ fn run( // Allow Prometheus access to the version and commit of the Lighthouse build. 
metrics::expose_lighthouse_version(); - if matches.is_present("spec") { - warn!( - log, - "The --spec flag is deprecated and will be removed in a future release" - ); - } - #[cfg(all(feature = "modern", target_arch = "x86_64"))] if !std::is_x86_feature_detected!("adx") { - warn!( + slog::warn!( log, "CPU seems incompatible with optimized Lighthouse build"; "advice" => "If you get a SIGILL, please try Lighthouse portable build" diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 7937b1d496e..c8e064e224d 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -243,60 +243,6 @@ fn paranoid_block_proposal_on() { .with_config(|config| assert!(config.chain.paranoid_block_proposal)); } -#[test] -fn count_unrealized_no_arg() { - CommandLineTest::new() - .flag("count-unrealized", None) - // This flag should be ignored, so there's nothing to test but that the - // client starts with the flag present. - .run_with_zero_port(); -} - -#[test] -fn count_unrealized_false() { - CommandLineTest::new() - .flag("count-unrealized", Some("false")) - // This flag should be ignored, so there's nothing to test but that the - // client starts with the flag present. - .run_with_zero_port(); -} - -#[test] -fn count_unrealized_true() { - CommandLineTest::new() - .flag("count-unrealized", Some("true")) - // This flag should be ignored, so there's nothing to test but that the - // client starts with the flag present. - .run_with_zero_port(); -} - -#[test] -fn count_unrealized_full_no_arg() { - CommandLineTest::new() - .flag("count-unrealized-full", None) - // This flag should be ignored, so there's nothing to test but that the - // client starts with the flag present. - .run_with_zero_port(); -} - -#[test] -fn count_unrealized_full_false() { - CommandLineTest::new() - .flag("count-unrealized-full", Some("false")) - // This flag should be ignored, so there's nothing to test but that the - // client starts with the flag present. - .run_with_zero_port(); -} - -#[test] -fn count_unrealized_full_true() { - CommandLineTest::new() - .flag("count-unrealized-full", Some("true")) - // This flag should be ignored, so there's nothing to test but that the - // client starts with the flag present. 
- .run_with_zero_port(); -} - #[test] fn reset_payload_statuses_default() { CommandLineTest::new() @@ -388,23 +334,6 @@ fn eth1_flag() { .with_config(|config| assert!(config.sync_eth1_chain)); } #[test] -fn eth1_endpoints_flag() { - CommandLineTest::new() - .flag("eth1-endpoints", Some("http://localhost:9545")) - .run_with_zero_port() - .with_config(|config| { - assert_eq!( - config.eth1.endpoint.get_endpoint().full.to_string(), - "http://localhost:9545/" - ); - assert_eq!( - config.eth1.endpoint.get_endpoint().to_string(), - "http://localhost:9545/" - ); - assert!(config.sync_eth1_chain); - }); -} -#[test] fn eth1_blocks_per_log_query_flag() { CommandLineTest::new() .flag("eth1-blocks-per-log-query", Some("500")) @@ -527,49 +456,6 @@ fn merge_execution_endpoints_flag() { fn merge_execution_endpoint_flag() { run_merge_execution_endpoints_flag_test("execution-endpoint") } -fn run_execution_endpoints_overrides_eth1_endpoints_test(eth1_flag: &str, execution_flag: &str) { - use sensitive_url::SensitiveUrl; - - let eth1_endpoint = "http://bad.bad"; - let execution_endpoint = "http://good.good"; - - assert!(eth1_endpoint != execution_endpoint); - - let dir = TempDir::new().expect("Unable to create temporary directory"); - let jwt_path = dir.path().join("jwt-file"); - - CommandLineTest::new() - .flag(eth1_flag, Some(ð1_endpoint)) - .flag(execution_flag, Some(&execution_endpoint)) - .flag("execution-jwt", jwt_path.as_os_str().to_str()) - .run_with_zero_port() - .with_config(|config| { - assert_eq!( - config.execution_layer.as_ref().unwrap().execution_endpoints, - vec![SensitiveUrl::parse(execution_endpoint).unwrap()] - ); - - // The eth1 endpoint should have been set to the --execution-endpoint value in defiance - // of --eth1-endpoints. - assert_eq!( - config.eth1.endpoint, - Eth1Endpoint::Auth { - endpoint: SensitiveUrl::parse(execution_endpoint).unwrap(), - jwt_path: jwt_path.clone(), - jwt_id: None, - jwt_version: None, - } - ); - }); -} -#[test] -fn execution_endpoints_overrides_eth1_endpoints() { - run_execution_endpoints_overrides_eth1_endpoints_test("eth1-endpoints", "execution-endpoints"); -} -#[test] -fn execution_endpoint_overrides_eth1_endpoint() { - run_execution_endpoints_overrides_eth1_endpoints_test("eth1-endpoint", "execution-endpoint"); -} #[test] fn merge_jwt_secrets_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); @@ -1772,11 +1658,17 @@ fn metrics_allow_origin_all_flag() { // Tests for Validator Monitor flags. 
#[test] +fn validator_monitor_default_values() { + CommandLineTest::new() + .run_with_zero_port() + .with_config(|config| assert!(config.validator_monitor == <_>::default())); +} +#[test] fn validator_monitor_auto_flag() { CommandLineTest::new() .flag("validator-monitor-auto", None) .run_with_zero_port() - .with_config(|config| assert!(config.validator_monitor_auto)); + .with_config(|config| assert!(config.validator_monitor.auto_register)); } #[test] fn validator_monitor_pubkeys_flag() { @@ -1785,8 +1677,8 @@ fn validator_monitor_pubkeys_flag() { 0xbeefdeadbeefdeaddeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.validator_monitor_pubkeys[0].to_string(), "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"); - assert_eq!(config.validator_monitor_pubkeys[1].to_string(), "0xbeefdeadbeefdeaddeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"); + assert_eq!(config.validator_monitor.validators[0].to_string(), "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"); + assert_eq!(config.validator_monitor.validators[1].to_string(), "0xbeefdeadbeefdeaddeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"); }); } #[test] @@ -1800,8 +1692,8 @@ fn validator_monitor_file_flag() { .flag("validator-monitor-file", dir.path().join("pubkeys.txt").as_os_str().to_str()) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.validator_monitor_pubkeys[0].to_string(), "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"); - assert_eq!(config.validator_monitor_pubkeys[1].to_string(), "0xbeefdeadbeefdeaddeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"); + assert_eq!(config.validator_monitor.validators[0].to_string(), "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"); + assert_eq!(config.validator_monitor.validators[1].to_string(), "0xbeefdeadbeefdeaddeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"); }); } #[test] @@ -1810,7 +1702,7 @@ fn validator_monitor_metrics_threshold_default() { .run_with_zero_port() .with_config(|config| { assert_eq!( - config.validator_monitor_individual_tracking_threshold, + config.validator_monitor.individual_tracking_threshold, // If this value changes make sure to update the help text for // the CLI command. 64 @@ -1826,7 +1718,7 @@ fn validator_monitor_metrics_threshold_custom() { ) .run_with_zero_port() .with_config(|config| { - assert_eq!(config.validator_monitor_individual_tracking_threshold, 42) + assert_eq!(config.validator_monitor.individual_tracking_threshold, 42) }); } @@ -2472,7 +2364,7 @@ fn gui_flag() { .run_with_zero_port() .with_config(|config| { assert!(config.http_api.enabled); - assert!(config.validator_monitor_auto); + assert!(config.validator_monitor.auto_register); }); } diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 4420a980616..14d65468a78 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -101,12 +101,6 @@ fn beacon_nodes_flag() { }); } -#[test] -fn allow_unsynced_flag() { - // No-op, but doesn't crash. 
- CommandLineTest::new().flag("allow-unsynced", None).run(); -} - #[test] fn disable_auto_discover_flag() { CommandLineTest::new() diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index 4f54154d1d2..87565b0cae6 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -5,58 +5,105 @@ This setup can be useful for testing and development. ## Requirements -The scripts require `lcli`, `lighthouse`, `geth`, `bootnode` to be installed on `PATH`. +The scripts require `lcli`, `lighthouse`, `geth`, `bootnode` to be installed on `PATH` (run `echo $PATH` to view all `PATH` directories). macOS users need to install GNU `sed` and GNU `grep`, and add them both to `PATH` as well. -From the -root of this repository, run: +The first step is to install Rust and dependencies. Refer to the [Lighthouse Book](https://lighthouse-book.sigmaprime.io/installation-source.html#dependencies) for installation. We will also need [jq](https://jqlang.github.io/jq/), which can be installed with `sudo apt install jq`. + +Then, we clone the Lighthouse repository: +```bash +cd ~ +git clone https://github.com/sigp/lighthouse.git +cd lighthouse +``` +We are now ready to build Lighthouse. Run the command: ```bash make make install-lcli ``` +This will build `lighthouse` and `lcli`. For `geth` and `bootnode`, go to the [geth website](https://geth.ethereum.org/downloads) and download `Geth & Tools`. For example, to download and extract `Geth & Tools 1.13.1`: + +```bash +cd ~ +curl -LO https://gethstore.blob.core.windows.net/builds/geth-alltools-linux-amd64-1.13.1-3f40e65c.tar.gz +tar xvf geth-alltools-linux-amd64-1.13.1-3f40e65c.tar.gz +``` + +After extraction, copy `geth` and `bootnode` to a directory on `PATH`. A typical choice is `/usr/local/bin`. + +```bash +cd geth-alltools-linux-amd64-1.13.1-3f40e65c +sudo cp geth bootnode /usr/local/bin +``` + +After that, we can remove the downloaded files: + +```bash +cd ~ +rm -r geth-alltools-linux-amd64-1.13.1-3f40e65c geth-alltools-linux-amd64-1.13.1-3f40e65c.tar.gz +``` + +We are now ready to start a local testnet. + ## Starting the testnet -Modify `vars.env` as desired. +To start a testnet using the predetermined settings: + +```bash +cd ~ +cd ./lighthouse/scripts/local_testnet +./start_local_testnet.sh genesis.json +``` + +This will execute the script, and if the testnet setup is successful, you will see "Started!" at the end. The testnet starts with a post-merge genesis state. -Start a consensus layer and execution layer boot node along with `BN_COUNT` -number of beacon nodes each connected to a geth execution client and `VC_COUNT` validator clients. +The testnet starts a consensus layer and execution layer boot node, along with `BN_COUNT` +beacon nodes (each connected to a geth execution client) and `VC_COUNT` validator clients. By default, `BN_COUNT=4` and `VC_COUNT=4`. The `start_local_testnet.sh` script takes four options `-v VC_COUNT`, `-d DEBUG_LEVEL`, `-p` to enable builder proposals and `-h` for help. It also takes a mandatory `GENESIS_FILE` for initialising geth's state. A sample `genesis.json` is provided in this directory. -The `ETH1_BLOCK_HASH` environment variable is set to the block_hash of the genesis execution layer block which depends on the contents of `genesis.json`. Users of these scripts need to ensure that the `ETH1_BLOCK_HASH` variable is updated if genesis file is modified. - The options may be in any order or absent in which case they take the default value specified.
- VC_COUNT: the number of validator clients to create, default: `BN_COUNT` - DEBUG_LEVEL: one of { error, warn, info, debug, trace }, default: `info` +The `ETH1_BLOCK_HASH` environment variable is set to the block_hash of the genesis execution layer block, which depends on the contents of `genesis.json`. Users of these scripts need to ensure that the `ETH1_BLOCK_HASH` variable is updated if the genesis file is modified. +To view the beacon, validator client and geth logs: ```bash -./start_local_testnet.sh genesis.json +tail -f ~/.lighthouse/local-testnet/testnet/beacon_node_1.log +tail -f ~/.lighthouse/local-testnet/testnet/validator_node_1.log +tail -f ~/.lighthouse/local-testnet/testnet/geth_1.log ``` +where `beacon_node_1` can be changed to `beacon_node_2`, `beacon_node_3` or `beacon_node_4` to view logs for different beacon nodes. The same applies to validator clients and geth nodes. + ## Stopping the testnet -This is not necessary before `start_local_testnet.sh` as it invokes `stop_local_testnet.sh` automatically. +To stop the testnet, navigate to the `local_testnet` directory (`cd ~/lighthouse/scripts/local_testnet`), then run the command: + ```bash ./stop_local_testnet.sh ``` +Once a testnet is stopped, it cannot be continued from where it left off; running `./start_local_testnet.sh` again will start a new local testnet. + ## Manual creation of local testnet -These scripts are used by ./start_local_testnet.sh and may be used to manually +In [Starting the testnet](./README.md#starting-the-testnet), the testnet is started automatically with predetermined parameters (database directory, ports used, etc.). This section describes how to modify some of the local testnet settings, e.g., changing the database directory or the ports used. -Assuming you are happy with the configuration in `vars.env`, -create the testnet directory, genesis state with embedded validators and validator keys with: + +The testnet also contains parameters that are specified in `vars.env`, such as the slot time `SECONDS_PER_SLOT=3` (instead of 12 seconds on mainnet). You may change these parameters to suit your testing purposes. After that, in the `local_testnet` directory, run the following commands to create the genesis state with embedded validators and validator keys, and to update the time in `genesis.json`: ```bash ./setup.sh +./setup_time.sh genesis.json ``` Note: The generated genesis validators are embedded into the genesis state as genesis validators and hence do not require manual deposits to activate. @@ -73,17 +120,17 @@ Start a geth node: ```bash ./geth.sh <DATADIR> <NETWORK-PORT> <HTTP-PORT> <AUTH-HTTP-PORT> <GENESIS_FILE> ``` e.g. ```bash -./geth.sh $HOME/.lighthouse/local-testnet/geth_1 5000 6000 7000 genesis.json +./geth.sh $HOME/.lighthouse/local-testnet/geth_1 7001 6001 5001 genesis.json ``` Start a beacon node: ```bash -./beacon_node.sh <DATADIR> <NETWORK-PORT> <HTTP-PORT> <EXECUTION-ENDPOINT> <EXECUTION-JWT-PATH> +./beacon_node.sh <DATADIR> <NETWORK-PORT> <QUIC-PORT> <HTTP-PORT> <EXECUTION-ENDPOINT> <EXECUTION-JWT-PATH> ``` e.g. ```bash -./beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9000 8000 http://localhost:6000 ~/.lighthouse/local-testnet/geth_1/geth/jwtsecret +./beacon_node.sh $HOME/.lighthouse/local-testnet/node_1 9001 9101 8001 http://localhost:5001 ~/.lighthouse/local-testnet/geth_1/geth/jwtsecret ``` In a new terminal, start the validator client which will attach to the first @@ -94,10 +141,16 @@ beacon node: ```bash ./validator_client.sh <DATADIR> <BEACON-NODE-HTTP-ENDPOINT> ``` e.g. to attach to the above created beacon node ```bash -./validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:8000
+./validator_client.sh $HOME/.lighthouse/local-testnet/node_1 http://localhost:8001 ``` -You can create additional beacon node and validator client instances with appropriate parameters. +You can create additional geth, beacon node and validator client instances by changing the ports, e.g., for a second geth, beacon node and validator client: + +```bash +./geth.sh $HOME/.lighthouse/local-testnet/geth_2 7002 6002 5002 genesis.json +./beacon_node.sh $HOME/.lighthouse/local-testnet/node_2 9002 9102 8002 http://localhost:5002 ~/.lighthouse/local-testnet/geth_2/geth/jwtsecret +./validator_client.sh $HOME/.lighthouse/local-testnet/node_2 http://localhost:8002 +``` ## Additional Info @@ -109,7 +162,7 @@ instances using the `--datadir` parameter. ### Starting fresh -Delete the current testnet and all related files using. Generally not necessary as `start_local_test.sh` does this each time it starts. +You can delete the current testnet and all related files using the following command. Alternatively, if you wish to start another testnet, following the steps in [Starting the testnet](./README.md#starting-the-testnet) will automatically delete the files and start a fresh local testnet. ```bash ./clean.sh ``` @@ -131,12 +184,12 @@ Update the genesis time to now using: ### Testing builder flow -1. Add builder URL to `BN_ARGS` in `./var.env`, e.g. `--builder http://localhost:8650`. Some mock builder server options: +1. Add the builder URL to `BN_ARGS` in `./vars.env`, e.g. `--builder http://localhost:8650`. Some mock builder server options: - [`mock-relay`](https://github.com/realbigsean/mock-relay) - [`dummy-builder`](https://github.com/michaelsproul/dummy_builder) 2. (Optional) Add `--always-prefer-builder-payload` to `BN_ARGS`. 3. The above mock builders do not support non-mainnet presets as of now, and will require setting `SECONDS_PER_SLOT` and `SECONDS_PER_ETH1_BLOCK` to `12` in `./vars.env`. -4. Start the testnet with the following command (the `-p` flag enables the validator client `--builder-proposals` flag: +4. 
Start the testnet with the following command (the `-p` flag enables the validator client `--builder-proposals` flag): ```bash ./start_local_testnet.sh -p genesis.json ``` diff --git a/scripts/local_testnet/el_bootnode.sh b/scripts/local_testnet/el_bootnode.sh index d73a463f6d1..ee437a491c9 100755 --- a/scripts/local_testnet/el_bootnode.sh +++ b/scripts/local_testnet/el_bootnode.sh @@ -1,3 +1,3 @@ priv_key="02fd74636e96a8ffac8e7b01b0de8dea94d6bcf4989513b38cf59eb32163ff91" source ./vars.env -$EL_BOOTNODE_BINARY --nodekeyhex $priv_key \ No newline at end of file +exec $EL_BOOTNODE_BINARY --nodekeyhex $priv_key \ No newline at end of file diff --git a/scripts/local_testnet/setup.sh b/scripts/local_testnet/setup.sh index 7e000251a29..d7a6016aa80 100755 --- a/scripts/local_testnet/setup.sh +++ b/scripts/local_testnet/setup.sh @@ -32,7 +32,7 @@ lcli \ --ttd $TTD \ --eth1-block-hash $ETH1_BLOCK_HASH \ --eth1-id $CHAIN_ID \ - --eth1-follow-distance 1 \ + --eth1-follow-distance 128 \ --seconds-per-slot $SECONDS_PER_SLOT \ --seconds-per-eth1-block $SECONDS_PER_ETH1_BLOCK \ --proposer-score-boost "$PROPOSER_SCORE_BOOST" \ diff --git a/scripts/local_testnet/setup_time.sh b/scripts/local_testnet/setup_time.sh new file mode 100755 index 00000000000..21a8ae7ac15 --- /dev/null +++ b/scripts/local_testnet/setup_time.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +set -Eeuo pipefail + +source ./vars.env + +# Function to output SLOT_PER_EPOCH for the mainnet, minimal or gnosis presets +get_spec_preset_value() { + case "$SPEC_PRESET" in + mainnet) echo 32 ;; + minimal) echo 8 ;; + gnosis) echo 16 ;; + *) echo "Unsupported preset: $SPEC_PRESET" >&2; exit 1 ;; + esac +} + +SLOT_PER_EPOCH=$(get_spec_preset_value $SPEC_PRESET) +echo "slot_per_epoch=$SLOT_PER_EPOCH" + +genesis_file=$1 + +# Update future hardfork times in the EL genesis file based on the CL genesis time +GENESIS_TIME=$(lcli pretty-ssz --spec $SPEC_PRESET --testnet-dir $TESTNET_DIR BeaconState $TESTNET_DIR/genesis.ssz | jq | grep -Po 'genesis_time": "\K.*\d') +echo $GENESIS_TIME +CAPELLA_TIME=$((GENESIS_TIME + (CAPELLA_FORK_EPOCH * $SLOT_PER_EPOCH * SECONDS_PER_SLOT))) +echo $CAPELLA_TIME +sed -i 's/"shanghaiTime".*$/"shanghaiTime": '"$CAPELLA_TIME"',/g' $genesis_file +CANCUN_TIME=$((GENESIS_TIME + (DENEB_FORK_EPOCH * $SLOT_PER_EPOCH * SECONDS_PER_SLOT))) +echo $CANCUN_TIME +sed -i 's/"cancunTime".*$/"cancunTime": '"$CANCUN_TIME"',/g' $genesis_file +cat $genesis_file + diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index c796050bc42..512b1e98d16 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -102,16 +102,8 @@ execute_command_add_PID() { echo "executing: ./setup.sh >> $LOG_DIR/setup.log" ./setup.sh >> $LOG_DIR/setup.log 2>&1 -# Update future hardforks time in the EL genesis file based on the CL genesis time -GENESIS_TIME=$(lcli pretty-ssz --spec $SPEC_PRESET --testnet-dir $TESTNET_DIR BeaconState $TESTNET_DIR/genesis.ssz | jq | grep -Po 'genesis_time": "\K.*\d') -echo $GENESIS_TIME -CAPELLA_TIME=$((GENESIS_TIME + (CAPELLA_FORK_EPOCH * 32 * SECONDS_PER_SLOT))) -echo $CAPELLA_TIME -sed -i 's/"shanghaiTime".*$/"shanghaiTime": '"$CAPELLA_TIME"',/g' $genesis_file -CANCUN_TIME=$((GENESIS_TIME + (DENEB_FORK_EPOCH * 32 * SECONDS_PER_SLOT))) -echo $CANCUN_TIME -sed -i 's/"cancunTime".*$/"cancunTime": '"$CANCUN_TIME"',/g' $genesis_file -cat $genesis_file +# Call setup_time.sh to update future hardfork times in the EL genesis file based on the CL genesis time
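+# setup_time.sh derives the CL genesis time from the beacon state (via `lcli pretty-ssz`) and rewrites the "shanghaiTime" and "cancunTime" fields of the EL genesis file in place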
+./setup_time.sh genesis.json # Delay to let boot_enr.yaml to be created execute_command_add_PID bootnode.log ./bootnode.sh diff --git a/scripts/local_testnet/vars.env b/scripts/local_testnet/vars.env index d04a2354979..31274d2c575 100644 --- a/scripts/local_testnet/vars.env +++ b/scripts/local_testnet/vars.env @@ -56,7 +56,7 @@ SPEC_PRESET=mainnet SECONDS_PER_SLOT=3 # Seconds per Eth1 block -SECONDS_PER_ETH1_BLOCK=1 +SECONDS_PER_ETH1_BLOCK=3 # Proposer score boost percentage PROPOSER_SCORE_BOOST=40 diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs index 3b9235cc4e0..29f5c015e38 100644 --- a/testing/state_transition_vectors/src/exit.rs +++ b/testing/state_transition_vectors/src/exit.rs @@ -127,7 +127,7 @@ vectors_and_tests!( ExitTest { block_modifier: Box::new(|_, block| { // Duplicate the exit - let exit = block.body().voluntary_exits().get(0).unwrap().clone(); + let exit = block.body().voluntary_exits().first().unwrap().clone(); block.body_mut().voluntary_exits_mut().push(exit).unwrap(); }), expected: Err(BlockProcessingError::ExitInvalid { diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 4aa0eb6f987..e0a80055864 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -8,15 +8,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { "When connected to a beacon node, performs the duties of a staked \ validator (e.g., proposing blocks and attestations).", ) - // This argument is deprecated, use `--beacon-nodes` instead. - .arg( - Arg::with_name("beacon-node") - .long("beacon-node") - .value_name("NETWORK_ADDRESS") - .help("Deprecated. Use --beacon-nodes.") - .takes_value(true) - .conflicts_with("beacon-nodes"), - ) .arg( Arg::with_name("beacon-nodes") .long("beacon-nodes") @@ -45,15 +36,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { api calls only go out to the first available and synced beacon node") .takes_value(false) ) - // This argument is deprecated, use `--beacon-nodes` instead. - .arg( - Arg::with_name("server") - .long("server") - .value_name("NETWORK_ADDRESS") - .help("Deprecated. Use --beacon-nodes.") - .takes_value(true) - .conflicts_with_all(&["beacon-node", "beacon-nodes"]), - ) .arg( Arg::with_name("validators-dir") .long("validators-dir") @@ -80,13 +62,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .conflicts_with("datadir") ) - .arg( - Arg::with_name("delete-lockfiles") - .long("delete-lockfiles") - .help( - "DEPRECATED. This flag does nothing and will be removed in a future release." - ) - ) .arg( Arg::with_name("init-slashing-protection") .long("init-slashing-protection") @@ -106,11 +81,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { will need to be manually added to the validator_definitions.yml file." ) ) - .arg( - Arg::with_name("allow-unsynced") - .long("allow-unsynced") - .help("DEPRECATED: this flag does nothing"), - ) .arg( Arg::with_name("use-long-timeouts") .long("use-long-timeouts") @@ -327,18 +297,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { headers during proposals and will sign over headers. Useful for outsourcing \ execution payload construction during proposals.") .takes_value(false), - ).arg( - Arg::with_name("strict-fee-recipient") - .long("strict-fee-recipient") - .help("[DEPRECATED] If this flag is set, Lighthouse will refuse to sign any block whose \ - `fee_recipient` does not match the `suggested_fee_recipient` sent by this validator. 
diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs
index 4aa0eb6f987..e0a80055864 100644
--- a/validator_client/src/cli.rs
+++ b/validator_client/src/cli.rs
@@ -8,15 +8,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
         "When connected to a beacon node, performs the duties of a staked \
         validator (e.g., proposing blocks and attestations).",
     )
-    // This argument is deprecated, use `--beacon-nodes` instead.
-    .arg(
-        Arg::with_name("beacon-node")
-            .long("beacon-node")
-            .value_name("NETWORK_ADDRESS")
-            .help("Deprecated. Use --beacon-nodes.")
-            .takes_value(true)
-            .conflicts_with("beacon-nodes"),
-    )
     .arg(
         Arg::with_name("beacon-nodes")
             .long("beacon-nodes")
@@ -45,15 +36,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
             api calls only go out to the first available and synced beacon node")
             .takes_value(false)
     )
-    // This argument is deprecated, use `--beacon-nodes` instead.
-    .arg(
-        Arg::with_name("server")
-            .long("server")
-            .value_name("NETWORK_ADDRESS")
-            .help("Deprecated. Use --beacon-nodes.")
-            .takes_value(true)
-            .conflicts_with_all(&["beacon-node", "beacon-nodes"]),
-    )
     .arg(
         Arg::with_name("validators-dir")
             .long("validators-dir")
@@ -80,13 +62,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
             .takes_value(true)
             .conflicts_with("datadir")
     )
-    .arg(
-        Arg::with_name("delete-lockfiles")
-            .long("delete-lockfiles")
-            .help(
-                "DEPRECATED. This flag does nothing and will be removed in a future release."
-            )
-    )
     .arg(
         Arg::with_name("init-slashing-protection")
             .long("init-slashing-protection")
@@ -106,11 +81,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
             will need to be manually added to the validator_definitions.yml file."
         )
     )
-    .arg(
-        Arg::with_name("allow-unsynced")
-            .long("allow-unsynced")
-            .help("DEPRECATED: this flag does nothing"),
-    )
     .arg(
         Arg::with_name("use-long-timeouts")
             .long("use-long-timeouts")
@@ -327,18 +297,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
             headers during proposals and will sign over headers. Useful for outsourcing \
             execution payload construction during proposals.")
             .takes_value(false),
-    ).arg(
-        Arg::with_name("strict-fee-recipient")
-            .long("strict-fee-recipient")
-            .help("[DEPRECATED] If this flag is set, Lighthouse will refuse to sign any block whose \
-                `fee_recipient` does not match the `suggested_fee_recipient` sent by this validator. \
-                This applies to both the normal block proposal flow, as well as block proposals \
-                through the builder API. Proposals through the builder API are more likely to have a \
-                discrepancy in `fee_recipient` so you should be aware of how your connected relay \
-                sends proposer payments before using this flag. If this flag is used, a fee recipient \
-                mismatch in the builder API flow will result in a fallback to the local execution engine \
-                for payload construction, where a strict fee recipient check will still be applied.")
-            .takes_value(false),
     )
     .arg(
         Arg::with_name("builder-registration-timestamp-override")
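With the deprecated `--beacon-node` and `--server` aliases removed above, `--beacon-nodes` is the only way these URLs enter the config. The surviving path in `config.rs` below splits the comma-separated value and parses each entry; a rough standalone sketch, using the `url` crate's `Url` in place of Lighthouse's `SensitiveUrl` and an invented helper name:

```rust
use url::Url;

/// Split a comma-separated `--beacon-nodes` value into parsed URLs.
/// A sketch only: the real code parses into `SensitiveUrl`, not `Url`.
fn parse_beacon_nodes(arg: &str) -> Result<Vec<Url>, String> {
    arg.split(',')
        .map(|s| {
            Url::parse(s.trim())
                .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))
        })
        .collect()
}

fn main() {
    let nodes = parse_beacon_nodes("http://localhost:5052,http://10.0.0.2:5052").unwrap();
    assert_eq!(nodes.len(), 2);
}
```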
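One more note before the remaining hunks: the `#[allow(unused_imports)]` attributes added in the `watch/src/database/mod.rs` diff further below work around a lint false positive on re-exports in Rust 1.75.0-beta.1, keeping the public API intact rather than deleting the `pub use` items. The pattern in miniature (module and function names invented):

```rust
mod database {
    pub fn get_block() -> u32 {
        42
    }
}

// Re-exported for downstream users the lint cannot see; the `allow`
// silences the false positive without dropping the re-export.
#[allow(unused_imports)]
pub use database::get_block;

fn main() {
    assert_eq!(get_block(), 42);
}
```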
diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs
index 09fd612800b..808f1f805bb 100644
--- a/validator_client/src/config.rs
+++ b/validator_client/src/config.rs
@@ -9,7 +9,7 @@ use directory::{
 use eth2::types::Graffiti;
 use sensitive_url::SensitiveUrl;
 use serde::{Deserialize, Serialize};
-use slog::{info, warn, Logger};
+use slog::{info, Logger};
 use std::fs;
 use std::net::IpAddr;
 use std::path::PathBuf;
@@ -174,27 +174,6 @@ impl Config {
                 .collect::<Result<_, _>>()
                 .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?;
         }
-        // To be deprecated.
-        else if let Some(beacon_node) = parse_optional::<String>(cli_args, "beacon-node")? {
-            warn!(
-                log,
-                "The --beacon-node flag is deprecated";
-                "msg" => "please use --beacon-nodes instead"
-            );
-            config.beacon_nodes = vec![SensitiveUrl::parse(&beacon_node)
-                .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?];
-        }
-        // To be deprecated.
-        else if let Some(server) = parse_optional::<String>(cli_args, "server")? {
-            warn!(
-                log,
-                "The --server flag is deprecated";
-                "msg" => "please use --beacon-nodes instead"
-            );
-            config.beacon_nodes = vec![SensitiveUrl::parse(&server)
-                .map_err(|e| format!("Unable to parse beacon node URL: {:?}", e))?];
-        }
-
         if let Some(proposer_nodes) = parse_optional::<String>(cli_args, "proposer_nodes")? {
             config.proposer_nodes = proposer_nodes
                 .split(',')
@@ -203,21 +182,6 @@
                 .map_err(|e| format!("Unable to parse proposer node URL: {:?}", e))?;
         }

-        if cli_args.is_present("delete-lockfiles") {
-            warn!(
-                log,
-                "The --delete-lockfiles flag is deprecated";
-                "msg" => "it is no longer necessary, and no longer has any effect",
-            );
-        }
-
-        if cli_args.is_present("allow-unsynced") {
-            warn!(
-                log,
-                "The --allow-unsynced flag is deprecated";
-                "msg" => "it no longer has any effect",
-            );
-        }
         config.disable_run_on_all = cli_args.is_present("disable-run-on-all");
         config.disable_auto_discover = cli_args.is_present("disable-auto-discover");
         config.init_slashing_protection = cli_args.is_present("init-slashing-protection");
@@ -387,14 +351,6 @@
             );
         }

-        if cli_args.is_present("strict-fee-recipient") {
-            warn!(
-                log,
-                "The flag `--strict-fee-recipient` has been deprecated due to a bug causing \
-                missed proposals. The flag will be ignored."
-            );
-        }
-
         config.enable_latency_measurement_service =
             parse_optional(cli_args, "latency-measurement-service")?.unwrap_or(true);

diff --git a/watch/src/blockprint/mod.rs b/watch/src/blockprint/mod.rs
index b8107e5bf58..532776f425a 100644
--- a/watch/src/blockprint/mod.rs
+++ b/watch/src/blockprint/mod.rs
@@ -17,7 +17,7 @@ pub use config::Config;
 pub use database::{
     get_blockprint_by_root, get_blockprint_by_slot, get_highest_blockprint, get_lowest_blockprint,
     get_unknown_blockprint, get_validators_clients_at_slot, insert_batch_blockprint,
-    list_consensus_clients, WatchBlockprint,
+    WatchBlockprint,
 };

 pub use server::blockprint_routes;

diff --git a/watch/src/database/mod.rs b/watch/src/database/mod.rs
index b9a7a900a59..841ebe5ee7b 100644
--- a/watch/src/database/mod.rs
+++ b/watch/src/database/mod.rs
@@ -26,24 +26,29 @@ pub use self::error::Error;
 pub use self::models::{WatchBeaconBlock, WatchCanonicalSlot, WatchProposerInfo, WatchValidator};
 pub use self::watch_types::{WatchHash, WatchPK, WatchSlot};

+// Clippy has false positives on these re-exports from Rust 1.75.0-beta.1.
+#[allow(unused_imports)]
 pub use crate::block_rewards::{
     get_block_rewards_by_root, get_block_rewards_by_slot, get_highest_block_rewards,
     get_lowest_block_rewards, get_unknown_block_rewards, insert_batch_block_rewards,
     WatchBlockRewards,
 };

+#[allow(unused_imports)]
 pub use crate::block_packing::{
     get_block_packing_by_root, get_block_packing_by_slot, get_highest_block_packing,
     get_lowest_block_packing, get_unknown_block_packing, insert_batch_block_packing,
     WatchBlockPacking,
 };

+#[allow(unused_imports)]
 pub use crate::suboptimal_attestations::{
     get_all_suboptimal_attestations_for_epoch, get_attestation_by_index, get_attestation_by_pubkey,
     get_highest_attestation, get_lowest_attestation, insert_batch_suboptimal_attestations,
     WatchAttestation, WatchSuboptimalAttestation,
 };

+#[allow(unused_imports)]
 pub use crate::blockprint::{
     get_blockprint_by_root, get_blockprint_by_slot, get_highest_blockprint, get_lowest_blockprint,
     get_unknown_blockprint, get_validators_clients_at_slot, insert_batch_blockprint,