diff --git a/.gitignore b/.gitignore index 59be402e1..1e6fed2cd 100644 --- a/.gitignore +++ b/.gitignore @@ -4,5 +4,6 @@ web-root /rust-toolchain /.vscode/ **/db-* -/consensus/tests/testdata/goref-1.6M-tx-10K-blocks.json.gz +/consensus/tests/testdata/dags_for_json_tests/goref-mainnet +/consensus/tests/testdata/dags_for_json_tests/goref-1.6M-tx-10K-blocks analyzer-target diff --git a/Cargo.lock b/Cargo.lock index ca95eab13..00b390ceb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1572,7 +1572,7 @@ dependencies = [ name = "kaspa-utils" version = "0.1.0" dependencies = [ - "async-std", + "async-channel", "triggered", ] @@ -2512,7 +2512,7 @@ version = "0.1.0" dependencies = [ "addresses", "ahash 0.8.2", - "async-std", + "async-channel", "async-trait", "borsh", "cfg-if", @@ -2520,6 +2520,7 @@ dependencies = [ "derive_more", "faster-hex", "futures", + "futures-util", "hashes", "kaspa-core", "kaspa-utils", @@ -2537,7 +2538,7 @@ dependencies = [ name = "rpc-grpc" version = "0.1.0" dependencies = [ - "async-std", + "async-channel", "async-trait", "faster-hex", "futures", @@ -2782,6 +2783,7 @@ dependencies = [ "indexmap", "itertools 0.10.5", "kaspa-core", + "log", "rand 0.8.5", "rand_distr", "rayon", diff --git a/Cargo.toml b/Cargo.toml index 2c902f178..c136f3f0c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,8 +62,8 @@ criterion = { version = "0.4", default-features = false } indexmap = "1.9.1" smallvec = { version = "1.10.0", features = ["serde"] } borsh = "0.9.3" -clap = { version = "4.0.23", features = ["derive"] } -async-std = { version = "1.12.0", features = ['attributes'] } +clap = { version = "4.0.23", features = ["derive", "string"] } +async-channel = "1.8.0" derive_more = { version = "0.99" } log = "0.4" cfg-if = "1.0.0" diff --git a/consensus/core/src/lib.rs b/consensus/core/src/lib.rs index 83d2f8738..4606b1a8b 100644 --- a/consensus/core/src/lib.rs +++ b/consensus/core/src/lib.rs @@ -17,6 +17,7 @@ pub mod merkle; pub mod muhash; pub mod networktype; pub mod notify; +pub mod pruning; pub mod sign; pub mod subnets; pub mod tx; @@ -101,6 +102,8 @@ impl BuildHasher for BlockHasher { } } +pub type BlockLevel = u8; + #[cfg(test)] mod tests { use super::BlockHasher; diff --git a/consensus/core/src/muhash.rs b/consensus/core/src/muhash.rs index a570f9307..dde5b9436 100644 --- a/consensus/core/src/muhash.rs +++ b/consensus/core/src/muhash.rs @@ -7,6 +7,7 @@ use muhash::MuHash; pub trait MuHashExtensions { fn add_transaction(&mut self, tx: &impl VerifiableTransaction, block_daa_score: u64); + fn add_utxo(&mut self, outpoint: &TransactionOutpoint, entry: &UtxoEntry); } impl MuHashExtensions for MuHash { @@ -20,11 +21,15 @@ impl MuHashExtensions for MuHash { for (i, output) in tx.outputs().iter().enumerate() { let outpoint = TransactionOutpoint::new(tx_id, i as u32); let entry = UtxoEntry::new(output.value, output.script_public_key.clone(), block_daa_score, tx.is_coinbase()); - let mut writer = self.add_element_builder(); - write_utxo(&mut writer, &entry, &outpoint); - writer.finalize(); + self.add_utxo(&outpoint, &entry); } } + + fn add_utxo(&mut self, outpoint: &TransactionOutpoint, entry: &UtxoEntry) { + let mut writer = self.add_element_builder(); + write_utxo(&mut writer, entry, outpoint); + writer.finalize(); + } } fn write_utxo(writer: &mut impl HasherBase, entry: &UtxoEntry, outpoint: &TransactionOutpoint) { diff --git a/consensus/core/src/pruning.rs b/consensus/core/src/pruning.rs new file mode 100644 index 000000000..79a5e0809 --- /dev/null +++ b/consensus/core/src/pruning.rs @@ -0,0 +1,5 
@@ +use std::sync::Arc; + +use crate::header::Header; + +pub type PruningPointProof = Vec<Vec<Arc<Header>>>; diff --git a/consensus/src/config.rs b/consensus/src/config.rs new file mode 100644 index 000000000..bf8824506 --- /dev/null +++ b/consensus/src/config.rs @@ -0,0 +1,82 @@ +use std::ops::Deref; + +use crate::{ + constants::perf::{PerfParams, PERF_PARAMS}, + params::Params, +}; + +/// Various consensus configurations all bundled up under a single struct. Use `Config::new` for directly building from +/// a `Params` instance. For anything more complex it is recommended to use `ConfigBuilder`. NOTE: this struct can be +/// implicitly de-refed into `Params` +#[derive(Clone)] +pub struct Config { + /// Consensus params + pub params: Params, + /// Performance params + pub perf: PerfParams, + + // + // Additional consensus configuration arguments which are not consensus sensitive + // + pub process_genesis: bool, + // TODO: + // is_archival: bool, + // enable_sanity_check_pruning_utxoset: bool, +} + +impl Config { + pub fn new(params: Params) -> Self { + Self { params, perf: PERF_PARAMS, process_genesis: true } + } +} + +impl AsRef<Params> for Config { + fn as_ref(&self) -> &Params { + &self.params + } +} + +impl Deref for Config { + type Target = Params; + + fn deref(&self) -> &Self::Target { + &self.params + } +} + +pub struct ConfigBuilder { + config: Config, +} + +impl ConfigBuilder { + pub fn new(params: Params) -> Self { + Self { config: Config::new(params) } + } + + pub fn set_perf_params(mut self, perf: PerfParams) -> Self { + self.config.perf = perf; + self + } + + pub fn edit_consensus_params<F>(mut self, edit_func: F) -> Self + where + F: Fn(&mut Params), + { + edit_func(&mut self.config.params); + self + } + + pub fn skip_proof_of_work(mut self) -> Self { + self.config.params.skip_proof_of_work = true; + self + } + + pub fn skip_adding_genesis(mut self) -> Self { + self.config.process_genesis = false; + self + } + + pub fn build(self) -> Config { + self.config + } +}
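A quick usage sketch of the new `ConfigBuilder`, assembled from the tests further down in this diff (assumes it runs inside the `consensus` crate; the assertion lines are illustrative):

```rust
use crate::config::ConfigBuilder;
use crate::params::MAINNET_PARAMS;

fn example_config() {
    // Build a test config: disable PoW checking and tweak one consensus param.
    let config = ConfigBuilder::new(MAINNET_PARAMS)
        .skip_proof_of_work()
        .edit_consensus_params(|p| p.deflationary_phase_daa_score = 2)
        .build();
    // `Config` derefs into `Params`, so param fields are read directly off it.
    assert_eq!(config.deflationary_phase_daa_score, 2);
    assert!(config.skip_proof_of_work);
    // `process_genesis` defaults to true; `skip_adding_genesis()` clears it,
    // presumably for nodes bootstrapping from a pruning proof instead of genesis.
    assert!(config.process_genesis);
}
```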
diff --git a/consensus/src/consensus/mod.rs b/consensus/src/consensus/mod.rs index 77c9e0e27..3785df3f1 100644 --- a/consensus/src/consensus/mod.rs +++ b/consensus/src/consensus/mod.rs @@ -1,10 +1,8 @@ pub mod test_consensus; use crate::{ - constants::{ - perf::{PerfParams, PERF_PARAMS}, - store_names, - }, + config::Config, + constants::store_names, errors::{BlockProcessResult, RuleError}, model::{ services::{reachability::MTReachabilityService, relations::MTRelationsService, statuses::MTStatusesService}, @@ -14,7 +12,7 @@ use crate::{ block_window_cache::BlockWindowCacheStore, daa::DbDaaStore, depth::DbDepthStore, - ghostdag::DbGhostdagStore, + ghostdag::{DbGhostdagStore, GhostdagData}, headers::DbHeadersStore, headers_selected_tip::DbHeadersSelectedTipStore, past_pruning_points::DbPastPruningPointsStore, @@ -25,40 +23,48 @@ use crate::{ tips::{DbTipsStore, TipsStoreReader}, utxo_diffs::DbUtxoDiffsStore, utxo_multisets::DbUtxoMultisetsStore, - utxo_set::DbUtxoSetStore, + utxo_set::{DbUtxoSetStore, UtxoSetStore}, virtual_state::{DbVirtualStateStore, VirtualStateStoreReader}, DB, }, }, - params::Params, pipeline::{ body_processor::BlockBodyProcessor, - deps_manager::{BlockResultSender, BlockTask}, + deps_manager::{BlockProcessingMessage, BlockResultSender, BlockTask}, header_processor::HeaderProcessor, - virtual_processor::VirtualStateProcessor, + virtual_processor::{errors::VirtualProcessorResult, VirtualStateProcessor}, ProcessingCounters, }, processes::{ block_depth::BlockDepthManager, coinbase::CoinbaseManager, difficulty::DifficultyManager, ghostdag::protocol::GhostdagManager, mass::MassCalculator, parents_builder::ParentsManager, past_median_time::PastMedianTimeManager, pruning::PruningManager, - reachability::inquirer as reachability, transaction_validator::TransactionValidator, traversal_manager::DagTraversalManager, + pruning_proof::PruningProofManager, reachability::inquirer as reachability, transaction_validator::TransactionValidator, + traversal_manager::DagTraversalManager, }, }; use consensus_core::{ api::ConsensusApi, - block::{Block, BlockTemplate}, - blockstatus::BlockStatus, - coinbase::MinerData, - errors::{coinbase::CoinbaseResult, tx::TxResult}, - tx::{MutableTransaction, Transaction}, - BlockHashSet, + header::Header, + muhash::MuHashExtensions, + pruning::PruningPointProof, + tx::{TransactionOutpoint, UtxoEntry}, + { + block::{Block, BlockTemplate}, + blockstatus::BlockStatus, + coinbase::MinerData, + errors::{coinbase::CoinbaseResult, tx::TxResult}, + tx::{MutableTransaction, Transaction}, + BlockHashSet, + }, }; use crossbeam_channel::{unbounded, Receiver, Sender}; use futures_util::future::BoxFuture; use hashes::Hash; +use itertools::Itertools; use kaspa_core::{core::Core, service::Service}; +use muhash::MuHash; use parking_lot::RwLock; -use std::{future::Future, sync::atomic::Ordering}; +use std::{cmp::max, future::Future, sync::atomic::Ordering}; use std::{ ops::DerefMut, sync::Arc, @@ -86,7 +92,7 @@ pub struct Consensus { db: Arc<DB>, // Channels - block_sender: Sender<BlockTask>, + block_sender: Sender<BlockProcessingMessage>, // Processors header_processor: Arc<HeaderProcessor>, @@ -95,13 +101,14 @@ // Stores statuses_store: Arc<RwLock<DbStatusesStore>>, - pub relations_store: Arc<RwLock<DbRelationsStore>>, + pub relations_stores: Arc<RwLock<Vec<DbRelationsStore>>>, reachability_store: Arc<RwLock<DbReachabilityStore>>, pruning_store: Arc<RwLock<DbPruningStore>>, headers_selected_tip_store: Arc<RwLock<DbHeadersSelectedTipStore>>, body_tips_store: Arc<RwLock<DbTipsStore>>, pub headers_store: Arc<DbHeadersStore>, pub block_transactions_store: Arc<DbBlockTransactionsStore>, + pruning_point_utxo_set_store: Arc<DbUtxoSetStore>, // TODO: remove all pub from stores and processors when StoreManager is implemented // Append-only stores @@ -117,17 +124,16 @@ pub(super) past_median_time_manager: PastMedianTimeManager, pub(super) coinbase_manager: CoinbaseManager, pub(super) pruning_manager: PruningManager, + pub(super) pruning_proof_manager: PruningProofManager, // Counters pub counters: Arc<ProcessingCounters>, } impl Consensus { - pub fn new(db: Arc<DB>, params: &Params) -> Self { - Self::with_perf_params(db, params, &PERF_PARAMS) - } - - pub fn with_perf_params(db: Arc<DB>, params: &Params, perf_params: &PerfParams) -> Self { + pub fn new(db: Arc<DB>, config: &Config) -> Self { + let params = &config.params; + let perf_params = &config.perf; // // Stores // @@ -137,17 +143,35 @@ impl Consensus { // Headers let statuses_store = Arc::new(RwLock::new(DbStatusesStore::new(db.clone(), pruning_plus_finality_size_for_caches))); - let relations_store = Arc::new(RwLock::new(DbRelationsStore::new(db.clone(), pruning_plus_finality_size_for_caches))); - let reachability_store = - Arc::new(RwLock::new(DbReachabilityStore::new(db.clone(), pruning_plus_finality_size_for_caches * 2))); - let ghostdag_store = Arc::new(DbGhostdagStore::new(db.clone(), pruning_plus_finality_size_for_caches)); + let relations_stores = Arc::new(RwLock::new( + (0..=params.max_block_level) + .map(|level| { + let cache_size = + max(pruning_plus_finality_size_for_caches.checked_shr(level as u32).unwrap_or(0), 2 * params.pruning_proof_m); + DbRelationsStore::new(db.clone(), level, cache_size) + }) + .collect_vec(), + )); + let reachability_store = 
Arc::new(RwLock::new(DbReachabilityStore::new(db.clone(), pruning_plus_finality_size_for_caches))); + let ghostdag_stores = (0..=params.max_block_level) + .map(|level| { + let cache_size = + max(pruning_plus_finality_size_for_caches.checked_shr(level as u32).unwrap_or(0), 2 * params.pruning_proof_m); + Arc::new(DbGhostdagStore::new(db.clone(), level, cache_size)) + }) + .collect_vec(); + let ghostdag_store = ghostdag_stores[0].clone(); let daa_excluded_store = Arc::new(DbDaaStore::new(db.clone(), pruning_size_for_caches)); let headers_store = Arc::new(DbHeadersStore::new(db.clone(), perf_params.header_data_cache_size)); let depth_store = Arc::new(DbDepthStore::new(db.clone(), perf_params.header_data_cache_size)); // Pruning let pruning_store = Arc::new(RwLock::new(DbPruningStore::new(db.clone()))); let past_pruning_points_store = Arc::new(DbPastPruningPointsStore::new(db.clone(), 4)); + let pruning_point_utxo_set_store = + Arc::new(DbUtxoSetStore::new(db.clone(), perf_params.utxo_set_cache_size, store_names::PRUNING_UTXO_SET)); + // Block data + let block_transactions_store = Arc::new(DbBlockTransactionsStore::new(db.clone(), perf_params.block_data_cache_size)); let utxo_diffs_store = Arc::new(DbUtxoDiffsStore::new(db.clone(), perf_params.block_data_cache_size)); let utxo_multisets_store = Arc::new(DbUtxoMultisetsStore::new(db.clone(), perf_params.block_data_cache_size)); @@ -169,7 +193,9 @@ // let statuses_service = MTStatusesService::new(statuses_store.clone()); - let relations_service = MTRelationsService::new(relations_store.clone()); + let relations_services = + (0..=params.max_block_level).map(|level| MTRelationsService::new(relations_stores.clone(), level)).collect_vec(); + let relations_service = relations_services[0].clone(); let reachability_service = MTReachabilityService::new(reachability_store.clone()); let dag_traversal_manager = DagTraversalManager::new( params.genesis_hash, @@ -199,14 +225,22 @@ reachability_service.clone(), ghostdag_store.clone(), ); - let ghostdag_manager = GhostdagManager::new( - params.genesis_hash, - params.ghostdag_k, - ghostdag_store.clone(), - relations_service.clone(), - headers_store.clone(), - reachability_service.clone(), - ); + let ghostdag_managers = ghostdag_stores + .iter() + .cloned() + .enumerate() + .map(|(level, ghostdag_store)| { + GhostdagManager::new( + params.genesis_hash, + params.ghostdag_k, + ghostdag_store, + relations_services[level].clone(), + headers_store.clone(), + reachability_service.clone(), + ) + }) + .collect_vec(); + let ghostdag_manager = ghostdag_managers[0].clone(); let coinbase_manager = CoinbaseManager::new( params.coinbase_payload_script_public_key_max_len, @@ -243,12 +277,12 @@ params.genesis_hash, headers_store.clone(), reachability_service.clone(), - relations_store.clone(), + relations_service.clone(), ); - let (sender, receiver): (Sender<BlockTask>, Receiver<BlockTask>) = unbounded(); - let (body_sender, body_receiver): (Sender<BlockTask>, Receiver<BlockTask>) = unbounded(); - let (virtual_sender, virtual_receiver): (Sender<BlockTask>, Receiver<BlockTask>) = unbounded(); + let (sender, receiver): (Sender<BlockProcessingMessage>, Receiver<BlockProcessingMessage>) = unbounded(); + let (body_sender, body_receiver): (Sender<BlockProcessingMessage>, Receiver<BlockProcessingMessage>) = unbounded(); + let (virtual_sender, virtual_receiver): (Sender<BlockProcessingMessage>, Receiver<BlockProcessingMessage>) = unbounded(); let counters = Arc::new(ProcessingCounters::default()); @@ -284,26 +318,27 @@ body_sender, block_processors_pool.clone(), params, + config.process_genesis, db.clone(), - relations_store.clone(), + relations_stores.clone(), 
reachability_store.clone(), - ghostdag_store.clone(), + ghostdag_stores.clone(), headers_store.clone(), daa_excluded_store.clone(), statuses_store.clone(), pruning_store.clone(), - depth_store, + depth_store.clone(), headers_selected_tip_store.clone(), block_window_cache_for_difficulty, block_window_cache_for_past_median_time, reachability_service.clone(), - relations_service.clone(), past_median_time_manager.clone(), dag_traversal_manager.clone(), difficulty_manager.clone(), depth_manager.clone(), pruning_manager.clone(), parents_manager.clone(), + ghostdag_managers.clone(), counters.clone(), )); @@ -324,12 +359,14 @@ past_median_time_manager.clone(), params.max_block_mass, params.genesis_hash, + config.process_genesis, )); let virtual_processor = Arc::new(VirtualStateProcessor::new( virtual_receiver, virtual_pool, params, + config.process_genesis, db.clone(), statuses_store.clone(), ghostdag_store.clone(), @@ -337,12 +374,13 @@ daa_excluded_store, block_transactions_store.clone(), pruning_store.clone(), - past_pruning_points_store, + past_pruning_points_store.clone(), body_tips_store.clone(), utxo_diffs_store, utxo_multisets_store, acceptance_data_store, - virtual_stores, + virtual_stores.clone(), + pruning_point_utxo_set_store.clone(), ghostdag_manager.clone(), reachability_service.clone(), relations_service.clone(), @@ -352,10 +390,29 @@ transaction_validator, past_median_time_manager.clone(), pruning_manager.clone(), - parents_manager, + parents_manager.clone(), depth_manager, )); + let pruning_proof_manager = PruningProofManager::new( + db.clone(), + headers_store.clone(), + reachability_store.clone(), + parents_manager, + reachability_service.clone(), + ghostdag_stores, + relations_stores.clone(), + pruning_store.clone(), + past_pruning_points_store, + virtual_stores, + body_tips_store.clone(), + headers_selected_tip_store.clone(), + depth_store, + ghostdag_managers, + params.max_block_level, + params.genesis_hash, + ); + Self { db, block_sender: sender, @@ -363,7 +420,7 @@ body_processor, virtual_processor, statuses_store, - relations_store, + relations_stores, reachability_store, ghostdag_store, pruning_store, @@ -371,6 +428,7 @@ body_tips_store, headers_store, block_transactions_store, + pruning_point_utxo_set_store, statuses_service, relations_service, @@ -381,6 +439,7 @@ past_median_time_manager, coinbase_manager, pruning_manager, + pruning_proof_manager, counters, } @@ -391,6 +450,7 @@ reachability::init(self.reachability_store.write().deref_mut()).unwrap(); // Ensure that genesis was processed + self.header_processor.process_origin_if_needed(); self.header_processor.process_genesis_if_needed(); self.body_processor.process_genesis_if_needed(); self.virtual_processor.process_genesis_if_needed(); @@ -407,13 +467,69 @@ ] } - pub fn validate_and_insert_block(&self, block: Block) -> impl Future<Output = BlockProcessResult<BlockStatus>> { + pub fn validate_and_insert_block( + &self, + block: Block, + update_virtual: bool, + ) -> impl Future<Output = BlockProcessResult<BlockStatus>> { + let (tx, rx): (BlockResultSender, _) = oneshot::channel(); + self.block_sender + .send(BlockProcessingMessage::Process(BlockTask { block, trusted_ghostdag_data: None, update_virtual }, vec![tx])) + .unwrap(); + self.counters.blocks_submitted.fetch_add(1, Ordering::SeqCst); + async { rx.await.unwrap() } + } + + pub fn validate_and_insert_trusted_block( + &self, + block: Block, + ghostdag_data: Arc<GhostdagData>, + ) -> impl Future<Output = BlockProcessResult<BlockStatus>> { let (tx, rx): 
(BlockResultSender, _) = oneshot::channel(); - self.block_sender.send(BlockTask::Process(block, vec![tx])).unwrap(); + self.block_sender + .send(BlockProcessingMessage::Process( + BlockTask { block, trusted_ghostdag_data: Some(ghostdag_data), update_virtual: false }, + vec![tx], + )) + .unwrap(); self.counters.blocks_submitted.fetch_add(1, Ordering::SeqCst); async { rx.await.unwrap() } } + pub fn apply_proof(&self, proof: PruningPointProof, trusted_blocks: &[(Block, GhostdagData)]) { + self.pruning_proof_manager.apply_proof(proof, trusted_blocks) + } + + pub fn import_pruning_points(&self, pruning_points: Vec<Arc<Header>>) { + self.pruning_proof_manager.import_pruning_points(&pruning_points) + } + + pub fn append_imported_pruning_point_utxos( + &self, + outpoint_utxo_pairs: &[(TransactionOutpoint, UtxoEntry)], + current_multiset: &mut MuHash, + ) { + // TODO: Check if a db tx is needed. We probably need some kind of a flag that is set on this function to true, and then + // is set to false on the end of import_pruning_point_utxo_set. On any failure on any of those functions (and also if the + // node starts when the flag is true) the related data will be deleted and the flag will be set to false. + self.pruning_point_utxo_set_store.write_many(outpoint_utxo_pairs).unwrap(); + for (outpoint, entry) in outpoint_utxo_pairs { + current_multiset.add_utxo(outpoint, entry); + } + } + + pub fn import_pruning_point_utxo_set( + &self, + new_pruning_point: Hash, + imported_utxo_multiset: &mut MuHash, + ) -> VirtualProcessorResult<()> { + self.virtual_processor.import_pruning_point_utxo_set(new_pruning_point, imported_utxo_multiset) + } + + pub fn resolve_virtual(&self) { + self.virtual_processor.resolve_virtual() + } + pub fn build_block_template(&self, miner_data: MinerData, txs: Vec<Transaction>) -> Result<BlockTemplate, RuleError> { self.virtual_processor.build_block_template(miner_data, txs) } @@ -431,7 +547,7 @@ } pub fn signal_exit(&self) { - self.block_sender.send(BlockTask::Exit).unwrap(); + self.block_sender.send(BlockProcessingMessage::Exit).unwrap(); } pub fn shutdown(&self, wait_handles: Vec<JoinHandle<()>>) { @@ -451,9 +567,10 @@ impl ConsensusApi for Consensus { fn validate_and_insert_block( self: Arc<Self>, block: Block, - _update_virtual: bool, + update_virtual: bool, ) -> BoxFuture<'static, BlockProcessResult<BlockStatus>> { - Box::pin(self.as_ref().validate_and_insert_block(block)) + let result = self.as_ref().validate_and_insert_block(block, update_virtual); + Box::pin(async move { result.await }) } fn validate_mempool_transaction_and_populate(self: Arc<Self>, transaction: &mut MutableTransaction) -> TxResult<()> { diff --git a/consensus/src/consensus/test_consensus.rs b/consensus/src/consensus/test_consensus.rs index f435e4085..aed229682 100644 --- a/consensus/src/consensus/test_consensus.rs +++ b/consensus/src/consensus/test_consensus.rs @@ -24,6 +24,7 @@ use parking_lot::RwLock; use std::future::Future; use crate::{ + config::Config, constants::TX_VERSION, errors::BlockProcessResult, model::stores::{ @@ -43,23 +44,23 @@ use crate::{ use super::{Consensus, DbGhostdagManager}; pub struct TestConsensus { - consensus: Arc<Consensus>, + pub consensus: Arc<Consensus>, pub params: Params, temp_db_lifetime: TempDbLifetime, } impl TestConsensus { - pub fn new(db: Arc<DB>, params: &Params) -> Self { - Self { consensus: Arc::new(Consensus::new(db, params)), params: params.clone(), temp_db_lifetime: Default::default() } + pub fn new(db: Arc<DB>, config: &Config) -> Self { + Self { consensus: Arc::new(Consensus::new(db, config)), params: config.params.clone(), temp_db_lifetime: 
Default::default() } } pub fn consensus(&self) -> Arc<Consensus> { self.consensus.clone() } - pub fn create_from_temp_db(params: &Params) -> Self { + pub fn create_from_temp_db(config: &Config) -> Self { let (temp_db_lifetime, db) = create_temp_db(); - Self { consensus: Arc::new(Consensus::new(db, params)), params: params.clone(), temp_db_lifetime } + Self { consensus: Arc::new(Consensus::new(db, config)), params: config.params.clone(), temp_db_lifetime } } pub fn build_header_with_parents(&self, hash: Hash, parents: Vec<Hash>) -> Header { @@ -111,7 +112,7 @@ } pub fn validate_and_insert_block(&self, block: Block) -> impl Future<Output = BlockProcessResult<BlockStatus>> { - self.consensus.as_ref().validate_and_insert_block(block) + self.consensus.as_ref().validate_and_insert_block(block, true) } pub fn init(&self) -> Vec<JoinHandle<()>> { @@ -245,6 +246,15 @@ impl Drop for TempDbLifetime { } } +fn create_db_with_custom_options(db_path: PathBuf, create_if_missing: bool) -> Arc<DB> { + let mut opts = rocksdb::Options::default(); + // Set parallelism to 3 as a heuristic for header/block/virtual processing + opts.increase_parallelism(3); + opts.create_if_missing(create_if_missing); + let db = Arc::new(DB::open(&opts, db_path.to_str().unwrap()).unwrap()); + db +} + /// Creates a DB within a temp directory under `<OS SPECIFIC TEMP DIR>/kaspa-rust` /// Callers must keep the `TempDbLifetime` guard for as long as they wish the DB to exist. pub fn create_temp_db() -> (TempDbLifetime, Arc<DB>) { @@ -253,7 +263,7 @@ fs::create_dir_all(kaspa_tempdir.as_path()).unwrap(); let db_tempdir = tempfile::tempdir_in(kaspa_tempdir.as_path()).unwrap(); let db_path = db_tempdir.path().to_owned(); - let db = Arc::new(DB::open_default(db_path.to_str().unwrap()).unwrap()); + let db = create_db_with_custom_options(db_path, true); (TempDbLifetime::new(db_tempdir, Arc::downgrade(&db)), db) } @@ -267,7 +277,7 @@ pub fn create_permanent_db(db_path: String) -> (TempDbLifetime, Arc<DB>) { _ => panic!("{e}"), } } - let db = Arc::new(DB::open_default(db_dir.to_str().unwrap()).unwrap()); + let db = create_db_with_custom_options(db_dir, true); (TempDbLifetime::without_destroy(Arc::downgrade(&db)), db) } @@ -275,7 +285,6 @@ pub fn create_permanent_db(db_path: String) -> (TempDbLifetime, Arc<DB>) { /// Callers must keep the `TempDbLifetime` guard for as long as they wish the DB instance to exist. 
pub fn load_existing_db(db_path: String) -> (TempDbLifetime, Arc<DB>) { let db_dir = PathBuf::from(db_path); - assert!(db_dir.is_dir(), "DB directory {db_dir:?} is expected to exist"); - let db = Arc::new(DB::open_default(db_dir.to_str().unwrap()).unwrap()); + let db = create_db_with_custom_options(db_dir, false); (TempDbLifetime::without_destroy(Arc::downgrade(&db)), db) } diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs index 506ccb14f..f6d8ec989 100644 --- a/consensus/src/lib.rs +++ b/consensus/src/lib.rs @@ -2,6 +2,7 @@ // TODO: remove this #![allow(dead_code)] +pub mod config; pub mod consensus; pub mod constants; pub mod errors; diff --git a/consensus/src/model/services/relations.rs b/consensus/src/model/services/relations.rs index 6fff8c094..1e0d19a24 100644 --- a/consensus/src/model/services/relations.rs +++ b/consensus/src/model/services/relations.rs @@ -6,25 +6,26 @@ use std::sync::Arc; /// Multi-threaded block-relations service imp #[derive(Clone)] pub struct MTRelationsService<T: RelationsStoreReader> { - store: Arc<RwLock<T>>, + store: Arc<RwLock<Vec<T>>>, + level: usize, } impl<T: RelationsStoreReader> MTRelationsService<T> { - pub fn new(store: Arc<RwLock<T>>) -> Self { - Self { store } + pub fn new(store: Arc<RwLock<Vec<T>>>, level: u8) -> Self { + Self { store, level: level as usize } } } impl<T: RelationsStoreReader> RelationsStoreReader for MTRelationsService<T> { fn get_parents(&self, hash: Hash) -> Result<BlockHashes, StoreError> { - self.store.read().get_parents(hash) + self.store.read()[self.level].get_parents(hash) } fn get_children(&self, hash: Hash) -> Result<BlockHashes, StoreError> { - self.store.read().get_children(hash) + self.store.read()[self.level].get_children(hash) } fn has(&self, hash: Hash) -> Result<bool, StoreError> { - self.store.read().has(hash) + self.store.read()[self.level].has(hash) } } diff --git a/consensus/src/model/stores/acceptance_data.rs b/consensus/src/model/stores/acceptance_data.rs index 557e134da..bc3f43b43 100644 --- a/consensus/src/model/stores/acceptance_data.rs +++ b/consensus/src/model/stores/acceptance_data.rs @@ -33,7 +33,7 @@ pub struct DbAcceptanceDataStore { impl DbAcceptanceDataStore { pub fn new(db: Arc<DB>, cache_size: u64) -> Self { - Self { db: Arc::clone(&db), access: CachedDbAccess::new(Arc::clone(&db), cache_size, STORE_PREFIX) } + Self { db: Arc::clone(&db), access: CachedDbAccess::new(Arc::clone(&db), cache_size, STORE_PREFIX.to_vec()) } } pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { diff --git a/consensus/src/model/stores/block_transactions.rs b/consensus/src/model/stores/block_transactions.rs index 3556081be..b2b7e4c6e 100644 --- a/consensus/src/model/stores/block_transactions.rs +++ b/consensus/src/model/stores/block_transactions.rs @@ -29,7 +29,7 @@ pub struct DbBlockTransactionsStore { impl DbBlockTransactionsStore { pub fn new(db: Arc<DB>, cache_size: u64) -> Self { - Self { db: Arc::clone(&db), access: CachedDbAccess::new(Arc::clone(&db), cache_size, STORE_PREFIX) } + Self { db: Arc::clone(&db), access: CachedDbAccess::new(Arc::clone(&db), cache_size, STORE_PREFIX.to_vec()) } } pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { diff --git a/consensus/src/model/stores/daa.rs b/consensus/src/model/stores/daa.rs index b642d17f9..2da043050 100644 --- a/consensus/src/model/stores/daa.rs +++ b/consensus/src/model/stores/daa.rs @@ -29,7 +29,7 @@ pub struct DbDaaStore { impl DbDaaStore { pub fn new(db: Arc<DB>, cache_size: u64) -> Self { - Self { db: Arc::clone(&db), access: CachedDbAccess::new(db, cache_size, STORE_PREFIX) } + Self { db: Arc::clone(&db), access: CachedDbAccess::new(db, cache_size, STORE_PREFIX.to_vec()) } } pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { 
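On the `STORE_PREFIX.to_vec()` churn above: prefixes move from `&'static [u8]` to owned `Vec<u8>` so that per-level stores (ghostdag, relations) can derive a distinct keyspace per block level at runtime. A minimal sketch of the scheme, mirroring the `DbGhostdagStore::new` and `DbRelationsStore::new` changes below (`level_prefix` is a hypothetical helper, not part of this PR):

```rust
/// Append the level byte to a base prefix so each block level gets its own
/// keyspace within the same RocksDB instance.
fn level_prefix(base: &[u8], level: u8) -> Vec<u8> {
    base.iter().copied().chain(level.to_le_bytes()).collect()
}

fn main() {
    let base = b"block-ghostdag-data";
    let level2 = level_prefix(base, 2);
    // The derived prefix is the base bytes followed by the level byte.
    assert_eq!(&level2[..base.len()], base);
    assert_eq!(level2[base.len()], 2);
    // Stores of different levels never collide.
    assert_ne!(level_prefix(base, 0), level2);
}
```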
diff --git a/consensus/src/model/stores/database/access.rs b/consensus/src/model/stores/database/access.rs index 329d04951..0a2db3cdf 100644 --- a/consensus/src/model/stores/database/access.rs +++ b/consensus/src/model/stores/database/access.rs @@ -1,7 +1,8 @@ use super::prelude::{Cache, DbKey, DbWriter}; use crate::model::stores::{errors::StoreError, DB}; +use rocksdb::{Direction, IteratorMode, ReadOptions}; use serde::{de::DeserializeOwned, Serialize}; -use std::{collections::hash_map::RandomState, hash::BuildHasher, sync::Arc}; +use std::{collections::hash_map::RandomState, error::Error, hash::BuildHasher, sync::Arc}; /// A concurrent DB store access with typed caching. #[derive(Clone)] @@ -16,7 +17,7 @@ where cache: Cache<TKey, TData, S>, // DB bucket/path - prefix: &'static [u8], + prefix: Vec<u8>, } impl<TKey, TData, S> CachedDbAccess<TKey, TData, S> where @@ -25,7 +26,7 @@ TData: Clone + Send + Sync, S: BuildHasher + Default, { - pub fn new(db: Arc<DB>, cache_size: u64, prefix: &'static [u8]) -> Self { + pub fn new(db: Arc<DB>, cache_size: u64, prefix: Vec<u8>) -> Self { Self { db, cache: Cache::new(cache_size), prefix } } @@ -40,7 +41,7 @@ where TKey: Copy + AsRef<[u8]>, { - Ok(self.cache.contains_key(&key) || self.db.get_pinned(DbKey::new(self.prefix, key))?.is_some()) + Ok(self.cache.contains_key(&key) || self.db.get_pinned(DbKey::new(&self.prefix, key))?.is_some()) } pub fn read(&self, key: TKey) -> Result<TData, StoreError> @@ -51,7 +52,7 @@ if let Some(data) = self.cache.get(&key) { Ok(data) } else { - let db_key = DbKey::new(self.prefix, key); + let db_key = DbKey::new(&self.prefix, key); if let Some(slice) = self.db.get_pinned(&db_key)? { let data: TData = bincode::deserialize(&slice)?; self.cache.insert(key, data.clone()); @@ -62,6 +63,23 @@ } } + pub fn iterator(&self) -> impl Iterator<Item = Result<(Box<[u8]>, TData), Box<dyn Error>>> + '_ + where + TKey: Copy + AsRef<[u8]> + ToString, + TData: DeserializeOwned, // We need `DeserializeOwned` since the slice coming from `db.get_pinned` has short lifetime + { + let db_key = DbKey::prefix_only(&self.prefix); + let mut read_opts = ReadOptions::default(); + read_opts.set_iterate_range(rocksdb::PrefixRange(db_key.as_ref())); + self.db.iterator_opt(IteratorMode::From(db_key.as_ref(), Direction::Forward), read_opts).map(|iter_result| match iter_result { + Ok((key, data_bytes)) => match bincode::deserialize(&data_bytes) { + Ok(data) => Ok((key[self.prefix.len() + 1..].into(), data)), + Err(e) => Err(e.into()), + }, + Err(e) => Err(e.into()), + }) + } + pub fn write(&self, mut writer: impl DbWriter, key: TKey, data: TData) -> Result<(), StoreError> where TKey: Copy + AsRef<[u8]>, { let bin_data = bincode::serialize(&data)?; self.cache.insert(key, data); - writer.put(DbKey::new(self.prefix, key), bin_data)?; + writer.put(DbKey::new(&self.prefix, key), bin_data)?; Ok(()) } @@ -86,7 +104,7 @@ where self.cache.insert_many(iter); for (key, data) in iter_clone { let bin_data = bincode::serialize(&data)?; - writer.put(DbKey::new(self.prefix, key), bin_data)?; + writer.put(DbKey::new(&self.prefix, key), bin_data)?; } Ok(()) } @@ -96,7 +114,7 @@ where TKey: Copy + AsRef<[u8]>, { self.cache.remove(&key); - writer.delete(DbKey::new(self.prefix, key))?; + writer.delete(DbKey::new(&self.prefix, key))?; Ok(()) } @@ -107,7 +125,7 @@ where let key_iter_clone = key_iter.clone(); self.cache.remove_many(key_iter); for key in key_iter_clone { - writer.delete(DbKey::new(self.prefix, key))?; + writer.delete(DbKey::new(&self.prefix, key))?; } Ok(()) } }
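The new `iterator` above recovers the logical key by slicing off `self.prefix.len() + 1` bytes, which implies `DbKey` lays keys out as prefix, one separator byte, then key. A self-contained sketch of that assumed layout (`make_db_key` is hypothetical and the separator byte is an assumption for illustration):

```rust
/// Hypothetical mirror of the `DbKey` layout assumed by `iterator`:
/// bucket prefix | separator byte | per-bucket key bytes.
fn make_db_key(prefix: &[u8], key: &[u8]) -> Vec<u8> {
    let mut buf = Vec::with_capacity(prefix.len() + 1 + key.len());
    buf.extend_from_slice(prefix);
    buf.push(b'/'); // separator byte: an assumption, not the PR's constant
    buf.extend_from_slice(key);
    buf
}

fn main() {
    let prefix = b"block-ghostdag-data";
    let hash = [0xABu8; 32];
    let full = make_db_key(prefix, &hash);
    // Recover the key exactly the way `CachedDbAccess::iterator` does.
    assert_eq!(&full[prefix.len() + 1..], &hash[..]);
}
```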
diff --git a/consensus/src/model/stores/depth.rs b/consensus/src/model/stores/depth.rs index 3e635aae6..3f73a1177 100644 --- a/consensus/src/model/stores/depth.rs +++ b/consensus/src/model/stores/depth.rs @@ -37,7 +37,7 @@ pub struct DbDepthStore { impl DbDepthStore { pub fn new(db: Arc<DB>, cache_size: u64) -> Self { - Self { db: Arc::clone(&db), access: CachedDbAccess::new(db, cache_size, STORE_PREFIX) } + Self { db: Arc::clone(&db), access: CachedDbAccess::new(db, cache_size, STORE_PREFIX.to_vec()) } } pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { diff --git a/consensus/src/model/stores/ghostdag.rs b/consensus/src/model/stores/ghostdag.rs index 8ee0dc14e..4f2fc1bff 100644 --- a/consensus/src/model/stores/ghostdag.rs +++ b/consensus/src/model/stores/ghostdag.rs @@ -2,7 +2,7 @@ use super::database::prelude::{BatchDbWriter, CachedDbAccess, DbKey, DirectDbWri use super::{errors::StoreError, DB}; use crate::processes::ghostdag::ordering::SortableBlock; use consensus_core::{blockhash::BlockHashes, BlueWorkType}; -use consensus_core::{BlockHashMap, BlockHasher, HashMapCustomHasher}; +use consensus_core::{BlockHashMap, BlockHasher, BlockLevel, HashMapCustomHasher}; use hashes::Hash; use itertools::EitherOrBoth::{Both, Left, Right}; @@ -207,21 +207,26 @@ const COMPACT_STORE_PREFIX: &[u8] = b"compact-block-ghostdag-data"; #[derive(Clone)] pub struct DbGhostdagStore { db: Arc<DB>, + level: BlockLevel, access: CachedDbAccess<Hash, Arc<GhostdagData>, BlockHasher>, compact_access: CachedDbAccess<Hash, CompactGhostdagData, BlockHasher>, } impl DbGhostdagStore { - pub fn new(db: Arc<DB>, cache_size: u64) -> Self { + pub fn new(db: Arc<DB>, level: BlockLevel, cache_size: u64) -> Self { + let lvl_bytes = level.to_le_bytes(); + let prefix = STORE_PREFIX.iter().copied().chain(lvl_bytes).collect_vec(); + let compact_prefix = COMPACT_STORE_PREFIX.iter().copied().chain(lvl_bytes).collect_vec(); Self { db: Arc::clone(&db), - access: CachedDbAccess::new(db.clone(), cache_size, STORE_PREFIX), - compact_access: CachedDbAccess::new(db, cache_size, COMPACT_STORE_PREFIX), + level, + access: CachedDbAccess::new(db.clone(), cache_size, prefix), + compact_access: CachedDbAccess::new(db, cache_size, compact_prefix), } } pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { - Self::new(Arc::clone(&self.db), cache_size) + Self::new(Arc::clone(&self.db), self.level, cache_size) } pub fn insert_batch(&self, batch: &mut WriteBatch, hash: Hash, data: &Arc<GhostdagData>) -> Result<(), StoreError> { diff --git a/consensus/src/model/stores/headers.rs b/consensus/src/model/stores/headers.rs index 7376df87f..76d514db7 100644 --- a/consensus/src/model/stores/headers.rs +++ b/consensus/src/model/stores/headers.rs @@ -2,10 +2,10 @@ use std::sync::Arc; use super::{ database::prelude::{BatchDbWriter, CachedDbAccess, DirectDbWriter}, - errors::StoreError, + errors::{StoreError, StoreResult}, DB, }; -use consensus_core::{header::Header, BlockHasher}; +use consensus_core::{header::Header, BlockHasher, BlockLevel}; use hashes::Hash; use rocksdb::WriteBatch; use serde::{Deserialize, Serialize}; @@ -23,12 +23,12 @@ pub trait HeaderStoreReader { #[derive(Clone, Serialize, Deserialize)] pub struct HeaderWithBlockLevel { pub header: Arc<Header>, - pub block_level: u8, + pub block_level: BlockLevel, } pub trait HeaderStore: HeaderStoreReader { // This is append only - fn insert(&self, hash: Hash, header: Arc<Header>, block_level: u8) -> Result<(), StoreError>; + fn insert(&self, hash: Hash, header: Arc<Header>, block_level: BlockLevel) -> Result<(), StoreError>; } const HEADERS_STORE_PREFIX: &[u8] = b"headers"; @@ -54,8 +54,8 @@ impl DbHeadersStore { pub fn new(db: Arc<DB>, cache_size: u64) -> Self { Self { db: Arc::clone(&db), - compact_headers_access: CachedDbAccess::new(Arc::clone(&db), cache_size, COMPACT_HEADER_DATA_STORE_PREFIX), - headers_access: CachedDbAccess::new(db, cache_size, HEADERS_STORE_PREFIX), + compact_headers_access: CachedDbAccess::new(Arc::clone(&db), cache_size, COMPACT_HEADER_DATA_STORE_PREFIX.to_vec()), + headers_access: CachedDbAccess::new(db, cache_size, HEADERS_STORE_PREFIX.to_vec()), } } @@ -63,7 +63,17 @@ impl DbHeadersStore { Self::new(Arc::clone(&self.db), cache_size) } - pub fn insert_batch(&self, batch: &mut WriteBatch, hash: Hash, header: Arc<Header>, block_level: u8) -> Result<(), StoreError> { + pub fn has(&self, hash: Hash) -> StoreResult<bool> { + self.headers_access.has(hash) + } + + pub fn insert_batch( + &self, + batch: &mut WriteBatch, + hash: Hash, + header: Arc<Header>, + block_level: BlockLevel, + ) -> Result<(), StoreError> { if self.headers_access.has(hash)? { return Err(StoreError::KeyAlreadyExists(hash.to_string())); } diff --git a/consensus/src/model/stores/past_pruning_points.rs b/consensus/src/model/stores/past_pruning_points.rs index c3e3515ca..20fb8c62d 100644 --- a/consensus/src/model/stores/past_pruning_points.rs +++ b/consensus/src/model/stores/past_pruning_points.rs @@ -49,7 +49,7 @@ pub struct DbPastPruningPointsStore { impl DbPastPruningPointsStore { pub fn new(db: Arc<DB>, cache_size: u64) -> Self { - Self { db: Arc::clone(&db), access: CachedDbAccess::new(Arc::clone(&db), cache_size, STORE_PREFIX) } + Self { db: Arc::clone(&db), access: CachedDbAccess::new(Arc::clone(&db), cache_size, STORE_PREFIX.to_vec()) } } pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { diff --git a/consensus/src/model/stores/reachability.rs b/consensus/src/model/stores/reachability.rs index 54f4e418b..ba0ca24fa 100644 --- a/consensus/src/model/stores/reachability.rs +++ b/consensus/src/model/stores/reachability.rs @@ -68,7 +68,7 @@ impl DbReachabilityStore { pub fn new(db: Arc<DB>, cache_size: u64) -> Self { Self { db: Arc::clone(&db), - access: CachedDbAccess::new(Arc::clone(&db), cache_size, STORE_PREFIX), + access: CachedDbAccess::new(Arc::clone(&db), cache_size, STORE_PREFIX.to_vec()), reindex_root: CachedDbItem::new(db, REINDEX_ROOT_KEY), } } diff --git a/consensus/src/model/stores/relations.rs b/consensus/src/model/stores/relations.rs index d87073cff..877e0670b 100644 --- a/consensus/src/model/stores/relations.rs +++ b/consensus/src/model/stores/relations.rs @@ -3,9 +3,9 @@ use super::{ errors::StoreError, DB, }; -use consensus_core::{blockhash::BlockHashes, BlockHashMap, BlockHasher, HashMapCustomHasher}; +use consensus_core::{blockhash::BlockHashes, BlockHashMap, BlockHasher, BlockLevel, HashMapCustomHasher}; use hashes::Hash; -use parking_lot::{RwLock, RwLockWriteGuard}; +use itertools::Itertools; use rocksdb::WriteBatch; use std::{collections::hash_map::Entry::Vacant, sync::Arc}; @@ -31,25 +31,29 @@ const CHILDREN_PREFIX: &[u8] = b"block-children"; #[derive(Clone)] pub struct DbRelationsStore { db: Arc<DB>, + level: BlockLevel, parents_access: CachedDbAccess<Hash, Arc<Vec<Hash>>, BlockHasher>, children_access: CachedDbAccess<Hash, Arc<Vec<Hash>>, BlockHasher>, } impl DbRelationsStore { - pub fn new(db: Arc<DB>, cache_size: u64) -> Self { + pub fn new(db: Arc<DB>, level: BlockLevel, cache_size: u64) -> Self { + let lvl_bytes = level.to_le_bytes(); + let parents_prefix = PARENTS_PREFIX.iter().copied().chain(lvl_bytes).collect_vec(); + let children_prefix = CHILDREN_PREFIX.iter().copied().chain(lvl_bytes).collect_vec(); Self { db: Arc::clone(&db), - parents_access: CachedDbAccess::new(Arc::clone(&db), cache_size, PARENTS_PREFIX), - children_access: CachedDbAccess::new(db, cache_size, CHILDREN_PREFIX), + level, + parents_access: CachedDbAccess::new(Arc::clone(&db), cache_size, parents_prefix), + children_access: CachedDbAccess::new(db, cache_size, children_prefix), } } pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { - Self::new(Arc::clone(&self.db), cache_size) + Self::new(Arc::clone(&self.db), self.level, cache_size) } - // Should be kept private and used only through `RelationsStoreBatchExtensions.insert_batch` - fn insert_batch(&mut self, batch: &mut WriteBatch, hash: Hash, parents: BlockHashes) -> Result<(), StoreError> { + pub fn insert_batch(&mut self, batch: &mut WriteBatch, hash: Hash, parents: BlockHashes) -> Result<(), StoreError> { if self.has(hash)? 
{ return Err(StoreError::KeyAlreadyExists(hash.to_string())); } @@ -71,28 +75,6 @@ impl DbRelationsStore { } } -pub trait RelationsStoreBatchExtensions { - fn insert_batch( - &self, - batch: &mut WriteBatch, - hash: Hash, - parents: BlockHashes, - ) -> Result<RwLockWriteGuard<DbRelationsStore>, StoreError>; -} - -impl RelationsStoreBatchExtensions for Arc<RwLock<DbRelationsStore>> { - fn insert_batch( - &self, - batch: &mut WriteBatch, - hash: Hash, - parents: BlockHashes, - ) -> Result<RwLockWriteGuard<DbRelationsStore>, StoreError> { - let mut write_guard = self.write(); - write_guard.insert_batch(batch, hash, parents)?; - Ok(write_guard) - } -} - impl RelationsStoreReader for DbRelationsStore { fn get_parents(&self, hash: Hash) -> Result<BlockHashes, StoreError> { self.parents_access.read(hash) @@ -209,7 +191,7 @@ mod tests { fn test_db_relations_store() { let db_tempdir = tempfile::tempdir().unwrap(); let db = Arc::new(DB::open_default(db_tempdir.path().to_owned().to_str().unwrap()).unwrap()); - test_relations_store(DbRelationsStore::new(db, 2)); + test_relations_store(DbRelationsStore::new(db, 0, 2)); } fn test_relations_store<T: RelationsStore>(mut store: T) { diff --git a/consensus/src/model/stores/statuses.rs b/consensus/src/model/stores/statuses.rs index 29609ba97..26b942537 100644 --- a/consensus/src/model/stores/statuses.rs +++ b/consensus/src/model/stores/statuses.rs @@ -34,7 +34,7 @@ pub struct DbStatusesStore { impl DbStatusesStore { pub fn new(db: Arc<DB>, cache_size: u64) -> Self { - Self { db: Arc::clone(&db), access: CachedDbAccess::new(db, cache_size, STORE_PREFIX) } + Self { db: Arc::clone(&db), access: CachedDbAccess::new(db, cache_size, STORE_PREFIX.to_vec()) } } pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { diff --git a/consensus/src/model/stores/utxo_diffs.rs b/consensus/src/model/stores/utxo_diffs.rs index 890c4bf09..3b327e5d1 100644 --- a/consensus/src/model/stores/utxo_diffs.rs +++ b/consensus/src/model/stores/utxo_diffs.rs @@ -34,7 +34,7 @@ pub struct DbUtxoDiffsStore { impl DbUtxoDiffsStore { pub fn new(db: Arc<DB>, cache_size: u64) -> Self { - Self { db: Arc::clone(&db), access: CachedDbAccess::new(Arc::clone(&db), cache_size, STORE_PREFIX) } + Self { db: Arc::clone(&db), access: CachedDbAccess::new(Arc::clone(&db), cache_size, STORE_PREFIX.to_vec()) } } pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { diff --git a/consensus/src/model/stores/utxo_multisets.rs b/consensus/src/model/stores/utxo_multisets.rs index 90a415d36..f2641d179 100644 --- a/consensus/src/model/stores/utxo_multisets.rs +++ b/consensus/src/model/stores/utxo_multisets.rs @@ -29,7 +29,7 @@ pub struct DbUtxoMultisetsStore { impl DbUtxoMultisetsStore { pub fn new(db: Arc<DB>, cache_size: u64) -> Self { - Self { db: Arc::clone(&db), access: CachedDbAccess::new(Arc::clone(&db), cache_size, STORE_PREFIX) } + Self { db: Arc::clone(&db), access: CachedDbAccess::new(Arc::clone(&db), cache_size, STORE_PREFIX.to_vec()) } } pub fn clone_with_new_cache(&self, cache_size: u64) -> Self {
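The `utxo_set.rs` changes below add `write_many` plus an outpoint iterator over fixed-width keys. A self-contained sketch of that key shape under stated assumptions (`UTXO_KEY_SIZE = HASH_SIZE + size_of::<u32>()`; field order and endianness here are illustrative, the real conversions live in `UtxoKey`):

```rust
const HASH_SIZE: usize = 32; // transaction id width
const UTXO_KEY_SIZE: usize = HASH_SIZE + std::mem::size_of::<u32>();

/// Pack (txid, output index) into the fixed-width key layout.
fn utxo_key(txid: [u8; HASH_SIZE], index: u32) -> [u8; UTXO_KEY_SIZE] {
    let mut key = [0u8; UTXO_KEY_SIZE];
    key[..HASH_SIZE].copy_from_slice(&txid);
    key[HASH_SIZE..].copy_from_slice(&index.to_be_bytes()); // endianness is an assumption
    key
}

fn main() {
    let key = utxo_key([7u8; HASH_SIZE], 3);
    // Round-trip the outpoint the way `DbUtxoSetStore::iterator` must when it
    // converts raw key bytes back into a transaction outpoint.
    let txid: [u8; HASH_SIZE] = key[..HASH_SIZE].try_into().unwrap();
    let index = u32::from_be_bytes(key[HASH_SIZE..].try_into().unwrap());
    assert_eq!((txid, index), ([7u8; HASH_SIZE], 3));
}
```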
diff --git a/consensus/src/model/stores/utxo_set.rs b/consensus/src/model/stores/utxo_set.rs index ee561fe6b..a24b41bbd 100644 --- a/consensus/src/model/stores/utxo_set.rs +++ b/consensus/src/model/stores/utxo_set.rs @@ -12,11 +12,10 @@ use consensus_core::{ }; use hashes::Hash; use rocksdb::WriteBatch; -use std::{fmt::Display, sync::Arc}; +use std::{error::Error, fmt::Display, sync::Arc}; pub trait UtxoSetStoreReader { fn get(&self, outpoint: &TransactionOutpoint) -> Result<Arc<UtxoEntry>, StoreError>; - // TODO: UTXO entry iterator } pub trait UtxoSetStore: UtxoSetStoreReader { @@ -24,6 +23,7 @@ /// Note we define `self` as `mut` in order to require write access even though the compiler does not require it. /// This is because concurrent readers can interfere with cache consistency. fn write_diff(&mut self, utxo_diff: &UtxoDiff) -> Result<(), StoreError>; + fn write_many(&self, utxos: &[(TransactionOutpoint, UtxoEntry)]) -> Result<(), StoreError>; } pub const UTXO_KEY_SIZE: usize = hashes::HASH_SIZE + std::mem::size_of::<TransactionIndexType>(); @@ -72,7 +72,7 @@ pub struct DbUtxoSetStore { impl DbUtxoSetStore { pub fn new(db: Arc<DB>, cache_size: u64, prefix: &'static [u8]) -> Self { - Self { db: Arc::clone(&db), access: CachedDbAccess::new(Arc::clone(&db), cache_size, prefix), prefix } + Self { db: Arc::clone(&db), access: CachedDbAccess::new(Arc::clone(&db), cache_size, prefix.to_vec()), prefix } } pub fn clone_with_new_cache(&self, cache_size: u64) -> Self { @@ -86,6 +86,20 @@ impl DbUtxoSetStore { self.access.write_many(&mut writer, &mut utxo_diff.added().iter().map(|(o, e)| ((*o).into(), Arc::new(e.clone()))))?; Ok(()) } + + pub fn iterator(&self) -> impl Iterator<Item = Result<(TransactionOutpoint, Arc<UtxoEntry>), Box<dyn Error>>> + '_ { + self.access.iterator().map(|iter_result| match iter_result { + Ok((key_bytes, utxo_entry)) => match <[u8; UTXO_KEY_SIZE]>::try_from(&key_bytes[..]) { + Ok(utxo_key_slice) => { + let utxo_key = UtxoKey(utxo_key_slice); + let outpoint: TransactionOutpoint = utxo_key.into(); + Ok((outpoint, utxo_entry)) + } + Err(e) => Err(e.into()), + }, + Err(e) => Err(e), + }) + } } impl UtxoView for DbUtxoSetStore { @@ -107,6 +121,12 @@ impl UtxoSetStore for DbUtxoSetStore { self.access.write_many(&mut writer, &mut utxo_diff.added().iter().map(|(o, e)| ((*o).into(), Arc::new(e.clone()))))?; Ok(()) } + + fn write_many(&self, utxos: &[(TransactionOutpoint, UtxoEntry)]) -> Result<(), StoreError> { + let mut writer = DirectDbWriter::new(&self.db); + self.access.write_many(&mut writer, &mut utxos.iter().map(|(o, e)| ((*o).into(), Arc::new(e.clone()))))?; + Ok(()) + } } #[cfg(test)] diff --git a/consensus/src/model/stores/virtual_state.rs b/consensus/src/model/stores/virtual_state.rs index 80f93aeff..660426728 100644 --- a/consensus/src/model/stores/virtual_state.rs +++ b/consensus/src/model/stores/virtual_state.rs @@ -22,7 +22,7 @@ pub struct VirtualState { pub bits: u32, pub past_median_time: u64, pub multiset: MuHash, - pub utxo_diff: UtxoDiff, + pub utxo_diff: UtxoDiff, // This is the UTXO diff from the selected tip to the virtual, i.e., applying this diff to the past UTXO set of the selected tip yields the virtual UTXO set. pub accepted_tx_ids: Vec<TransactionId>, // TODO: consider saving `accepted_id_merkle_root` directly pub mergeset_rewards: BlockHashMap<BlockRewardData>, pub mergeset_non_daa: BlockHashSet, diff --git a/consensus/src/params.rs b/consensus/src/params.rs index ed994e255..ea2fa971a 100644 --- a/consensus/src/params.rs +++ b/consensus/src/params.rs @@ -1,7 +1,11 @@ +use consensus_core::BlockLevel; use hashes::{Hash, HASH_SIZE}; use crate::model::stores::ghostdag::KType; +/// Consensus parameters. Contains settings and configurations which are consensus-sensitive. +/// Changing any of these on a network node will prevent it from reaching consensus +/// with the other, unmodified nodes. #[derive(Clone)] pub struct Params { pub genesis_hash: Hash, @@ -30,22 +34,16 @@ pub struct Params { pub pre_deflationary_phase_base_subsidy: u64, pub coinbase_maturity: u64, pub skip_proof_of_work: bool, - pub max_block_level: u8, -} - -impl Params { - /// Clones the params instance and sets `skip_proof_of_work = true`. - /// Should be used for testing purposes only. 
- pub fn clone_with_skip_pow(&self) -> Self { - let mut cloned_params = self.clone(); - cloned_params.skip_proof_of_work = true; - cloned_params - } + pub max_block_level: BlockLevel, + pub pruning_proof_m: u64, } const DEFAULT_GHOSTDAG_K: KType = 18; pub const MAINNET_PARAMS: Params = Params { - genesis_hash: Hash::from_bytes([1u8; HASH_SIZE]), // TODO: Use real mainnet genesis here + genesis_hash: Hash::from_bytes([ + 0x58, 0xc2, 0xd4, 0x19, 0x9e, 0x21, 0xf9, 0x10, 0xd1, 0x57, 0x1d, 0x11, 0x49, 0x69, 0xce, 0xce, 0xf4, 0x8f, 0x9, 0xf9, 0x34, + 0xd4, 0x2c, 0xcb, 0x6a, 0x28, 0x1a, 0x15, 0x86, 0x8f, 0x29, 0x99, + ]), ghostdag_k: DEFAULT_GHOSTDAG_K, timestamp_deviation_tolerance: 132, target_time_per_block: 1000, @@ -61,7 +59,7 @@ pub const MAINNET_PARAMS: Params = Params { max_coinbase_payload_len: 204, // This is technically a soft fork from the Go implementation since kaspad's consensus doesn't - // check these rules, but in practice it's encorced by the network layer that limits the message + // check these rules, but in practice it's enforced by the network layer that limits the message // size to 1 GB. // These values should be lowered to more reasonable amounts on the next planned HF/SF. max_tx_inputs: 1_000_000_000, @@ -85,10 +83,11 @@ pub const MAINNET_PARAMS: Params = Params { coinbase_maturity: 100, skip_proof_of_work: false, max_block_level: 225, + pruning_proof_m: 1000, }; pub const DEVNET_PARAMS: Params = Params { - genesis_hash: Hash::from_bytes([1u8; HASH_SIZE]), // TODO: Use real mainnet genesis here + genesis_hash: Hash::from_bytes([1u8; HASH_SIZE]), // TODO: Use golang devnet genesis here ghostdag_k: DEFAULT_GHOSTDAG_K, timestamp_deviation_tolerance: 132, target_time_per_block: 1000, @@ -104,7 +103,7 @@ pub const DEVNET_PARAMS: Params = Params { max_coinbase_payload_len: 204, // This is technically a soft fork from the Go implementation since kaspad's consensus doesn't - // check these rules, but in practice it's encorced by the network layer that limits the message + // check these rules, but in practice it's enforced by the network layer that limits the message // size to 1 GB. // These values should be lowered to more reasonable amounts on the next planned HF/SF. 
max_tx_inputs: 1_000_000_000, @@ -128,4 +127,5 @@ pub const DEVNET_PARAMS: Params = Params { coinbase_maturity: 100, skip_proof_of_work: false, max_block_level: 250, + pruning_proof_m: 1000, }; diff --git a/consensus/src/pipeline/body_processor/body_validation_in_context.rs b/consensus/src/pipeline/body_processor/body_validation_in_context.rs index 0bd403f19..d8d7243e2 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_context.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_context.rs @@ -81,7 +81,7 @@ impl BlockBodyProcessor { mod tests { use crate::{ - consensus::test_consensus::TestConsensus, constants::TX_VERSION, errors::RuleError, + config::ConfigBuilder, consensus::test_consensus::TestConsensus, constants::TX_VERSION, errors::RuleError, model::stores::ghostdag::GhostdagStoreReader, params::MAINNET_PARAMS, processes::transaction_validator::errors::TxRuleError, }; use consensus_core::{ @@ -94,14 +94,15 @@ mod tests { #[tokio::test] async fn validate_body_in_context_test() { - let mut params = MAINNET_PARAMS.clone_with_skip_pow(); - params.deflationary_phase_daa_score = 2; - let consensus = TestConsensus::create_from_temp_db(¶ms); + let config = ConfigBuilder::new(MAINNET_PARAMS) + .skip_proof_of_work() + .edit_consensus_params(|p| p.deflationary_phase_daa_score = 2) + .build(); + let consensus = TestConsensus::create_from_temp_db(&config); let wait_handles = consensus.init(); - let body_processor = consensus.block_body_processor(); - consensus.add_block_with_parents(1.into(), vec![params.genesis_hash]).await.unwrap(); + consensus.add_block_with_parents(1.into(), vec![config.genesis_hash]).await.unwrap(); { let block = consensus.build_block_with_parents_and_transactions(2.into(), vec![1.into()], vec![]); @@ -109,7 +110,7 @@ mod tests { assert_match!(body_processor.validate_body_in_context(&block.to_immutable()), Err(RuleError::MissingParents(_))); } - let valid_block = consensus.build_block_with_parents_and_transactions(3.into(), vec![params.genesis_hash], vec![]); + let valid_block = consensus.build_block_with_parents_and_transactions(3.into(), vec![config.genesis_hash], vec![]); consensus.validate_and_insert_block(valid_block.to_immutable()).await.unwrap(); { let mut block = consensus.build_block_with_parents_and_transactions(2.into(), vec![3.into()], vec![]); @@ -216,7 +217,7 @@ mod tests { consensus.validate_and_insert_block(block.to_immutable()).await.unwrap(); } else { assert_match!( - consensus.validate_and_insert_block(block.to_immutable()).await, + consensus.validate_and_insert_block(block.to_immutable()).await, Err(RuleError::TxInContextFailed(_, e)) if matches!(e, TxRuleError::NotFinalized(_))); } } diff --git a/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs b/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs index 820e11dd8..ee38325e6 100644 --- a/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs +++ b/consensus/src/pipeline/body_processor/body_validation_in_isolation.rs @@ -105,7 +105,12 @@ impl BlockBodyProcessor { #[cfg(test)] mod tests { - use crate::{consensus::test_consensus::TestConsensus, errors::RuleError, params::MAINNET_PARAMS}; + use crate::{ + config::{Config, ConfigBuilder}, + consensus::test_consensus::TestConsensus, + errors::RuleError, + params::MAINNET_PARAMS, + }; use consensus_core::{ block::MutableBlock, header::Header, @@ -118,8 +123,7 @@ mod tests { #[test] fn validate_body_in_isolation_test() { - let params = &MAINNET_PARAMS; - let consensus = 
TestConsensus::create_from_temp_db(params); + let consensus = TestConsensus::create_from_temp_db(&Config::new(MAINNET_PARAMS)); let wait_handles = consensus.init(); let body_processor = consensus.block_body_processor(); @@ -430,11 +434,11 @@ #[tokio::test] async fn merkle_root_missing_parents_known_invalid_test() { - let params = MAINNET_PARAMS.clone_with_skip_pow(); - let consensus = TestConsensus::create_from_temp_db(&params); + let config = ConfigBuilder::new(MAINNET_PARAMS).skip_proof_of_work().build(); + let consensus = TestConsensus::create_from_temp_db(&config); let wait_handles = consensus.init(); - let mut block = consensus.build_block_with_parents_and_transactions(1.into(), vec![params.genesis_hash], vec![]); + let mut block = consensus.build_block_with_parents_and_transactions(1.into(), vec![config.genesis_hash], vec![]); block.transactions[0].version += 1; assert_match!(consensus.validate_and_insert_block(block.clone().to_immutable()).await, Err(RuleError::BadMerkleRoot(_, _))); @@ -442,7 +446,7 @@ // BadMerkleRoot shouldn't mark the block as known invalid assert_match!(consensus.validate_and_insert_block(block.to_immutable()).await, Err(RuleError::BadMerkleRoot(_, _))); - let mut block = consensus.build_block_with_parents_and_transactions(1.into(), vec![params.genesis_hash], vec![]); + let mut block = consensus.build_block_with_parents_and_transactions(1.into(), vec![config.genesis_hash], vec![]); block.header.parents_by_level[0][0] = 0.into(); assert_match!(consensus.validate_and_insert_block(block.clone().to_immutable()).await, Err(RuleError::MissingParents(_))); diff --git a/consensus/src/pipeline/body_processor/processor.rs b/consensus/src/pipeline/body_processor/processor.rs index 7a3939ab2..8f65baeed 100644 --- a/consensus/src/pipeline/body_processor/processor.rs +++ b/consensus/src/pipeline/body_processor/processor.rs @@ -14,7 +14,7 @@ use crate::{ DB, }, }, - pipeline::deps_manager::{BlockTask, BlockTaskDependencyManager}, + pipeline::deps_manager::{BlockProcessingMessage, BlockTaskDependencyManager}, processes::{ coinbase::CoinbaseManager, mass::MassCalculator, past_median_time::PastMedianTimeManager, transaction_validator::TransactionValidator, @@ -35,8 +35,8 @@ use std::sync::Arc; pub struct BlockBodyProcessor { // Channels - receiver: Receiver<BlockTask>, - sender: Sender<BlockTask>, + receiver: Receiver<BlockProcessingMessage>, + sender: Sender<BlockProcessingMessage>, // Thread pool pub(super) thread_pool: Arc<ThreadPool>, @@ -47,6 +47,7 @@ pub struct BlockBodyProcessor { // Config pub(super) max_block_mass: u64, pub(super) genesis_hash: Hash, + process_genesis: bool, // Stores pub(super) statuses_store: Arc<RwLock<DbStatusesStore>>, @@ -69,8 +70,8 @@ impl BlockBodyProcessor { #[allow(clippy::too_many_arguments)] pub fn new( - receiver: Receiver<BlockTask>, - sender: Sender<BlockTask>, + receiver: Receiver<BlockProcessingMessage>, + sender: Sender<BlockProcessingMessage>, thread_pool: Arc<ThreadPool>, db: Arc<DB>, statuses_store: Arc<RwLock<DbStatusesStore>>, @@ -85,6 +86,7 @@ impl BlockBodyProcessor { past_median_time_manager: PastMedianTimeManager, max_block_mass: u64, genesis_hash: Hash, + process_genesis: bool, ) -> Self { Self { receiver, @@ -103,17 +105,18 @@ impl BlockBodyProcessor { past_median_time_manager, max_block_mass, genesis_hash, + process_genesis, task_manager: BlockTaskDependencyManager::new(), } } pub fn worker(self: &Arc<Self>) { - while let Ok(task) = self.receiver.recv() { - match task { - BlockTask::Exit => break, - BlockTask::Process(block, result_transmitters) => { - let hash = block.header.hash; - if self.task_manager.register(block, result_transmitters) { + while let Ok(msg) = self.receiver.recv() { 
+ match msg { + BlockProcessingMessage::Exit => break, + BlockProcessingMessage::Process(task, result_transmitters) => { + let hash = task.block.header.hash; + if self.task_manager.register(task, result_transmitters) { let processor = self.clone(); self.thread_pool.spawn(move || { processor.queue_block(hash); @@ -127,21 +130,21 @@ self.task_manager.wait_for_idle(); // Pass the exit signal on to the following processor - self.sender.send(BlockTask::Exit).unwrap(); + self.sender.send(BlockProcessingMessage::Exit).unwrap(); } fn queue_block(self: &Arc<Self>, hash: Hash) { - if let Some(block) = self.task_manager.try_begin(hash) { - let res = self.process_block_body(&block); + if let Some(task) = self.task_manager.try_begin(hash) { + let res = self.process_block_body(&task.block, task.trusted_ghostdag_data.is_some()); - let dependent_tasks = self.task_manager.end(hash, |block, result_transmitters| { + let dependent_tasks = self.task_manager.end(hash, |task, result_transmitters| { if res.is_err() { for transmitter in result_transmitters { // We don't care if receivers were dropped let _ = transmitter.send(res.clone()); } } else { - self.sender.send(BlockTask::Process(block, result_transmitters)).unwrap(); + self.sender.send(BlockProcessingMessage::Process(task, result_transmitters)).unwrap(); } }); @@ -152,7 +155,7 @@ } } - fn process_block_body(self: &Arc<Self>, block: &Block) -> BlockProcessResult<BlockStatus> { + fn process_block_body(self: &Arc<Self>, block: &Block, is_trusted: bool) -> BlockProcessResult<BlockStatus> { let status = self.statuses_store.read().get(block.hash()).unwrap(); match status { StatusInvalid => return Err(RuleError::KnownInvalid), @@ ... _ => panic!("unexpected block status {status:?}"), } - if let Err(e) = self.validate_body(block) { + if let Err(e) = self.validate_body(block, is_trusted) { // We mark invalid blocks with status StatusInvalid except in the // case of the following errors: // MissingParents - If we got MissingParents the block shouldn't be @@ -183,9 +186,13 @@ Ok(BlockStatus::StatusUTXOPendingVerification) } - fn validate_body(self: &Arc<Self>, block: &Block) -> BlockProcessResult<()> { + fn validate_body(self: &Arc<Self>, block: &Block, is_trusted: bool) -> BlockProcessResult<()> { self.validate_body_in_isolation(block)?; - self.validate_body_in_context(block) + if !is_trusted { + // TODO: Check that it's safe to skip this check if the block is trusted. 
+ return self.validate_body_in_context(block); + } + Ok(()) } fn commit_body(self: &Arc, hash: Hash, parents: &[Hash], transactions: Arc>) { @@ -207,6 +214,10 @@ impl BlockBodyProcessor { } pub fn process_genesis_if_needed(self: &Arc) { + if !self.process_genesis { + return; + } + let status = self.statuses_store.read().get(self.genesis_hash).unwrap(); match status { StatusHeaderOnly => { diff --git a/consensus/src/pipeline/deps_manager.rs b/consensus/src/pipeline/deps_manager.rs index 71ae8f2e5..756dfaf70 100644 --- a/consensus/src/pipeline/deps_manager.rs +++ b/consensus/src/pipeline/deps_manager.rs @@ -1,21 +1,34 @@ -use crate::errors::BlockProcessResult; +use crate::{errors::BlockProcessResult, model::stores::ghostdag::GhostdagData}; use consensus_core::{block::Block, blockstatus::BlockStatus, BlockHashMap, HashMapCustomHasher}; use hashes::Hash; use parking_lot::{Condvar, Mutex}; -use std::collections::hash_map::Entry::Vacant; +use std::{collections::hash_map::Entry::Vacant, sync::Arc}; use tokio::sync::oneshot; pub type BlockResultSender = oneshot::Sender>; -pub enum BlockTask { +pub enum BlockProcessingMessage { Exit, - Process(Block, Vec), + Process(BlockTask, Vec), +} + +#[derive(Clone)] +pub struct BlockTask { + /// The block to process, possibly header-only + pub block: Block, + + /// Possibly attached trusted ghostdag data - will be set only for + /// trusted blocks arriving as part of the pruning proof + pub trusted_ghostdag_data: Option>, + + /// A flag indicating whether to trigger virtual UTXO processing + pub update_virtual: bool, } /// An internal struct used to manage a block processing task struct BlockTaskInternal { - // The actual block - block: Block, + // The externally accepted block task + task: BlockTask, // A list of channel senders for transmitting the processing result of this task to the async callers result_transmitters: Vec, @@ -25,8 +38,8 @@ struct BlockTaskInternal { } impl BlockTaskInternal { - fn new(block: Block, result_transmitters: Vec) -> Self { - Self { block, result_transmitters, dependent_tasks: Vec::new() } + fn new(task: BlockTask, result_transmitters: Vec) -> Self { + Self { task, result_transmitters, dependent_tasks: Vec::new() } } } @@ -44,24 +57,24 @@ impl BlockTaskDependencyManager { Self { pending: Mutex::new(BlockHashMap::new()), idle_signal: Condvar::new() } } - /// Registers the `(block, result_transmitters)` pair as a pending task. If the block is already pending + /// Registers the `(task, result_transmitters)` pair as a pending task. If the task is already pending /// and has a corresponding internal task, the task is updated with the additional /// result transmitters and the function returns `false` indicating that the task shall /// not be queued for processing. The function is expected to be called by a worker /// controlling the reception of block processing tasks. 
- pub fn register(&self, block: Block, mut result_transmitters: Vec) -> bool { + pub fn register(&self, task: BlockTask, mut result_transmitters: Vec) -> bool { let mut pending = self.pending.lock(); - match pending.entry(block.header.hash) { + match pending.entry(task.block.header.hash) { Vacant(e) => { - e.insert(BlockTaskInternal::new(block, result_transmitters)); + e.insert(BlockTaskInternal::new(task, result_transmitters)); true } e => { e.and_modify(|v| { v.result_transmitters.append(&mut result_transmitters); - if v.block.is_header_only() && !block.is_header_only() { - // The block now includes transactions, so we update the internal block data - v.block = block; + if v.task.block.is_header_only() && !task.block.is_header_only() { + // The block now includes transactions, so we update the internal task data + v.task = task; } }); false @@ -73,18 +86,18 @@ impl BlockTaskDependencyManager { /// previously registered through `self.register`. If any of the direct parents `parent` of /// this hash are in `pending` state, the task is queued as a dependency to the `parent` task /// and will be re-evaluated once that task completes -- in which case the function will return `None`. - pub fn try_begin(&self, hash: Hash) -> Option { + pub fn try_begin(&self, hash: Hash) -> Option { // Lock the pending map. The contention around the lock is // expected to be negligible in header processing time let mut pending = self.pending.lock(); - let block = pending.get(&hash).unwrap().block.clone(); - for parent in block.header.direct_parents().iter() { - if let Some(task) = pending.get_mut(parent) { - task.dependent_tasks.push(hash); + let task = pending.get(&hash).unwrap().task.clone(); + for parent in task.block.header.direct_parents().iter() { + if let Some(parent_task) = pending.get_mut(parent) { + parent_task.dependent_tasks.push(hash); return None; // The block will be reprocessed once the pending parent completes processing } } - Some(block) + Some(task) } /// Report the completion of a processing task. Signals idleness if pending task list is emptied. @@ -93,20 +106,20 @@ impl BlockTaskDependencyManager { /// and returns a list of `dependent_tasks` which should be requeued to workers. pub fn end(&self, hash: Hash, callback: F) -> Vec where - F: Fn(Block, Vec), + F: Fn(BlockTask, Vec), { // Re-lock for post-processing steps let mut pending = self.pending.lock(); - let task = pending.remove(&hash).expect("processed block is expected to be in pending map"); + let internal_task = pending.remove(&hash).expect("processed block is expected to be in pending map"); // Callback within the lock - callback(task.block, task.result_transmitters); + callback(internal_task.task, internal_task.result_transmitters); if pending.is_empty() { self.idle_signal.notify_one(); } - task.dependent_tasks + internal_task.dependent_tasks } /// Wait until all pending tasks are completed and workers are idle. 
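The `BlockTask`/`BlockProcessingMessage` split makes the dependency-manager life cycle easier to trace. Below is a minimal, self-contained sketch of the register → try_begin → end flow; it uses `u64` stand-ins for hashes and a plain `HashMap`, so nothing beyond the three method names is taken from the real store types:

```rust
use std::collections::HashMap;

/// Simplified stand-in for BlockTaskInternal: one pending unit of work.
struct Pending {
    parents: Vec<u64>,
    dependent_tasks: Vec<u64>,
}

#[derive(Default)]
struct DepManager {
    pending: HashMap<u64, Pending>,
}

impl DepManager {
    /// Mirrors `register`: returns false if the hash is already pending.
    fn register(&mut self, hash: u64, parents: Vec<u64>) -> bool {
        if self.pending.contains_key(&hash) {
            return false;
        }
        self.pending.insert(hash, Pending { parents, dependent_tasks: vec![] });
        true
    }

    /// Mirrors `try_begin`: if any direct parent is still pending, park this
    /// task on that parent and return None; otherwise it may start now.
    fn try_begin(&mut self, hash: u64) -> Option<u64> {
        let parents = self.pending[&hash].parents.clone();
        for p in parents {
            if let Some(parent) = self.pending.get_mut(&p) {
                parent.dependent_tasks.push(hash);
                return None;
            }
        }
        Some(hash)
    }

    /// Mirrors `end`: remove the task and hand back dependents for requeueing.
    fn end(&mut self, hash: u64) -> Vec<u64> {
        self.pending.remove(&hash).expect("task must be pending").dependent_tasks
    }
}

fn main() {
    let mut mgr = DepManager::default();
    assert!(mgr.register(1, vec![]));
    assert!(mgr.register(2, vec![1])); // 2 depends on the still-pending 1
    assert!(mgr.try_begin(2).is_none()); // parked behind 1
    assert_eq!(mgr.try_begin(1), Some(1));
    assert_eq!(mgr.end(1), vec![2]); // 2 is released for requeueing
    assert_eq!(mgr.try_begin(2), Some(2));
    assert!(mgr.end(2).is_empty());
}
```

The real manager additionally carries result transmitters and performs the `end` callback under the lock, but the ordering guarantee is the same: a child never begins while a direct parent is pending.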
diff --git a/consensus/src/pipeline/header_processor/post_pow_validation.rs b/consensus/src/pipeline/header_processor/post_pow_validation.rs index dd4060bdf..0d9fea580 100644 --- a/consensus/src/pipeline/header_processor/post_pow_validation.rs +++ b/consensus/src/pipeline/header_processor/post_pow_validation.rs @@ -26,7 +26,7 @@ impl HeaderProcessor { ctx: &mut HeaderProcessingContext, header: &Header, ) -> BlockProcessResult<()> { - let (past_median_time, window) = self.past_median_time_manager.calc_past_median_time(&ctx.ghostdag_data.clone().unwrap()); + let (past_median_time, window) = self.past_median_time_manager.calc_past_median_time(&ctx.get_ghostdag_data().unwrap()); ctx.block_window_for_past_median_time = Some(window); if header.timestamp <= past_median_time { @@ -37,7 +37,7 @@ impl HeaderProcessor { } pub fn check_merge_size_limit(self: &Arc, ctx: &mut HeaderProcessingContext) -> BlockProcessResult<()> { - let mergeset_size = ctx.ghostdag_data.as_ref().unwrap().mergeset_size() as u64; + let mergeset_size = ctx.get_ghostdag_data().as_ref().unwrap().mergeset_size() as u64; if mergeset_size > self.mergeset_size_limit { return Err(RuleError::MergeSetTooBig(mergeset_size, self.mergeset_size_limit)); @@ -46,7 +46,7 @@ impl HeaderProcessor { } fn check_blue_score(self: &Arc, ctx: &mut HeaderProcessingContext, header: &Header) -> BlockProcessResult<()> { - let gd_blue_score = ctx.ghostdag_data.as_ref().unwrap().blue_score; + let gd_blue_score = ctx.get_ghostdag_data().as_ref().unwrap().blue_score; if gd_blue_score != header.blue_score { return Err(RuleError::UnexpectedHeaderBlueScore(gd_blue_score, header.blue_score)); } @@ -54,7 +54,7 @@ impl HeaderProcessor { } fn check_blue_work(self: &Arc, ctx: &mut HeaderProcessingContext, header: &Header) -> BlockProcessResult<()> { - let gd_blue_work = ctx.ghostdag_data.as_ref().unwrap().blue_work; + let gd_blue_work = ctx.get_ghostdag_data().as_ref().unwrap().blue_work; if gd_blue_work != header.blue_work { return Err(RuleError::UnexpectedHeaderBlueWork(gd_blue_work, header.blue_work)); } @@ -91,8 +91,9 @@ impl HeaderProcessor { ctx: &mut HeaderProcessingContext, header: &Header, ) -> BlockProcessResult<()> { - let expected = - self.pruning_manager.expected_header_pruning_point(ctx.ghostdag_data.as_ref().unwrap().to_compact(), ctx.pruning_info); + let expected = self + .pruning_manager + .expected_header_pruning_point(ctx.get_ghostdag_data().as_ref().unwrap().to_compact(), ctx.pruning_info); if expected != header.pruning_point { return Err(RuleError::WrongHeaderPruningPoint(expected, header.pruning_point)); } @@ -100,9 +101,9 @@ impl HeaderProcessor { } pub fn check_bounded_merge_depth(self: &Arc, ctx: &mut HeaderProcessingContext) -> BlockProcessResult<()> { - let gd_data = ctx.ghostdag_data.as_ref().unwrap(); - let merge_depth_root = self.depth_manager.calc_merge_depth_root(gd_data, ctx.pruning_point()); - let finality_point = self.depth_manager.calc_finality_point(gd_data, ctx.pruning_point()); + let gd_data = ctx.get_ghostdag_data().unwrap(); + let merge_depth_root = self.depth_manager.calc_merge_depth_root(&gd_data, ctx.pruning_point()); + let finality_point = self.depth_manager.calc_finality_point(&gd_data, ctx.pruning_point()); let mut kosherizing_blues: Option> = None; for red in gd_data.mergeset_reds.iter().copied() { @@ -111,7 +112,7 @@ impl HeaderProcessor { } // Lazy load the kosherizing blocks since this case is extremely rare if kosherizing_blues.is_none() { - kosherizing_blues = Some(self.depth_manager.kosherizing_blues(gd_data, 
merge_depth_root).collect()); + kosherizing_blues = Some(self.depth_manager.kosherizing_blues(&gd_data, merge_depth_root).collect()); } if !self.reachability_service.is_dag_ancestor_of_any(red, &mut kosherizing_blues.as_ref().unwrap().iter().copied()) { return Err(RuleError::ViolatingBoundedMergeDepth); diff --git a/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs b/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs index ec426367a..391812781 100644 --- a/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs +++ b/consensus/src/pipeline/header_processor/pre_ghostdag_validation.rs @@ -7,6 +7,8 @@ use crate::model::stores::statuses::StatusesStoreReader; use consensus_core::blockhash::BlockHashExtensions; use consensus_core::blockstatus::BlockStatus::StatusInvalid; use consensus_core::header::Header; +use consensus_core::BlockLevel; +use std::cmp::max; use std::{ sync::Arc, time::{SystemTime, UNIX_EPOCH}, @@ -17,26 +19,31 @@ impl HeaderProcessor { self: &Arc, ctx: &mut HeaderProcessingContext, header: &Header, + is_trusted: bool, ) -> BlockProcessResult<()> { if header.hash == self.genesis_hash { return Ok(()); } - self.validate_header_in_isolation(header)?; - self.check_parents_exist(header)?; - self.check_parents_incest(ctx)?; + self.validate_header_in_isolation(ctx)?; + if !is_trusted { + self.check_parents_exist(header)?; + self.check_parents_incest(ctx)?; + } + Ok(()) } - fn validate_header_in_isolation(self: &Arc, header: &Header) -> BlockProcessResult<()> { - if header.hash == self.genesis_hash { + fn validate_header_in_isolation(self: &Arc, ctx: &mut HeaderProcessingContext) -> BlockProcessResult<()> { + if ctx.header.hash == self.genesis_hash { return Ok(()); } - self.check_header_version(header)?; - self.check_block_timestamp_in_isolation(header)?; - self.check_parents_limit(header)?; - Self::check_parents_not_origin(header)?; + self.check_header_version(ctx.header)?; + self.check_block_timestamp_in_isolation(ctx.header)?; + self.check_parents_limit(ctx.header)?; + Self::check_parents_not_origin(ctx.header)?; + self.check_pow_and_calc_block_level(ctx)?; Ok(()) } @@ -109,4 +116,16 @@ impl HeaderProcessor { Ok(()) } + + fn check_pow_and_calc_block_level(self: &Arc, ctx: &mut HeaderProcessingContext) -> BlockProcessResult<()> { + let state = pow::State::new(ctx.header); + let (passed, pow) = state.check_pow(ctx.header.nonce); + if passed || self.skip_proof_of_work { + let signed_block_level = self.max_block_level as i64 - pow.bits() as i64; + ctx.block_level = Some(max(signed_block_level, 0) as BlockLevel); + Ok(()) + } else { + Err(RuleError::InvalidPoW) + } + } } diff --git a/consensus/src/pipeline/header_processor/pre_pow_validation.rs b/consensus/src/pipeline/header_processor/pre_pow_validation.rs index af2b3d081..cd265ae76 100644 --- a/consensus/src/pipeline/header_processor/pre_pow_validation.rs +++ b/consensus/src/pipeline/header_processor/pre_pow_validation.rs @@ -2,7 +2,6 @@ use super::*; use crate::errors::{BlockProcessResult, RuleError}; use crate::model::services::reachability::ReachabilityService; use consensus_core::header::Header; -use std::cmp::max; use std::sync::Arc; impl HeaderProcessor { @@ -16,7 +15,6 @@ impl HeaderProcessor { } self.check_pruning_violation(ctx)?; - self.check_pow_and_calc_block_level(ctx, header)?; self.check_difficulty_and_daa_score(ctx, header)?; Ok(()) } @@ -36,28 +34,12 @@ impl HeaderProcessor { Ok(()) } - fn check_pow_and_calc_block_level( - self: &Arc, - ctx: &mut HeaderProcessingContext, - 
header: &Header, - ) -> BlockProcessResult<()> { - let state = pow::State::new(header); - let (passed, pow) = state.check_pow(header.nonce); - if passed || self.skip_proof_of_work { - let signed_block_level = self.max_block_level as i64 - pow.bits() as i64; - ctx.block_level = Some(max(signed_block_level, 0) as u8); - Ok(()) - } else { - Err(RuleError::InvalidPoW) - } - } - fn check_difficulty_and_daa_score( self: &Arc, ctx: &mut HeaderProcessingContext, header: &Header, ) -> BlockProcessResult<()> { - let ghostdag_data = ctx.ghostdag_data.clone().unwrap(); + let ghostdag_data = ctx.get_ghostdag_data().unwrap(); let window = self.dag_traversal_manager.block_window(&ghostdag_data, self.difficulty_window_size); let (daa_score, mergeset_non_daa) = self diff --git a/consensus/src/pipeline/header_processor/processor.rs b/consensus/src/pipeline/header_processor/processor.rs index 3a39b5e1b..24b97ce3c 100644 --- a/consensus/src/pipeline/header_processor/processor.rs +++ b/consensus/src/pipeline/header_processor/processor.rs @@ -7,19 +7,19 @@ use crate::{ daa::DbDaaStore, depth::DbDepthStore, errors::StoreResultExtensions, - ghostdag::{DbGhostdagStore, GhostdagData}, + ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStoreReader}, headers::DbHeadersStore, headers_selected_tip::{DbHeadersSelectedTipStore, HeadersSelectedTipStoreReader}, past_pruning_points::DbPastPruningPointsStore, pruning::{DbPruningStore, PruningPointInfo, PruningStore, PruningStoreReader}, - reachability::{DbReachabilityStore, StagingReachabilityStore}, - relations::{DbRelationsStore, RelationsStoreBatchExtensions}, + reachability::{DbReachabilityStore, ReachabilityStoreReader, StagingReachabilityStore}, + relations::{DbRelationsStore, RelationsStoreReader}, statuses::{DbStatusesStore, StatusesStore, StatusesStoreBatchExtensions, StatusesStoreReader}, DB, }, }, params::Params, - pipeline::deps_manager::{BlockTask, BlockTaskDependencyManager}, + pipeline::deps_manager::{BlockProcessingMessage, BlockTaskDependencyManager}, processes::{ block_depth::BlockDepthManager, difficulty::DifficultyManager, @@ -33,13 +33,14 @@ use crate::{ test_helpers::header_from_precomputed_hash, }; use consensus_core::{ - blockhash::{BlockHashes, ORIGIN}, + blockhash::{BlockHashExtensions, BlockHashes, ORIGIN}, blockstatus::BlockStatus::{self, StatusHeaderOnly, StatusInvalid}, header::Header, - BlockHashSet, + BlockHashSet, BlockLevel, }; use crossbeam_channel::{Receiver, Sender}; use hashes::Hash; +use itertools::Itertools; use parking_lot::RwLock; use rayon::ThreadPool; use rocksdb::WriteBatch; @@ -51,28 +52,26 @@ pub struct HeaderProcessingContext<'a> { pub hash: Hash, pub header: &'a Arc
, pub pruning_info: PruningPointInfo, + pub non_pruned_parents: Vec, // Staging data - pub ghostdag_data: Option>, + pub ghostdag_data: Option>>, pub block_window_for_difficulty: Option, pub block_window_for_past_median_time: Option, pub mergeset_non_daa: Option, pub merge_depth_root: Option, pub finality_point: Option, - pub block_level: Option, - - // Cache - non_pruned_parents: Option, + pub block_level: Option, } impl<'a> HeaderProcessingContext<'a> { - pub fn new(hash: Hash, header: &'a Arc
<Header>, pruning_info: PruningPointInfo) -> Self { + pub fn new(hash: Hash, header: &'a Arc<Header>
, pruning_info: PruningPointInfo, non_pruned_parents: Vec) -> Self { Self { hash, header, pruning_info, + non_pruned_parents, ghostdag_data: None, - non_pruned_parents: None, block_window_for_difficulty: None, mergeset_non_daa: None, block_window_for_past_median_time: None, @@ -83,24 +82,22 @@ impl<'a> HeaderProcessingContext<'a> { } pub fn get_non_pruned_parents(&mut self) -> BlockHashes { - if let Some(parents) = self.non_pruned_parents.clone() { - return parents; - } - - let non_pruned_parents = Arc::new(self.header.direct_parents().clone()); // TODO: Exclude pruned parents - self.non_pruned_parents = Some(non_pruned_parents.clone()); - non_pruned_parents + self.non_pruned_parents[0].clone() } pub fn pruning_point(&self) -> Hash { self.pruning_info.pruning_point } + + pub fn get_ghostdag_data(&self) -> Option> { + Some(self.ghostdag_data.as_ref()?[0].clone()) + } } pub struct HeaderProcessor { // Channels - receiver: Receiver, - body_sender: Sender, + receiver: Receiver, + body_sender: Sender, // Thread pool pub(super) thread_pool: Arc, @@ -115,15 +112,16 @@ pub struct HeaderProcessor { pub(super) difficulty_window_size: usize, pub(super) mergeset_size_limit: u64, pub(super) skip_proof_of_work: bool, - pub(super) max_block_level: u8, + pub(super) max_block_level: BlockLevel, + process_genesis: bool, // DB db: Arc, // Stores - relations_store: Arc>, + relations_stores: Arc>>, reachability_store: Arc>, - ghostdag_store: Arc, + ghostdag_stores: Vec>, pub(super) statuses_store: Arc>, pub(super) pruning_store: Arc>, pub(super) block_window_cache_for_difficulty: Arc, @@ -134,11 +132,13 @@ pub struct HeaderProcessor { depth_store: Arc, // Managers and services - ghostdag_manager: GhostdagManager< - DbGhostdagStore, - MTRelationsService, - MTReachabilityService, - DbHeadersStore, + ghostdag_managers: Vec< + GhostdagManager< + DbGhostdagStore, + MTRelationsService, + MTReachabilityService, + DbHeadersStore, + >, >, pub(super) dag_traversal_manager: DagTraversalManager, pub(super) difficulty_manager: DifficultyManager, @@ -158,14 +158,15 @@ pub struct HeaderProcessor { impl HeaderProcessor { #[allow(clippy::too_many_arguments)] pub fn new( - receiver: Receiver, - body_sender: Sender, + receiver: Receiver, + body_sender: Sender, thread_pool: Arc, params: &Params, + process_genesis: bool, db: Arc, - relations_store: Arc>, + relations_stores: Arc>>, reachability_store: Arc>, - ghostdag_store: Arc, + ghostdag_stores: Vec>, headers_store: Arc, daa_store: Arc, statuses_store: Arc>, @@ -175,13 +176,20 @@ impl HeaderProcessor { block_window_cache_for_difficulty: Arc, block_window_cache_for_past_median_time: Arc, reachability_service: MTReachabilityService, - relations_service: MTRelationsService, past_median_time_manager: PastMedianTimeManager, dag_traversal_manager: DagTraversalManager, difficulty_manager: DifficultyManager, depth_manager: BlockDepthManager, pruning_manager: PruningManager, parents_manager: ParentsManager, + ghostdag_managers: Vec< + GhostdagManager< + DbGhostdagStore, + MTRelationsService, + MTReachabilityService, + DbHeadersStore, + >, + >, counters: Arc, ) -> Self { Self { @@ -192,25 +200,18 @@ impl HeaderProcessor { genesis_timestamp: params.genesis_timestamp, difficulty_window_size: params.difficulty_window_size, db, - relations_store, + relations_stores, reachability_store, - ghostdag_store: ghostdag_store.clone(), + ghostdag_stores, statuses_store, pruning_store, daa_store, - headers_store: headers_store.clone(), + headers_store, depth_store, headers_selected_tip_store, 
block_window_cache_for_difficulty, block_window_cache_for_past_median_time, - ghostdag_manager: GhostdagManager::new( - params.genesis_hash, - params.ghostdag_k, - ghostdag_store, - relations_service, - headers_store, - reachability_service.clone(), - ), + ghostdag_managers, dag_traversal_manager, difficulty_manager, reachability_service, @@ -227,16 +228,17 @@ impl HeaderProcessor { genesis_bits: params.genesis_bits, skip_proof_of_work: params.skip_proof_of_work, max_block_level: params.max_block_level, + process_genesis, } } pub fn worker(self: &Arc) { - while let Ok(task) = self.receiver.recv() { - match task { - BlockTask::Exit => break, - BlockTask::Process(block, result_transmitters) => { - let hash = block.header.hash; - if self.task_manager.register(block, result_transmitters) { + while let Ok(msg) = self.receiver.recv() { + match msg { + BlockProcessingMessage::Exit => break, + BlockProcessingMessage::Process(task, result_transmitters) => { + let hash = task.block.header.hash; + if self.task_manager.register(task, result_transmitters) { let processor = self.clone(); self.thread_pool.spawn(move || { processor.queue_block(hash); @@ -250,21 +252,21 @@ impl HeaderProcessor { self.task_manager.wait_for_idle(); // Pass the exit signal on to the following processor - self.body_sender.send(BlockTask::Exit).unwrap(); + self.body_sender.send(BlockProcessingMessage::Exit).unwrap(); } fn queue_block(self: &Arc, hash: Hash) { - if let Some(block) = self.task_manager.try_begin(hash) { - let res = self.process_header(&block.header); + if let Some(task) = self.task_manager.try_begin(hash) { + let res = self.process_header(&task.block.header, task.trusted_ghostdag_data); - let dependent_tasks = self.task_manager.end(hash, |block, result_transmitters| { - if res.is_err() || block.is_header_only() { + let dependent_tasks = self.task_manager.end(hash, |task, result_transmitters| { + if res.is_err() || task.block.is_header_only() { for transmitter in result_transmitters { // We don't care if receivers were dropped let _ = transmitter.send(res.clone()); } } else { - self.body_sender.send(BlockTask::Process(block, result_transmitters)).unwrap(); + self.body_sender.send(BlockProcessingMessage::Process(task, result_transmitters)).unwrap(); } }); @@ -279,7 +281,12 @@ impl HeaderProcessor { self.statuses_store.read().has(hash).unwrap() } - fn process_header(self: &Arc, header: &Arc
<Header>) -> BlockProcessResult { + fn process_header( + self: &Arc, + header: &Arc<Header>
, + optional_trusted_ghostdag_data: Option>, + ) -> BlockProcessResult { + let is_trusted = optional_trusted_ghostdag_data.is_some(); let status_option = self.statuses_store.read().get(header.hash).unwrap_option(); match status_option { @@ -289,15 +296,66 @@ impl HeaderProcessor { } // Create processing context - let mut ctx = HeaderProcessingContext::new(header.hash, header, self.pruning_store.read().get().unwrap()); + let is_genesis = header.direct_parents().is_empty(); + let pruning_point = self.pruning_store.read().get().unwrap(); + let relations_read = self.relations_stores.read(); + let non_pruned_parents = (0..=self.max_block_level) + .map(|level| { + Arc::new(if is_genesis { + vec![ORIGIN] + } else { + let filtered = self + .parents_manager + .parents_at_level(header, level) + .iter() + .copied() + .filter(|parent| { + // self.ghostdag_stores[level as usize].has(*parent).unwrap() + relations_read[level as usize].has(*parent).unwrap() + }) + .collect_vec(); + if filtered.is_empty() { + vec![ORIGIN] + } else { + filtered + } + }) + }) + .collect_vec(); + drop(relations_read); + let mut ctx = HeaderProcessingContext::new(header.hash, header, pruning_point, non_pruned_parents); + if is_trusted { + ctx.mergeset_non_daa = Some(Default::default()); // TODO: Check that it's fine for coinbase calculations. + } // Run all header validations for the new header - self.pre_ghostdag_validation(&mut ctx, header)?; - ctx.ghostdag_data = Some(Arc::new(self.ghostdag_manager.ghostdag(header.direct_parents()))); // TODO: Run GHOSTDAG for all block levels - self.pre_pow_validation(&mut ctx, header)?; - if let Err(e) = self.post_pow_validation(&mut ctx, header) { - self.statuses_store.write().set(ctx.hash, StatusInvalid).unwrap(); - return Err(e); + self.pre_ghostdag_validation(&mut ctx, header, is_trusted)?; + let ghostdag_data = (0..=ctx.block_level.unwrap()) + .map(|level| { + if let Some(gd) = self.ghostdag_stores[level as usize].get_data(ctx.hash).unwrap_option() { + gd + } else { + Arc::new(self.ghostdag_managers[level as usize].ghostdag(&ctx.non_pruned_parents[level as usize])) + } + }) + .collect_vec(); + ctx.ghostdag_data = Some(ghostdag_data); + if is_trusted { + // let gd_data = ctx.get_ghostdag_data().unwrap(); + // let merge_depth_root = self.depth_manager.calc_merge_depth_root(&gd_data, ctx.pruning_point()); + // let finality_point = self.depth_manager.calc_finality_point(&gd_data, ctx.pruning_point()); + ctx.merge_depth_root = Some(ORIGIN); + ctx.finality_point = Some(ORIGIN); + } + + if !is_trusted { + // TODO: For now we skip all validations for trusted blocks, but in the future we should + // employ some validations to avoid spam etc. 
+ self.pre_pow_validation(&mut ctx, header)?; + if let Err(e) = self.post_pow_validation(&mut ctx, header) { + self.statuses_store.write().set(ctx.hash, StatusInvalid).unwrap(); + return Err(e); + } } self.commit_header(ctx, header); @@ -315,12 +373,30 @@ impl HeaderProcessor { let mut batch = WriteBatch::default(); // Write to append only stores: this requires no lock and hence done first - self.ghostdag_store.insert_batch(&mut batch, ctx.hash, &ghostdag_data).unwrap(); - self.block_window_cache_for_difficulty.insert(ctx.hash, Arc::new(ctx.block_window_for_difficulty.unwrap())); - self.block_window_cache_for_past_median_time.insert(ctx.hash, Arc::new(ctx.block_window_for_past_median_time.unwrap())); + // TODO: Insert all levels data + for (level, datum) in ghostdag_data.iter().enumerate() { + if self.ghostdag_stores[level].has(ctx.hash).unwrap() { + // The data might have been already written when applying the pruning proof. + continue; + } + self.ghostdag_stores[level].insert_batch(&mut batch, ctx.hash, datum).unwrap(); + } + if let Some(window) = ctx.block_window_for_difficulty { + self.block_window_cache_for_difficulty.insert(ctx.hash, Arc::new(window)); + } + + if let Some(window) = ctx.block_window_for_past_median_time { + self.block_window_cache_for_past_median_time.insert(ctx.hash, Arc::new(window)); + } + self.daa_store.insert_batch(&mut batch, ctx.hash, Arc::new(ctx.mergeset_non_daa.unwrap())).unwrap(); - self.headers_store.insert_batch(&mut batch, ctx.hash, ctx.header.clone(), ctx.block_level.unwrap()).unwrap(); - self.depth_store.insert_batch(&mut batch, ctx.hash, ctx.merge_depth_root.unwrap(), ctx.finality_point.unwrap()).unwrap(); + if !self.headers_store.has(ctx.hash).unwrap() { + // The data might have been already written when applying the pruning proof. + self.headers_store.insert_batch(&mut batch, ctx.hash, ctx.header.clone(), ctx.block_level.unwrap()).unwrap(); + } + if let Some(merge_depth_root) = ctx.merge_depth_root { + self.depth_store.insert_batch(&mut batch, ctx.hash, merge_depth_root, ctx.finality_point.unwrap()).unwrap(); + } // Create staging reachability store. We use an upgradable read here to avoid concurrent // staging reachability operations. PERF: we assume that reachability processing time << header processing @@ -328,14 +404,20 @@ impl HeaderProcessor { // alternative is to create a separate ReachabilityProcessor and to manage things more tightly. let mut staging = StagingReachabilityStore::new(self.reachability_store.upgradable_read()); - // Add block to staging reachability - reachability::add_block( - &mut staging, - ctx.hash, - ghostdag_data.selected_parent, - &mut ghostdag_data.unordered_mergeset_without_selected_parent(), - ) - .unwrap(); + let has_reachability = staging.has(ctx.hash).unwrap(); + if !has_reachability { + // Add block to staging reachability + let reachability_parent = if ctx.non_pruned_parents[0].len() == 1 && ctx.non_pruned_parents[0][0].is_origin() { + ORIGIN + } else { + ghostdag_data[0].selected_parent + }; + + let mut reachability_mergeset = ghostdag_data[0] + .unordered_mergeset_without_selected_parent() + .filter(|hash| self.reachability_store.read().has(*hash).unwrap()); // TODO: Use read lock only once + reachability::add_block(&mut staging, ctx.hash, reachability_parent, &mut reachability_mergeset).unwrap(); + } // Non-append only stores need to use write locks. // Note we need to keep the lock write guards until the batch is written. 
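Note how the commit path above is now idempotent with respect to data written earlier while applying the pruning proof: every per-level ghostdag and header write is guarded by a presence check. A minimal sketch of that guard pattern, with `HashMap`-backed stand-ins for the RocksDB stores and `String` in place of `GhostdagData`:

```rust
use std::collections::HashMap;

/// Stand-in for one level's ghostdag store (the real code batches into RocksDB).
#[derive(Default)]
struct LevelStore {
    data: HashMap<u64, String>,
}

impl LevelStore {
    fn has(&self, hash: u64) -> bool {
        self.data.contains_key(&hash)
    }
    fn insert(&mut self, hash: u64, datum: String) {
        self.data.insert(hash, datum);
    }
}

/// Mirrors the loop in `commit_header`: per-level data is written only when
/// absent, so re-processing a header that the pruning proof already stored
/// is a harmless no-op instead of a double write.
fn commit_levels(stores: &mut [LevelStore], hash: u64, data_per_level: &[String]) {
    for (level, datum) in data_per_level.iter().enumerate() {
        if stores[level].has(hash) {
            continue; // already written when the proof was applied
        }
        stores[level].insert(hash, datum.clone());
    }
}

fn main() {
    let mut stores = vec![LevelStore::default(), LevelStore::default()];
    stores[1].insert(7, "from-proof".into()); // level 1 pre-populated by the proof
    commit_levels(&mut stores, 7, &["gd-l0".into(), "gd-l1".into()]);
    assert_eq!(stores[0].data[&7], "gd-l0");
    assert_eq!(stores[1].data[&7], "from-proof"); // left untouched
}
```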
@@ -347,11 +429,26 @@ impl HeaderProcessor { hst_write_guard.set_batch(&mut batch, SortableBlock::new(ctx.hash, header.blue_work)).unwrap(); } - let relations_write_guard = if header.direct_parents().is_empty() { - self.relations_store.insert_batch(&mut batch, header.hash, BlockHashes::new(vec![ORIGIN])).unwrap() - } else { - self.relations_store.insert_batch(&mut batch, header.hash, BlockHashes::new(header.direct_parents().clone())).unwrap() - }; + let is_genesis = header.direct_parents().is_empty(); + let parents = (0..=ctx.block_level.unwrap()).map(|level| { + Arc::new(if is_genesis { + vec![ORIGIN] + } else { + self.parents_manager + .parents_at_level(ctx.header, level) + .iter() + .copied() + .filter(|parent| self.ghostdag_stores[level as usize].has(*parent).unwrap()) + .collect_vec() + }) + }); + + let mut relations_write_guard = self.relations_stores.write(); + parents.enumerate().for_each(|(level, parent_by_level)| { + if !relations_write_guard[level].has(header.hash).unwrap() { + relations_write_guard[level].insert_batch(&mut batch, header.hash, parent_by_level).unwrap(); + } + }); let statuses_write_guard = self.statuses_store.set_batch(&mut batch, ctx.hash, StatusHeaderOnly).unwrap(); @@ -371,18 +468,16 @@ impl HeaderProcessor { } pub fn process_genesis_if_needed(self: &Arc) { - if self.header_was_processed(self.genesis_hash) { + if !self.process_genesis || self.header_was_processed(self.genesis_hash) { return; } { let mut batch = WriteBatch::default(); - let relations_write_guard = self.relations_store.insert_batch(&mut batch, ORIGIN, BlockHashes::new(vec![])); let mut hst_write_guard = self.headers_selected_tip_store.write(); hst_write_guard.set_batch(&mut batch, SortableBlock::new(self.genesis_hash, 0.into())).unwrap(); // TODO: take blue work from genesis block self.db.write(batch).unwrap(); drop(hst_write_guard); - drop(relations_write_guard); } self.pruning_store.write().set(self.genesis_hash, self.genesis_hash, 0).unwrap(); @@ -390,8 +485,13 @@ impl HeaderProcessor { header.bits = self.genesis_bits; header.timestamp = self.genesis_timestamp; let header = Arc::new(header); - let mut ctx = HeaderProcessingContext::new(self.genesis_hash, &header, PruningPointInfo::from_genesis(self.genesis_hash)); - ctx.ghostdag_data = Some(Arc::new(self.ghostdag_manager.genesis_ghostdag_data())); + let mut ctx = HeaderProcessingContext::new( + self.genesis_hash, + &header, + PruningPointInfo::from_genesis(self.genesis_hash), + vec![BlockHashes::new(vec![ORIGIN])], + ); + ctx.ghostdag_data = Some(self.ghostdag_managers.iter().map(|m| Arc::new(m.genesis_ghostdag_data())).collect()); ctx.block_window_for_difficulty = Some(Default::default()); ctx.block_window_for_past_median_time = Some(Default::default()); ctx.mergeset_non_daa = Some(Default::default()); @@ -400,4 +500,21 @@ impl HeaderProcessor { ctx.block_level = Some(self.max_block_level); self.commit_header(ctx, &header); } + + pub fn process_origin_if_needed(self: &Arc) { + if self.relations_stores.read()[0].has(ORIGIN).unwrap() { + return; + } + + let mut batch = WriteBatch::default(); + let mut relations_write_guard = self.relations_stores.write(); + (0..=self.max_block_level).for_each(|level| { + relations_write_guard[level as usize].insert_batch(&mut batch, ORIGIN, BlockHashes::new(vec![])).unwrap() + }); + let mut hst_write_guard = self.headers_selected_tip_store.write(); + hst_write_guard.set_batch(&mut batch, SortableBlock::new(ORIGIN, 0.into())).unwrap(); + self.db.write(batch).unwrap(); + drop(hst_write_guard); + 
drop(relations_write_guard); + } } diff --git a/consensus/src/pipeline/virtual_processor/errors.rs b/consensus/src/pipeline/virtual_processor/errors.rs new file mode 100644 index 000000000..a3c50fc6d --- /dev/null +++ b/consensus/src/pipeline/virtual_processor/errors.rs @@ -0,0 +1,18 @@ +use thiserror::Error; + +use crate::processes::transaction_validator::errors::TxRuleError; +use hashes::Hash; + +#[derive(Error, Debug, Clone)] +pub enum VirtualProcessorError { + #[error("new pruning point has an invalid transaction {0}: {1}")] + NewPruningPointTxError(Hash, TxRuleError), + + #[error("new pruning point transaction {0} is missing a UTXO entry")] + NewPruningPointTxMissingUTXOEntry(Hash), + + #[error("the imported multiset hash was expected to be {0} and was actually {1}")] + ImportedMultisetHashMismatch(Hash, Hash), +} + +pub type VirtualProcessorResult = std::result::Result; diff --git a/consensus/src/pipeline/virtual_processor/mod.rs b/consensus/src/pipeline/virtual_processor/mod.rs index 4d90020b2..e6e42e3c0 100644 --- a/consensus/src/pipeline/virtual_processor/mod.rs +++ b/consensus/src/pipeline/virtual_processor/mod.rs @@ -1,3 +1,4 @@ +pub mod errors; mod processor; mod utxo_validation; pub use processor::*; diff --git a/consensus/src/pipeline/virtual_processor/processor.rs b/consensus/src/pipeline/virtual_processor/processor.rs index a68a250bc..c5c7b666f 100644 --- a/consensus/src/pipeline/virtual_processor/processor.rs +++ b/consensus/src/pipeline/virtual_processor/processor.rs @@ -24,12 +24,13 @@ use crate::{ tips::{DbTipsStore, TipsStoreReader}, utxo_diffs::{DbUtxoDiffsStore, UtxoDiffsStoreReader}, utxo_multisets::{DbUtxoMultisetsStore, UtxoMultisetsStoreReader}, + utxo_set::{DbUtxoSetStore, UtxoSetStore}, virtual_state::{VirtualState, VirtualStateStore, VirtualStateStoreReader}, DB, }, }, params::Params, - pipeline::{deps_manager::BlockTask, virtual_processor::utxo_validation::UtxoProcessingContext}, + pipeline::{deps_manager::BlockProcessingMessage, virtual_processor::utxo_validation::UtxoProcessingContext}, processes::{ block_depth::BlockDepthManager, coinbase::CoinbaseManager, @@ -45,18 +46,21 @@ use crate::{ use consensus_core::{ block::{BlockTemplate, MutableBlock}, blockstatus::BlockStatus::{self, StatusDisqualifiedFromChain, StatusUTXOPendingVerification, StatusUTXOValid}, - coinbase::MinerData, + coinbase::{BlockRewardData, MinerData}, header::Header, merkle::calc_hash_merkle_root, - tx::{MutableTransaction, Transaction}, + muhash::MuHashExtensions, + tx::{ + PopulatedTransaction, Transaction, {MutableTransaction, TransactionOutpoint, UtxoEntry, ValidatedTransaction}, + }, utxo::{ utxo_diff::UtxoDiff, utxo_view::{UtxoView, UtxoViewComposition}, }, - BlockHashSet, + BlockHashMap, BlockHashSet, HashMapCustomHasher, }; use hashes::Hash; -use kaspa_core::{info, trace}; +use kaspa_core::{debug, info, trace}; use muhash::MuHash; use crossbeam_channel::Receiver; @@ -66,15 +70,18 @@ use rayon::ThreadPool; use rocksdb::WriteBatch; use std::{ cmp::{min, Reverse}, + collections::HashSet, collections::VecDeque, ops::Deref, sync::Arc, - time::SystemTime, + time::{Duration, SystemTime}, }; +use super::errors::{VirtualProcessorError, VirtualProcessorResult}; + pub struct VirtualStateProcessor { // Channels - receiver: Receiver, + receiver: Receiver, // Thread pool pub(super) thread_pool: Arc, @@ -90,6 +97,7 @@ pub struct VirtualStateProcessor { pub(super) difficulty_window_size: usize, pub(super) mergeset_size_limit: u64, pub(super) pruning_depth: u64, + process_genesis: bool, // Stores 
pub(super) statuses_store: Arc>, @@ -106,6 +114,7 @@ pub struct VirtualStateProcessor { pub(super) utxo_multisets_store: Arc, pub(super) acceptance_data_store: Arc, pub virtual_stores: Arc>, + pub(super) pruning_point_utxo_set_store: Arc, // TODO: remove all pub from stores when StoreManager is implemented // Managers and services @@ -125,9 +134,10 @@ pub struct VirtualStateProcessor { impl VirtualStateProcessor { #[allow(clippy::too_many_arguments)] pub fn new( - receiver: Receiver, + receiver: Receiver, thread_pool: Arc, params: &Params, + process_genesis: bool, db: Arc, // Stores statuses_store: Arc>, @@ -144,6 +154,7 @@ impl VirtualStateProcessor { acceptance_data_store: Arc, // Virtual-related stores virtual_stores: Arc>, + pruning_point_utxo_set_store: Arc, // Managers ghostdag_manager: DbGhostdagManager, reachability_service: MTReachabilityService, @@ -168,6 +179,7 @@ impl VirtualStateProcessor { difficulty_window_size: params.difficulty_window_size, mergeset_size_limit: params.mergeset_size_limit, pruning_depth: params.pruning_depth, + process_genesis, db, statuses_store, @@ -182,6 +194,7 @@ impl VirtualStateProcessor { utxo_multisets_store, acceptance_data_store, virtual_stores, + pruning_point_utxo_set_store, ghostdag_manager, reachability_service, relations_service, @@ -197,23 +210,28 @@ impl VirtualStateProcessor { } pub fn worker(self: &Arc) { - 'outer: while let Ok(first_task) = self.receiver.recv() { + 'outer: while let Ok(first_msg) = self.receiver.recv() { // Once a task arrived, collect all pending tasks from the channel. // This is done since virtual processing is not a per-block // operation, so it benefits from max available info - let tasks: Vec = std::iter::once(first_task).chain(self.receiver.try_iter()).collect(); - trace!("virtual processor received {} tasks", tasks.len()); - self.resolve_virtual(); + let update_virtual = + if let BlockProcessingMessage::Process(ref task, _) = first_msg { task.update_virtual } else { false }; + let messages: Vec = std::iter::once(first_msg).chain(self.receiver.try_iter()).collect(); + trace!("virtual processor received {} tasks", messages.len()); + + if update_virtual { + self.resolve_virtual(); + } let statuses_read = self.statuses_store.read(); - for task in tasks { - match task { - BlockTask::Exit => break 'outer, - BlockTask::Process(block, result_transmitters) => { + for msg in messages { + match msg { + BlockProcessingMessage::Exit => break 'outer, + BlockProcessingMessage::Process(task, result_transmitters) => { for transmitter in result_transmitters { // We don't care if receivers were dropped - let _ = transmitter.send(Ok(statuses_read.get(block.hash()).unwrap())); + let _ = transmitter.send(Ok(statuses_read.get(task.block.hash()).unwrap())); } } }; @@ -221,12 +239,14 @@ impl VirtualStateProcessor { } } - fn resolve_virtual(self: &Arc) { + pub fn resolve_virtual(self: &Arc) { // TODO: check finality violation // TODO: handle disqualified chain loop // TODO: acceptance data format // TODO: refactor this methods into multiple methods + let pruning_read_guard = self.pruning_store.upgradable_read(); + let pruning_point = pruning_read_guard.pruning_point().unwrap(); let virtual_read = self.virtual_stores.upgradable_read(); let prev_state = virtual_read.state.get().unwrap(); let tips = self.body_tips_store.read().get().unwrap().iter().copied().collect_vec(); @@ -249,11 +269,23 @@ impl VirtualStateProcessor { } let split_point = split_point.expect("chain iterator was expected to reach the reorg split point"); + 
debug!("resolve_virtual found split point: {split_point}"); // Walk back up to the new virtual selected parent candidate - for (selected_parent, current) in - self.reachability_service.forward_chain_iterator(split_point, new_selected, true).tuple_windows() + let mut last_log_index = 0; + let mut last_log_time = SystemTime::now(); + for (i, (selected_parent, current)) in + self.reachability_service.forward_chain_iterator(split_point, new_selected, true).tuple_windows().enumerate() { + let now = SystemTime::now(); + let passed = now.duration_since(last_log_time).unwrap(); + if passed > Duration::new(10, 0) { + info!("UTXO validated {} chain blocks in the last {} seconds (total {})", i - last_log_index, passed.as_secs(), i); + last_log_time = now; + last_log_index = i; + } + + debug!("UTXO validation for {current}"); match self.utxo_diffs_store.get(current) { Ok(mergeset_diff) => { accumulated_diff.with_diff_in_place(mergeset_diff.deref()).unwrap(); @@ -296,7 +328,7 @@ impl VirtualStateProcessor { let new_selected_status = self.statuses_store.read().get(new_selected).unwrap(); match new_selected_status { BlockStatus::StatusUTXOValid => { - let (virtual_parents, virtual_ghostdag_data) = self.pick_virtual_parents(new_selected, tips); + let (virtual_parents, virtual_ghostdag_data) = self.pick_virtual_parents(new_selected, tips, pruning_point); assert_eq!(virtual_ghostdag_data.selected_parent, new_selected); // Calc the new virtual UTXO diff @@ -354,7 +386,7 @@ impl VirtualStateProcessor { } // TODO: Make a separate pruning processor and send to its channel here - self.advance_pruning_point_and_candidate_if_possible() + self.advance_pruning_point_and_candidate_if_possible(pruning_read_guard) } fn commit_utxo_state(self: &Arc, current: Hash, mergeset_diff: UtxoDiff, multiset: MuHash, acceptance_data: AcceptanceData) { @@ -371,7 +403,7 @@ impl VirtualStateProcessor { /// Picks the virtual parents according to virtual parent selection pruning constrains. /// Assumes `selected_parent` is a UTXO-valid block, and that `candidates` are an antichain /// containing `selected_parent` s.t. it is the block with highest blue work amongst them. 
- fn pick_virtual_parents(&self, selected_parent: Hash, candidates: Vec) -> (Vec, GhostdagData) { + fn pick_virtual_parents(&self, selected_parent: Hash, candidates: Vec, pruning_point: Hash) -> (Vec, GhostdagData) { // TODO: tests let max_block_parents = self.max_block_parents as usize; @@ -423,7 +455,7 @@ impl VirtualStateProcessor { } assert!(mergeset_size <= self.mergeset_size_limit); assert!(virtual_parents.len() <= max_block_parents); - self.remove_bounded_merge_breaking_parents(virtual_parents) + self.remove_bounded_merge_breaking_parents(virtual_parents, pruning_point) } fn mergeset_increase(&self, selected_parents: &[Hash], candidate: Hash, budget: u64) -> MergesetIncreaseResult { @@ -457,11 +489,21 @@ impl VirtualStateProcessor { MergesetIncreaseResult::Accepted { increase_size: mergeset_increase } } - fn remove_bounded_merge_breaking_parents(&self, mut virtual_parents: Vec) -> (Vec, GhostdagData) { + fn remove_bounded_merge_breaking_parents( + &self, + mut virtual_parents: Vec, + current_pruning_point: Hash, + ) -> (Vec, GhostdagData) { let mut ghostdag_data = self.ghostdag_manager.ghostdag(&virtual_parents); - let pruning_point = - self.pruning_manager.expected_header_pruning_point(ghostdag_data.to_compact(), self.pruning_store.read().get().unwrap()); - let merge_depth_root = self.depth_manager.calc_merge_depth_root(&ghostdag_data, pruning_point); + let current_pruning_point_bs = self.headers_store.get_blue_score(current_pruning_point).unwrap(); + let expected_pruning_point = if ghostdag_data.blue_score < current_pruning_point_bs + self.pruning_depth { + // If the pruning point is not in pruning depth, it means we're still in IBD, so we can't look for a more up to date pruning point. + current_pruning_point + } else { + self.pruning_manager.expected_header_pruning_point(ghostdag_data.to_compact(), self.pruning_store.read().get().unwrap()) + }; + debug!("The expected pruning point based on the curent virtual parents is {expected_pruning_point}"); + let merge_depth_root = self.depth_manager.calc_merge_depth_root(&ghostdag_data, expected_pruning_point); let mut kosherizing_blues: Option> = None; let mut bad_reds = Vec::new(); @@ -581,14 +623,16 @@ impl VirtualStateProcessor { Ok(BlockTemplate::new(MutableBlock::new(header, txs), miner_data, coinbase.has_red_reward, selected_parent_timestamp)) } - fn advance_pruning_point_and_candidate_if_possible(self: &Arc) { + fn advance_pruning_point_and_candidate_if_possible( + self: &Arc, + pruning_read_guard: RwLockUpgradableReadGuard, + ) { let virtual_sp = self.virtual_stores.read().state.get().unwrap().ghostdag_data.selected_parent; if virtual_sp == self.genesis_hash { return; } let ghostdag_data = self.ghostdag_store.get_compact_data(virtual_sp).unwrap(); - let pruning_read_guard = self.pruning_store.upgradable_read(); let current_pruning_info = pruning_read_guard.get().unwrap(); let (new_pruning_points, new_candidate) = self.pruning_manager.next_pruning_points_and_candidate_by_ghostdag_data( ghostdag_data, @@ -614,6 +658,10 @@ impl VirtualStateProcessor { } pub fn process_genesis_if_needed(self: &Arc) { + if !self.process_genesis { + return; + } + let status = self.statuses_store.read().get(self.genesis_hash).unwrap(); match status { StatusUTXOPendingVerification => { @@ -646,6 +694,126 @@ impl VirtualStateProcessor { _ => panic!("unexpected genesis status {status:?}"), } } + + pub fn import_pruning_point_utxo_set( + &self, + new_pruning_point: Hash, + imported_utxo_multiset: &mut MuHash, + ) -> VirtualProcessorResult<()> { + 
info!("Importing the UTXO set of the pruning point {}", new_pruning_point); + let new_pruning_point_header = self.headers_store.get_header(new_pruning_point).unwrap(); + let imported_utxo_multiset_hash = imported_utxo_multiset.finalize(); + if imported_utxo_multiset_hash != new_pruning_point_header.utxo_commitment { + return Err(VirtualProcessorError::ImportedMultisetHashMismatch( + new_pruning_point_header.utxo_commitment, + imported_utxo_multiset_hash, + )); + } + + let new_pruning_point_transactions = self.block_transactions_store.get(new_pruning_point).unwrap(); + let new_pruning_point_daa_score = new_pruning_point_header.daa_score; + let mut total_fee = 0; + let mut virtual_multiset = imported_utxo_multiset.clone(); + let virtual_parents = vec![new_pruning_point]; + let virtual_gd = self.ghostdag_manager.ghostdag(&virtual_parents); + let window = self.dag_traversal_manager.block_window(&virtual_gd, self.difficulty_window_size); + let (virtual_daa_score, mergeset_non_daa) = self + .difficulty_manager + .calc_daa_score_and_non_daa_mergeset_blocks(&mut window.iter().map(|item| item.0.hash), &virtual_gd); + + for tx in new_pruning_point_transactions.iter() { + let res: VirtualProcessorResult> = tx + .inputs + .iter() + .map(|input| { + if let Some(entry) = self.pruning_point_utxo_set_store.get(&input.previous_outpoint) { + Ok(entry) + } else { + Err(VirtualProcessorError::NewPruningPointTxMissingUTXOEntry(tx.id())) + } + }) + .collect(); + let entries = res?; + let populated_tx = PopulatedTransaction::new(tx, entries); + let res = if tx.is_coinbase() { + Ok(0) + } else { + self.transaction_validator.validate_populated_transaction_and_get_fee(&populated_tx, new_pruning_point_daa_score) + }; + + if let Err(e) = res { + return Err(VirtualProcessorError::NewPruningPointTxError(tx.id(), e)); + } else { + let tx_fee = res.unwrap(); + total_fee += tx_fee; + let validated_tx = ValidatedTransaction::new(populated_tx, tx_fee); + virtual_multiset.add_transaction(&validated_tx, virtual_daa_score); + } + } + self.statuses_store.write().set(new_pruning_point, StatusUTXOValid).unwrap(); + + { + let mut batch = WriteBatch::default(); + self.utxo_multisets_store.insert_batch(&mut batch, new_pruning_point, imported_utxo_multiset.clone()).unwrap(); + self.db.write(batch).unwrap(); + } + + let virtual_bits = self.difficulty_manager.calculate_difficulty_bits(&window); + let accepted_tx_ids = new_pruning_point_transactions.iter().map(|tx| tx.id()).collect_vec(); + + let coinbase_data = self.coinbase_manager.deserialize_coinbase_payload(&new_pruning_point_transactions[0].payload).unwrap(); + let mut mergeset_rewards = BlockHashMap::new(); + mergeset_rewards.insert( + new_pruning_point, + BlockRewardData::new(coinbase_data.subsidy, total_fee, coinbase_data.miner_data.script_public_key), + ); + + let new_pp_spent_outpoints: HashSet = + new_pruning_point_transactions.iter().flat_map(|tx| tx.inputs.iter().map(|input| input.previous_outpoint)).collect(); + let mut to_remove_diff = Vec::new(); + for (outpoint, entry) in self.pruning_point_utxo_set_store.iterator().map(|iter_result| iter_result.unwrap()) { + if new_pp_spent_outpoints.contains(&outpoint) { + to_remove_diff.push((outpoint, (*entry).clone())); + } + // TODO: Write in actual batches + self.virtual_stores.write().utxo_set.write_many(&[(outpoint, (*entry).clone())]).unwrap(); + } + + let new_pp_added_utxos = new_pruning_point_transactions + .iter() + .flat_map(|tx| { + tx.outputs.iter().enumerate().map(|(index, output)| { + ( + TransactionOutpoint { 
transaction_id: tx.id(), index: index as u32 }, + UtxoEntry { + amount: output.value, + script_public_key: output.script_public_key.clone(), + block_daa_score: virtual_daa_score, + is_coinbase: tx.is_coinbase(), + }, + ) + }) + }) + .collect_vec(); + self.virtual_stores.write().utxo_set.write_many(&new_pp_added_utxos).unwrap(); + + let virtual_past_median_time = self.past_median_time_manager.calc_past_median_time(&virtual_gd).0; + let new_virtual_state = VirtualState { + parents: virtual_parents, + ghostdag_data: virtual_gd, + daa_score: virtual_daa_score, + bits: virtual_bits, + multiset: virtual_multiset, + utxo_diff: UtxoDiff { add: new_pp_added_utxos.into_iter().collect(), remove: to_remove_diff.into_iter().collect() }, + accepted_tx_ids, + mergeset_rewards, + mergeset_non_daa, + past_median_time: virtual_past_median_time, + }; + self.virtual_stores.write().state.set(new_virtual_state).unwrap(); + + Ok(()) + } } enum MergesetIncreaseResult { diff --git a/consensus/src/processes/ghostdag/mergeset.rs b/consensus/src/processes/ghostdag/mergeset.rs index bcf6936b1..8399e048a 100644 --- a/consensus/src/processes/ghostdag/mergeset.rs +++ b/consensus/src/processes/ghostdag/mergeset.rs @@ -8,12 +8,18 @@ impl GhostdagManager { pub fn ordered_mergeset_without_selected_parent(&self, selected_parent: Hash, parents: &[Hash]) -> Vec<Hash> { + self.sort_blocks(self.unordered_mergeset_without_selected_parent(selected_parent, parents)) + } + + pub fn unordered_mergeset_without_selected_parent(&self, selected_parent: Hash, parents: &[Hash]) -> BlockHashSet { let mut queue: VecDeque<_> = parents.iter().copied().filter(|p| p != &selected_parent).collect(); let mut mergeset: BlockHashSet = queue.iter().copied().collect(); let mut selected_parent_past = BlockHashSet::new(); while let Some(current) = queue.pop_front() { - let current_parents = self.relations_store.get_parents(current).unwrap(); + let current_parents = self.relations_store.get_parents(current).unwrap_or_else(|err| { + panic!("failed to get parents of {current}: {err:?}"); + }); // For each parent of the current block we check whether it is in the past of the selected parent. If not, // we add it to the resulting merge-set and queue it for further processing. 
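The refactor above splits the ordered and unordered mergeset variants. A compact, self-contained model of the unordered walk, with toy `u64` hashes, a `HashMap` DAG, and a closure standing in for the reachability-based past check used by the real implementation:

```rust
use std::collections::{HashMap, HashSet, VecDeque};

/// BFS in the spirit of `unordered_mergeset_without_selected_parent`:
/// starting from the non-selected parents, expand backwards; a block's
/// parent joins the mergeset only if it is not in the selected parent's past.
fn unordered_mergeset(
    selected_parent: u64,
    parents: &[u64],
    parents_of: &HashMap<u64, Vec<u64>>,
    in_selected_past: impl Fn(u64) -> bool,
) -> HashSet<u64> {
    let mut queue: VecDeque<u64> = parents.iter().copied().filter(|p| *p != selected_parent).collect();
    let mut mergeset: HashSet<u64> = queue.iter().copied().collect();
    while let Some(current) = queue.pop_front() {
        for &parent in parents_of.get(&current).into_iter().flatten() {
            if in_selected_past(parent) || mergeset.contains(&parent) {
                continue; // already counted, or behind the selected parent
            }
            mergeset.insert(parent);
            queue.push_back(parent);
        }
    }
    mergeset
}

fn main() {
    // DAG: 1 <- 2, 1 <- 3, 3 <- 4; selected parent is 2, the other parent is 4.
    let dag: HashMap<u64, Vec<u64>> = HashMap::from([(2, vec![1]), (3, vec![1]), (4, vec![3])]);
    let past = |h: u64| h == 1; // only block 1 is in the selected parent's past
    let ms = unordered_mergeset(2, &[2, 4], &dag, past);
    assert_eq!(ms, HashSet::from([4, 3]));
}
```

The real code additionally memoizes the selected parent's past in a local set to avoid repeated reachability queries, which this sketch elides.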
@@ -36,6 +43,6 @@ impl Arc { + Arc::new(GhostdagData::new( + 0, + Default::default(), + 0.into(), + BlockHashes::new(Vec::new()), + BlockHashes::new(Vec::new()), + HashKTypeMap::new(BlockHashMap::new()), + )) + } + pub fn find_selected_parent(&self, parents: &mut impl Iterator) -> Hash { parents .map(|parent| SortableBlock { hash: parent, blue_work: self.ghostdag_store.get_blue_work(parent).unwrap() }) @@ -102,8 +113,14 @@ impl { - max_block_level: u8, + max_block_level: BlockLevel, genesis_hash: Hash, headers_store: Arc, reachability_service: MTReachabilityService, - relations_store: Arc>, + relations_service: MTRelationsService, } impl ParentsManager { pub fn new( - max_block_level: u8, + max_block_level: BlockLevel, genesis_hash: Hash, headers_store: Arc, reachability_service: MTReachabilityService, - relations_store: Arc>, + relations_service: MTRelationsService, ) -> Self { - Self { max_block_level, genesis_hash, headers_store, reachability_service, relations_store } + Self { max_block_level, genesis_hash, headers_store, reachability_service, relations_service } } pub fn calc_block_parents(&self, pruning_point: Hash, direct_parents: &[Hash]) -> Vec> { @@ -52,7 +54,7 @@ impl .expect("at least one of the parents is expected to be in the future of the pruning point"); direct_parent_headers.swap(0, first_parent_in_future_of_pruning_point); - let origin_children = self.relations_store.read().get_children(ORIGIN).unwrap(); + let origin_children = self.relations_service.get_children(ORIGIN).unwrap(); let origin_children_headers = origin_children.iter().copied().map(|parent| self.headers_store.get_header(parent).unwrap()).collect_vec(); @@ -166,8 +168,12 @@ impl parents } + pub fn parents<'a>(&'a self, header: &'a Header) -> impl ExactSizeIterator { + (0..=self.max_block_level).map(|level| self.parents_at_level(header, level)) + } + pub fn parents_at_level<'a>(&'a self, header: &'a Header, level: u8) -> &'a [Hash] { - if header.direct_parents().is_empty() { + if header.parents_by_level.is_empty() { // If is genesis &[] } else if header.parents_by_level.len() > level as usize { @@ -469,8 +475,9 @@ mod tests { let reachability_service = MTReachabilityService::new(Arc::new(RwLock::new(reachability_store))); let relations_store = - Arc::new(RwLock::new(RelationsStoreMock { children: BlockHashes::new(vec![pruning_point, pp_anticone_block]) })); - let parents_manager = ParentsManager::new(250, genesis_hash, headers_store, reachability_service, relations_store); + Arc::new(RwLock::new(vec![RelationsStoreMock { children: BlockHashes::new(vec![pruning_point, pp_anticone_block]) }])); + let relations_service = MTRelationsService::new(relations_store, 0); + let parents_manager = ParentsManager::new(250, genesis_hash, headers_store, reachability_service, relations_service); for test_block in test_blocks { let direct_parents = test_block.direct_parents.iter().map(|parent| Hash::from_u64_word(*parent)).collect_vec(); @@ -569,8 +576,9 @@ mod tests { } let reachability_service = MTReachabilityService::new(Arc::new(RwLock::new(reachability_store))); - let relations_store = Arc::new(RwLock::new(RelationsStoreMock { children: BlockHashes::new(vec![pruning_point]) })); - let parents_manager = ParentsManager::new(250, genesis_hash, headers_store, reachability_service, relations_store); + let relations_store = Arc::new(RwLock::new(vec![RelationsStoreMock { children: BlockHashes::new(vec![pruning_point]) }])); + let relations_service = MTRelationsService::new(relations_store, 0); + let parents_manager = 
ParentsManager::new(250, genesis_hash, headers_store, reachability_service, relations_service);
        for test_block in test_blocks {
            let direct_parents = test_block.direct_parents.iter().map(|parent| Hash::from_u64_word(*parent)).collect_vec();
diff --git a/consensus/src/processes/pruning.rs b/consensus/src/processes/pruning.rs
index 9d594c170..de2444114 100644
--- a/consensus/src/processes/pruning.rs
+++ b/consensus/src/processes/pruning.rs
@@ -70,6 +70,15 @@ impl PruningManager
+        if current_pruning_point_bs + self.pruning_depth > ghostdag_data.blue_score {
+            // The pruning point is not yet at a depth of self.pruning_depth, so there is
+            // no point in checking whether it needs to be updated. This can happen
+            // because the virtual is not updated after IBD, so the pruning point
+            // might be at a depth smaller than self.pruning_depth.
+            return (vec![], current_candidate);
+        }
+
         let mut new_candidate = current_candidate;
         for selected_child in self.reachability_service.forward_chain_iterator(low_hash, ghostdag_data.selected_parent, true) {
diff --git a/consensus/src/processes/pruning_proof/mod.rs b/consensus/src/processes/pruning_proof/mod.rs
new file mode 100644
index 000000000..87a6efe09
--- /dev/null
+++ b/consensus/src/processes/pruning_proof/mod.rs
@@ -0,0 +1,385 @@
+use std::{
+    cmp::{max, Reverse},
+    collections::BinaryHeap,
+    sync::Arc,
+};
+
+use consensus_core::{
+    block::Block,
+    blockhash::{BlockHashes, ORIGIN},
+    header::Header,
+    pruning::PruningPointProof,
+    BlockHashMap, BlockHashSet, BlockLevel, HashMapCustomHasher,
+};
+use hashes::Hash;
+use itertools::Itertools;
+use kaspa_core::{info, trace};
+use parking_lot::RwLock;
+use rocksdb::WriteBatch;
+
+use crate::{
+    consensus::{DbGhostdagManager, VirtualStores},
+    model::{
+        services::{
+            reachability::{MTReachabilityService, ReachabilityService},
+            relations::MTRelationsService,
+        },
+        stores::{
+            depth::DbDepthStore,
+            ghostdag::{DbGhostdagStore, GhostdagData, GhostdagStore, GhostdagStoreReader},
+            headers::{DbHeadersStore, HeaderStore, HeaderStoreReader},
+            headers_selected_tip::{DbHeadersSelectedTipStore, HeadersSelectedTipStore},
+            past_pruning_points::{DbPastPruningPointsStore, PastPruningPointsStore},
+            pruning::{DbPruningStore, PruningStore},
+            reachability::{DbReachabilityStore, StagingReachabilityStore},
+            relations::{DbRelationsStore, MemoryRelationsStore, RelationsStore},
+            tips::DbTipsStore,
+            virtual_state::{VirtualState, VirtualStateStore},
+            DB,
+        },
+    },
+    processes::ghostdag::ordering::SortableBlock,
+};
+use std::collections::hash_map::Entry::Vacant;
+
+use super::{ghostdag::protocol::GhostdagManager, parents_builder::ParentsManager, reachability};
+use kaspa_utils::binary_heap::BinaryHeapExtensions;
+
+pub struct PruningProofManager {
+    db: Arc<DB>,
+    headers_store: Arc<DbHeadersStore>,
+    reachability_store: Arc<RwLock<DbReachabilityStore>>,
+    parents_manager: ParentsManager<DbHeadersStore, DbReachabilityStore, MTRelationsService<DbRelationsStore>>,
+    reachability_service: MTReachabilityService<DbReachabilityStore>,
+    ghostdag_stores: Vec<Arc<DbGhostdagStore>>,
+    relations_stores: Arc<RwLock<Vec<DbRelationsStore>>>,
+    pruning_store: Arc<RwLock<DbPruningStore>>,
+    past_pruning_points_store: Arc<DbPastPruningPointsStore>,
+    virtual_stores: Arc<RwLock<VirtualStores>>,
+    body_tips_store: Arc<RwLock<DbTipsStore>>,
+    headers_selected_tip_store: Arc<RwLock<DbHeadersSelectedTipStore>>,
+    depth_store: Arc<DbDepthStore>,
+
+    ghostdag_managers: Vec<DbGhostdagManager>,
+
+    max_block_level: BlockLevel,
+    genesis_hash: Hash,
+}
+
+struct HeaderStoreMock {}
+
+#[allow(unused_variables)]
+impl HeaderStoreReader for HeaderStoreMock {
+    fn get_daa_score(&self, hash: hashes::Hash) -> Result<u64, crate::model::stores::errors::StoreError> {
+        todo!()
+    }
+
+    fn get_blue_score(&self, hash: hashes::Hash) -> Result<u64, crate::model::stores::errors::StoreError> {
+        todo!()
+    }
+
+    fn get_timestamp(&self, hash: hashes::Hash) -> Result<u64, crate::model::stores::errors::StoreError> {
+        todo!()
+    }
+
+    fn get_bits(&self, hash: hashes::Hash) -> Result<u32, crate::model::stores::errors::StoreError> {
+        todo!()
+    }
+
+    fn get_header(&self, hash: hashes::Hash) -> Result<Arc<Header>, crate::model::stores::errors::StoreError> {
+        todo!()
+    }
+
+    fn get_header_with_block_level(
+        &self,
+        hash: hashes::Hash,
+    ) -> Result<crate::model::stores::headers::HeaderWithBlockLevel, crate::model::stores::errors::StoreError> {
+        todo!()
+    }
+
+    fn get_compact_header_data(
+        &self,
+        hash: hashes::Hash,
+    ) -> Result<crate::model::stores::headers::CompactHeaderData, crate::model::stores::errors::StoreError> {
+        todo!()
+    }
+}
+
+struct GhostdagStoreMock {}
+
+#[allow(unused_variables)]
+impl GhostdagStoreReader for GhostdagStoreMock {
+    fn get_blue_score(&self, hash: hashes::Hash) -> Result<u64, crate::model::stores::errors::StoreError> {
+        todo!()
+    }
+
+    fn get_blue_work(&self, hash: hashes::Hash) -> Result<consensus_core::BlueWorkType, crate::model::stores::errors::StoreError> {
+        todo!()
+    }
+
+    fn get_selected_parent(&self, hash: hashes::Hash) -> Result<hashes::Hash, crate::model::stores::errors::StoreError> {
+        todo!()
+    }
+
+    fn get_mergeset_blues(&self, hash: hashes::Hash) -> Result<BlockHashes, crate::model::stores::errors::StoreError> {
+        todo!()
+    }
+
+    fn get_mergeset_reds(&self, hash: hashes::Hash) -> Result<BlockHashes, crate::model::stores::errors::StoreError> {
+        todo!()
+    }
+
+    fn get_blues_anticone_sizes(
+        &self,
+        hash: hashes::Hash,
+    ) -> Result<crate::model::stores::ghostdag::HashKTypeMap, crate::model::stores::errors::StoreError> {
+        todo!()
+    }
+
+    fn get_data(
+        &self,
+        hash: hashes::Hash,
+    ) -> Result<Arc<GhostdagData>, crate::model::stores::errors::StoreError> {
+        todo!()
+    }
+
+    fn get_compact_data(
+        &self,
+        hash: hashes::Hash,
+    ) -> Result<crate::model::stores::ghostdag::CompactGhostdagData, crate::model::stores::errors::StoreError> {
+        todo!()
+    }
+
+    fn has(&self, hash: hashes::Hash) -> Result<bool, crate::model::stores::errors::StoreError> {
+        todo!()
+    }
+}
+
+#[allow(clippy::too_many_arguments)]
+impl PruningProofManager {
+    pub fn new(
+        db: Arc<DB>,
+        headers_store: Arc<DbHeadersStore>,
+        reachability_store: Arc<RwLock<DbReachabilityStore>>,
+        parents_manager: ParentsManager<DbHeadersStore, DbReachabilityStore, MTRelationsService<DbRelationsStore>>,
+        reachability_service: MTReachabilityService<DbReachabilityStore>,
+        ghostdag_stores: Vec<Arc<DbGhostdagStore>>,
+        relations_stores: Arc<RwLock<Vec<DbRelationsStore>>>,
+        pruning_store: Arc<RwLock<DbPruningStore>>,
+        past_pruning_points_store: Arc<DbPastPruningPointsStore>,
+        virtual_stores: Arc<RwLock<VirtualStores>>,
+        body_tips_store: Arc<RwLock<DbTipsStore>>,
+        headers_selected_tip_store: Arc<RwLock<DbHeadersSelectedTipStore>>,
+        depth_store: Arc<DbDepthStore>,
+        ghostdag_managers: Vec<DbGhostdagManager>,
+        max_block_level: BlockLevel,
+        genesis_hash: Hash,
+    ) -> Self {
+        Self {
+            db,
+            headers_store,
+            reachability_store,
+            parents_manager,
+            reachability_service,
+            ghostdag_stores,
+            relations_stores,
+            pruning_store,
+            past_pruning_points_store,
+            virtual_stores,
+            body_tips_store,
+            headers_selected_tip_store,
+            depth_store,
+            ghostdag_managers,
+            max_block_level,
+            genesis_hash,
+        }
+    }
+
+    pub fn import_pruning_points(&self, pruning_points: &[Arc<Header>]) {
+        // TODO: Also write validate_pruning_points
+        for (i, header) in pruning_points.iter().enumerate() {
+            self.past_pruning_points_store.insert(i as u64, header.hash).unwrap();
+
+            if self.headers_store.has(header.hash).unwrap() {
+                continue;
+            }
+
+            let state = pow::State::new(header);
+            let (_, pow) = state.check_pow(header.nonce);
+            let signed_block_level = self.max_block_level as i64 - pow.bits() as i64;
+            let block_level = max(signed_block_level, 0) as BlockLevel;
+            self.headers_store.insert(header.hash, header.clone(), block_level).unwrap();
+        }
+        let current_pp = pruning_points.last().unwrap().hash;
+        info!("Setting {current_pp} as the current pruning point");
+        self.pruning_store.write().set(current_pp, current_pp, (pruning_points.len() - 1) as u64).unwrap();
+    }
+
+    pub fn apply_proof(&self, mut proof: PruningPointProof, trusted_blocks: &[(Block, GhostdagData)]) {
+        let proof_zero_set = BlockHashSet::from_iter(proof[0].iter().map(|header| header.hash));
+        let mut trusted_gd_map = BlockHashMap::new();
+        for (block, gd) in trusted_blocks.iter() {
+            trusted_gd_map.insert(block.hash(), gd.clone());
+            if proof_zero_set.contains(&block.header.hash) {
+                continue;
+            }
+
+            proof[0].push(block.header.clone());
+        }
+
+        proof[0].sort_by(|a, b| a.blue_work.cmp(&b.blue_work));
+        self.populate_reachability(&proof);
+        for (level, headers) in proof.iter().enumerate() {
+            trace!("Applying level {} in pruning point proof", level);
+            self.ghostdag_stores[level].insert(ORIGIN, self.ghostdag_managers[level].origin_ghostdag_data()).unwrap();
+            for header in headers.iter() {
+                let parents = self
+                    .parents_manager
+                    .parents_at_level(header, level as BlockLevel)
+                    .iter()
+                    .copied()
+                    .filter(|parent| self.ghostdag_stores[level].has(*parent).unwrap())
+                    .collect_vec();
+
+                let parents = Arc::new(if parents.is_empty() { vec![ORIGIN] } else { parents });
+
+                self.relations_stores.write()[level].insert(header.hash, parents.clone()).unwrap();
+                let gd = if header.hash == self.genesis_hash {
+                    self.ghostdag_managers[level].genesis_ghostdag_data()
+                } else if level == 0 {
+                    if let Some(gd) = trusted_gd_map.get(&header.hash) {
+                        gd.clone()
+                    } else {
+                        let calculated_gd = self.ghostdag_managers[level].ghostdag(&parents);
+                        // Override the ghostdag data with the real blue score and blue work
+                        GhostdagData {
+                            blue_score: header.blue_score,
+                            blue_work: header.blue_work,
+                            selected_parent: calculated_gd.selected_parent,
+                            mergeset_blues: calculated_gd.mergeset_blues.clone(),
+                            mergeset_reds: calculated_gd.mergeset_reds.clone(),
+                            blues_anticone_sizes: calculated_gd.blues_anticone_sizes.clone(),
+                        }
+                    }
+                } else {
+                    self.ghostdag_managers[level].ghostdag(&parents)
+                };
+                self.ghostdag_stores[level].insert(header.hash, Arc::new(gd)).unwrap();
+            }
+        }
+
+        let pruning_point_header = proof[0].last().unwrap();
+        let pruning_point = pruning_point_header.hash;
+        let virtual_parents = vec![pruning_point];
+        let virtual_gd = self.ghostdag_managers[0].ghostdag(&virtual_parents);
+
+        let virtual_state = VirtualState {
+            // TODO: Use real values when possible
+            parents: virtual_parents.clone(),
+            ghostdag_data: virtual_gd,
+            daa_score: 0,
+            bits: 0,
+            multiset: Default::default(),
+            utxo_diff: Default::default(),
+            accepted_tx_ids: vec![],
+            mergeset_rewards: Default::default(),
+            mergeset_non_daa: Default::default(),
+            past_median_time: 0,
+        };
+        self.virtual_stores.write().state.set(virtual_state).unwrap();
+
+        let mut batch = WriteBatch::default();
+        self.body_tips_store.write().init_batch(&mut batch, &virtual_parents).unwrap();
+        self.headers_selected_tip_store
+            .write()
+            .set(SortableBlock { hash: pruning_point, blue_work: pruning_point_header.blue_work })
+            .unwrap();
+        // self.depth_store.insert_batch(&mut batch, pruning_point, pruning_point, pruning_point).unwrap();
+        self.db.write(batch).unwrap();
+    }
+
+    pub fn populate_reachability(&self, proof: &PruningPointProof) {
+        let mut dag = BlockHashMap::new(); // TODO: Consider making a capacity estimation here
+        let mut up_heap = BinaryHeap::new();
+        for header in proof.iter().flatten().cloned() {
+            if let Vacant(e) = dag.entry(header.hash) {
+                let state = pow::State::new(&header);
+                let (_, pow) = state.check_pow(header.nonce); // TODO: Check if pow passes
+                let signed_block_level = self.max_block_level as i64 - pow.bits() as i64;
+                let block_level = max(signed_block_level, 0) as BlockLevel;
+                self.headers_store.insert(header.hash, header.clone(), block_level).unwrap();
+
+                let mut parents = BlockHashSet::new(); // TODO: Consider making a capacity estimation here
+                for level in 0..=self.max_block_level {
+                    for parent in self.parents_manager.parents_at_level(&header, level) {
+                        parents.insert(*parent);
+                    }
+                }
+
+                struct DagEntry {
+                    header: Arc<Header>,
+                    parents: Arc<BlockHashSet>,
+                }
+
+                up_heap.push(Reverse(SortableBlock { hash: header.hash, blue_work: header.blue_work }));
+                e.insert(DagEntry { header, parents: Arc::new(parents) });
+            }
+        }
+
+        let relations_store = Arc::new(RwLock::new(vec![MemoryRelationsStore::new()]));
+        relations_store.write()[0].insert(ORIGIN, Arc::new(vec![])).unwrap();
+        let relations_service = MTRelationsService::new(relations_store.clone(), 0);
+        let gm = GhostdagManager::new(
+            0.into(),
+            0,
+            Arc::new(GhostdagStoreMock {}),
+            relations_service,
+            Arc::new(HeaderStoreMock {}),
+            self.reachability_service.clone(),
+        ); // Nothing except reachability and relations should be used, so all other arguments can be fake.
+
+        let mut selected_tip = up_heap.peek().unwrap().clone().0;
+        for reverse_sortable_block in up_heap.into_sorted_iter() {
+            // TODO: Convert to into_iter_sorted once it gets stable
+            let hash = reverse_sortable_block.0.hash;
+            let dag_entry = dag.get(&hash).unwrap();
+            let parents_in_dag = BinaryHeap::from_iter(
+                dag_entry
+                    .parents
+                    .iter()
+                    .cloned()
+                    .filter(|parent| dag.contains_key(parent))
+                    .map(|parent| SortableBlock { hash: parent, blue_work: dag.get(&parent).unwrap().header.blue_work }),
+            );
+
+            let mut fake_direct_parents: Vec<SortableBlock> = Vec::new();
+            for parent in parents_in_dag.into_sorted_iter() {
+                if self
+                    .reachability_service
+                    .is_dag_ancestor_of_any(parent.hash, &mut fake_direct_parents.iter().map(|parent| &parent.hash).cloned())
+                {
+                    continue;
+                }
+
+                fake_direct_parents.push(parent);
+            }
+
+            let fake_direct_parents_hashes = BlockHashes::new(if fake_direct_parents.is_empty() {
+                vec![ORIGIN]
+            } else {
+                fake_direct_parents.iter().map(|parent| &parent.hash).cloned().collect_vec()
+            });
+
+            let selected_parent = fake_direct_parents.iter().max().map(|parent| parent.hash).unwrap_or(ORIGIN);
+
+            relations_store.write()[0].insert(hash, fake_direct_parents_hashes.clone()).unwrap();
+            let mergeset = gm.unordered_mergeset_without_selected_parent(selected_parent, &fake_direct_parents_hashes);
+            let mut staging = StagingReachabilityStore::new(self.reachability_store.upgradable_read());
+            reachability::inquirer::add_block(&mut staging, hash, selected_parent, &mut mergeset.iter().cloned()).unwrap();
+            let reachability_write_guard = staging.commit(&mut WriteBatch::default()).unwrap();
+            drop(reachability_write_guard);
+
+            selected_tip = max(selected_tip, reverse_sortable_block.0);
+        }
+    }
+}
diff --git a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs b/consensus/src/processes/transaction_validator/transaction_validator_populated.rs
index 8ca9f6ef5..556c7be9c 100644
--- a/consensus/src/processes/transaction_validator/transaction_validator_populated.rs
+++ b/consensus/src/processes/transaction_validator/transaction_validator_populated.rs
@@ -6,6 +6,7 @@ use consensus_core::{
     },
     tx::VerifiableTransaction,
 };
+use kaspa_core::info;
 
 use super::{
     errors::{TxResult, TxRuleError},
@@ -107,21 +108,24 @@ impl TransactionValidator {
                 // TODO: this is a temporary implementation and not ready for consensus since any invalid signature
                 // will crash the node. We need to replace it with a proper script engine once it's ready.
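The hunk that follows replaces a hard unwrap on public-key parsing, which would crash the node on any non-p2pk script, with a graceful fallback to a log line. A minimal standalone sketch of the same Schnorr verification flow, assuming the rust-secp256k1 crate with its global-context feature enabled (the helper name and shape here are illustrative, not the validator's actual API):

```rust
use secp256k1::{schnorr, Message, XOnlyPublicKey};

/// Illustrative helper: verify a 64-byte Schnorr signature over a 32-byte
/// sighash against a 32-byte x-only public key, treating any parse failure
/// as "not a p2pk input" rather than panicking.
fn verify_p2pk_input(pk: &[u8], sig: &[u8], sig_hash: &[u8]) -> bool {
    match (XOnlyPublicKey::from_slice(pk), schnorr::Signature::from_slice(sig), Message::from_slice(sig_hash)) {
        (Ok(pk), Ok(sig), Ok(msg)) => sig.verify(&msg, &pk).is_ok(),
        // Malformed key, signature, or digest: report failure instead of crashing
        _ => false,
    }
}
```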
let pk = &entry.script_public_key.script()[1..33]; - let pk = secp256k1::XOnlyPublicKey::from_slice(pk).unwrap(); - let sig = secp256k1::schnorr::Signature::from_slice(&input.signature_script[1..65]).unwrap(); - let sig_hash = calc_schnorr_signature_hash(tx, i, SIG_HASH_ALL, &mut reused_values); - let msg = secp256k1::Message::from_slice(sig_hash.as_bytes().as_slice()).unwrap(); - let sig_cache_key = SigCacheKey { signature: sig, pub_key: pk, message: msg }; - match self.sig_cache.get(&sig_cache_key) { - Some(valid) => { - assert!(valid, "invalid signature in sig cache"); - } - None => { - // TODO: Find a way to parallelize this part. This will be less trivial - // once this code is inside the script engine. - sig.verify(&msg, &pk).unwrap(); - self.sig_cache.insert(sig_cache_key, true); + if let Ok(pk) = secp256k1::XOnlyPublicKey::from_slice(pk) { + let sig = secp256k1::schnorr::Signature::from_slice(&input.signature_script[1..65]).unwrap(); + let sig_hash = calc_schnorr_signature_hash(tx, i, SIG_HASH_ALL, &mut reused_values); + let msg = secp256k1::Message::from_slice(sig_hash.as_bytes().as_slice()).unwrap(); + let sig_cache_key = SigCacheKey { signature: sig, pub_key: pk, message: msg }; + match self.sig_cache.get(&sig_cache_key) { + Some(valid) => { + assert!(valid, "invalid signature in sig cache"); + } + None => { + // TODO: Find a way to parallelize this part. This will be less trivial + // once this code is inside the script engine. + sig.verify(&msg, &pk).unwrap(); + self.sig_cache.insert(sig_cache_key, true); + } } + } else { + info!("Looks like this is not a p2pk script, so the current code can't handle it") } } diff --git a/consensus/tests/common/mod.rs b/consensus/tests/common/mod.rs index 538b1767a..039e0160d 100644 --- a/consensus/tests/common/mod.rs +++ b/consensus/tests/common/mod.rs @@ -5,7 +5,7 @@ use std::{ }; #[allow(dead_code)] // Usage by integration tests is ignored by the compiler for some reason -pub fn open_file(file_path: &str) -> File { +pub fn open_file(file_path: &Path) -> File { let file_res = File::open(file_path); match file_res { Ok(file) => file, @@ -20,6 +20,15 @@ pub fn open_file(file_path: &str) -> File { } } +#[allow(dead_code)] // Usage by integration tests is ignored by the compiler for some reason +pub fn file_exists(file_path: &Path) -> bool { + if !file_path.exists() { + // In debug mode the working directory is often the top-level workspace folder + return Path::new("consensus").join(file_path).exists(); + } + true +} + #[allow(dead_code)] // Usage by integration tests is ignored by the compiler pub fn read_dir(dir_path: &str) -> ReadDir { let dir_res = fs::read_dir(dir_path); diff --git a/consensus/tests/integration_tests.rs b/consensus/tests/integration_tests.rs index ac40d73c0..b5b06ce36 100644 --- a/consensus/tests/integration_tests.rs +++ b/consensus/tests/integration_tests.rs @@ -2,8 +2,9 @@ //! Integration tests //! 
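A note on the common::file_exists helper added above: integration tests may run with either the crate directory or the workspace root as the working directory, so lookups fall back to a consensus-prefixed path. A tiny sketch of the same idea using only std (resolve_testdata is a hypothetical name, not part of the test harness):

```rust
use std::path::{Path, PathBuf};

/// Hypothetical helper mirroring common::file_exists: accept a path relative
/// to the `consensus` crate and fall back to a workspace-root-relative lookup.
fn resolve_testdata(rel: &Path) -> Option<PathBuf> {
    if rel.exists() {
        return Some(rel.to_path_buf());
    }
    // In debug runs the working directory is often the top-level workspace folder
    let from_workspace = Path::new("consensus").join(rel);
    if from_workspace.exists() {
        Some(from_workspace)
    } else {
        None
    }
}
```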
+use consensus::config::{Config, ConfigBuilder}; use consensus::consensus::test_consensus::{create_temp_db, TestConsensus}; -use consensus::model::stores::ghostdag::{GhostdagStoreReader, KType as GhostdagKType}; +use consensus::model::stores::ghostdag::{GhostdagData, GhostdagStoreReader, HashKTypeMap, KType as GhostdagKType}; use consensus::model::stores::headers::HeaderStoreReader; use consensus::model::stores::reachability::DbReachabilityStore; use consensus::params::{Params, DEVNET_PARAMS, MAINNET_PARAMS}; @@ -15,21 +16,25 @@ use consensus_core::constants::BLOCK_VERSION; use consensus_core::errors::block::{BlockProcessResult, RuleError}; use consensus_core::header::Header; use consensus_core::subnets::SubnetworkId; -use consensus_core::tx::{ScriptPublicKey, Transaction, TransactionInput, TransactionOutpoint, TransactionOutput}; -use consensus_core::{blockhash, hashing, BlueWorkType}; +use consensus_core::tx::{ScriptPublicKey, Transaction, TransactionInput, TransactionOutpoint, TransactionOutput, UtxoEntry}; +use consensus_core::{blockhash, hashing, BlockHashMap, BlueWorkType}; use hashes::Hash; use flate2::read::GzDecoder; use futures_util::future::join_all; use itertools::Itertools; +use kaspa_core::info; use math::Uint256; +use muhash::MuHash; use serde::{Deserialize, Serialize}; use std::cmp::Ordering; +use std::path::Path; +use std::sync::Arc; use std::{ collections::HashMap, fs::File, future::Future, - io::{self, BufRead, BufReader}, + io::{BufRead, BufReader}, str::{from_utf8, FromStr}, time::{Duration, SystemTime, UNIX_EPOCH}, }; @@ -62,7 +67,7 @@ fn reachability_stretch_test(use_attack_json: bool) { if use_attack_json { "" } else { "no" }, NUM_BLOCKS_EXPONENT ); - let file = common::open_file(&path_str); + let file = common::open_file(Path::new(&path_str)); let decoder = GzDecoder::new(file); let json_blocks: Vec = serde_json::from_reader(decoder).unwrap(); @@ -149,8 +154,8 @@ fn test_noattack_json() { #[tokio::test] async fn consensus_sanity_test() { let genesis_child: Hash = 2.into(); - - let consensus = TestConsensus::create_from_temp_db(&MAINNET_PARAMS.clone_with_skip_pow()); + let config = ConfigBuilder::new(MAINNET_PARAMS).skip_proof_of_work().build(); + let consensus = TestConsensus::create_from_temp_db(&config); let wait_handles = consensus.init(); consensus @@ -206,11 +211,14 @@ async fn ghostdag_test() { let reader = BufReader::new(file); let test: GhostdagTestDag = serde_json::from_reader(reader).unwrap(); - let mut params = MAINNET_PARAMS.clone_with_skip_pow(); - params.genesis_hash = string_to_hash(&test.genesis_id); - params.ghostdag_k = test.k; - - let consensus = TestConsensus::create_from_temp_db(¶ms); + let config = ConfigBuilder::new(MAINNET_PARAMS) + .skip_proof_of_work() + .edit_consensus_params(|p| { + p.genesis_hash = string_to_hash(&test.genesis_id); + p.ghostdag_k = test.k; + }) + .build(); + let consensus = TestConsensus::create_from_temp_db(&config); let wait_handles = consensus.init(); for block in test.blocks.iter() { @@ -276,13 +284,15 @@ fn strings_to_hashes(strings: &Vec) -> Vec { #[tokio::test] async fn block_window_test() { + let config = ConfigBuilder::new(MAINNET_PARAMS) + .skip_proof_of_work() + .edit_consensus_params(|p| { + p.genesis_hash = string_to_hash("A"); + p.ghostdag_k = 1; + }) + .build(); let (_temp_db_lifetime, db) = create_temp_db(); - - let mut params = MAINNET_PARAMS.clone_with_skip_pow(); - params.genesis_hash = string_to_hash("A"); - params.ghostdag_k = 1; - - let consensus = TestConsensus::new(db, ¶ms); + let consensus = 
TestConsensus::new(db, &config); let wait_handles = consensus.init(); struct TestBlock { @@ -339,10 +349,10 @@ async fn block_window_test() { #[tokio::test] async fn header_in_isolation_validation_test() { - let params = &MAINNET_PARAMS; - let consensus = TestConsensus::create_from_temp_db(params); + let config = Config::new(MAINNET_PARAMS); + let consensus = TestConsensus::create_from_temp_db(&config); let wait_handles = consensus.init(); - let block = consensus.build_block_with_parents(1.into(), vec![params.genesis_hash]); + let block = consensus.build_block_with_parents(1.into(), vec![config.genesis_hash]); { let mut block = block.clone(); @@ -363,7 +373,7 @@ async fn header_in_isolation_validation_test() { block.header.hash = 2.into(); let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_millis() as u64; - let block_ts = now + params.timestamp_deviation_tolerance * params.target_time_per_block + 2000; + let block_ts = now + config.timestamp_deviation_tolerance * config.target_time_per_block + 2000; block.header.timestamp = block_ts; match consensus.validate_and_insert_block(block.to_immutable()).await { Err(RuleError::TimeTooFarIntoTheFuture(ts, _)) => { @@ -390,11 +400,11 @@ async fn header_in_isolation_validation_test() { { let mut block = block.clone(); block.header.hash = 4.into(); - block.header.parents_by_level[0] = (5..(params.max_block_parents + 6)).map(|x| (x as u64).into()).collect(); + block.header.parents_by_level[0] = (5..(config.max_block_parents + 6)).map(|x| (x as u64).into()).collect(); match consensus.validate_and_insert_block(block.to_immutable()).await { Err(RuleError::TooManyParents(num_parents, limit)) => { - assert_eq!((params.max_block_parents + 1) as usize, num_parents); - assert_eq!(limit, params.max_block_parents as usize); + assert_eq!((config.max_block_parents + 1) as usize, num_parents); + assert_eq!(limit, config.max_block_parents as usize); } res => { panic!("Unexpected result: {res:?}") @@ -407,17 +417,17 @@ async fn header_in_isolation_validation_test() { #[tokio::test] async fn incest_test() { - let params = MAINNET_PARAMS.clone_with_skip_pow(); - let consensus = TestConsensus::create_from_temp_db(¶ms); + let config = ConfigBuilder::new(MAINNET_PARAMS).skip_proof_of_work().build(); + let consensus = TestConsensus::create_from_temp_db(&config); let wait_handles = consensus.init(); - let block = consensus.build_block_with_parents(1.into(), vec![params.genesis_hash]); + let block = consensus.build_block_with_parents(1.into(), vec![config.genesis_hash]); consensus.validate_and_insert_block(block.to_immutable()).await.unwrap(); - let mut block = consensus.build_block_with_parents(2.into(), vec![params.genesis_hash]); - block.header.parents_by_level[0] = vec![1.into(), params.genesis_hash]; + let mut block = consensus.build_block_with_parents(2.into(), vec![config.genesis_hash]); + block.header.parents_by_level[0] = vec![1.into(), config.genesis_hash]; match consensus.validate_and_insert_block(block.to_immutable()).await { Err(RuleError::InvalidParentsRelation(a, b)) => { - assert_eq!(a, params.genesis_hash); + assert_eq!(a, config.genesis_hash); assert_eq!(b, 1.into()); } res => { @@ -430,10 +440,10 @@ async fn incest_test() { #[tokio::test] async fn missing_parents_test() { - let params = &MAINNET_PARAMS; - let consensus = TestConsensus::create_from_temp_db(params); + let config = ConfigBuilder::new(MAINNET_PARAMS).skip_proof_of_work().build(); + let consensus = TestConsensus::create_from_temp_db(&config); let wait_handles = 
consensus.init(); - let mut block = consensus.build_block_with_parents(1.into(), vec![params.genesis_hash]); + let mut block = consensus.build_block_with_parents(1.into(), vec![config.genesis_hash]); block.header.parents_by_level[0] = vec![0.into()]; match consensus.validate_and_insert_block(block.to_immutable()).await { Err(RuleError::MissingParents(missing)) => { @@ -451,10 +461,10 @@ async fn missing_parents_test() { // as a known invalid. #[tokio::test] async fn known_invalid_test() { - let params = MAINNET_PARAMS.clone_with_skip_pow(); - let consensus = TestConsensus::create_from_temp_db(¶ms); + let config = ConfigBuilder::new(MAINNET_PARAMS).skip_proof_of_work().build(); + let consensus = TestConsensus::create_from_temp_db(&config); let wait_handles = consensus.init(); - let mut block = consensus.build_block_with_parents(1.into(), vec![params.genesis_hash]); + let mut block = consensus.build_block_with_parents(1.into(), vec![config.genesis_hash]); block.header.timestamp -= 1; match consensus.validate_and_insert_block(block.clone().to_immutable()).await { @@ -476,21 +486,21 @@ async fn known_invalid_test() { #[tokio::test] async fn median_time_test() { - let params = MAINNET_PARAMS.clone_with_skip_pow(); - let consensus = TestConsensus::create_from_temp_db(¶ms); + let config = ConfigBuilder::new(MAINNET_PARAMS).skip_proof_of_work().build(); + let consensus = TestConsensus::create_from_temp_db(&config); let wait_handles = consensus.init(); - let num_blocks = 2 * params.timestamp_deviation_tolerance - 1; + let num_blocks = 2 * config.timestamp_deviation_tolerance - 1; for i in 1..(num_blocks + 1) { - let parent = if i == 1 { params.genesis_hash } else { (i - 1).into() }; + let parent = if i == 1 { config.genesis_hash } else { (i - 1).into() }; let mut block = consensus.build_block_with_parents(i.into(), vec![parent]); - block.header.timestamp = params.genesis_timestamp + i; + block.header.timestamp = config.genesis_timestamp + i; consensus.validate_and_insert_block(block.to_immutable()).await.unwrap(); } let mut block = consensus.build_block_with_parents((num_blocks + 2).into(), vec![num_blocks.into()]); // We set the timestamp to be less than the median time and expect the block to be rejected - block.header.timestamp = params.genesis_timestamp + num_blocks - params.timestamp_deviation_tolerance - 1; + block.header.timestamp = config.genesis_timestamp + num_blocks - config.timestamp_deviation_tolerance - 1; match consensus.validate_and_insert_block(block.to_immutable()).await { Err(RuleError::TimeTooOld(_, _)) => {} res => { @@ -500,7 +510,7 @@ async fn median_time_test() { let mut block = consensus.build_block_with_parents((num_blocks + 3).into(), vec![num_blocks.into()]); // We set the timestamp to be the exact median time and expect the block to be rejected - block.header.timestamp = params.genesis_timestamp + num_blocks - params.timestamp_deviation_tolerance; + block.header.timestamp = config.genesis_timestamp + num_blocks - config.timestamp_deviation_tolerance; match consensus.validate_and_insert_block(block.to_immutable()).await { Err(RuleError::TimeTooOld(_, _)) => {} res => { @@ -510,7 +520,7 @@ async fn median_time_test() { let mut block = consensus.build_block_with_parents((num_blocks + 4).into(), vec![(num_blocks).into()]); // We set the timestamp to be bigger than the median time and expect the block to be inserted successfully. 
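For orientation, the rule these cases exercise: a block's timestamp must be strictly greater than the median timestamp of a past window of 2 × timestamp_deviation_tolerance − 1 blocks, which is why the test builds exactly that many blocks first. A toy sketch of the check under the assumption of a simple odd-length window (not the consensus implementation):

```rust
/// Toy illustration of the past-median-time rule the test exercises:
/// with an odd-length window, the candidate timestamp must exceed the median.
fn passes_median_time(mut window: Vec<u64>, candidate_ts: u64) -> bool {
    window.sort_unstable();
    candidate_ts > window[window.len() / 2]
}

// With tolerance = 2 the window holds 3 timestamps:
// passes_median_time(vec![10, 11, 12], 11) == false  // equal to the median: rejected
// passes_median_time(vec![10, 11, 12], 12) == true   // strictly above: accepted
```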
- block.header.timestamp = params.genesis_timestamp + params.timestamp_deviation_tolerance + 1; + block.header.timestamp = config.genesis_timestamp + config.timestamp_deviation_tolerance + 1; consensus.validate_and_insert_block(block.to_immutable()).await.unwrap(); consensus.shutdown(wait_handles); @@ -518,20 +528,20 @@ async fn median_time_test() { #[tokio::test] async fn mergeset_size_limit_test() { - let params = MAINNET_PARAMS.clone_with_skip_pow(); - let consensus = TestConsensus::create_from_temp_db(¶ms); + let config = ConfigBuilder::new(MAINNET_PARAMS).skip_proof_of_work().build(); + let consensus = TestConsensus::create_from_temp_db(&config); let wait_handles = consensus.init(); - let num_blocks_per_chain = params.mergeset_size_limit + 1; + let num_blocks_per_chain = config.mergeset_size_limit + 1; - let mut tip1_hash = params.genesis_hash; + let mut tip1_hash = config.genesis_hash; for i in 1..(num_blocks_per_chain + 1) { let block = consensus.build_block_with_parents(i.into(), vec![tip1_hash]); tip1_hash = block.header.hash; consensus.validate_and_insert_block(block.to_immutable()).await.unwrap(); } - let mut tip2_hash = params.genesis_hash; + let mut tip2_hash = config.genesis_hash; for i in (num_blocks_per_chain + 2)..(2 * num_blocks_per_chain + 1) { let block = consensus.build_block_with_parents(i.into(), vec![tip2_hash]); tip2_hash = block.header.hash; @@ -541,8 +551,8 @@ async fn mergeset_size_limit_test() { let block = consensus.build_block_with_parents((3 * num_blocks_per_chain + 1).into(), vec![tip1_hash, tip2_hash]); match consensus.validate_and_insert_block(block.to_immutable()).await { Err(RuleError::MergeSetTooBig(a, b)) => { - assert_eq!(a, params.mergeset_size_limit + 1); - assert_eq!(b, params.mergeset_size_limit); + assert_eq!(a, config.mergeset_size_limit + 1); + assert_eq!(b, config.mergeset_size_limit); } res => { panic!("Unexpected result: {res:?}") @@ -631,6 +641,47 @@ struct RPCBlockVerboseData { Hash: String, } +#[allow(non_snake_case)] +#[derive(Deserialize, Debug)] +struct JsonBlockWithTrustedData { + Block: RPCBlock, + GHOSTDAG: JsonGHOSTDAGData, +} + +#[allow(non_snake_case)] +#[derive(Deserialize, Debug)] +struct JsonGHOSTDAGData { + BlueScore: u64, + BlueWork: String, + SelectedParent: String, + MergeSetBlues: Vec, + MergeSetReds: Vec, + BluesAnticoneSizes: Vec, +} + +#[allow(non_snake_case)] +#[derive(Deserialize, Debug)] +struct JsonBluesAnticoneSizes { + BlueHash: String, + AnticoneSize: GhostdagKType, +} + +#[allow(non_snake_case)] +#[derive(Deserialize, Debug)] +struct JsonOutpointUTXOEntryPair { + Outpoint: RPCOutpoint, + UTXOEntry: RPCUTXOEntry, +} + +#[allow(non_snake_case)] +#[derive(Deserialize, Debug)] +struct RPCUTXOEntry { + Amount: u64, + ScriptPublicKey: RPCScriptPublicKey, + BlockDAAScore: u64, + IsCoinbase: bool, +} + #[allow(non_snake_case)] #[derive(Deserialize, Debug)] struct KaspadGoParams { @@ -652,20 +703,21 @@ struct KaspadGoParams { PreDeflationaryPhaseBaseSubsidy: u64, SkipProofOfWork: bool, MaxBlockLevel: u8, + PruningProofM: u64, } impl KaspadGoParams { - fn into_params(self, genesis_header: &Header) -> Params { + fn into_params(self) -> Params { let finality_depth = self.FinalityDuration / self.TargetTimePerBlock; Params { - genesis_hash: genesis_header.hash, + genesis_hash: MAINNET_PARAMS.genesis_hash, ghostdag_k: self.K, timestamp_deviation_tolerance: self.TimestampDeviationTolerance, target_time_per_block: self.TargetTimePerBlock / 1_000_000, max_block_parents: self.MaxBlockParents, difficulty_window_size: 
self.DifficultyAdjustmentWindowSize, - genesis_timestamp: genesis_header.timestamp, - genesis_bits: genesis_header.bits, + genesis_timestamp: MAINNET_PARAMS.genesis_timestamp, + genesis_bits: MAINNET_PARAMS.genesis_bits, mergeset_size_limit: self.MergeSetSizeLimit, merge_depth: self.MergeDepth, finality_depth, @@ -685,59 +737,88 @@ impl KaspadGoParams { coinbase_maturity: MAINNET_PARAMS.coinbase_maturity, skip_proof_of_work: self.SkipProofOfWork, max_block_level: self.MaxBlockLevel, + pruning_proof_m: self.PruningProofM, } } } #[tokio::test] async fn goref_custom_pruning_depth() { - json_test("tests/testdata/goref_custom_pruning_depth.json.gz").await + json_test("tests/testdata/dags_for_json_tests/goref_custom_pruning_depth").await } #[tokio::test] async fn goref_notx_test() { - json_test("tests/testdata/goref-notx-5000-blocks.json.gz").await + json_test("tests/testdata/dags_for_json_tests/goref-notx-5000-blocks").await } #[tokio::test] async fn goref_notx_concurrent_test() { - json_concurrency_test("tests/testdata/goref-notx-5000-blocks.json.gz").await + json_concurrency_test("tests/testdata/dags_for_json_tests/goref-notx-5000-blocks").await } #[tokio::test] async fn goref_tx_small_test() { - json_test("tests/testdata/goref-905-tx-265-blocks.json.gz").await + json_test("tests/testdata/dags_for_json_tests/goref-905-tx-265-blocks").await } #[tokio::test] async fn goref_tx_small_concurrent_test() { - json_concurrency_test("tests/testdata/goref-905-tx-265-blocks.json.gz").await + json_concurrency_test("tests/testdata/dags_for_json_tests/goref-905-tx-265-blocks").await } #[ignore] #[tokio::test] async fn goref_tx_big_test() { - // TODO: add this file to a data repo and fetch dynamically - json_test("tests/testdata/goref-1.6M-tx-10K-blocks.json.gz").await + // TODO: add this directory to a data repo and fetch dynamically + json_test("tests/testdata/dags_for_json_tests/goref-1.6M-tx-10K-blocks").await } #[ignore] #[tokio::test] async fn goref_tx_big_concurrent_test() { // TODO: add this file to a data repo and fetch dynamically - json_concurrency_test("tests/testdata/goref-1.6M-tx-10K-blocks.json.gz").await + json_concurrency_test("tests/testdata/dags_for_json_tests/goref-1.6M-tx-10K-blocks").await } -async fn json_test(file_path: &str) { - let file = common::open_file(file_path); +#[tokio::test] +#[ignore = "long"] +async fn goref_mainnet_test() { + // TODO: add this directory to a data repo and fetch dynamically + json_test("tests/testdata/dags_for_json_tests/goref-mainnet").await +} + +#[tokio::test] +#[ignore = "long"] +async fn goref_mainnet_concurrent_test() { + // TODO: add this directory to a data repo and fetch dynamically + json_concurrency_test("tests/testdata/dags_for_json_tests/goref-mainnet").await +} + +fn gzip_file_lines(path: &Path) -> impl Iterator { + let file = common::open_file(path); let decoder = GzDecoder::new(file); - let mut lines = BufReader::new(decoder).lines(); - let first_line = lines.next().unwrap().unwrap(); + BufReader::new(decoder).lines().map(|line| line.unwrap()) +} + +async fn json_test(file_path: &str) { + kaspa_core::log::try_init_logger("INFO"); + let main_path = Path::new(file_path); + let proof_exists = common::file_exists(&main_path.join("proof.json.gz")); + + let mut lines = gzip_file_lines(&main_path.join("blocks.json.gz")); + let first_line = lines.next().unwrap(); let go_params_res: Result = serde_json::from_str(&first_line); let params = if let Ok(go_params) = go_params_res { - let second_line = lines.next().unwrap().unwrap(); - let genesis = 
json_line_to_block(second_line); - go_params.into_params(&genesis.header) + let mut params = go_params.into_params(); + if !proof_exists { + let second_line = lines.next().unwrap(); + let genesis = json_line_to_block(second_line); + params.genesis_bits = genesis.header.bits; + params.genesis_hash = genesis.header.hash; + params.genesis_timestamp = genesis.header.timestamp; + } + params } else { let genesis = json_line_to_block(first_line); let mut params = DEVNET_PARAMS; @@ -747,27 +828,83 @@ async fn json_test(file_path: &str) { params }; - let consensus = TestConsensus::create_from_temp_db(¶ms); + let mut config = Config::new(params); + if proof_exists { + config.process_genesis = false; + } + let consensus = TestConsensus::create_from_temp_db(&config); let wait_handles = consensus.init(); + let pruning_point = if proof_exists { + let proof_lines = gzip_file_lines(&main_path.join("proof.json.gz")); + let proof = proof_lines + .map(|line| { + let rpc_headers: Vec = serde_json::from_str(&line).unwrap(); + rpc_headers.iter().map(|rh| Arc::new(rpc_header_to_header(rh))).collect_vec() + }) + .collect_vec(); + + // TODO: Add consensus validation that the pruning point is one of the trusted blocks. + let trusted_blocks = gzip_file_lines(&main_path.join("trusted.json.gz")).map(json_trusted_line_to_block_and_gd).collect_vec(); + consensus.consensus.apply_proof(proof, &trusted_blocks); + + let past_pruning_points = + gzip_file_lines(&main_path.join("past-pps.json.gz")).map(|line| json_line_to_block(line).header).collect_vec(); + let pruning_point = past_pruning_points.last().unwrap().hash; + + consensus.consensus.import_pruning_points(past_pruning_points); + + let mut last_time = SystemTime::now(); + let mut last_index: usize = 0; + for (i, (block, gd)) in trusted_blocks.into_iter().enumerate() { + let now = SystemTime::now(); + let passed = now.duration_since(last_time).unwrap(); + if passed > Duration::new(1, 0) { + println!("Processed {} trusted blocks in the last {} seconds (total {})", i - last_index, passed.as_secs(), i); + last_time = now; + last_index = i; + } + consensus.consensus.validate_and_insert_trusted_block(block, Arc::new(gd)).await.unwrap(); + } + println!("Done processing trusted blocks"); + Some(pruning_point) + } else { + None + }; + let mut last_time = SystemTime::now(); let mut last_index: usize = 0; for (i, line) in lines.enumerate() { let now = SystemTime::now(); let passed = now.duration_since(last_time).unwrap(); if passed > Duration::new(10, 0) { - println!("Processed {} blocks in the last {} seconds", i - last_index, passed.as_secs()); + println!("Processed {} blocks in the last {} seconds (total {})", i - last_index, passed.as_secs(), i); last_time = now; last_index = i; } - let block = json_line_to_block(line.unwrap()); + let block = json_line_to_block(line); let hash = block.header.hash; // Test our hashing implementation vs the hash accepted from the json source assert_eq!(hashing::header::hash(&block.header), hash, "header hashing for block {i} {hash} failed"); - let status = consensus.validate_and_insert_block(block).await.unwrap_or_else(|e| panic!("block {i} {hash} failed: {e}")); + let status = consensus + .consensus + .validate_and_insert_block(block, !proof_exists) + .await + .unwrap_or_else(|e| panic!("block {i} {hash} failed: {e}")); assert!(status.is_utxo_valid_or_pending()); } + if proof_exists { + let mut multiset = MuHash::new(); + for outpoint_utxo_pairs in gzip_file_lines(&main_path.join("pp-utxo.json.gz")).map(json_line_to_utxo_pairs) { + 
consensus.consensus.append_imported_pruning_point_utxos(&outpoint_utxo_pairs, &mut multiset); + } + + consensus.consensus.import_pruning_point_utxo_set(pruning_point.unwrap(), &mut multiset).unwrap(); + consensus.consensus.resolve_virtual(); + // TODO: Add consensus validation that the pruning point is actually the right block according to the rules (in pruning depth etc). + } + // Assert that at least one body tip was resolved with valid UTXO assert!(consensus.body_tips().iter().copied().any(|h| consensus.block_status(h) == BlockStatus::StatusUTXOValid)); @@ -775,34 +912,101 @@ async fn json_test(file_path: &str) { } async fn json_concurrency_test(file_path: &str) { - let file = common::open_file(file_path); - let decoder = GzDecoder::new(file); - let mut lines = io::BufReader::new(decoder).lines(); + kaspa_core::log::try_init_logger("INFO"); + let main_path = Path::new(file_path); + let proof_exists = main_path.join("proof.json.gz").exists(); + + let mut lines = gzip_file_lines(&main_path.join("blocks.json.gz")); let first_line = lines.next().unwrap(); - let genesis = json_line_to_block(first_line.unwrap()); - let mut params = DEVNET_PARAMS; - params.genesis_bits = genesis.header.bits; - params.genesis_hash = genesis.header.hash; - params.genesis_timestamp = genesis.header.timestamp; + let go_params_res: Result = serde_json::from_str(&first_line); + let params = if let Ok(go_params) = go_params_res { + let mut params = go_params.into_params(); + if !proof_exists { + let second_line = lines.next().unwrap(); + let genesis = json_line_to_block(second_line); + params.genesis_bits = genesis.header.bits; + params.genesis_hash = genesis.header.hash; + params.genesis_timestamp = genesis.header.timestamp; + } + params + } else { + let genesis = json_line_to_block(first_line); + let mut params = DEVNET_PARAMS; + params.genesis_bits = genesis.header.bits; + params.genesis_hash = genesis.header.hash; + params.genesis_timestamp = genesis.header.timestamp; + params + }; - let consensus = TestConsensus::create_from_temp_db(¶ms); + let mut config = Config::new(params); + if proof_exists { + config.process_genesis = false; + } + let consensus = TestConsensus::create_from_temp_db(&config); let wait_handles = consensus.init(); + let pruning_point = if proof_exists { + let proof_lines = gzip_file_lines(&main_path.join("proof.json.gz")); + let proof = proof_lines + .map(|line| { + let rpc_headers: Vec = serde_json::from_str(&line).unwrap(); + rpc_headers.iter().map(|rh| Arc::new(rpc_header_to_header(rh))).collect_vec() + }) + .collect_vec(); + + let trusted_blocks = gzip_file_lines(&main_path.join("trusted.json.gz")).map(json_trusted_line_to_block_and_gd).collect_vec(); + consensus.consensus.apply_proof(proof, &trusted_blocks); + + let past_pruning_points = + gzip_file_lines(&main_path.join("past-pps.json.gz")).map(|line| json_line_to_block(line).header).collect_vec(); + let pruning_point = past_pruning_points.last().unwrap().hash; + + consensus.consensus.import_pruning_points(past_pruning_points); + + let mut last_time = SystemTime::now(); + let mut last_index: usize = 0; + for (i, (block, gd)) in trusted_blocks.into_iter().enumerate() { + let now = SystemTime::now(); + let passed = now.duration_since(last_time).unwrap(); + if passed > Duration::new(1, 0) { + println!("Processed {} trusted blocks in the last {} seconds (total {})", i - last_index, passed.as_secs(), i); + last_time = now; + last_index = i; + } + consensus.consensus.validate_and_insert_trusted_block(block, Arc::new(gd)).await.unwrap(); + } + 
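The chunked loop below keeps two batches in flight: chunk i+1 is submitted before the futures of chunk i are awaited. This only overlaps work because validate_and_insert_block dispatches the block into the processing pipeline when called, and the returned future merely resolves on completion. A minimal sketch of the pattern, assuming the futures and itertools crates and an eagerly-dispatching submit function (all names here are illustrative):

```rust
use futures::future::join_all;
use itertools::Itertools;

/// Illustrative two-deep pipeline: while batch i is being awaited, batch i+1
/// has already been handed to the (eagerly dispatching) processing pipeline.
async fn pipeline<F, Fut>(items: Vec<u64>, submit: F)
where
    F: Fn(u64) -> Fut,
    Fut: std::future::Future<Output = Result<(), ()>>,
{
    let chunks = items.into_iter().chunks(1000);
    let mut iter = chunks.into_iter();
    let mut prev: Vec<Fut> = iter.next().unwrap().map(&submit).collect();
    for chunk in iter {
        let current: Vec<Fut> = chunk.map(&submit).collect();
        // Await the previous batch; the current one is already in the pipeline
        assert!(join_all(prev).await.into_iter().all(|r| r.is_ok()));
        prev = current;
    }
    assert!(join_all(prev).await.into_iter().all(|r| r.is_ok()));
}
```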
println!("Done processing trusted blocks"); + Some(pruning_point) + } else { + None + }; + let chunks = lines.into_iter().chunks(1000); let mut iter = chunks.into_iter(); let mut chunk = iter.next().unwrap(); - let mut prev_joins = submit_chunk(&consensus, &mut chunk); + let mut prev_joins = submit_chunk(&consensus, &mut chunk, proof_exists); - for mut chunk in iter { - let current_joins = submit_chunk(&consensus, &mut chunk); + for (i, mut chunk) in iter.enumerate() { + let current_joins = submit_chunk(&consensus, &mut chunk, proof_exists); let statuses = join_all(prev_joins).await.into_iter().collect::, RuleError>>().unwrap(); assert!(statuses.iter().all(|s| s.is_utxo_valid_or_pending())); prev_joins = current_joins; + info!("Processed 1000 blocks ({} overall)", (i + 1) * 1000); } let statuses = join_all(prev_joins).await.into_iter().collect::, RuleError>>().unwrap(); assert!(statuses.iter().all(|s| s.is_utxo_valid_or_pending())); + if proof_exists { + let mut multiset = MuHash::new(); + for outpoint_utxo_pairs in gzip_file_lines(&main_path.join("pp-utxo.json.gz")).map(json_line_to_utxo_pairs) { + consensus.consensus.append_imported_pruning_point_utxos(&outpoint_utxo_pairs, &mut multiset); + } + + consensus.consensus.import_pruning_point_utxo_set(pruning_point.unwrap(), &mut multiset).unwrap(); + consensus.consensus.resolve_virtual(); + } + // Assert that at least one body tip was resolved with valid UTXO assert!(consensus.body_tips().iter().copied().any(|h| consensus.block_status(h) == BlockStatus::StatusUTXOValid)); @@ -811,39 +1015,98 @@ async fn json_concurrency_test(file_path: &str) { fn submit_chunk( consensus: &TestConsensus, - chunk: &mut impl Iterator>, + chunk: &mut impl Iterator, + proof_exists: bool, ) -> Vec>> { let mut futures = Vec::new(); for line in chunk { - let f = consensus.validate_and_insert_block(json_line_to_block(line.unwrap())); + let f = consensus.consensus.validate_and_insert_block(json_line_to_block(line), !proof_exists); futures.push(f); } futures } +fn rpc_header_to_header(rpc_header: &RPCBlockHeader) -> Header { + Header::new( + rpc_header.Version, + rpc_header + .Parents + .iter() + .map(|item| item.ParentHashes.iter().map(|parent| Hash::from_str(parent).unwrap()).collect()) + .collect(), + Hash::from_str(&rpc_header.HashMerkleRoot).unwrap(), + Hash::from_str(&rpc_header.AcceptedIDMerkleRoot).unwrap(), + Hash::from_str(&rpc_header.UTXOCommitment).unwrap(), + rpc_header.Timestamp, + rpc_header.Bits, + rpc_header.Nonce, + rpc_header.DAAScore, + BlueWorkType::from_hex(&rpc_header.BlueWork).unwrap(), + rpc_header.BlueScore, + Hash::from_str(&rpc_header.PruningPoint).unwrap(), + ) +} + +fn json_trusted_line_to_block_and_gd(line: String) -> (Block, GhostdagData) { + let json_block_with_trusted: JsonBlockWithTrustedData = serde_json::from_str(&line).unwrap(); + let block = rpc_block_to_block(json_block_with_trusted.Block); + + let gd = GhostdagData { + blue_score: json_block_with_trusted.GHOSTDAG.BlueScore, + blue_work: BlueWorkType::from_hex(&json_block_with_trusted.GHOSTDAG.BlueWork).unwrap(), + selected_parent: Hash::from_str(&json_block_with_trusted.GHOSTDAG.SelectedParent).unwrap(), + mergeset_blues: Arc::new( + json_block_with_trusted.GHOSTDAG.MergeSetBlues.into_iter().map(|hex| Hash::from_str(&hex).unwrap()).collect_vec(), + ), + mergeset_reds: Arc::new( + json_block_with_trusted.GHOSTDAG.MergeSetReds.into_iter().map(|hex| Hash::from_str(&hex).unwrap()).collect_vec(), + ), + blues_anticone_sizes: HashKTypeMap::new(BlockHashMap::from_iter( + 
json_block_with_trusted + .GHOSTDAG + .BluesAnticoneSizes + .into_iter() + .map(|e| (Hash::from_str(&e.BlueHash).unwrap(), e.AnticoneSize)), + )), + }; + + (block, gd) +} + +fn json_line_to_utxo_pairs(line: String) -> Vec<(TransactionOutpoint, UtxoEntry)> { + let json_pairs: Vec = serde_json::from_str(&line).unwrap(); + json_pairs + .iter() + .map(|json_pair| { + ( + TransactionOutpoint { + transaction_id: Hash::from_str(&json_pair.Outpoint.TransactionID).unwrap(), + index: json_pair.Outpoint.Index, + }, + UtxoEntry { + amount: json_pair.UTXOEntry.Amount, + script_public_key: ScriptPublicKey::from_vec( + json_pair.UTXOEntry.ScriptPublicKey.Version, + hex_decode(&json_pair.UTXOEntry.ScriptPublicKey.Script), + ), + block_daa_score: json_pair.UTXOEntry.BlockDAAScore, + is_coinbase: json_pair.UTXOEntry.IsCoinbase, + }, + ) + }) + .collect_vec() +} + fn json_line_to_block(line: String) -> Block { let rpc_block: RPCBlock = serde_json::from_str(&line).unwrap(); + rpc_block_to_block(rpc_block) +} + +fn rpc_block_to_block(rpc_block: RPCBlock) -> Block { + let header = rpc_header_to_header(&rpc_block.Header); + assert_eq!(header.hash, Hash::from_str(&rpc_block.VerboseData.Hash).unwrap()); Block::new( - Header { - hash: Hash::from_str(&rpc_block.VerboseData.Hash).unwrap(), - version: rpc_block.Header.Version, - parents_by_level: rpc_block - .Header - .Parents - .iter() - .map(|item| item.ParentHashes.iter().map(|parent| Hash::from_str(parent).unwrap()).collect()) - .collect(), - hash_merkle_root: Hash::from_str(&rpc_block.Header.HashMerkleRoot).unwrap(), - accepted_id_merkle_root: Hash::from_str(&rpc_block.Header.AcceptedIDMerkleRoot).unwrap(), - utxo_commitment: Hash::from_str(&rpc_block.Header.UTXOCommitment).unwrap(), - timestamp: rpc_block.Header.Timestamp, - bits: rpc_block.Header.Bits, - nonce: rpc_block.Header.Nonce, - daa_score: rpc_block.Header.DAAScore, - blue_work: BlueWorkType::from_hex(&rpc_block.Header.BlueWork).unwrap(), - blue_score: rpc_block.Header.BlueScore, - pruning_point: Hash::from_str(&rpc_block.Header.PruningPoint).unwrap(), - }, + header, rpc_block .Transactions .iter() @@ -893,26 +1156,30 @@ fn hex_decode(src: &str) -> Vec { #[tokio::test] async fn bounded_merge_depth_test() { - let mut params = MAINNET_PARAMS.clone_with_skip_pow(); - params.ghostdag_k = 5; - params.merge_depth = 7; + let config = ConfigBuilder::new(MAINNET_PARAMS) + .skip_proof_of_work() + .edit_consensus_params(|p| { + p.ghostdag_k = 5; + p.merge_depth = 7; + }) + .build(); - assert!((params.ghostdag_k as u64) < params.merge_depth, "K must be smaller than merge depth for this test to run"); + assert!((config.ghostdag_k as u64) < config.merge_depth, "K must be smaller than merge depth for this test to run"); - let consensus = TestConsensus::create_from_temp_db(¶ms); + let consensus = TestConsensus::create_from_temp_db(&config); let wait_handles = consensus.init(); - let mut selected_chain = vec![params.genesis_hash]; - for i in 1..(params.merge_depth + 3) { + let mut selected_chain = vec![config.genesis_hash]; + for i in 1..(config.merge_depth + 3) { let hash: Hash = (i + 1).into(); consensus.add_block_with_parents(hash, vec![*selected_chain.last().unwrap()]).await.unwrap(); selected_chain.push(hash); } // The length of block_chain_2 is shorter by one than selected_chain, so selected_chain will remain the selected chain. 
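Context for the comment above: with proof-of-work skipped, every block contributes equal work, so the chain that accumulates more blocks accumulates more blue work and remains the selected chain. A toy sketch of that tip selection (hypothetical types; SortableBlock in the codebase likewise compares by blue work first):

```rust
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct ToyTip {
    blue_work: u64, // compared first: more accumulated work wins
    hash: u64,      // deterministic tie-breaker for equal work
}

/// The selected chain follows the maximum-blue-work tip.
fn selected_tip(tips: &[ToyTip]) -> Option<ToyTip> {
    tips.iter().copied().max()
}
```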
- let mut block_chain_2 = vec![params.genesis_hash]; - for i in 1..(params.merge_depth + 2) { - let hash: Hash = (i + params.merge_depth + 3).into(); + let mut block_chain_2 = vec![config.genesis_hash]; + for i in 1..(config.merge_depth + 2) { + let hash: Hash = (i + config.merge_depth + 3).into(); consensus.add_block_with_parents(hash, vec![*block_chain_2.last().unwrap()]).await.unwrap(); block_chain_2.push(hash); } @@ -948,7 +1215,7 @@ async fn bounded_merge_depth_test() { .unwrap(); // We extend the selected chain until kosherizing_hash will be red from the virtual POV. - for i in 0..params.ghostdag_k { + for i in 0..config.ghostdag_k { let hash = Hash::from_u64_word(i as u64 * 1000); consensus.add_block_with_parents(hash, vec![*selected_chain.last().unwrap()]).await.unwrap(); selected_chain.push(hash); @@ -989,15 +1256,18 @@ async fn difficulty_test() { Uint256::from_compact_target_bits(a).cmp(&Uint256::from_compact_target_bits(b)) } - let mut params = MAINNET_PARAMS.clone_with_skip_pow(); - params.ghostdag_k = 1; - params.difficulty_window_size = 140; - - let consensus = TestConsensus::create_from_temp_db(¶ms); + let config = ConfigBuilder::new(MAINNET_PARAMS) + .skip_proof_of_work() + .edit_consensus_params(|p| { + p.ghostdag_k = 1; + p.difficulty_window_size = 140; + }) + .build(); + let consensus = TestConsensus::create_from_temp_db(&config); let wait_handles = consensus.init(); let fake_genesis = Header { - hash: params.genesis_hash, + hash: config.genesis_hash, version: 0, parents_by_level: vec![], hash_merkle_root: 0.into(), @@ -1013,19 +1283,19 @@ async fn difficulty_test() { }; let mut tip = fake_genesis; - for _ in 0..params.difficulty_window_size { + for _ in 0..config.difficulty_window_size { tip = add_block(&consensus, None, vec![tip.hash]).await; - assert_eq!(tip.bits, params.genesis_bits, "until first DAA window is created difficulty should remains unchanged"); + assert_eq!(tip.bits, config.genesis_bits, "until first DAA window is created difficulty should remains unchanged"); } - for _ in 0..params.difficulty_window_size + 10 { + for _ in 0..config.difficulty_window_size + 10 { tip = add_block(&consensus, None, vec![tip.hash]).await; - assert_eq!(tip.bits, params.genesis_bits, "block rate wasn't changed so difficulty is not expected to change"); + assert_eq!(tip.bits, config.genesis_bits, "block rate wasn't changed so difficulty is not expected to change"); } let block_in_the_past = add_block_with_min_time(&consensus, vec![tip.hash]).await; assert_eq!( - block_in_the_past.bits, params.genesis_bits, + block_in_the_past.bits, config.genesis_bits, "block_in_the_past shouldn't affect its own difficulty, but only its future" ); tip = block_in_the_past; @@ -1033,7 +1303,7 @@ async fn difficulty_test() { assert_eq!(tip.bits, 0x1d02c50f); // TODO: Check that it makes sense // Increase block rate to increase difficulty - for _ in 0..params.difficulty_window_size { + for _ in 0..config.difficulty_window_size { let prev_bits = tip.bits; tip = add_block_with_min_time(&consensus, vec![tip.hash]).await; assert!( @@ -1044,7 +1314,7 @@ async fn difficulty_test() { // Add blocks until difficulty stabilizes let mut same_bits_count = 0; - while same_bits_count < params.difficulty_window_size + 1 { + while same_bits_count < config.difficulty_window_size + 1 { let prev_bits = tip.bits; tip = add_block(&consensus, None, vec![tip.hash]).await; if tip.bits == prev_bits { @@ -1054,7 +1324,7 @@ async fn difficulty_test() { } } - let slow_block_time = tip.timestamp + params.target_time_per_block 
+ 1000; + let slow_block_time = tip.timestamp + config.target_time_per_block + 1000; let slow_block = add_block(&consensus, Some(slow_block_time), vec![tip.hash]).await; let slow_block_bits = slow_block.bits; assert_eq!(slow_block.bits, tip.bits, "The difficulty should change only when slow_block is in the past"); @@ -1074,7 +1344,7 @@ async fn difficulty_test() { // blocks in its past and one without. let split_hash = tip.hash; let mut blue_tip_hash = split_hash; - for _ in 0..params.difficulty_window_size { + for _ in 0..config.difficulty_window_size { blue_tip_hash = add_block(&consensus, None, vec![blue_tip_hash]).await.hash; } @@ -1097,7 +1367,7 @@ async fn difficulty_test() { // out the red blocks from the window, and check that the red blocks don't // affect the difficulty. blue_tip_hash = split_hash; - for _ in 0..params.difficulty_window_size + RED_CHAIN_LEN + 1 { + for _ in 0..config.difficulty_window_size + RED_CHAIN_LEN + 1 { blue_tip_hash = add_block(&consensus, None, vec![blue_tip_hash]).await.hash; } diff --git a/consensus/tests/pipeline_tests.rs b/consensus/tests/pipeline_tests.rs index d2d9cda6e..f065d49af 100644 --- a/consensus/tests/pipeline_tests.rs +++ b/consensus/tests/pipeline_tests.rs @@ -1,4 +1,5 @@ use consensus::{ + config::ConfigBuilder, consensus::test_consensus::{create_temp_db, TestConsensus}, model::stores::reachability::{DbReachabilityStore, StagingReachabilityStore}, params::MAINNET_PARAMS, @@ -80,11 +81,8 @@ fn test_reachability_staging() { #[tokio::test] async fn test_concurrent_pipeline() { let (_temp_db_lifetime, db) = create_temp_db(); - - let mut params = MAINNET_PARAMS.clone_with_skip_pow(); - params.genesis_hash = 1.into(); - - let consensus = TestConsensus::new(db, ¶ms); + let config = ConfigBuilder::new(MAINNET_PARAMS).skip_proof_of_work().edit_consensus_params(|p| p.genesis_hash = 1.into()).build(); + let consensus = TestConsensus::new(db, &config); let wait_handles = consensus.init(); let blocks = vec![ @@ -153,17 +151,14 @@ async fn test_concurrent_pipeline_random() { let mut thread_rng = rand::thread_rng(); let (_temp_db_lifetime, db) = create_temp_db(); - - let mut params = MAINNET_PARAMS.clone_with_skip_pow(); - params.genesis_hash = genesis; - - let consensus = TestConsensus::new(db, ¶ms); + let config = ConfigBuilder::new(MAINNET_PARAMS).skip_proof_of_work().edit_consensus_params(|p| p.genesis_hash = genesis).build(); + let consensus = TestConsensus::new(db, &config); let wait_handles = consensus.init(); let mut tips = vec![genesis]; let mut total = 1000i64; while total > 0 { - let v = min(params.max_block_parents as i64, poi.sample(&mut thread_rng) as i64); + let v = min(config.max_block_parents as i64, poi.sample(&mut thread_rng) as i64); if v == 0 { continue; } diff --git a/consensus/tests/testdata/goref-905-tx-265-blocks.json.gz b/consensus/tests/testdata/dags_for_json_tests/goref-905-tx-265-blocks/blocks.json.gz similarity index 100% rename from consensus/tests/testdata/goref-905-tx-265-blocks.json.gz rename to consensus/tests/testdata/dags_for_json_tests/goref-905-tx-265-blocks/blocks.json.gz diff --git a/consensus/tests/testdata/goref-notx-5000-blocks.json.gz b/consensus/tests/testdata/dags_for_json_tests/goref-notx-5000-blocks/blocks.json.gz similarity index 100% rename from consensus/tests/testdata/goref-notx-5000-blocks.json.gz rename to consensus/tests/testdata/dags_for_json_tests/goref-notx-5000-blocks/blocks.json.gz diff --git a/consensus/tests/testdata/goref_custom_pruning_depth.json.gz 
b/consensus/tests/testdata/dags_for_json_tests/goref_custom_pruning_depth/blocks.json.gz similarity index 100% rename from consensus/tests/testdata/goref_custom_pruning_depth.json.gz rename to consensus/tests/testdata/dags_for_json_tests/goref_custom_pruning_depth/blocks.json.gz diff --git a/core/src/lib.rs b/core/src/lib.rs index c98c0f52b..9321f54f6 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -3,6 +3,7 @@ extern crate self as kaspa_core; pub mod assert; pub mod console; pub mod log; +pub mod panic; cfg_if::cfg_if! { if #[cfg(not(target_arch = "wasm32"))] { diff --git a/core/src/log.rs b/core/src/log.rs index d7d8229f2..c8d7319a1 100644 --- a/core/src/log.rs +++ b/core/src/log.rs @@ -32,6 +32,19 @@ pub fn init_logger(filters: &str) { .init(); } +/// Tries to init the global logger, but does not panic if it was already setup. +/// Should be used for tests. +#[cfg(not(target_arch = "wasm32"))] +pub fn try_init_logger(filters: &str) { + let _ = env_logger::Builder::new() + .format_target(false) + .format_timestamp_secs() + .filter_level(log::LevelFilter::Info) + .parse_default_env() + .parse_filters(filters) + .try_init(); +} + #[cfg(target_arch = "wasm32")] #[macro_export] macro_rules! trace { diff --git a/core/src/panic.rs b/core/src/panic.rs new file mode 100644 index 000000000..9f1fcbed9 --- /dev/null +++ b/core/src/panic.rs @@ -0,0 +1,13 @@ +use std::{panic, process}; + +/// Configures the panic hook to exit the program on every panic +pub fn configure_panic() { + let default_hook = panic::take_hook(); + panic::set_hook(Box::new(move |panic_info| { + // Invoke the default hook and exit the process + default_hook(panic_info); + println!("Exiting..."); + // TODO: setup a wait time and fold the log system properly + process::exit(1); + })); +} diff --git a/kaspad/src/main.rs b/kaspad/src/main.rs index bcc0f8ba3..523a72ea3 100644 --- a/kaspad/src/main.rs +++ b/kaspad/src/main.rs @@ -3,6 +3,7 @@ extern crate core; extern crate hashes; use clap::Parser; +use consensus::config::Config; use consensus::model::stores::DB; use kaspa_core::{core::Core, signals::Signals, task::runtime::AsyncRuntime}; use std::fs; @@ -65,8 +66,12 @@ pub fn main() { // Initialize the logger kaspa_core::log::init_logger(&args.log_level); + // Print package name and version info!("{} v{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION")); + // Configure the panic behavior + kaspa_core::panic::configure_panic(); + // TODO: Refactor all this quick-and-dirty code let app_dir = args .app_dir @@ -84,9 +89,9 @@ pub fn main() { // --- - let params = DEVNET_PARAMS; + let config = Config::new(DEVNET_PARAMS); // TODO: network type let db = Arc::new(DB::open_default(db_dir.to_str().unwrap()).unwrap()); - let consensus = Arc::new(Consensus::new(db, ¶ms)); + let consensus = Arc::new(Consensus::new(db, &config)); let monitor = Arc::new(ConsensusMonitor::new(consensus.processing_counters().clone())); let notification_channel = ConsensusNotificationChannel::default(); diff --git a/p2p/build.rs b/p2p/build.rs index 3c1051aeb..899b25b0a 100644 --- a/p2p/build.rs +++ b/p2p/build.rs @@ -1,14 +1,14 @@ fn main() { - let iface_files = &["messages.proto", "p2p.proto", "rpc.proto"]; + let proto_files = &["./proto/messages.proto", "./proto/p2p.proto", "./proto/rpc.proto"]; let dirs = &["./proto"]; tonic_build::configure() .build_server(true) .build_client(true) - .compile(iface_files, dirs) + .compile(&proto_files[0..1], dirs) .unwrap_or_else(|e| panic!("protobuf compilation failed, error: {e}")); // recompile protobufs only if any 
of the proto files changes.
-    for file in iface_files {
+    for file in proto_files {
         println!("cargo:rerun-if-changed={file}");
     }
 }
diff --git a/rpc/core/Cargo.toml b/rpc/core/Cargo.toml
index 7693c57f2..b5b8461ac 100644
--- a/rpc/core/Cargo.toml
+++ b/rpc/core/Cargo.toml
@@ -18,10 +18,11 @@ serde.workspace = true
 derive_more.workspace = true
 thiserror.workspace = true
 borsh.workspace = true
-async-std.workspace = true
+async-channel.workspace = true
 log.workspace = true
 smallvec.workspace = true
 cfg-if.workspace = true
+futures-util.workspace = true
 async-trait = "0.1.57"
 ahash = "0.8.0"
 futures = { version = "0.3" }
diff --git a/rpc/core/src/api/notifications.rs b/rpc/core/src/api/notifications.rs
index d5b499e03..d07bf1234 100644
--- a/rpc/core/src/api/notifications.rs
+++ b/rpc/core/src/api/notifications.rs
@@ -1,6 +1,6 @@
 use crate::model::message::*;
 use crate::stubs::*;
-use async_std::channel::{Receiver, Sender};
+use async_channel::{Receiver, Sender};
 use borsh::{BorshDeserialize, BorshSchema, BorshSerialize};
 use serde::{Deserialize, Serialize};
 use std::fmt::Display;
diff --git a/rpc/core/src/notify/collector.rs b/rpc/core/src/notify/collector.rs
index f89f54b24..78cbe5daf 100644
--- a/rpc/core/src/notify/collector.rs
+++ b/rpc/core/src/notify/collector.rs
@@ -1,5 +1,4 @@
-use async_std::channel::{Receiver, Sender};
-use async_std::stream::StreamExt;
+use async_channel::{Receiver, Sender};
 use async_trait::async_trait;
 use core::fmt::Debug;
 use futures::{
@@ -7,6 +6,7 @@ use futures::{
     pin_mut, select,
 };
+use futures_util::stream::StreamExt;
 use kaspa_core::trace;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::Arc;
diff --git a/rpc/core/src/notify/error.rs b/rpc/core/src/notify/error.rs
index 31febb287..6b96e4973 100644
--- a/rpc/core/src/notify/error.rs
+++ b/rpc/core/src/notify/error.rs
@@ -1,4 +1,5 @@
 use crate::RpcError;
+use async_channel::{RecvError, SendError, TrySendError};
 use thiserror::Error;
 
 pub type BoxedStdError = Box<(dyn std::error::Error + Sync + std::marker::Send + 'static)>;
@@ -30,20 +31,20 @@ impl From<BoxedStdError> for Error
     }
 }
 
-impl<T> From<async_std::channel::SendError<T>> for Error {
-    fn from(_: async_std::channel::SendError<T>) -> Self {
+impl<T> From<SendError<T>> for Error {
+    fn from(_: SendError<T>) -> Self {
         Error::ChannelSendError
     }
 }
 
-impl<T> From<async_std::channel::TrySendError<T>> for Error {
-    fn from(_: async_std::channel::TrySendError<T>) -> Self {
+impl<T> From<TrySendError<T>> for Error {
+    fn from(_: TrySendError<T>) -> Self {
         Error::ChannelSendError
     }
 }
 
-impl From<async_std::channel::RecvError> for Error {
-    fn from(_: async_std::channel::RecvError) -> Self {
+impl From<RecvError> for Error {
+    fn from(_: RecvError) -> Self {
         Error::ChannelRecvError
     }
 }
diff --git a/rpc/core/src/notify/notifier.rs b/rpc/core/src/notify/notifier.rs
index b324e9b09..86180e0af 100644
--- a/rpc/core/src/notify/notifier.rs
+++ b/rpc/core/src/notify/notifier.rs
@@ -10,7 +10,7 @@ use super::{
 };
 use crate::{api::ops::SubscribeCommand, Notification, NotificationType, RpcResult};
 use ahash::AHashMap;
-use async_std::channel::{Receiver, Sender};
+use async_channel::{Receiver, Sender};
 use async_trait::async_trait;
 use kaspa_core::trace;
 use kaspa_utils::channel::Channel;
diff --git a/rpc/core/src/notify/subscriber.rs b/rpc/core/src/notify/subscriber.rs
index 3484abc68..36490b1f4 100644
--- a/rpc/core/src/notify/subscriber.rs
+++ b/rpc/core/src/notify/subscriber.rs
@@ -1,4 +1,4 @@
-use async_std::channel::{Receiver, Sender};
+use async_channel::{Receiver, Sender};
 use async_trait::async_trait;
 use core::fmt::Debug;
 use kaspa_core::trace;
diff --git a/rpc/core/src/server/collector.rs b/rpc/core/src/server/collector.rs
diff --git a/rpc/core/src/server/collector.rs b/rpc/core/src/server/collector.rs
index 58b027d06..6e9828184 100644
--- a/rpc/core/src/server/collector.rs
+++ b/rpc/core/src/server/collector.rs
@@ -1,5 +1,5 @@
 use crate::notify::collector::CollectorFrom;
-use async_std::channel::{Receiver, Sender};
+use async_channel::{Receiver, Sender};
 use consensus_core::notify::Notification as ConsensusNotification;
 use kaspa_utils::channel::Channel;
 use std::sync::Arc;
diff --git a/rpc/grpc/Cargo.toml b/rpc/grpc/Cargo.toml
index 30b405497..167f84051 100644
--- a/rpc/grpc/Cargo.toml
+++ b/rpc/grpc/Cargo.toml
@@ -12,7 +12,7 @@ rpc-core.workspace = true
 kaspa-utils.workspace = true
 kaspa-core.workspace = true
 faster-hex.workspace = true
-async-std.workspace = true
+async-channel.workspace = true
 log.workspace = true
 async-trait = "0.1.57"
 futures = { version = "0.3" }
diff --git a/simpa/Cargo.toml b/simpa/Cargo.toml
index 80c21843d..fb08e40cb 100644
--- a/simpa/Cargo.toml
+++ b/simpa/Cargo.toml
@@ -19,6 +19,7 @@ futures-util.workspace = true
 rayon.workspace = true
 clap.workspace = true
 indexmap.workspace = true
+log.workspace = true
 futures = "0.3"
 rand_distr = "0.4"
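In the simpa diff just below, note the new `--loglevel` default: `format!("info,{}=trace", env!("CARGO_PKG_NAME"))` is an owned `String` built at runtime, which clap 4 only accepts as a `default_value` when its `string` cargo feature is enabled (with `derive` alone, defaults must be `&'static str`). A minimal sketch of the pattern, with a hypothetical Args struct:

```rust
use clap::Parser;

#[derive(Parser, Debug)]
struct Args {
    /// Log filter; defaults to info globally and trace for this crate.
    // Requires clap's `string` feature, since the default is an owned String.
    #[arg(long = "loglevel", default_value = format!("info,{}=trace", env!("CARGO_PKG_NAME")))]
    log_level: String,
}

fn main() {
    let args = Args::parse();
    // With no CLI override this prints something like "info,my-crate=trace".
    println!("{}", args.log_level);
}
```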
diff --git a/simpa/src/main.rs b/simpa/src/main.rs
index 5f4830fd6..e1de0653f 100644
--- a/simpa/src/main.rs
+++ b/simpa/src/main.rs
@@ -1,5 +1,6 @@
 use clap::Parser;
 use consensus::{
+    config::ConfigBuilder,
     consensus::{
         test_consensus::{create_temp_db, load_existing_db},
         Consensus,
@@ -24,6 +25,7 @@ use consensus_core::{
 use futures::{future::join_all, Future};
 use hashes::Hash;
 use itertools::Itertools;
+use kaspa_core::{info, warn};
 use simulator::network::KaspaNetworkSimulator;
 use std::{collections::VecDeque, mem::size_of, sync::Arc};
 
@@ -57,10 +59,6 @@ struct Args {
     #[arg(short = 'n', long)]
     target_blocks: Option<u64>,
 
-    /// Avoid verbose simulation information
-    #[arg(short, long, default_value_t = false)]
-    quiet: bool,
-
     /// Number of pool-thread threads used by the header and body processors.
     /// Defaults to the number of logical CPU cores.
     #[arg(short, long)]
@@ -73,7 +71,7 @@
 
     /// Logging level for all subsystems {off, error, warn, info, debug, trace}
     /// -- You may also specify <subsystem>=<level>,<subsystem>=<level>,... to set the log level for individual subsystems
-    #[arg(long = "loglevel", default_value = "info")]
+    #[arg(long = "loglevel", default_value = format!("info,{}=trace", env!("CARGO_PKG_NAME")))]
     log_level: String,
 
     /// Output directory to save the simulation DB
@@ -105,39 +103,50 @@ fn calculate_ghostdag_k(x: f64, delta: f64) -> u64 {
 }
 
 fn main() {
+    // Get CLI arguments
     let args = Args::parse();
+
+    // Initialize the logger
     kaspa_core::log::init_logger(&args.log_level);
+
+    // Print package name and version
+    info!("{} v{}", env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION"));
+
+    // Configure the panic behavior
+    kaspa_core::panic::configure_panic();
+
     assert!(args.bps * args.delay < 250.0, "The delay times bps product is larger than 250");
     if args.miners > 1 {
-        println!(
+        warn!(
             "Warning: number of miners was configured to {}. Currently each miner added doubles the simulation memory and runtime footprint, while a single miner is sufficient for most simulation purposes (delay is simulated anyway).",
             args.miners
         );
     }
-    let mut params = DEVNET_PARAMS.clone_with_skip_pow();
+    let mut params = DEVNET_PARAMS;
     let mut perf_params = PERF_PARAMS;
     adjust_consensus_params(&args, &mut params);
     adjust_perf_params(&args, &params, &mut perf_params);
+    let config = ConfigBuilder::new(params).set_perf_params(perf_params).skip_proof_of_work().build();
 
     // Load an existing consensus or run the simulation
     let (consensus, _lifetime) = if let Some(input_dir) = args.input_dir {
         let (lifetime, db) = load_existing_db(input_dir);
-        let consensus = Arc::new(Consensus::with_perf_params(db, &params, &perf_params));
+        let consensus = Arc::new(Consensus::new(db, &config));
         (consensus, lifetime)
     } else {
         let until = if args.target_blocks.is_none() { args.sim_time * 1000 } else { u64::MAX }; // milliseconds
-        let mut sim = KaspaNetworkSimulator::new(args.delay, args.bps, args.target_blocks, &params, &perf_params, args.output_dir);
-        let (consensus, handles, lifetime) = sim.init(args.miners, args.tpb, !args.quiet).run(until);
+        let mut sim = KaspaNetworkSimulator::new(args.delay, args.bps, args.target_blocks, &config, args.output_dir);
+        let (consensus, handles, lifetime) = sim.init(args.miners, args.tpb).run(until);
         consensus.shutdown(handles);
         (consensus, lifetime)
     };
 
     // Benchmark the DAG validation time
     let (_lifetime2, db2) = create_temp_db();
-    let consensus2 = Arc::new(Consensus::with_perf_params(db2, &params, &perf_params));
+    let consensus2 = Arc::new(Consensus::new(db2, &config));
     let handles2 = consensus2.init();
-    validate(&consensus, &consensus2, &params, args.delay, args.bps);
+    validate(&consensus, &consensus2, &config, args.delay, args.bps);
     consensus2.shutdown(handles2);
     drop(consensus);
 }
@@ -154,7 +163,7 @@ fn adjust_consensus_params(args: &Args, params: &mut Params) {
         params.coinbase_maturity = (params.coinbase_maturity as f64 * f64::max(1.0, args.bps * args.delay * 0.25)) as u64;
         params.difficulty_window_size = (params.difficulty_window_size as f64 * args.bps) as usize; // Scale the DAA window linearly with BPS
-        println!(
+        info!(
             "The delay times bps product is larger than 2 (2Dλ={}), setting GHOSTDAG K={}, DAA window size={}",
             2.0 * args.delay * args.bps,
             k,
@@ -191,7 +200,7 @@ async fn validate(src_consensus: &Consensus, dst_consensus: &Consensus, params: &Params, delay: f64, bps: f64) {
     let hashes = topologically_ordered_hashes(src_consensus, params.genesis_hash);
     let num_blocks = hashes.len();
     let num_txs = print_stats(src_consensus, &hashes, delay, bps, params.ghostdag_k);
-    println!("Validating {num_blocks} blocks with {num_txs} transactions overall...");
+    info!("Validating {num_blocks} blocks with {num_txs} transactions overall...");
     let start = std::time::Instant::now();
     let chunks = hashes.into_iter().chunks(1000);
     let mut iter = chunks.into_iter();
@@ -211,7 +220,7 @@ async fn validate(src_consensus: &Consensus, dst_consensus: &Consensus, params: &Params, delay: f64, bps: f64) {
     // Assert that at least one body tip was resolved with valid UTXO
     assert!(dst_consensus.body_tips().iter().copied().any(|h| dst_consensus.block_status(h) == BlockStatus::StatusUTXOValid));
     let elapsed = start.elapsed();
-    println!(
+    info!(
         "Total validation time: {:?}, block processing rate: {:.2} (b/s), transaction processing rate: {:.2} (t/s)",
         elapsed,
         num_blocks as f64 / elapsed.as_secs_f64(),
@@ -230,7 +239,7 @@ fn submit_chunk(
             src_consensus.headers_store.get_header(hash).unwrap(),
             src_consensus.block_transactions_store.get(hash).unwrap(),
         );
-        let f = dst_consensus.validate_and_insert_block(block);
+        let f = dst_consensus.validate_and_insert_block(block, true);
         futures.push(f);
     }
     futures
@@ -240,9 +249,9 @@ fn topologically_ordered_hashes(src_consensus: &Consensus, genesis_hash: Hash) -> Vec<Hash> {
     let mut queue: VecDeque<Hash> = std::iter::once(genesis_hash).collect();
     let mut visited = BlockHashSet::new();
     let mut vec = Vec::new();
-    let relations = src_consensus.relations_store.read();
+    let relations = src_consensus.relations_stores.read();
     while let Some(current) = queue.pop_front() {
-        for child in relations.get_children(current).unwrap().iter() {
+        for child in relations[0].get_children(current).unwrap().iter() {
             if visited.insert(*child) {
                 queue.push_back(*child);
                 vec.push(*child);
@@ -265,7 +274,7 @@ fn print_stats(src_consensus: &Consensus, hashes: &[Hash], delay: f64, bps: f64, k: KType) -> usize {
         / hashes.len() as f64;
     let num_txs = hashes.iter().map(|&h| src_consensus.block_transactions_store.get(h).unwrap().len()).sum::<usize>();
     let txs_mean = num_txs as f64 / hashes.len() as f64;
-    println!("[DELAY={delay}, BPS={bps}, GHOSTDAG K={k}]");
-    println!("[Average stats of generated DAG] blues: {blues_mean}, reds: {reds_mean}, parents: {parents_mean}, txs: {txs_mean}");
+    info!("[DELAY={delay}, BPS={bps}, GHOSTDAG K={k}]");
+    info!("[Average stats of generated DAG] blues: {blues_mean}, reds: {reds_mean}, parents: {parents_mean}, txs: {txs_mean}");
     num_txs
 }
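The main() rework above replaces the `(&params, &perf_params)` pair threaded through every constructor with a single `Config` built once via `ConfigBuilder`, so flags like skipping proof-of-work become builder calls instead of special-cased `Params` clones (`clone_with_skip_pow`). A stripped-down sketch of that builder shape, using simplified stand-in types (the real ones live in `consensus::config` and carry many more fields):

```rust
#[derive(Clone, Default)]
struct Params {
    skip_proof_of_work: bool,
}

#[derive(Clone, Default)]
struct PerfParams {
    pool_threads: usize,
}

#[derive(Clone)]
struct Config {
    params: Params,
    perf: PerfParams,
}

struct ConfigBuilder {
    config: Config,
}

impl ConfigBuilder {
    fn new(params: Params) -> Self {
        Self { config: Config { params, perf: PerfParams::default() } }
    }

    // Each setter consumes and returns the builder, enabling one-line chains.
    fn set_perf_params(mut self, perf: PerfParams) -> Self {
        self.config.perf = perf;
        self
    }

    fn skip_proof_of_work(mut self) -> Self {
        self.config.params.skip_proof_of_work = true;
        self
    }

    fn build(self) -> Config {
        self.config
    }
}

fn main() {
    let config = ConfigBuilder::new(Params::default()).set_perf_params(PerfParams::default()).skip_proof_of_work().build();
    assert!(config.params.skip_proof_of_work);
}
```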
diff --git a/simpa/src/simulator/miner.rs b/simpa/src/simulator/miner.rs
index 90fcad146..345492ace 100644
--- a/simpa/src/simulator/miner.rs
+++ b/simpa/src/simulator/miner.rs
@@ -14,6 +14,7 @@ use consensus_core::tx::{
 use consensus_core::utxo::utxo_view::UtxoView;
 use futures::future::join_all;
 use indexmap::IndexSet;
+use kaspa_core::trace;
 use rand::rngs::ThreadRng;
 use rand::Rng;
 use rand_distr::{Distribution, Exp};
@@ -53,7 +54,6 @@ pub struct Miner {
     target_txs_per_block: u64,
     target_blocks: Option<u64>,
     max_cached_outpoints: usize,
-    verbose: bool,
 }
 
 impl Miner {
@@ -67,7 +67,6 @@ impl Miner {
         params: &Params,
         target_txs_per_block: u64,
         target_blocks: Option<u64>,
-        verbose: bool,
     ) -> Self {
         Self {
             id,
@@ -84,7 +83,6 @@ impl Miner {
             target_txs_per_block,
             target_blocks,
             max_cached_outpoints: 100_000,
-            verbose,
         }
     }
 
@@ -192,7 +190,7 @@ impl Miner {
         if self.report_progress(env) {
             Suspension::Halt
         } else {
-            self.futures.push(Box::pin(self.consensus.validate_and_insert_block(block)));
+            self.futures.push(Box::pin(self.consensus.validate_and_insert_block(block, true)));
             Suspension::Idle
         }
     }
@@ -204,11 +202,11 @@ impl Miner {
                 return true; // Exit
             }
         }
-        if !self.verbose {
+        if self.id != 0 {
             return false;
         }
         if self.num_blocks % 50 == 0 || self.sim_time / 5000 != env.now() / 5000 {
-            println!("Simulation time: {}.\tGenerated {} blocks.", env.now() as f64 / 1000.0, self.num_blocks);
+            trace!("Simulation time: {}\tGenerated {} blocks", env.now() as f64 / 1000.0, self.num_blocks);
         }
         self.sim_time = env.now();
         false
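With the `verbose` flag gone, progress reporting in the miner above is gated on `self.id != 0` (only the first miner reports, matching the old `verbose && i == 0` wiring) and throttled: a line is emitted every 50 blocks, or whenever simulated time crosses into a new 5-second bucket. The bucket arithmetic is easy to misread, so here is the predicate in isolation (the function name is mine, not the patch's):

```rust
/// Fire every 50 blocks, or whenever simulated time (in ms) has entered a
/// new 5000 ms bucket since the last report.
fn should_report(num_blocks: u64, last_report_time_ms: u64, now_ms: u64) -> bool {
    num_blocks % 50 == 0 || last_report_time_ms / 5000 != now_ms / 5000
}

fn main() {
    assert!(should_report(100, 0, 0)); // block-count trigger
    assert!(should_report(7, 4_999, 5_001)); // crossed the 5 s boundary
    assert!(!should_report(7, 5_100, 5_900)); // same bucket, off-count
}
```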
diff --git a/simpa/src/simulator/network.rs b/simpa/src/simulator/network.rs
index 2b2ca06a4..caeee9b61 100644
--- a/simpa/src/simulator/network.rs
+++ b/simpa/src/simulator/network.rs
@@ -4,10 +4,9 @@ use std::thread::JoinHandle;
 
 use super::infra::Simulation;
 use super::miner::Miner;
+use consensus::config::Config;
 use consensus::consensus::test_consensus::{create_permanent_db, create_temp_db, TempDbLifetime};
 use consensus::consensus::Consensus;
-use consensus::constants::perf::PerfParams;
-use consensus::params::Params;
 use consensus_core::block::Block;
 
 type ConsensusWrapper = (Arc<Consensus>, Vec<JoinHandle<()>>, TempDbLifetime);
@@ -19,34 +18,25 @@ pub struct KaspaNetworkSimulator {
 
     // Consensus instances
     consensuses: Vec<ConsensusWrapper>,
-    params: Params,             // Consensus params
-    perf_params: PerfParams,    // Performance params
+    config: Config,             // Consensus config
     bps: f64,                   // Blocks per second
     target_blocks: Option<u64>, // Target simulation blocks
     output_dir: Option<String>, // Possible permanent output directory
 }
 
 impl KaspaNetworkSimulator {
-    pub fn new(
-        delay: f64,
-        bps: f64,
-        target_blocks: Option<u64>,
-        params: &Params,
-        perf_params: &PerfParams,
-        output_dir: Option<String>,
-    ) -> Self {
+    pub fn new(delay: f64, bps: f64, target_blocks: Option<u64>, config: &Config, output_dir: Option<String>) -> Self {
         Self {
             simulation: Simulation::new((delay * 1000.0) as u64),
             consensuses: Vec::new(),
             bps,
-            params: params.clone(),
-            perf_params: perf_params.clone(),
+            config: config.clone(),
             target_blocks,
             output_dir,
         }
     }
 
-    pub fn init(&mut self, num_miners: u64, target_txs_per_block: u64, verbose: bool) -> &mut Self {
+    pub fn init(&mut self, num_miners: u64, target_txs_per_block: u64) -> &mut Self {
         let secp = secp256k1::Secp256k1::new();
         let mut rng = rand::thread_rng();
         for i in 0..num_miners {
@@ -55,7 +45,7 @@ impl KaspaNetworkSimulator {
             } else {
                 create_temp_db()
             };
-            let consensus = Arc::new(Consensus::with_perf_params(db, &self.params, &self.perf_params));
+            let consensus = Arc::new(Consensus::new(db, &self.config));
             let handles = consensus.init();
             let (sk, pk) = secp.generate_keypair(&mut rng);
             let miner_process = Box::new(Miner::new(
@@ -65,10 +55,9 @@ impl KaspaNetworkSimulator {
                 sk,
                 pk,
                 consensus.clone(),
-                &self.params,
+                &self.config,
                 target_txs_per_block,
                 self.target_blocks,
-                verbose && i == 0,
             ));
             self.simulation.register(i, miner_process);
             self.consensuses.push((consensus, handles, lifetime));
diff --git a/utils/Cargo.toml b/utils/Cargo.toml
index 8366a22ff..afac2b77d 100644
--- a/utils/Cargo.toml
+++ b/utils/Cargo.toml
@@ -7,5 +7,5 @@ include.workspace = true
 license.workspace = true
 
 [dependencies]
-async-std.workspace = true
+async-channel.workspace = true
 triggered = "0.1"
diff --git a/utils/src/binary_heap.rs b/utils/src/binary_heap.rs
new file mode 100644
index 000000000..2d9a8d71d
--- /dev/null
+++ b/utils/src/binary_heap.rs
@@ -0,0 +1,23 @@
+use std::collections::BinaryHeap;
+
+pub trait BinaryHeapExtensions<T> {
+    fn into_sorted_iter(self) -> BinaryHeapIntoSortedIter<T>;
+}
+
+pub struct BinaryHeapIntoSortedIter<T> {
+    binary_heap: BinaryHeap<T>,
+}
+
+impl<T: Ord> Iterator for BinaryHeapIntoSortedIter<T> {
+    type Item = T;
+
+    fn next(&mut self) -> Option<T> {
+        self.binary_heap.pop()
+    }
+}
+
+impl<T: Ord> BinaryHeapExtensions<T> for BinaryHeap<T> {
+    fn into_sorted_iter(self) -> BinaryHeapIntoSortedIter<T> {
+        BinaryHeapIntoSortedIter { binary_heap: self }
+    }
+}
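The new `into_sorted_iter` extension above is a lazy alternative to `BinaryHeap::into_sorted_vec`: each `next()` is a single `pop()`, so taking the top-k items costs k pops instead of a full drain. Since `BinaryHeap` is a max-heap, the order is descending; wrap items in `Reverse` for ascending. A usage sketch (assumes the kaspa-utils crate is in scope; the trait is the one added above, generics reconstructed):

```rust
use kaspa_utils::binary_heap::BinaryHeapExtensions;
use std::cmp::Reverse;
use std::collections::BinaryHeap;

fn main() {
    // Max-heap, so the iterator yields descending order.
    let heap: BinaryHeap<u32> = [3, 1, 2].into_iter().collect();
    assert_eq!(heap.into_sorted_iter().collect::<Vec<_>>(), vec![3, 2, 1]);

    // Laziness pays off for top-k: only three pops on a 1000-element heap.
    let heap: BinaryHeap<u32> = (0..1000).collect();
    assert_eq!(heap.into_sorted_iter().take(3).collect::<Vec<_>>(), vec![999, 998, 997]);

    // Ascending order via Reverse.
    let heap: BinaryHeap<Reverse<u32>> = [3, 1, 2].into_iter().map(Reverse).collect();
    assert_eq!(heap.into_sorted_iter().map(|Reverse(x)| x).collect::<Vec<_>>(), vec![1, 2, 3]);
}
```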
diff --git a/utils/src/channel.rs b/utils/src/channel.rs
index e60dd0981..fe75fddfc 100644
--- a/utils/src/channel.rs
+++ b/utils/src/channel.rs
@@ -1,4 +1,4 @@
-use async_std::channel::{unbounded, Receiver, Sender};
+use async_channel::{unbounded, Receiver, Sender};
 
 /// Multiple producers multiple consumers channel
 #[derive(Clone, Debug)]
diff --git a/utils/src/lib.rs b/utils/src/lib.rs
index 0498a229a..3c7fca788 100644
--- a/utils/src/lib.rs
+++ b/utils/src/lib.rs
@@ -1,4 +1,5 @@
 pub mod arc;
+pub mod binary_heap;
 pub mod channel;
 pub mod option;
 pub mod refs;
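Finally, `kaspa_utils::channel::Channel`, the wrapper the notifier imports for its sender/receiver pairs, only needed its import line touched, since async-channel is a drop-in for async-std's channel module here. As a rough sketch of how such a wrapper composes with async-channel (the field names and helper methods below are illustrative, not the crate's actual API):

```rust
use async_channel::{unbounded, Receiver, Sender};

/// Multiple producers multiple consumers channel (illustrative shape).
#[derive(Clone, Debug)]
pub struct Channel<T> {
    sender: Sender<T>,
    receiver: Receiver<T>,
}

impl<T> Default for Channel<T> {
    fn default() -> Self {
        let (sender, receiver) = unbounded();
        Self { sender, receiver }
    }
}

impl<T> Channel<T> {
    pub fn sender(&self) -> Sender<T> {
        self.sender.clone()
    }

    pub fn receiver(&self) -> Receiver<T> {
        self.receiver.clone()
    }
}

fn main() {
    let channel: Channel<u32> = Channel::default();
    let (tx, rx) = (channel.sender(), channel.receiver());
    futures::executor::block_on(async move {
        tx.send(42).await.unwrap();
        // Each message goes to exactly one receiver (MPMC, not broadcast).
        assert_eq!(rx.recv().await.unwrap(), 42);
    });
}
```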