Virtual processor part 2 -- virtual parents selection rules (kaspanet#89)

* Minor

* Refactor virtual parents pick to prep for actual rule algos

* Lazy load kosherizing blocks and group reachability queries

* Implement mergeset size limit rule (+remove redundant arcs from services)

* Remove bounded merge breaking parents

* Use unwrap_option over reachability result

* Minor

* Add input/output options to simpa

* Avoid recomputing ghostdag data if possible

* New lint error (comes from new compiler version)

* Filter the selected parent early + bug fix in swap indexing
michaelsutton authored Dec 20, 2022
1 parent d43974f commit 701a3e3
Showing 12 changed files with 265 additions and 88 deletions.
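Only part of the changed file set is rendered below; the virtual-parents selection rules named in the title live in files that are not shown. As a rough illustration only, a greedy rule of the kind the "Implement mergeset size limit rule" bullet describes might look like the following sketch; every name here, including the mergeset_size_of helper, is hypothetical and not the actual kaspanet code.

    type Hash = u64; // stand-in for the real hash type

    /// Hypothetical sketch of a mergeset-size-limit rule for virtual parent
    /// selection: candidates are admitted greedily, and a candidate is dropped
    /// again if including it pushes the virtual block's mergeset past the limit.
    fn pick_virtual_parents(
        candidates: impl IntoIterator<Item = Hash>,
        max_parents: usize,
        mergeset_size_limit: u64,
        mergeset_size_of: impl Fn(&[Hash]) -> u64, // assumed helper over ghostdag data
    ) -> Vec<Hash> {
        let mut parents: Vec<Hash> = Vec::new();
        for candidate in candidates {
            if parents.len() == max_parents {
                break;
            }
            parents.push(candidate);
            if mergeset_size_of(&parents) > mergeset_size_limit {
                parents.pop(); // admitting this candidate would violate the limit
            }
        }
        parents
    }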
consensus/src/consensus/mod.rs (7 additions & 5 deletions)
@@ -95,8 +95,8 @@ pub struct Consensus {
     pub ghostdag_store: Arc<DbGhostdagStore>,

     // Services and managers
-    statuses_service: Arc<MTStatusesService<DbStatusesStore>>,
-    relations_service: Arc<MTRelationsService<DbRelationsStore>>,
+    statuses_service: MTStatusesService<DbStatusesStore>,
+    relations_service: MTRelationsService<DbRelationsStore>,
     reachability_service: MTReachabilityService<DbReachabilityStore>,
     pub(super) difficulty_manager: DifficultyManager<DbHeadersStore>,
     pub(super) dag_traversal_manager: DagTraversalManager<DbGhostdagStore, BlockWindowCacheStore>,
@@ -154,8 +154,8 @@ impl Consensus {
         // Services and managers
         //

-        let statuses_service = Arc::new(MTStatusesService::new(statuses_store.clone()));
-        let relations_service = Arc::new(MTRelationsService::new(relations_store.clone()));
+        let statuses_service = MTStatusesService::new(statuses_store.clone());
+        let relations_service = MTRelationsService::new(relations_store.clone());
         let reachability_service = MTReachabilityService::new(reachability_store.clone());
         let dag_traversal_manager = DagTraversalManager::new(
             params.genesis_hash,
@@ -287,7 +287,7 @@ impl Consensus {
             past_median_time_manager.clone(),
             dag_traversal_manager.clone(),
             difficulty_manager.clone(),
-            depth_manager,
+            depth_manager.clone(),
             pruning_manager.clone(),
             parents_manager.clone(),
             counters.clone(),
@@ -332,13 +332,15 @@ impl Consensus {
             virtual_state_store,
             ghostdag_manager.clone(),
             reachability_service.clone(),
+            relations_service.clone(),
             dag_traversal_manager.clone(),
             difficulty_manager.clone(),
             coinbase_manager.clone(),
             transaction_validator,
             past_median_time_manager.clone(),
             pruning_manager.clone(),
             parents_manager,
+            depth_manager,
         ));

         Self {
consensus/src/consensus/test_consensus.rs (24 additions & 2 deletions)
@@ -1,5 +1,6 @@
 use std::{
     env, fs,
+    path::PathBuf,
     sync::{Arc, Weak},
     thread::JoinHandle,
 };
@@ -195,6 +196,12 @@ impl TempDbLifetime {
     pub fn new(tempdir: tempfile::TempDir, weak_db_ref: Weak<DB>) -> Self {
         Self { tempdir: Some(tempdir), weak_db_ref }
     }
+
+    /// Tracks the DB reference and makes sure all strong refs are cleaned up
+    /// but does not remove the DB from disk when dropped.
+    pub fn without_destroy(weak_db_ref: Weak<DB>) -> Self {
+        Self { tempdir: None, weak_db_ref }
+    }
 }

 impl Drop for TempDbLifetime {
@@ -223,10 +230,25 @@ pub fn create_temp_db() -> (TempDbLifetime, Arc<DB>) {
     let global_tempdir = env::temp_dir();
     let kaspa_tempdir = global_tempdir.join("kaspa-rust");
     fs::create_dir_all(kaspa_tempdir.as_path()).unwrap();

     let db_tempdir = tempfile::tempdir_in(kaspa_tempdir.as_path()).unwrap();
     let db_path = db_tempdir.path().to_owned();

     let db = Arc::new(DB::open_default(db_path.to_str().unwrap()).unwrap());
     (TempDbLifetime::new(db_tempdir, Arc::downgrade(&db)), db)
 }
+
+/// Creates a DB within the provided directory path.
+/// Callers must keep the `TempDbLifetime` guard for as long as they wish the DB instance to exist.
+pub fn create_permanent_db(db_path: String) -> (TempDbLifetime, Arc<DB>) {
+    let db_dir = PathBuf::from(db_path);
+    fs::create_dir(db_dir.as_path()).unwrap();
+    let db = Arc::new(DB::open_default(db_dir.to_str().unwrap()).unwrap());
+    (TempDbLifetime::without_destroy(Arc::downgrade(&db)), db)
+}
+
+/// Loads an existing DB from the provided directory path.
+/// Callers must keep the `TempDbLifetime` guard for as long as they wish the DB instance to exist.
+pub fn load_existing_db(db_path: String) -> (TempDbLifetime, Arc<DB>) {
+    let db_dir = PathBuf::from(db_path);
+    let db = Arc::new(DB::open_default(db_dir.to_str().unwrap()).unwrap());
+    (TempDbLifetime::without_destroy(Arc::downgrade(&db)), db)
+}
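The two helpers above presumably back the new simpa input/output options: a simulation can write its DAG to a permanent DB and a later run can reload it. A brief usage sketch, where the import path and directory are assumptions for illustration:

    // Assumed import path; adjust to the actual crate layout.
    use consensus::consensus::test_consensus::{create_permanent_db, load_existing_db};

    fn demo() {
        // First run: create a DB whose files survive the lifetime guard.
        let (lifetime, db) = create_permanent_db("/tmp/kaspa-sim-db".to_string());
        // ... populate `db` ...
        drop(db);
        drop(lifetime); // verifies all strong refs are gone; files stay on disk

        // A later run can reopen the same on-disk data.
        let (_lifetime, _db) = load_existing_db("/tmp/kaspa-sim-db".to_string());
    }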
consensus/src/model/services/statuses.rs (1 addition)
@@ -5,6 +5,7 @@ use parking_lot::RwLock;
 use std::sync::Arc;

 /// Multi-threaded block-statuses service imp
+#[derive(Clone)]
 pub struct MTStatusesService<T: StatusesStoreReader> {
     store: Arc<RwLock<T>>,
 }
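This derive is what makes the Arc removals in mod.rs and processor.rs possible: the service already keeps its store behind an Arc, so cloning the service is just a reference-count bump and an outer Arc adds nothing. A minimal self-contained illustration of the pattern (assuming the parking_lot crate):

    use parking_lot::RwLock;
    use std::sync::Arc;

    #[derive(Clone)]
    struct Service<T> {
        store: Arc<RwLock<T>>, // shared state lives behind the inner Arc
    }

    fn main() {
        let svc = Service { store: Arc::new(RwLock::new(0u32)) };
        let svc2 = svc.clone(); // cheap: clones the Arc, not the store
        *svc.store.write() += 1;
        assert_eq!(*svc2.store.read(), 1); // both clones see the same store
    }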
consensus/src/pipeline/header_processor/post_pow_validation.rs (7 additions & 5 deletions)
@@ -103,15 +103,17 @@ impl HeaderProcessor {
        let gd_data = ctx.ghostdag_data.as_ref().unwrap();
        let merge_depth_root = self.depth_manager.calc_merge_depth_root(gd_data, ctx.pruning_point());
        let finality_point = self.depth_manager.calc_finality_point(gd_data, ctx.pruning_point());
-       let non_bounded_merge_depth_violating_blues: Vec<Hash> =
-           self.depth_manager.non_bounded_merge_depth_violating_blues(gd_data, merge_depth_root).collect();
+       let mut kosherizing_blues: Option<Vec<Hash>> = None;

-       for red in gd_data.mergeset_reds.iter().cloned() {
+       for red in gd_data.mergeset_reds.iter().copied() {
            if self.reachability_service.is_dag_ancestor_of(merge_depth_root, red) {
                continue;
            }

-           if !non_bounded_merge_depth_violating_blues.iter().any(|blue| self.reachability_service.is_dag_ancestor_of(red, *blue)) {
+           // Lazy load the kosherizing blocks since this case is extremely rare
+           if kosherizing_blues.is_none() {
+               kosherizing_blues = Some(self.depth_manager.kosherizing_blues(gd_data, merge_depth_root).collect());
+           }
+           if !self.reachability_service.is_dag_ancestor_of_any(red, &mut kosherizing_blues.as_ref().unwrap().iter().copied()) {
                return Err(RuleError::ViolatingBoundedMergeDepth);
            }
        }
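Two things change in this hunk: the kosherizing set is materialized only on the rare path where a red block is not covered by the merge depth root, and the per-blue reachability checks are folded into a single grouped is_dag_ancestor_of_any call. The lazy-load part is a standard Option pattern; a generic sketch with hypothetical names:

    /// Generic form of the lazy-load pattern above: `collect_blues` runs at most
    /// once, and only if some item actually reaches the rare branch.
    fn first_violation(
        reds: &[u64],
        needs_check: impl Fn(u64) -> bool,
        collect_blues: impl Fn() -> Vec<u64>,
    ) -> Option<u64> {
        let mut blues: Option<Vec<u64>> = None;
        for &red in reds {
            if !needs_check(red) {
                continue;
            }
            // Computed on first use, reused on every later iteration.
            let blues = blues.get_or_insert_with(&collect_blues);
            if !blues.contains(&red) {
                return Some(red); // a violating red with no kosherizing blue
            }
        }
        None
    }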
consensus/src/pipeline/header_processor/processor.rs (2 additions & 2 deletions)
@@ -175,7 +175,7 @@ impl HeaderProcessor {
        block_window_cache_for_difficulty: Arc<BlockWindowCacheStore>,
        block_window_cache_for_past_median_time: Arc<BlockWindowCacheStore>,
        reachability_service: MTReachabilityService<DbReachabilityStore>,
-       relations_service: Arc<MTRelationsService<DbRelationsStore>>,
+       relations_service: MTRelationsService<DbRelationsStore>,
        past_median_time_manager: PastMedianTimeManager<DbHeadersStore, DbGhostdagStore, BlockWindowCacheStore>,
        dag_traversal_manager: DagTraversalManager<DbGhostdagStore, BlockWindowCacheStore>,
        difficulty_manager: DifficultyManager<DbHeadersStore>,
@@ -391,7 +391,7 @@ impl HeaderProcessor {
        header.timestamp = self.genesis_timestamp;
        let header = Arc::new(header);
        let mut ctx = HeaderProcessingContext::new(self.genesis_hash, &header, PruningPointInfo::from_genesis(self.genesis_hash));
-       ctx.ghostdag_data = Some(self.ghostdag_manager.genesis_ghostdag_data());
+       ctx.ghostdag_data = Some(Arc::new(self.ghostdag_manager.genesis_ghostdag_data()));
        ctx.block_window_for_difficulty = Some(Default::default());
        ctx.block_window_for_past_median_time = Some(Default::default());
        ctx.mergeset_non_daa = Some(Default::default());
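The Arc around the genesis ghostdag data matches the context field's new type, and plausibly relates to the "Avoid recomputing ghostdag data if possible" bullet: holding Arc<GhostdagData> lets an entry that already exists in the store be reused as a cheap handle instead of being recomputed or deep-copied. A minimal sketch of that reuse path, with hypothetical types:

    use std::sync::Arc;

    struct GhostdagData { /* blue score, mergeset, selected parent, ... */ }

    /// Reuse stored ghostdag data when present; compute only on a miss.
    /// Either way the caller ends up with a cheap Arc handle.
    fn ghostdag_data_for(
        stored: Option<Arc<GhostdagData>>,
        compute: impl FnOnce() -> GhostdagData,
    ) -> Arc<GhostdagData> {
        match stored {
            Some(existing) => existing, // no recomputation, just an Arc clone
            None => Arc::new(compute()),
        }
    }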