diff --git a/crates/re_arrow_store/benches/data_store.rs b/crates/re_arrow_store/benches/data_store.rs
index 779b4e3bd8834..6e9aeda7a922c 100644
--- a/crates/re_arrow_store/benches/data_store.rs
+++ b/crates/re_arrow_store/benches/data_store.rs
@@ -5,8 +5,8 @@ use arrow2::array::UnionArray;
 use criterion::{criterion_group, criterion_main, Criterion};
 
 use re_arrow_store::{
-    DataStore, DataStoreConfig, DataStoreStats, GarbageCollectionTarget, LatestAtQuery, RangeQuery,
-    TimeInt, TimeRange,
+    DataStore, DataStoreConfig, GarbageCollectionTarget, LatestAtQuery, RangeQuery, TimeInt,
+    TimeRange,
 };
 use re_log_types::{
     component_types::{InstanceKey, Rect2D},
@@ -262,66 +262,44 @@ fn range(c: &mut Criterion) {
 }
 
 fn gc(c: &mut Criterion) {
-    for &packed in packed() {
-        let mut group = c.benchmark_group(format!(
-            "datastore/num_rows={NUM_ROWS}/num_instances={NUM_INSTANCES}/packed={packed}/gc"
-        ));
-        group.throughput(criterion::Throughput::Elements(
-            (NUM_INSTANCES * NUM_ROWS) as _,
-        ));
-
-        let mut table = build_table(NUM_INSTANCES as usize, packed);
-        table.compute_all_size_bytes();
-
-        let bench = |store: &mut DataStore| {
-            let stats = DataStoreStats::from_store(store);
+    let mut group = c.benchmark_group(format!(
+        "datastore/num_rows={NUM_ROWS}/num_instances={NUM_INSTANCES}/gc"
+    ));
+    group.throughput(criterion::Throughput::Elements(
+        (NUM_INSTANCES * NUM_ROWS) as _,
+    ));
+
+    let mut table = build_table(NUM_INSTANCES as usize, false);
+    table.compute_all_size_bytes();
+
+    // Default config
+    group.bench_function("default", |b| {
+        let store = insert_table(Default::default(), InstanceKey::name(), &table);
+        b.iter(|| {
+            let mut store = store.clone();
             let (_, stats_diff) =
                 store.gc(GarbageCollectionTarget::DropAtLeastFraction(1.0 / 3.0));
-
-            // NOTE: only temporal data and row metadata get purged!
-            let num_bytes_dropped =
-                (stats_diff.temporal.num_bytes + stats_diff.metadata_registry.num_bytes) as f64;
-            let num_bytes_dropped_expected_min =
-                (stats.temporal.num_bytes + stats.metadata_registry.num_bytes) as f64 * 0.95 / 3.0;
-            let num_bytes_dropped_expected_max =
-                (stats.temporal.num_bytes + stats.metadata_registry.num_bytes) as f64 * 1.05 / 3.0;
-            assert!(
-                num_bytes_dropped_expected_min <= num_bytes_dropped
-                    && num_bytes_dropped <= num_bytes_dropped_expected_max,
-                "{} <= {} <= {}",
-                re_format::format_bytes(num_bytes_dropped_expected_min),
-                re_format::format_bytes(num_bytes_dropped),
-                re_format::format_bytes(num_bytes_dropped_expected_max),
-            );
-            stats_diff
-        };
+        });
+    });
 
-        // Default config
-        group.bench_function("default", |b| {
-            let store = insert_table(Default::default(), InstanceKey::name(), &table);
+    // Emulate more or less bucket
+    for &num_rows_per_bucket in num_rows_per_bucket() {
+        group.bench_function(format!("bucketsz={num_rows_per_bucket}"), |b| {
+            let store = insert_table(
+                DataStoreConfig {
+                    indexed_bucket_num_rows: num_rows_per_bucket,
+                    ..Default::default()
+                },
+                InstanceKey::name(),
+                &table,
+            );
             b.iter(|| {
                 let mut store = store.clone();
-                bench(&mut store)
+                let (_, stats_diff) =
+                    store.gc(GarbageCollectionTarget::DropAtLeastFraction(1.0 / 3.0));
+                stats_diff
             });
         });
-
-        // Emulate more or less bucket
-        for &num_rows_per_bucket in num_rows_per_bucket() {
-            group.bench_function(format!("bucketsz={num_rows_per_bucket}"), |b| {
-                let store = insert_table(
-                    DataStoreConfig {
-                        indexed_bucket_num_rows: num_rows_per_bucket,
-                        ..Default::default()
-                    },
-                    InstanceKey::name(),
-                    &table,
-                );
-                b.iter(|| {
-                    let mut store = store.clone();
-                    bench(&mut store)
-                });
-            });
-        }
     }
 }