//! Measures the memory overhead of the data store.

use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

thread_local! {
    static LIVE_BYTES_IN_THREAD: AtomicUsize = AtomicUsize::new(0);
}

/// Tracks how many bytes are currently allocated on the calling thread,
/// deferring the actual allocation work to [`std::alloc::System`].
pub struct TrackingAllocator {
    allocator: std::alloc::System,
}

#[global_allocator]
pub static GLOBAL_ALLOCATOR: TrackingAllocator = TrackingAllocator {
    allocator: std::alloc::System,
};

#[allow(unsafe_code)]
// SAFETY:
// We just do book-keeping and then let another allocator do all the actual work.
unsafe impl std::alloc::GlobalAlloc for TrackingAllocator {
    #[allow(clippy::let_and_return)]
    unsafe fn alloc(&self, layout: std::alloc::Layout) -> *mut u8 {
        LIVE_BYTES_IN_THREAD.with(|bytes| bytes.fetch_add(layout.size(), Relaxed));

        // SAFETY:
        // Just deferring
        unsafe { self.allocator.alloc(layout) }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: std::alloc::Layout) {
        LIVE_BYTES_IN_THREAD.with(|bytes| bytes.fetch_sub(layout.size(), Relaxed));

        // SAFETY:
        // Just deferring
        unsafe { self.allocator.dealloc(ptr, layout) };
    }
}

/// How many bytes are currently allocated (and not yet freed) on this thread?
fn live_bytes() -> usize {
    LIVE_BYTES_IN_THREAD.with(|bytes| bytes.load(Relaxed))
}

/// Returns the number of bytes that the value returned by `run` keeps alive.
///
/// Assumes all allocations happen on the calling thread:
/// we count per-thread so that the measurement isn't skewed by
/// allocations on other threads (e.g. other tests running in parallel).
fn memory_use<R>(run: impl Fn() -> R) -> usize {
    let used_bytes_start = live_bytes();
    let ret = run();
    let bytes_used = live_bytes() - used_bytes_start;
    drop(ret); // Measure before dropping, so we count the memory `ret` holds on to.
    bytes_used
}
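
// A minimal sanity check of `memory_use` (a hypothetical test added for
// illustration; the test name and constant are not from the original suite):
// allocating a `Vec` of `u64`s should register at least `N * size_of::<u64>()`
// live bytes, since the closure's return value is still alive when we measure.
#[test]
fn memory_use_counts_thread_local_allocations() {
    const N: usize = 1024;
    let bytes = memory_use(|| vec![0_u64; N]);
    assert!(bytes >= N * std::mem::size_of::<u64>());
}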

// ----------------------------------------------------------------------------

use re_data_store::{DataStore, DataStoreConfig};
use re_log_types::{DataRow, RowId, TimePoint, TimeType, Timeline};
use re_types::components::{InstanceKey, Scalar};
use re_types_core::Loggable as _;

/// The memory overhead of storing many scalars in the store.
#[test]
fn scalar_memory_overhead() {
    re_log::setup_logging();

    const NUM_SCALARS: usize = 1024 * 1024;

    let total_mem_use = memory_use(|| {
        let mut store = DataStore::new(
            re_log_types::StoreId::random(re_log_types::StoreKind::Recording),
            InstanceKey::name(),
            DataStoreConfig::default(),
        );

        // One row per scalar, each at its own point on the `log_time` timeline:
        for i in 0..NUM_SCALARS {
            let entity_path = re_log_types::entity_path!("scalar");
            let timepoint =
                TimePoint::default().with(Timeline::new("log_time", TimeType::Time), i as i64);
            let num_instances = 1;
            let row = DataRow::from_cells1_sized(
                RowId::new(),
                entity_path,
                timepoint,
                num_instances,
                vec![Scalar(i as f64)],
            )
            .unwrap();
            store.insert_row(&row).unwrap();
        }

        store
    });

    insta::assert_debug_snapshot!(
        "scalars_on_one_timeline",
        [
            format!("{NUM_SCALARS} scalars"),
            format!("{} in total", re_format::format_bytes(total_mem_use as _)),
            format!(
                "{} per row",
                re_format::format_bytes(total_mem_use as f64 / NUM_SCALARS as f64)
            ),
        ]
    );
}