//! Measures the memory overhead of the data store.

use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

thread_local! {
    static LIVE_BYTES_IN_THREAD: AtomicUsize = AtomicUsize::new(0);
}

pub struct TrackingAllocator {
    allocator: std::alloc::System,
}

#[global_allocator]
pub static GLOBAL_ALLOCATOR: TrackingAllocator = TrackingAllocator {
    allocator: std::alloc::System,
};
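
// The `#[global_allocator]` attribute above registers `TrackingAllocator` as
// the allocator for the whole test binary, so every heap allocation and
// deallocation goes through the book-keeping below.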

#[allow(unsafe_code)]
// SAFETY:
// We just do book-keeping and then let another allocator do all the actual work.
unsafe impl std::alloc::GlobalAlloc for TrackingAllocator {
    #[allow(clippy::let_and_return)]
    unsafe fn alloc(&self, layout: std::alloc::Layout) -> *mut u8 {
        LIVE_BYTES_IN_THREAD.with(|bytes| bytes.fetch_add(layout.size(), Relaxed));

        // SAFETY:
        // Just deferring
        unsafe { self.allocator.alloc(layout) }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: std::alloc::Layout) {
        LIVE_BYTES_IN_THREAD.with(|bytes| bytes.fetch_sub(layout.size(), Relaxed));

        // SAFETY:
        // Just deferring
        unsafe { self.allocator.dealloc(ptr, layout) };
    }
}
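
// Note: the counter is thread-local, so a buffer allocated on one thread and
// freed on another would skew the count. The helpers below therefore assume
// all allocations happen on the calling thread.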

fn live_bytes() -> usize {
    LIVE_BYTES_IN_THREAD.with(|bytes| bytes.load(Relaxed))
}

/// Assumes all allocations are on the calling thread.
///
/// The reason we use thread-local counting is so that
/// the counting won't be confused by any other running threads (e.g. other tests).
fn memory_use<R>(run: impl Fn() -> R) -> usize {
    let used_bytes_start = live_bytes();
    let ret = run();
    let bytes_used = live_bytes() - used_bytes_start;
    drop(ret);
    bytes_used
}

// ----------------------------------------------------------------------------

use re_data_store::{DataStore, DataStoreConfig};
use re_log_types::{DataRow, RowId, TimePoint, TimeType, Timeline};
use re_types::components::{InstanceKey, Scalar};
use re_types_core::Loggable as _;

/// The memory overhead of storing many scalars in the store.
#[test]
fn scalar_memory_overhead() {
    re_log::setup_logging();

    const NUM_SCALARS: usize = 1024 * 1024;

    let total_mem_use = memory_use(|| {
        let mut store = DataStore::new(
            re_log_types::StoreId::random(re_log_types::StoreKind::Recording),
            InstanceKey::name(),
            DataStoreConfig::default(),
        );

        for i in 0..NUM_SCALARS {
            let entity_path = re_log_types::entity_path!("scalar");
            let timepoint =
                TimePoint::default().with(Timeline::new("log_time", TimeType::Time), i as i64);
            let num_instances = 1;
            let row = DataRow::from_cells1_sized(
                RowId::new(),
                entity_path,
                timepoint,
                num_instances,
                vec![Scalar(i as f64)],
            )
            .unwrap();
            store.insert_row(&row).unwrap();
        }

        store
    });

    insta::assert_debug_snapshot!(
        "scalars_on_one_timeline",
        [
            format!("{NUM_SCALARS} scalars"),
            format!("{} in total", re_format::format_bytes(total_mem_use as _)),
            format!(
                "{} per row",
                re_format::format_bytes(total_mem_use as f64 / NUM_SCALARS as f64)
            ),
        ]
    );
}
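
// The snapshot above is managed by `insta`: after an intentional change in
// memory usage, the new values can be reviewed and accepted with
// `cargo insta review` (assuming the `cargo-insta` CLI is installed).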