@@ -1,3 +1,5 @@
+use std::collections::BTreeMap;
+
 use nohash_hasher::IntMap;

 use re_arrow_store::{DataStoreConfig, TimeInt};
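Why `BTreeMap`: iterating it yields entries sorted by key, and since `RowId`s are time-based, key order approximates arrival order. That is presumably what lets the hunks below drop the separate `chronological_row_ids` index. A minimal sketch of the property, with `u64` standing in for `RowId` (an assumption made for the example):

```rust
use std::collections::BTreeMap;

fn main() {
    // `u64` stands in for `RowId` here; if keys sort in arrival order,
    // then key order is chronological order.
    let mut msgs: BTreeMap<u64, &str> = BTreeMap::new();
    msgs.insert(2, "second");
    msgs.insert(3, "third");
    msgs.insert(1, "first");

    // `values()` walks entries sorted by key, so no separate
    // "chronological ids" Vec is needed to recover arrival order.
    let in_order: Vec<&str> = msgs.values().copied().collect();
    assert_eq!(in_order, ["first", "second", "third"]);
}
```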
@@ -159,33 +161,31 @@ impl EntityDb {
 /// An in-memory database built from a stream of [`LogMsg`]es.
 #[derive(Default)]
 pub struct LogDb {
-    /// Messages in the order they arrived
-    chronological_row_ids: Vec<RowId>,
-    log_messages: ahash::HashMap<RowId, LogMsg>,
-
-    /// Data that was logged with [`TimePoint::timeless`].
-    /// We need to re-insert those in any new timelines
-    /// that are created after they were logged.
-    timeless_row_ids: Vec<RowId>,
+    /// All [`EntityPathOpMsg`]s ever received.
+    entity_op_msgs: BTreeMap<RowId, EntityPathOpMsg>,

     /// Set by whomever created this [`LogDb`].
     pub data_source: Option<re_smart_channel::Source>,

     /// Comes in a special message, [`LogMsg::BeginRecordingMsg`].
-    recording_info: Option<RecordingInfo>,
+    recording_msg: Option<BeginRecordingMsg>,

     /// Where we store the entities.
     pub entity_db: EntityDb,
 }

 impl LogDb {
+    pub fn recording_msg(&self) -> Option<&BeginRecordingMsg> {
+        self.recording_msg.as_ref()
+    }
+
     pub fn recording_info(&self) -> Option<&RecordingInfo> {
-        self.recording_info.as_ref()
+        self.recording_msg().map(|msg| &msg.info)
     }

     pub fn recording_id(&self) -> RecordingId {
-        if let Some(msg) = &self.recording_msg {
-            msg.info.recording_id
+        if let Some(msg) = &self.recording_msg {
+            msg.info.recording_id
         } else {
             RecordingId::ZERO
         }
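The public getters keep their shape; `recording_info()` now just borrows through the stored message. A hypothetical caller (the helper name is invented for illustration) is unaffected by the refactor:

```rust
// Sketch only: mirrors the ZERO fallback of `recording_id()` above.
fn current_recording_id(log_db: &LogDb) -> RecordingId {
    log_db
        .recording_info()
        .map_or(RecordingId::ZERO, |info| info.recording_id)
}
```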
@@ -203,11 +203,16 @@ impl LogDb {
         self.entity_db.tree.num_timeless_messages()
     }

+    pub fn len(&self) -> usize {
+        self.entity_db.data_store.total_timeless_rows() as usize
+            + self.entity_db.data_store.total_temporal_rows() as usize
+    }
+
     pub fn is_empty(&self) -> bool {
-        self.log_messages.is_empty()
+        self.len() == 0
     }

-    pub fn add(&mut self, msg: LogMsg) -> Result<(), Error> {
+    pub fn add(&mut self, msg: &LogMsg) -> Result<(), Error> {
         crate::profile_function!();

         match &msg {
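`len()` and `is_empty()` are now answered by the data store's row counts rather than a raw-message map, and `add` borrows instead of consuming. A sketch of a call site under the new signature (`ingest` and `messages` are hypothetical names):

```rust
fn ingest(log_db: &mut LogDb, messages: &[LogMsg]) -> Result<(), Error> {
    for msg in messages {
        // `add` now takes `&LogMsg`: the db clones only the pieces it
        // actually keeps (e.g. `EntityPathOpMsg`s), instead of taking
        // ownership of every raw message.
        log_db.add(msg)?;
    }
    Ok(())
}
```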
@@ -218,38 +223,27 @@ impl LogDb {
                     time_point,
                     path_op,
                 } = msg;
+                self.entity_op_msgs.insert(*row_id, msg.clone());
                 self.entity_db.add_path_op(*row_id, time_point, path_op);
             }
             LogMsg::ArrowMsg(inner) => self.entity_db.try_add_arrow_msg(inner)?,
             LogMsg::Goodbye(_) => {}
         }

-        // TODO(#1619): the following only makes sense because, while we support sending and
-        // receiving batches, we don't actually do so yet.
-        // We need to stop storing raw `LogMsg`s before we can benefit from our batching.
-        self.chronological_row_ids.push(msg.id());
-        self.log_messages.insert(msg.id(), msg);
-
         Ok(())
     }

     fn add_begin_recording_msg(&mut self, msg: &BeginRecordingMsg) {
-        self.recording_info = Some(msg.info.clone());
+        self.recording_msg = Some(msg.clone());
     }

-    pub fn len(&self) -> usize {
-        self.log_messages.len()
-    }
-
-    /// In the order they arrived
-    pub fn chronological_log_messages(&self) -> impl Iterator<Item = &LogMsg> {
-        self.chronological_row_ids
-            .iter()
-            .filter_map(|id| self.get_log_msg(id))
+    /// Returns an iterator over all [`EntityPathOpMsg`]s that have been written to this `LogDb`.
+    pub fn iter_entity_op_msgs(&self) -> impl Iterator<Item = &EntityPathOpMsg> {
+        self.entity_op_msgs.values()
     }

-    pub fn get_log_msg(&self, row_id: &RowId) -> Option<&LogMsg> {
-        self.log_messages.get(row_id)
+    pub fn get_entity_op_msg(&self, row_id: &RowId) -> Option<&EntityPathOpMsg> {
+        self.entity_op_msgs.get(row_id)
     }

     /// Free up some RAM by forgetting the older parts of all timelines.
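The raw `LogMsg` accessors give way to `EntityPathOpMsg`-specific ones. A hypothetical read-side use, assuming the message fields implement `Debug`:

```rust
fn print_path_ops(log_db: &LogDb) {
    // `BTreeMap::values()` yields the ops sorted by `RowId`, so this
    // iteration order is stable across runs.
    for op_msg in log_db.iter_entity_op_msgs() {
        println!("{:?} -> {:?}", op_msg.row_id, op_msg.path_op);
    }
}
```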
@@ -263,26 +257,15 @@ impl LogDb {
         let cutoff_times = self.entity_db.data_store.oldest_time_per_timeline();

         let Self {
-            chronological_row_ids,
-            log_messages,
-            timeless_row_ids,
+            entity_op_msgs,
             data_source: _,
-            recording_info: _,
+            recording_msg: _,
             entity_db,
         } = self;

         {
-            crate::profile_scope!("chronological_row_ids");
-            chronological_row_ids.retain(|row_id| !drop_row_ids.contains(row_id));
-        }
-
-        {
-            crate::profile_scope!("log_messages");
-            log_messages.retain(|row_id, _| !drop_row_ids.contains(row_id));
-        }
-        {
-            crate::profile_scope!("timeless_row_ids");
-            timeless_row_ids.retain(|row_id| !drop_row_ids.contains(row_id));
+            crate::profile_scope!("entity_op_msgs");
+            entity_op_msgs.retain(|row_id, _| !drop_row_ids.contains(row_id));
         }

         entity_db.purge(&cutoff_times, &drop_row_ids);
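Note the exhaustive `let Self { .. } = self` destructuring in the purge path: every `LogDb` field must be named, so adding a field without deciding how purging treats it becomes a compile error. A toy illustration of the idiom (types invented for the example):

```rust
struct Db {
    rows: Vec<u64>,
    name: String, // deliberately untouched by purge
}

impl Db {
    fn purge(&mut self, drop_ids: &[u64]) {
        // Exhaustive destructuring: a new field added to `Db` cannot be
        // silently forgotten here; `name: _` documents "deliberately kept".
        let Self { rows, name: _ } = self;
        rows.retain(|row| !drop_ids.contains(row));
    }
}
```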