From 44442177a5b1f56bd7c0655c656dc5569db83fbb Mon Sep 17 00:00:00 2001 From: Clement Rey Date: Tue, 23 Jan 2024 18:01:55 +0100 Subject: [PATCH] Primary caching 19 (final): de-statification (#4856) - Quick sanity pass over all the intermediary locks and refcounts to make sure we don't hold anything for longer than we need. - Get rid of all static globals and let the caches live with their associated stores in `EntityDb`. - `CacheKey` no longer requires a `StoreId`. --- - Fixes #4815 --- Part of the primary caching series of PR (index search, joins, deserialization): - #4592 - #4593 - #4659 - #4680 - #4681 - #4698 - #4711 - #4712 - #4721 - #4726 - #4773 - #4784 - #4785 - #4793 - #4800 - #4851 - #4852 - #4853 - #4856 --- Cargo.lock | 6 +- crates/re_entity_db/Cargo.toml | 6 +- crates/re_entity_db/src/entity_db.rs | 25 +- crates/re_entity_db/src/entity_properties.rs | 107 +-------- crates/re_entity_db/src/lib.rs | 2 + crates/re_query/Cargo.toml | 7 +- crates/re_query/src/lib.rs | 4 +- crates/re_query/src/util.rs | 101 +++++++- crates/re_query_cache/Cargo.toml | 1 - crates/re_query_cache/benches/latest_at.rs | 99 ++++---- crates/re_query_cache/src/cache.rs | 219 ++++++++++-------- crates/re_query_cache/src/cache_stats.rs | 163 ++++++------- crates/re_query_cache/src/latest_at.rs | 76 +++--- crates/re_query_cache/src/lib.rs | 20 +- crates/re_query_cache/src/query.rs | 142 ++++++------ crates/re_query_cache/src/range.rs | 13 +- crates/re_query_cache/tests/latest_at.rs | 163 +++++++------ crates/re_query_cache/tests/range.rs | 179 +++++++------- .../benches/bench_points.rs | 6 +- .../src/visualizers/entity_iterator.rs | 2 +- .../src/visualizer_system.rs | 3 +- .../src/visualizer_system.rs | 3 +- crates/re_viewer/src/app.rs | 13 +- crates/re_viewer/src/store_hub.rs | 14 +- crates/re_viewer/src/ui/memory_panel.rs | 16 +- 25 files changed, 745 insertions(+), 645 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b48b46d067cb..c0edd9a1455b 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -4631,6 +4631,8 @@ dependencies = [ "re_log", "re_log_encoding", "re_log_types", + "re_query", + "re_query_cache", "re_smart_channel", "re_tracing", "re_types", @@ -4779,13 +4781,14 @@ dependencies = [ "mimalloc", "rand", "re_data_store", - "re_entity_db", "re_format", "re_log", "re_log_types", "re_tracing", "re_types", "re_types_core", + "rmp-serde", + "serde", "smallvec", "thiserror", ] @@ -4807,7 +4810,6 @@ dependencies = [ "paste", "rand", "re_data_store", - "re_entity_db", "re_format", "re_log", "re_log_types", diff --git a/crates/re_entity_db/Cargo.toml b/crates/re_entity_db/Cargo.toml index eeac12b55eeb..7afcdf3f3070 100644 --- a/crates/re_entity_db/Cargo.toml +++ b/crates/re_entity_db/Cargo.toml @@ -20,16 +20,18 @@ all-features = true default = [] ## Enable (de)serialization using serde. -serde = ["dep:serde", "dep:rmp-serde", "re_log_types/serde"] +serde = ["dep:serde", "dep:rmp-serde", "re_log_types/serde", "re_query/serde"] [dependencies] re_data_store.workspace = true re_format.workspace = true re_int_histogram.workspace = true +re_log.workspace = true re_log_encoding = { workspace = true, optional = true } re_log_types.workspace = true -re_log.workspace = true +re_query.workspace = true +re_query_cache.workspace = true re_smart_channel.workspace = true re_tracing.workspace = true re_types_core.workspace = true diff --git a/crates/re_entity_db/src/entity_db.rs b/crates/re_entity_db/src/entity_db.rs index 8998d083603d..2fbae022968c 100644 --- a/crates/re_entity_db/src/entity_db.rs +++ b/crates/re_entity_db/src/entity_db.rs @@ -110,11 +110,20 @@ pub struct EntityDb { /// Stores all components for all entities for all timelines. data_store: DataStore, + /// Query caches for the data in [`Self::data_store`]. 
+ query_caches: re_query_cache::Caches, + stats: IngestionStatistics, } impl EntityDb { pub fn new(store_id: StoreId) -> Self { + let data_store = re_data_store::DataStore::new( + store_id.clone(), + InstanceKey::name(), + DataStoreConfig::default(), + ); + let query_caches = re_query_cache::Caches::new(&data_store); Self { store_id: store_id.clone(), data_source: None, @@ -123,11 +132,8 @@ impl EntityDb { entity_path_from_hash: Default::default(), times_per_timeline: Default::default(), tree: crate::EntityTree::root(), - data_store: re_data_store::DataStore::new( - store_id.clone(), - InstanceKey::name(), - DataStoreConfig::default(), - ), + data_store, + query_caches, stats: IngestionStatistics::new(store_id), } } @@ -175,6 +181,11 @@ impl EntityDb { self.store_info().map(|ri| &ri.application_id) } + #[inline] + pub fn query_caches(&self) -> &re_query_cache::Caches { + &self.query_caches + } + #[inline] pub fn store(&self) -> &DataStore { &self.data_store @@ -315,6 +326,7 @@ impl EntityDb { // and/or pending clears. let original_store_events = &[store_event]; self.times_per_timeline.on_events(original_store_events); + self.query_caches.on_events(original_store_events); let clear_cascade = self.tree.on_store_additions(original_store_events); // Second-pass: update the [`DataStore`] by applying the [`ClearCascade`]. @@ -323,6 +335,7 @@ impl EntityDb { // notified of, again! 
let new_store_events = self.on_clear_cascade(clear_cascade); self.times_per_timeline.on_events(&new_store_events); + self.query_caches.on_events(&new_store_events); let clear_cascade = self.tree.on_store_additions(&new_store_events); // Clears don't affect `Clear` components themselves, therefore we cannot have recursive @@ -476,10 +489,12 @@ impl EntityDb { times_per_timeline, tree, data_store: _, + query_caches, stats: _, } = self; times_per_timeline.on_events(store_events); + query_caches.on_events(store_events); let store_events = store_events.iter().collect_vec(); let compacted = CompactedStoreEvents::new(&store_events); diff --git a/crates/re_entity_db/src/entity_properties.rs b/crates/re_entity_db/src/entity_properties.rs index 211c92b79ffc..9486b3499614 100644 --- a/crates/re_entity_db/src/entity_properties.rs +++ b/crates/re_entity_db/src/entity_properties.rs @@ -1,7 +1,7 @@ +use std::fmt::Formatter; + #[cfg(feature = "serde")] use re_log_types::EntityPath; -use re_log_types::TimeInt; -use std::fmt::Formatter; #[cfg(feature = "serde")] use crate::EditableAutoValue; @@ -95,7 +95,7 @@ impl FromIterator<(EntityPath, EntityProperties)> for EntityPropertyMap { #[cfg_attr(feature = "serde", serde(default))] pub struct EntityProperties { pub visible: bool, - pub visible_history: ExtraQueryHistory, + pub visible_history: re_query::ExtraQueryHistory, pub interactive: bool, /// What kind of color mapping should be applied (none, map, texture, transfer..)? @@ -142,7 +142,7 @@ impl Default for EntityProperties { fn default() -> Self { Self { visible: true, - visible_history: ExtraQueryHistory::default(), + visible_history: re_query::ExtraQueryHistory::default(), interactive: true, color_mapper: EditableAutoValue::default(), pinhole_image_plane_distance: EditableAutoValue::default(), @@ -269,105 +269,6 @@ impl EntityProperties { // ---------------------------------------------------------------------------- -/// One of the boundaries of the visible history. 
-/// -/// For [`VisibleHistoryBoundary::RelativeToTimeCursor`] and [`VisibleHistoryBoundary::Absolute`], -/// the value are either nanos or frames, depending on the type of timeline. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] -pub enum VisibleHistoryBoundary { - /// Boundary is a value relative to the time cursor - RelativeToTimeCursor(i64), - - /// Boundary is an absolute value - Absolute(i64), - - /// The boundary extends to infinity. - Infinite, -} - -impl VisibleHistoryBoundary { - /// Value when the boundary is set to the current time cursor. - pub const AT_CURSOR: Self = Self::RelativeToTimeCursor(0); -} - -impl Default for VisibleHistoryBoundary { - fn default() -> Self { - Self::AT_CURSOR - } -} - -/// Visible history bounds. -#[derive(Clone, Copy, Default, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] -pub struct VisibleHistory { - /// Low time boundary. - pub from: VisibleHistoryBoundary, - - /// High time boundary. - pub to: VisibleHistoryBoundary, -} - -impl VisibleHistory { - /// Value with the visible history feature is disabled. 
- pub const OFF: Self = Self { - from: VisibleHistoryBoundary::AT_CURSOR, - to: VisibleHistoryBoundary::AT_CURSOR, - }; - - pub const ALL: Self = Self { - from: VisibleHistoryBoundary::Infinite, - to: VisibleHistoryBoundary::Infinite, - }; - - pub fn from(&self, cursor: TimeInt) -> TimeInt { - match self.from { - VisibleHistoryBoundary::Absolute(value) => TimeInt::from(value), - VisibleHistoryBoundary::RelativeToTimeCursor(value) => cursor + TimeInt::from(value), - VisibleHistoryBoundary::Infinite => TimeInt::MIN, - } - } - - pub fn to(&self, cursor: TimeInt) -> TimeInt { - match self.to { - VisibleHistoryBoundary::Absolute(value) => TimeInt::from(value), - VisibleHistoryBoundary::RelativeToTimeCursor(value) => cursor + TimeInt::from(value), - VisibleHistoryBoundary::Infinite => TimeInt::MAX, - } - } -} - -/// When showing an entity in the history view, add this much history to it. -#[derive(Clone, Copy, Default, Debug, PartialEq, Eq)] -#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] -#[cfg_attr(feature = "serde", serde(default))] -pub struct ExtraQueryHistory { - /// Is the feature enabled? - pub enabled: bool, - - /// Visible history settings for time timelines - pub nanos: VisibleHistory, - - /// Visible history settings for frame timelines - pub sequences: VisibleHistory, -} - -impl ExtraQueryHistory { - /// Multiply/and these together. 
- #[allow(dead_code)] - fn with_child(&self, child: &Self) -> Self { - if child.enabled { - *child - } else if self.enabled { - *self - } else { - Self::default() - } - } -} - -// ---------------------------------------------------------------------------- - #[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] pub enum Colormap { diff --git a/crates/re_entity_db/src/lib.rs b/crates/re_entity_db/src/lib.rs index a33fff106f79..6410617d9e4f 100644 --- a/crates/re_entity_db/src/lib.rs +++ b/crates/re_entity_db/src/lib.rs @@ -30,6 +30,8 @@ pub(crate) use self::entity_tree::{ClearCascade, CompactedStoreEvents}; use re_log_types::DataTableError; pub use re_log_types::{EntityPath, EntityPathPart, TimeInt, Timeline}; +pub use re_query::{ExtraQueryHistory, VisibleHistory, VisibleHistoryBoundary}; + #[cfg(feature = "serde")] pub use blueprint::components::EntityPropertiesComponent; #[cfg(feature = "serde")] diff --git a/crates/re_query/Cargo.toml b/crates/re_query/Cargo.toml index deb3978bf07e..204e6cdd1308 100644 --- a/crates/re_query/Cargo.toml +++ b/crates/re_query/Cargo.toml @@ -19,10 +19,13 @@ all-features = true [features] default = [] +## Enable (de)serialization using serde. 
+serde = ["dep:serde", "dep:rmp-serde"] + + [dependencies] # Rerun dependencies: re_data_store.workspace = true -re_entity_db.workspace = true re_format = { workspace = true, features = ["arrow"] } re_log_types.workspace = true re_types_core.workspace = true @@ -34,6 +37,8 @@ arrow2.workspace = true backtrace.workspace = true document-features.workspace = true itertools = { workspace = true } +rmp-serde = { workspace = true, optional = true } +serde = { workspace = true, features = ["derive", "rc"], optional = true } smallvec.workspace = true thiserror.workspace = true diff --git a/crates/re_query/src/lib.rs b/crates/re_query/src/lib.rs index 91345299df53..bda0976587b8 100644 --- a/crates/re_query/src/lib.rs +++ b/crates/re_query/src/lib.rs @@ -8,7 +8,9 @@ mod util; pub use self::archetype_view::{ArchetypeView, ComponentWithInstances}; pub use self::query::{get_component_with_instances, query_archetype}; pub use self::range::range_archetype; -pub use self::util::query_archetype_with_history; +pub use self::util::{ + query_archetype_with_history, ExtraQueryHistory, VisibleHistory, VisibleHistoryBoundary, +}; // Used for doc-tests #[doc(hidden)] diff --git a/crates/re_query/src/util.rs b/crates/re_query/src/util.rs index 2487f0ff5693..7ee9657b5c3d 100644 --- a/crates/re_query/src/util.rs +++ b/crates/re_query/src/util.rs @@ -1,10 +1,109 @@ use re_data_store::{DataStore, LatestAtQuery, RangeQuery, TimeInt, TimeRange, Timeline}; -use re_entity_db::ExtraQueryHistory; use re_log_types::EntityPath; use re_types_core::Archetype; use crate::{query_archetype, range::range_archetype, ArchetypeView}; +// --- + +/// One of the boundaries of the visible history. +/// +/// For [`VisibleHistoryBoundary::RelativeToTimeCursor`] and [`VisibleHistoryBoundary::Absolute`], +/// the value are either nanos or frames, depending on the type of timeline. 
+#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] +pub enum VisibleHistoryBoundary { + /// Boundary is a value relative to the time cursor + RelativeToTimeCursor(i64), + + /// Boundary is an absolute value + Absolute(i64), + + /// The boundary extends to infinity. + Infinite, +} + +impl VisibleHistoryBoundary { + /// Value when the boundary is set to the current time cursor. + pub const AT_CURSOR: Self = Self::RelativeToTimeCursor(0); +} + +impl Default for VisibleHistoryBoundary { + fn default() -> Self { + Self::AT_CURSOR + } +} + +/// Visible history bounds. +#[derive(Clone, Copy, Default, Debug, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] +pub struct VisibleHistory { + /// Low time boundary. + pub from: VisibleHistoryBoundary, + + /// High time boundary. + pub to: VisibleHistoryBoundary, +} + +impl VisibleHistory { + /// Value with the visible history feature is disabled. + pub const OFF: Self = Self { + from: VisibleHistoryBoundary::AT_CURSOR, + to: VisibleHistoryBoundary::AT_CURSOR, + }; + + pub const ALL: Self = Self { + from: VisibleHistoryBoundary::Infinite, + to: VisibleHistoryBoundary::Infinite, + }; + + pub fn from(&self, cursor: TimeInt) -> TimeInt { + match self.from { + VisibleHistoryBoundary::Absolute(value) => TimeInt::from(value), + VisibleHistoryBoundary::RelativeToTimeCursor(value) => cursor + TimeInt::from(value), + VisibleHistoryBoundary::Infinite => TimeInt::MIN, + } + } + + pub fn to(&self, cursor: TimeInt) -> TimeInt { + match self.to { + VisibleHistoryBoundary::Absolute(value) => TimeInt::from(value), + VisibleHistoryBoundary::RelativeToTimeCursor(value) => cursor + TimeInt::from(value), + VisibleHistoryBoundary::Infinite => TimeInt::MAX, + } + } +} + +/// When showing an entity in the history view, add this much history to it. 
+#[derive(Clone, Copy, Default, Debug, PartialEq, Eq)] +#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] +#[cfg_attr(feature = "serde", serde(default))] +pub struct ExtraQueryHistory { + /// Is the feature enabled? + pub enabled: bool, + + /// Visible history settings for time timelines + pub nanos: VisibleHistory, + + /// Visible history settings for frame timelines + pub sequences: VisibleHistory, +} + +impl ExtraQueryHistory { + /// Multiply/and these together. + pub fn with_child(&self, child: &Self) -> Self { + if child.enabled { + *child + } else if self.enabled { + *self + } else { + Self::default() + } + } +} + +// --- + pub fn query_archetype_with_history<'a, A: Archetype + 'a, const N: usize>( store: &'a DataStore, timeline: &'a Timeline, diff --git a/crates/re_query_cache/Cargo.toml b/crates/re_query_cache/Cargo.toml index a8460f550907..3c5a38eb6b48 100644 --- a/crates/re_query_cache/Cargo.toml +++ b/crates/re_query_cache/Cargo.toml @@ -22,7 +22,6 @@ default = [] [dependencies] # Rerun dependencies: re_data_store.workspace = true -re_entity_db.workspace = true re_format.workspace = true re_log.workspace = true re_log_types.workspace = true diff --git a/crates/re_query_cache/benches/latest_at.rs b/crates/re_query_cache/benches/latest_at.rs index a856988a3185..dfd7f3e8827f 100644 --- a/crates/re_query_cache/benches/latest_at.rs +++ b/crates/re_query_cache/benches/latest_at.rs @@ -4,9 +4,9 @@ use criterion::{criterion_group, criterion_main, Criterion}; use itertools::Itertools; -use re_data_store::{DataStore, LatestAtQuery}; +use re_data_store::{DataStore, LatestAtQuery, StoreSubscriber}; use re_log_types::{entity_path, DataRow, EntityPath, RowId, TimeInt, TimeType, Timeline}; -use re_query_cache::query_archetype_pov1_comp1; +use re_query_cache::Caches; use re_types::{ archetypes::Points2D, components::{Color, InstanceKey, Position2D, Text}, @@ -73,9 +73,9 @@ fn mono_points(c: &mut Criterion) { { let mut group = 
c.benchmark_group("arrow_mono_points2"); group.throughput(criterion::Throughput::Elements(NUM_POINTS as _)); - let store = insert_rows(msgs.iter()); + let (caches, store) = insert_rows(msgs.iter()); group.bench_function("query", |b| { - b.iter(|| query_and_visit_points(&store, &paths)); + b.iter(|| query_and_visit_points(&caches, &store, &paths)); }); } } @@ -101,9 +101,9 @@ fn mono_strings(c: &mut Criterion) { { let mut group = c.benchmark_group("arrow_mono_strings2"); group.throughput(criterion::Throughput::Elements(NUM_POINTS as _)); - let store = insert_rows(msgs.iter()); + let (caches, store) = insert_rows(msgs.iter()); group.bench_function("query", |b| { - b.iter(|| query_and_visit_strings(&store, &paths)); + b.iter(|| query_and_visit_strings(&caches, &store, &paths)); }); } } @@ -126,9 +126,9 @@ fn batch_points(c: &mut Criterion) { { let mut group = c.benchmark_group("arrow_batch_points2"); group.throughput(criterion::Throughput::Elements(NUM_POINTS as _)); - let store = insert_rows(msgs.iter()); + let (caches, store) = insert_rows(msgs.iter()); group.bench_function("query", |b| { - b.iter(|| query_and_visit_points(&store, &paths)); + b.iter(|| query_and_visit_points(&caches, &store, &paths)); }); } } @@ -151,9 +151,9 @@ fn batch_strings(c: &mut Criterion) { { let mut group = c.benchmark_group("arrow_batch_strings2"); group.throughput(criterion::Throughput::Elements(NUM_POINTS as _)); - let store = insert_rows(msgs.iter()); + let (caches, store) = insert_rows(msgs.iter()); group.bench_function("query", |b| { - b.iter(|| query_and_visit_strings(&store, &paths)); + b.iter(|| query_and_visit_strings(&caches, &store, &paths)); }); } } @@ -253,16 +253,19 @@ fn build_strings_rows(paths: &[EntityPath], num_strings: usize) -> Vec .collect() } -fn insert_rows<'a>(msgs: impl Iterator) -> DataStore { +fn insert_rows<'a>(msgs: impl Iterator) -> (Caches, DataStore) { let mut store = DataStore::new( re_log_types::StoreId::random(re_log_types::StoreKind::Recording), 
InstanceKey::name(), Default::default(), ); + let mut caches = Caches::new(&store); + msgs.for_each(|row| { - store.insert_row(row).unwrap(); + caches.on_events(&[store.insert_row(row).unwrap()]); }); - store + + (caches, store) } struct SavePoint { @@ -270,7 +273,11 @@ struct SavePoint { _color: Option, } -fn query_and_visit_points(store: &DataStore, paths: &[EntityPath]) -> Vec { +fn query_and_visit_points( + caches: &Caches, + store: &DataStore, + paths: &[EntityPath], +) -> Vec { let timeline_frame_nr = Timeline::new("frame_nr", TimeType::Sequence); let query = LatestAtQuery::new(timeline_frame_nr, (NUM_FRAMES_POINTS as i64 / 2).into()); @@ -278,21 +285,22 @@ fn query_and_visit_points(store: &DataStore, paths: &[EntityPath]) -> Vec( - true, // cached? - store, - &query.clone().into(), - path, - |(_, _, positions, colors)| { - itertools::izip!(positions.iter(), colors.iter()).for_each(|(pos, color)| { - points.push(SavePoint { - _pos: *pos, - _color: *color, + caches + .query_archetype_pov1_comp1::( + true, // cached? + store, + &query.clone().into(), + path, + |(_, _, positions, colors)| { + itertools::izip!(positions.iter(), colors.iter()).for_each(|(pos, color)| { + points.push(SavePoint { + _pos: *pos, + _color: *color, + }); }); - }); - }, - ) - .unwrap(); + }, + ) + .unwrap(); } assert_eq!(NUM_POINTS as usize, points.len()); points @@ -302,27 +310,32 @@ struct SaveString { _label: Option, } -fn query_and_visit_strings(store: &DataStore, paths: &[EntityPath]) -> Vec { +fn query_and_visit_strings( + caches: &Caches, + store: &DataStore, + paths: &[EntityPath], +) -> Vec { let timeline_frame_nr = Timeline::new("frame_nr", TimeType::Sequence); let query = LatestAtQuery::new(timeline_frame_nr, (NUM_FRAMES_STRINGS as i64 / 2).into()); let mut strings = Vec::with_capacity(NUM_STRINGS as _); for path in paths { - query_archetype_pov1_comp1::( - true, // cached? 
- store, - &query.clone().into(), - path, - |(_, _, _, labels)| { - for label in labels.iter() { - strings.push(SaveString { - _label: label.clone(), - }); - } - }, - ) - .unwrap(); + caches + .query_archetype_pov1_comp1::( + true, // cached? + store, + &query.clone().into(), + path, + |(_, _, _, labels)| { + for label in labels.iter() { + strings.push(SaveString { + _label: label.clone(), + }); + } + }, + ) + .unwrap(); } assert_eq!(NUM_STRINGS as usize, strings.len()); diff --git a/crates/re_query_cache/src/cache.rs b/crates/re_query_cache/src/cache.rs index d3177ab53a4f..dd19c620c4fb 100644 --- a/crates/re_query_cache/src/cache.rs +++ b/crates/re_query_cache/src/cache.rs @@ -5,14 +5,11 @@ use std::{ }; use ahash::{HashMap, HashSet}; -use once_cell::sync::Lazy; use parking_lot::RwLock; use paste::paste; use seq_macro::seq; -use re_data_store::{ - LatestAtQuery, RangeQuery, StoreDiff, StoreEvent, StoreSubscriber, StoreSubscriberHandle, -}; +use re_data_store::{DataStore, LatestAtQuery, RangeQuery, StoreDiff, StoreEvent, StoreSubscriber}; use re_log_types::{EntityPath, RowId, StoreId, TimeInt, TimeRange, Timeline}; use re_query::ArchetypeView; use re_types_core::{ @@ -45,15 +42,34 @@ impl From for AnyQuery { // --- -/// All primary caches (all stores, all entities, everything). +/// Maintains the top-level cache mappings. // -// TODO(cmc): Centralize and harmonize all caches (query, jpeg, mesh). -static CACHES: Lazy = - Lazy::new(|| re_data_store::DataStore::register_subscriber(Box::::default())); +pub struct Caches { + /// The [`StoreId`] of the associated [`DataStore`]. + store_id: StoreId, -/// Maintains the top-level cache mappings. -#[derive(Default)] -pub struct Caches(pub(crate) RwLock>); + // NOTE: `Arc` so we can cheaply free the top-level lock early when needed. 
+ per_cache_key: RwLock>>>, +} + +impl std::ops::Deref for Caches { + type Target = RwLock>>>; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.per_cache_key + } +} + +impl Caches { + #[inline] + pub fn new(store: &DataStore) -> Self { + Self { + store_id: store.id().clone(), + per_cache_key: Default::default(), + } + } +} #[derive(Default)] pub struct CachesPerArchetype { @@ -64,6 +80,8 @@ pub struct CachesPerArchetype { /// Different archetypes have different point-of-views, and therefore can end up with different /// results, even from the same raw data. // + // NOTE: `Arc` so we can cheaply free the archetype-level lock early when needed. + // // TODO(cmc): At some point we should probably just store the PoV and optional components rather // than an `ArchetypeName`: the query system doesn't care about archetypes. pub(crate) latest_at_per_archetype: RwLock>>>, @@ -75,6 +93,8 @@ pub struct CachesPerArchetype { /// Different archetypes have different point-of-views, and therefore can end up with different /// results, even from the same raw data. // + // NOTE: `Arc` so we can cheaply free the archetype-level lock early when needed. + // // TODO(cmc): At some point we should probably just store the PoV and optional components rather // than an `ArchetypeName`: the query system doesn't care about archetypes. pub(crate) range_per_archetype: RwLock>>>, @@ -103,17 +123,16 @@ impl Caches { // // TODO(#4731): expose palette command. #[inline] - pub fn clear() { - re_data_store::DataStore::with_subscriber_once(*CACHES, |caches: &Caches| { - caches.0.write().clear(); - }); + pub fn clear(&self) { + self.write().clear(); } /// Gives write access to the appropriate `LatestAtCache` according to the specified /// query parameters. 
#[inline] pub fn with_latest_at( - store_id: StoreId, + &self, + store: &DataStore, entity_path: EntityPath, query: &LatestAtQuery, mut f: F, @@ -122,34 +141,37 @@ impl Caches { A: Archetype, F: FnMut(&mut LatestAtCache) -> R, { - let key = CacheKey::new(store_id, entity_path, query.timeline); - - let cache = - re_data_store::DataStore::with_subscriber_once(*CACHES, move |caches: &Caches| { - let mut caches = caches.0.write(); - - let caches_per_archetype = caches.entry(key.clone()).or_default(); - - let removed_bytes = caches_per_archetype.handle_pending_invalidation(); - if removed_bytes > 0 { - re_log::trace!( - store_id = %key.store_id, - entity_path = %key.entity_path, - removed = removed_bytes, - "invalidated latest-at caches" - ); - } - - let mut latest_at_per_archetype = - caches_per_archetype.latest_at_per_archetype.write(); - let latest_at_cache = latest_at_per_archetype.entry(A::name()).or_default(); - - Arc::clone(latest_at_cache) + assert!( + self.store_id == *store.id(), + "attempted to use a query cache {} with the wrong datastore ({})", + self.store_id, + store.id(), + ); + + let key = CacheKey::new(entity_path, query.timeline); + + let cache = { + let caches_per_archetype = Arc::clone(self.write().entry(key.clone()).or_default()); + // Implicitly releasing top-level cache mappings -- concurrent queries can run once again. + + let removed_bytes = caches_per_archetype.write().handle_pending_invalidation(); + // Implicitly releasing archetype-level cache mappings -- concurrent queries using the + // same `CacheKey` but a different `ArchetypeName` can run once again. + if removed_bytes > 0 { + re_log::trace!( + store_id=%self.store_id, + entity_path = %key.entity_path, + removed = removed_bytes, + "invalidated latest-at caches" + ); + } - // Implicitly releasing all intermediary locks. - }) - // NOTE: downcasting cannot fail, this is our own private handle. 
- .unwrap(); + let caches_per_archetype = caches_per_archetype.read(); + let mut latest_at_per_archetype = caches_per_archetype.latest_at_per_archetype.write(); + Arc::clone(latest_at_per_archetype.entry(A::name()).or_default()) + // Implicitly releasing bottom-level cache mappings -- identical concurrent queries + // can run once again. + }; let mut cache = cache.write(); f(&mut cache) @@ -159,7 +181,8 @@ impl Caches { /// query parameters. #[inline] pub fn with_range( - store_id: StoreId, + &self, + store: &DataStore, entity_path: EntityPath, query: &RangeQuery, mut f: F, @@ -168,51 +191,46 @@ impl Caches { A: Archetype, F: FnMut(&mut RangeCache) -> R, { - let key = CacheKey::new(store_id, entity_path, query.timeline); - - let cache = - re_data_store::DataStore::with_subscriber_once(*CACHES, move |caches: &Caches| { - let mut caches = caches.0.write(); - - let caches_per_archetype = caches.entry(key.clone()).or_default(); - - let removed_bytes = caches_per_archetype.handle_pending_invalidation(); - if removed_bytes > 0 { - re_log::trace!( - store_id = %key.store_id, - entity_path = %key.entity_path, - removed = removed_bytes, - "invalidated range caches" - ); - } - - let mut range_per_archetype = caches_per_archetype.range_per_archetype.write(); - let range_cache = range_per_archetype.entry(A::name()).or_default(); - - Arc::clone(range_cache) + assert!( + self.store_id == *store.id(), + "attempted to use a query cache {} with the wrong datastore ({})", + self.store_id, + store.id(), + ); + + let key = CacheKey::new(entity_path, query.timeline); + + let cache = { + let caches_per_archetype = Arc::clone(self.write().entry(key.clone()).or_default()); + // Implicitly releasing top-level cache mappings -- concurrent queries can run once again. 
+ + let removed_bytes = caches_per_archetype.write().handle_pending_invalidation(); + // Implicitly releasing archetype-level cache mappings -- concurrent queries using the + // same `CacheKey` but a different `ArchetypeName` can run once again. + if removed_bytes > 0 { + re_log::trace!( + store_id=%self.store_id, + entity_path = %key.entity_path, + removed = removed_bytes, + "invalidated range caches" + ); + } - // Implicitly releasing all intermediary locks. - }) - // NOTE: downcasting cannot fail, this is our own private handle. - .unwrap(); + let caches_per_archetype = caches_per_archetype.read(); + let mut range_per_archetype = caches_per_archetype.range_per_archetype.write(); + Arc::clone(range_per_archetype.entry(A::name()).or_default()) + // Implicitly releasing bottom-level cache mappings -- identical concurrent queries + // can run once again. + }; let mut cache = cache.write(); f(&mut cache) } - - #[inline] - pub(crate) fn with R, R>(f: F) -> R { - // NOTE: downcasting cannot fail, this is our own private handle. - re_data_store::DataStore::with_subscriber(*CACHES, f).unwrap() - } } /// Uniquely identifies cached query results in the [`Caches`]. #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct CacheKey { - /// Which [`re_data_store::DataStore`] is the query targeting? - pub store_id: StoreId, - /// Which [`EntityPath`] is the query targeting? pub entity_path: EntityPath, @@ -222,13 +240,8 @@ pub struct CacheKey { impl CacheKey { #[inline] - pub fn new( - store_id: impl Into, - entity_path: impl Into, - timeline: impl Into, - ) -> Self { + pub fn new(entity_path: impl Into, timeline: impl Into) -> Self { Self { - store_id: store_id.into(), entity_path: entity_path.into(), timeline: timeline.into(), } @@ -253,7 +266,6 @@ impl CacheKey { impl StoreSubscriber for Caches { self } - // TODO(cmc): support dropped recordings.
fn on_events(&mut self, events: &[StoreEvent]) { re_tracing::profile_function!(format!("num_events={}", events.len())); @@ -265,6 +277,13 @@ impl StoreSubscriber for Caches { diff, } = event; + assert!( + self.store_id == *store_id, + "attempted to use a query cache {} with the wrong datastore ({})", + self.store_id, + store_id, + ); + let StoreDiff { kind: _, // Don't care: both additions and deletions invalidate query results. row_id: _, @@ -275,7 +294,7 @@ impl StoreSubscriber for Caches { #[derive(Default, Debug)] struct CompactedEvents { - timeless: HashSet<(StoreId, EntityPath)>, + timeless: HashSet, timeful: HashMap, } @@ -284,18 +303,21 @@ impl StoreSubscriber for Caches { re_tracing::profile_scope!("compact events"); if times.is_empty() { - compacted - .timeless - .insert((store_id.clone(), entity_path.clone())); + compacted.timeless.insert(entity_path.clone()); } for &(timeline, time) in times { - let key = CacheKey::new(store_id.clone(), entity_path.clone(), timeline); + let key = CacheKey::new(entity_path.clone(), timeline); let min_time = compacted.timeful.entry(key).or_insert(TimeInt::MAX); *min_time = TimeInt::min(*min_time, time); } } + let caches = self.write(); + // NOTE: Don't release the top-level lock -- even though this cannot happen yet with + // our current macro-architecture, we want to prevent queries from concurrently + // running while we're updating the invalidation flags. + // TODO(cmc): This is horribly stupid and slow and can easily be made faster by adding // yet another layer of caching indirection. 
// But since this pretty much never happens in practice, let's not go there until we @@ -303,10 +325,10 @@ impl StoreSubscriber for Caches { { re_tracing::profile_scope!("timeless"); - for (store_id, entity_path) in compacted.timeless { - for (key, caches_per_archetype) in self.0.write().iter_mut() { - if key.store_id == store_id && key.entity_path == entity_path { - caches_per_archetype.pending_timeless_invalidation = true; + for entity_path in compacted.timeless { + for (key, caches_per_archetype) in caches.iter() { + if key.entity_path == entity_path { + caches_per_archetype.write().pending_timeless_invalidation = true; } } } @@ -316,7 +338,12 @@ impl StoreSubscriber for Caches { re_tracing::profile_scope!("timeful"); for (key, time) in compacted.timeful { - if let Some(caches_per_archetype) = self.0.write().get_mut(&key) { + if let Some(caches_per_archetype) = caches.get(&key) { + // NOTE: Do _NOT_ lock from within the if clause itself or the guard will live + // for the remainder of the if statement and hell will ensue. + // is + // supposed to catch but it didn't, I don't know why. + let mut caches_per_archetype = caches_per_archetype.write(); if let Some(min_time) = caches_per_archetype.pending_timeful_invalidation.as_mut() { diff --git a/crates/re_query_cache/src/cache_stats.rs b/crates/re_query_cache/src/cache_stats.rs index c445af184e64..5378cea41b4b 100644 --- a/crates/re_query_cache/src/cache_stats.rs +++ b/crates/re_query_cache/src/cache_stats.rs @@ -68,7 +68,7 @@ impl Caches { /// Computes the stats for all primary caches. /// /// `per_component` toggles per-component stats. 
- pub fn stats(detailed_stats: bool) -> CachesStats { + pub fn stats(&self, detailed_stats: bool) -> CachesStats { re_tracing::profile_function!(); fn upsert_bucket_stats( @@ -83,96 +83,97 @@ impl Caches { } } - Self::with(|caches| { - let latest_at = caches - .0 - .read() - .iter() - .map(|(key, caches_per_arch)| { - (key.entity_path.clone(), { - let mut total_size_bytes = 0u64; - let mut total_rows = 0u64; - let mut per_component = detailed_stats.then(BTreeMap::default); - - for latest_at_cache in - caches_per_arch.latest_at_per_archetype.read().values() - { - let latest_at_cache @ LatestAtCache { - per_query_time: _, + let caches = self.read().clone(); + // Implicitly releasing top-level cache mappings -- concurrent queries can run once again. + + let latest_at = caches + .iter() + .map(|(key, caches_per_arch)| { + (key.entity_path.clone(), { + let mut total_size_bytes = 0u64; + let mut total_rows = 0u64; + let mut per_component = detailed_stats.then(BTreeMap::default); + + for latest_at_cache in caches_per_arch + .read() + .latest_at_per_archetype + .read() + .values() + { + let latest_at_cache @ LatestAtCache { + per_query_time: _, + per_data_time, + timeless, + .. 
+ } = &*latest_at_cache.read(); + + total_size_bytes += latest_at_cache.total_size_bytes(); + total_rows = per_data_time.len() as u64 + timeless.is_some() as u64; + + if let Some(per_component) = per_component.as_mut() { + re_tracing::profile_scope!("detailed"); + + if let Some(bucket) = &timeless { + upsert_bucket_stats(per_component, bucket); + } + + for bucket in per_data_time.values() { + upsert_bucket_stats(per_component, bucket); + } + } + } + + CachedEntityStats { + total_size_bytes, + total_rows, + + per_component, + } + }) + }) + .collect(); + + let range = caches + .iter() + .map(|(key, caches_per_arch)| { + (key.entity_path.clone(), { + caches_per_arch + .read() + .range_per_archetype + .read() + .values() + .map(|range_cache| { + let range_cache @ RangeCache { per_data_time, timeless, - .. - } = &*latest_at_cache.read(); + } = &*range_cache.read(); - total_size_bytes += latest_at_cache.total_size_bytes(); - total_rows = per_data_time.len() as u64 + timeless.is_some() as u64; + let total_rows = per_data_time.data_times.len() as u64; + let mut per_component = detailed_stats.then(BTreeMap::default); if let Some(per_component) = per_component.as_mut() { re_tracing::profile_scope!("detailed"); - if let Some(bucket) = &timeless { - upsert_bucket_stats(per_component, bucket); - } - - for bucket in per_data_time.values() { - upsert_bucket_stats(per_component, &bucket.read()); - } + upsert_bucket_stats(per_component, timeless); + upsert_bucket_stats(per_component, per_data_time); } - } - - CachedEntityStats { - total_size_bytes, - total_rows, - per_component, - } - }) - }) - .collect(); - - let range = caches - .0 - .read() - .iter() - .map(|(key, caches_per_arch)| { - (key.entity_path.clone(), { - caches_per_arch - .range_per_archetype - .read() - .values() - .map(|range_cache| { - let range_cache @ RangeCache { - per_data_time, - timeless, - } = &*range_cache.read(); - - let total_rows = per_data_time.data_times.len() as u64; - - let mut per_component = 
detailed_stats.then(BTreeMap::default); - if let Some(per_component) = per_component.as_mut() { - re_tracing::profile_scope!("detailed"); - - upsert_bucket_stats(per_component, timeless); - upsert_bucket_stats(per_component, per_data_time); - } - - ( - key.timeline, - per_data_time.time_range().unwrap_or(TimeRange::EMPTY), - CachedEntityStats { - total_size_bytes: range_cache.total_size_bytes(), - total_rows, - - per_component, - }, - ) - }) - .collect() - }) + ( + key.timeline, + per_data_time.time_range().unwrap_or(TimeRange::EMPTY), + CachedEntityStats { + total_size_bytes: range_cache.total_size_bytes(), + total_rows, + + per_component, + }, + ) + }) + .collect() }) - .collect(); + }) + .collect(); - CachesStats { latest_at, range } - }) + CachesStats { latest_at, range } } } diff --git a/crates/re_query_cache/src/latest_at.rs b/crates/re_query_cache/src/latest_at.rs index efcbfe737146..5cce908a1ea4 100644 --- a/crates/re_query_cache/src/latest_at.rs +++ b/crates/re_query_cache/src/latest_at.rs @@ -1,6 +1,5 @@ use std::{collections::BTreeMap, sync::Arc}; -use parking_lot::RwLock; use paste::paste; use seq_macro::seq; @@ -20,13 +19,17 @@ pub struct LatestAtCache { /// /// If the data you're looking for isn't in here, try partially running the query and check /// if there is any data available for the resulting _data_ time in [`Self::per_data_time`]. - pub per_query_time: BTreeMap>>, + // + // NOTE: `Arc` so we can deduplicate buckets across query time & data time. + pub per_query_time: BTreeMap>, /// Organized by _data_ time. /// /// Due to how our latest-at semantics work, any number of queries at time `T+n` where `n >= 0` /// can result in a data time of `T`. - pub per_data_time: BTreeMap>>, + // + // NOTE: `Arc` so we can deduplicate buckets across query time & data time. + pub per_data_time: BTreeMap>, /// Dedicated bucket for timeless data, if any. /// @@ -78,7 +81,7 @@ impl LatestAtCache { // Only if that bucket is about to be dropped. 
if Arc::strong_count(bucket) == 1 { - removed_bytes += bucket.read().total_size_bytes; + removed_bytes += bucket.total_size_bytes; } false @@ -107,6 +110,7 @@ macro_rules! impl_query_archetype_latest_at { #[doc = "(combined) for `" $N "` point-of-view components and `" $M "` optional components."] #[allow(non_snake_case)] pub fn []<'a, A, $($pov,)+ $($comp,)* F>( + &self, store: &'a DataStore, query: &LatestAtQuery, entity_path: &'a EntityPath, @@ -151,19 +155,18 @@ macro_rules! impl_query_archetype_latest_at { Ok(()) }; - let upsert_results = | + let create_and_fill_bucket = | data_time: TimeInt, arch_view: &::re_query::ArchetypeView, - bucket: &mut crate::CacheBucket, - | -> crate::Result { + | -> crate::Result< crate::CacheBucket> { re_log::trace!(data_time=?data_time, ?data_time, "fill"); // Grabbing the current time is quite costly on web. #[cfg(not(target_arch = "wasm32"))] let now = web_time::Instant::now(); - let mut added_size_bytes = 0u64; - added_size_bytes += bucket.[]::(data_time, &arch_view)?; + let mut bucket = crate::CacheBucket::default(); + bucket.[]::(data_time, &arch_view)?; #[cfg(not(target_arch = "wasm32"))] { @@ -172,13 +175,13 @@ macro_rules! impl_query_archetype_latest_at { store_id=%store.id(), %entity_path, archetype=%A::name(), - added_size_bytes, + added_size_bytes=bucket.total_size_bytes, "cached new entry in {elapsed:?} ({:0.3} entries/s)", 1f64 / elapsed.as_secs_f64() ); } - Ok(added_size_bytes) + Ok(bucket) }; let mut latest_at_callback = |query: &LatestAtQuery, latest_at_cache: &mut crate::LatestAtCache| { @@ -187,13 +190,13 @@ macro_rules! 
impl_query_archetype_latest_at { let crate::LatestAtCache { per_query_time, per_data_time, timeless, total_size_bytes } = latest_at_cache; let query_time_bucket_at_query_time = match per_query_time.entry(query.at) { - std::collections::btree_map::Entry::Occupied(query_time_bucket_at_query_time) => { + std::collections::btree_map::Entry::Occupied(mut query_time_bucket_at_query_time) => { // Fastest path: we have an entry for this exact query time, no need to look any // further. re_log::trace!(query_time=?query.at, "cache hit (query time)"); - return iter_results(false, &query_time_bucket_at_query_time.get().read()); + return iter_results(false, query_time_bucket_at_query_time.get_mut()); } - entry => entry, + std::collections::btree_map::Entry::Vacant(entry) => entry, }; @@ -206,15 +209,17 @@ macro_rules! impl_query_archetype_latest_at { if let Some(data_time_bucket_at_data_time) = per_data_time.get(&data_time) { re_log::trace!(query_time=?query.at, ?data_time, "cache hit (data time)"); - *query_time_bucket_at_query_time.or_default() = std::sync::Arc::clone(&data_time_bucket_at_data_time); + query_time_bucket_at_query_time.insert(std::sync::Arc::clone(&data_time_bucket_at_data_time)); // We now know for a fact that a query at that data time would yield the same // results: copy the bucket accordingly so that the next cache hit for that query // time ends up taking the fastest path. let query_time_bucket_at_data_time = per_query_time.entry(data_time); - *query_time_bucket_at_data_time.or_default() = std::sync::Arc::clone(&data_time_bucket_at_data_time); + query_time_bucket_at_data_time + .and_modify(|v| *v = std::sync::Arc::clone(&data_time_bucket_at_data_time)) + .or_insert(std::sync::Arc::clone(&data_time_bucket_at_data_time)); - return iter_results(false, &data_time_bucket_at_data_time.read()); + return iter_results(false, &data_time_bucket_at_data_time); } } else { if let Some(timeless_bucket) = timeless.as_ref() { @@ -228,36 +233,33 @@ macro_rules! 
impl_query_archetype_latest_at { if let Some(data_time) = data_time { // Reminder: `None` means timeless. re_log::trace!(query_time=?query.at, ?data_time, "cache miss"); - // BEWARE: Do _not_ move this out of this scope, or a bucket would be created - // even when taking the timeless path! - let query_time_bucket_at_query_time = query_time_bucket_at_query_time.or_default(); - - { - let mut query_time_bucket_at_query_time = query_time_bucket_at_query_time.write(); - *total_size_bytes += upsert_results(data_time, &arch_view, &mut query_time_bucket_at_query_time)?; - } + let bucket = Arc::new(create_and_fill_bucket(data_time, &arch_view)?); + *total_size_bytes += bucket.total_size_bytes; + let query_time_bucket_at_query_time = query_time_bucket_at_query_time.insert(bucket); let data_time_bucket_at_data_time = per_data_time.entry(data_time); - *data_time_bucket_at_data_time.or_default() = std::sync::Arc::clone(&query_time_bucket_at_query_time); + data_time_bucket_at_data_time + .and_modify(|v| *v = std::sync::Arc::clone(&query_time_bucket_at_query_time)) + .or_insert(std::sync::Arc::clone(&query_time_bucket_at_query_time)); - iter_results(false, &query_time_bucket_at_query_time.read()) + iter_results(false, &query_time_bucket_at_query_time) } else { re_log::trace!(query_time=?query.at, "cache miss (timeless)"); - let mut timeless_bucket = crate::CacheBucket::default(); + let bucket = create_and_fill_bucket(TimeInt::MIN, &arch_view)?; + *total_size_bytes += bucket.total_size_bytes; - *total_size_bytes += upsert_results(TimeInt::MIN, &arch_view, &mut timeless_bucket)?; - iter_results(true, &timeless_bucket)?; + iter_results(true, &bucket)?; - *timeless = Some(timeless_bucket); + *timeless = Some(bucket); Ok(()) } }; - Caches::with_latest_at::( - store.id().clone(), + self.with_latest_at::( + store, entity_path.clone(), query, |latest_at_cache| latest_at_callback(query, latest_at_cache), @@ -274,6 +276,8 @@ macro_rules! 
impl_query_archetype_latest_at { }; } -seq!(NUM_COMP in 0..10 { - impl_query_archetype_latest_at!(for N=1, M=NUM_COMP); -}); +impl Caches { + seq!(NUM_COMP in 0..10 { + impl_query_archetype_latest_at!(for N=1, M=NUM_COMP); + }); +} diff --git a/crates/re_query_cache/src/lib.rs b/crates/re_query_cache/src/lib.rs index fc7f84439f86..706524d4f1ff 100644 --- a/crates/re_query_cache/src/lib.rs +++ b/crates/re_query_cache/src/lib.rs @@ -10,29 +10,11 @@ mod range; pub use self::cache::{AnyQuery, Caches}; pub use self::cache_stats::{CachedComponentStats, CachedEntityStats, CachesStats}; pub use self::flat_vec_deque::{ErasedFlatVecDeque, FlatVecDeque}; -pub use self::query::{ - query_archetype_pov1, query_archetype_with_history_pov1, MaybeCachedComponentData, -}; -seq_macro::seq!(NUM_COMP in 0..10 { paste::paste! { - pub use self::query::{#( - query_archetype_pov1_comp~NUM_COMP, - query_archetype_with_history_pov1_comp~NUM_COMP, - )*}; -}}); +pub use self::query::MaybeCachedComponentData; pub(crate) use self::cache::CacheBucket; pub(crate) use self::latest_at::LatestAtCache; -seq_macro::seq!(NUM_COMP in 0..10 { paste::paste! { - pub(crate) use self::latest_at::{#( - query_archetype_latest_at_pov1_comp~NUM_COMP, - )*}; -}}); pub(crate) use self::range::RangeCache; -seq_macro::seq!(NUM_COMP in 0..10 { paste::paste! 
{ - pub(crate) use self::range::{#( - query_archetype_range_pov1_comp~NUM_COMP, - )*}; -}}); pub use re_query::{QueryError, Result}; // convenience diff --git a/crates/re_query_cache/src/query.rs b/crates/re_query_cache/src/query.rs index 13a0595d846b..2c1bc56cb018 100644 --- a/crates/re_query_cache/src/query.rs +++ b/crates/re_query_cache/src/query.rs @@ -2,11 +2,11 @@ use paste::paste; use seq_macro::seq; use re_data_store::{DataStore, LatestAtQuery, RangeQuery, TimeInt, TimeRange, Timeline}; -use re_entity_db::{ExtraQueryHistory, VisibleHistory}; use re_log_types::{EntityPath, RowId}; +use re_query::{ExtraQueryHistory, VisibleHistory}; use re_types_core::{components::InstanceKey, Archetype, Component}; -use crate::AnyQuery; +use crate::{AnyQuery, Caches}; // --- @@ -51,27 +51,30 @@ impl<'a, C> MaybeCachedComponentData<'a, C> { /// Cached implementation of [`re_query::query_archetype`] and [`re_query::range_archetype`] /// (combined) for 1 point-of-view component and no optional components. /// -/// Alias for [`query_archetype_pov1_comp0`]. -#[inline] -pub fn query_archetype_pov1<'a, A, R1, F>( - cached: bool, - store: &'a DataStore, - query: &AnyQuery, - entity_path: &'a EntityPath, - f: F, -) -> ::re_query::Result<()> -where - A: Archetype + 'a, - R1: Component + Send + Sync + 'static, - F: FnMut( - ( - (Option, RowId), - MaybeCachedComponentData<'_, InstanceKey>, - MaybeCachedComponentData<'_, R1>, +/// Alias for [`Self::query_archetype_pov1_comp0`]. 
+impl Caches { + #[inline] + pub fn query_archetype_pov1<'a, A, R1, F>( + &self, + cached: bool, + store: &'a DataStore, + query: &AnyQuery, + entity_path: &'a EntityPath, + f: F, + ) -> ::re_query::Result<()> + where + A: Archetype + 'a, + R1: Component + Send + Sync + 'static, + F: FnMut( + ( + (Option, RowId), + MaybeCachedComponentData<'_, InstanceKey>, + MaybeCachedComponentData<'_, R1>, + ), ), - ), -{ - query_archetype_pov1_comp0::(cached, store, query, entity_path, f) + { + self.query_archetype_pov1_comp0::(cached, store, query, entity_path, f) + } } macro_rules! impl_query_archetype { @@ -80,6 +83,7 @@ macro_rules! impl_query_archetype { #[doc = "(combined) for `" $N "` point-of-view components and `" $M "` optional components."] #[allow(non_snake_case)] pub fn []<'a, A, $($pov,)+ $($comp,)* F>( + &self, cached: bool, store: &'a DataStore, query: &AnyQuery, @@ -126,7 +130,7 @@ macro_rules! impl_query_archetype { AnyQuery::LatestAt(query) => { re_tracing::profile_scope!("latest_at", format!("{query:?}")); - crate::[]::( + self.[]::( store, query, entity_path, @@ -158,7 +162,7 @@ macro_rules! impl_query_archetype { AnyQuery::Range(query) => { re_tracing::profile_scope!("range", format!("{query:?}")); - crate::[]::( + self.[]::( store, query, entity_path, @@ -178,49 +182,54 @@ macro_rules! impl_query_archetype { }; } -seq!(NUM_COMP in 0..10 { - impl_query_archetype!(for N=1, M=NUM_COMP); -}); +impl Caches { + seq!(NUM_COMP in 0..10 { + impl_query_archetype!(for N=1, M=NUM_COMP); + }); +} // --- /// Cached implementation of [`re_query::query_archetype_with_history`] for 1 point-of-view component /// and no optional components. /// -/// Alias for [`query_archetype_with_history_pov1_comp0`]. 
-#[allow(clippy::too_many_arguments)] -#[inline] -pub fn query_archetype_with_history_pov1<'a, A, R1, F>( - cached_latest_at: bool, - cached_range: bool, - store: &'a DataStore, - timeline: &'a Timeline, - time: &'a TimeInt, - history: &ExtraQueryHistory, - ent_path: &'a EntityPath, - f: F, -) -> ::re_query::Result<()> -where - A: Archetype + 'a, - R1: Component + Send + Sync + 'static, - F: FnMut( - ( - (Option, RowId), - MaybeCachedComponentData<'_, InstanceKey>, - MaybeCachedComponentData<'_, R1>, +/// Alias for [`Self::query_archetype_with_history_pov1_comp0`]. +impl Caches { + #[allow(clippy::too_many_arguments)] + #[inline] + pub fn query_archetype_with_history_pov1<'a, A, R1, F>( + &self, + cached_latest_at: bool, + cached_range: bool, + store: &'a DataStore, + timeline: &'a Timeline, + time: &'a TimeInt, + history: &ExtraQueryHistory, + ent_path: &'a EntityPath, + f: F, + ) -> ::re_query::Result<()> + where + A: Archetype + 'a, + R1: Component + Send + Sync + 'static, + F: FnMut( + ( + (Option, RowId), + MaybeCachedComponentData<'_, InstanceKey>, + MaybeCachedComponentData<'_, R1>, + ), ), - ), -{ - query_archetype_with_history_pov1_comp0::( - cached_latest_at, - cached_range, - store, - timeline, - time, - history, - ent_path, - f, - ) + { + self.query_archetype_with_history_pov1_comp0::( + cached_latest_at, + cached_range, + store, + timeline, + time, + history, + ent_path, + f, + ) + } } /// Generates a function to cache a (potentially historical) query with N point-of-view components and M @@ -231,6 +240,7 @@ macro_rules! impl_query_archetype_with_history { #[doc = "components and `" $M "` optional components."] #[allow(clippy::too_many_arguments)] pub fn []<'a, A, $($pov,)+ $($comp,)* F>( + &self, cached_latest_at: bool, cached_range: bool, store: &'a DataStore, @@ -267,7 +277,7 @@ macro_rules! 
impl_query_archetype_with_history { ); let query = LatestAtQuery::new(*timeline, *time); - $crate::[]::( + self.[]::( cached_latest_at, store, &query.clone().into(), @@ -284,7 +294,7 @@ macro_rules! impl_query_archetype_with_history { let min_time = visible_history.from(*time); let max_time = visible_history.to(*time); let query = RangeQuery::new(*timeline, TimeRange::new(min_time, max_time)); - $crate::[]::( + self.[]::( cached_range, store, &query.clone().into(), @@ -304,6 +314,8 @@ macro_rules! impl_query_archetype_with_history { }; } -seq!(NUM_COMP in 0..10 { - impl_query_archetype_with_history!(for N=1, M=NUM_COMP); -}); +impl Caches { + seq!(NUM_COMP in 0..10 { + impl_query_archetype_with_history!(for N=1, M=NUM_COMP); + }); +} diff --git a/crates/re_query_cache/src/range.rs b/crates/re_query_cache/src/range.rs index 5754d2dd0494..1e99ce567ab3 100644 --- a/crates/re_query_cache/src/range.rs +++ b/crates/re_query_cache/src/range.rs @@ -121,6 +121,7 @@ macro_rules! impl_query_archetype_range { #[doc = "(combined) for `" $N "` point-of-view components and `" $M "` optional components."] #[allow(non_snake_case)] pub fn []<'a, A, $($pov,)+ $($comp,)* F>( + &self, store: &'a DataStore, query: &RangeQuery, entity_path: &'a EntityPath, @@ -246,8 +247,8 @@ macro_rules! impl_query_archetype_range { }; - Caches::with_range::( - store.id().clone(), + self.with_range::( + store, entity_path.clone(), query, |range_cache| range_callback(query, range_cache), @@ -264,6 +265,8 @@ macro_rules! 
impl_query_archetype_range { }; } -seq!(NUM_COMP in 0..10 { - impl_query_archetype_range!(for N=1, M=NUM_COMP); -}); +impl Caches { + seq!(NUM_COMP in 0..10 { + impl_query_archetype_range!(for N=1, M=NUM_COMP); + }); +} diff --git a/crates/re_query_cache/tests/latest_at.rs b/crates/re_query_cache/tests/latest_at.rs index 296af2c1eeaa..e98297065b6a 100644 --- a/crates/re_query_cache/tests/latest_at.rs +++ b/crates/re_query_cache/tests/latest_at.rs @@ -4,13 +4,13 @@ use itertools::Itertools as _; -use re_data_store::{DataStore, LatestAtQuery}; +use re_data_store::{DataStore, LatestAtQuery, StoreSubscriber}; use re_log_types::{ build_frame_nr, example_components::{MyColor, MyPoint, MyPoints}, DataRow, EntityPath, RowId, TimePoint, }; -use re_query_cache::query_archetype_pov1_comp1; +use re_query_cache::Caches; use re_types_core::{components::InstanceKey, Loggable as _}; // --- @@ -22,6 +22,7 @@ fn simple_query() { InstanceKey::name(), Default::default(), ); + let mut caches = Caches::new(&store); let ent_path = "point"; let timepoint = [build_frame_nr(123.into())]; @@ -29,7 +30,7 @@ fn simple_query() { // Create some positions with implicit instances let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; let row = DataRow::from_cells1_sized(RowId::new(), ent_path, timepoint, 2, positions).unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); // Assign one of them a color with an explicit instance let color_instances = vec![InstanceKey(1)]; @@ -42,10 +43,10 @@ fn simple_query() { (color_instances, colors), ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); let query = re_data_store::LatestAtQuery::new(timepoint[0].0, timepoint[0].1); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); } #[test] @@ -55,6 +56,7 @@ fn timeless_query() { InstanceKey::name(), Default::default(), ); + let mut caches = 
Caches::new(&store); let ent_path = "point"; let timepoint = [build_frame_nr(123.into())]; @@ -62,17 +64,17 @@ fn timeless_query() { // Create some positions with implicit instances let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; let row = DataRow::from_cells1_sized(RowId::new(), ent_path, timepoint, 2, positions).unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); // Assign one of them a color with an explicit instance.. timelessly! let color_instances = vec![InstanceKey(1)]; let colors = vec![MyColor::from_rgb(255, 0, 0)]; let row = DataRow::from_cells2_sized(RowId::new(), ent_path, [], 1, (color_instances, colors)) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); let query = re_data_store::LatestAtQuery::new(timepoint[0].0, timepoint[0].1); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); } #[test] @@ -82,6 +84,7 @@ fn no_instance_join_query() { InstanceKey::name(), Default::default(), ); + let mut caches = Caches::new(&store); let ent_path = "point"; let timepoint = [build_frame_nr(123.into())]; @@ -89,15 +92,15 @@ fn no_instance_join_query() { // Create some positions with an implicit instance let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; let row = DataRow::from_cells1_sized(RowId::new(), ent_path, timepoint, 2, positions).unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); // Assign them colors with explicit instances let colors = vec![MyColor::from_rgb(255, 0, 0), MyColor::from_rgb(0, 255, 0)]; let row = DataRow::from_cells1_sized(RowId::new(), ent_path, timepoint, 2, colors).unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); let query = re_data_store::LatestAtQuery::new(timepoint[0].0, timepoint[0].1); - query_and_compare(&store, &query, &ent_path.into()); + 
query_and_compare(&caches, &store, &query, &ent_path.into()); } #[test] @@ -107,6 +110,7 @@ fn missing_column_join_query() { InstanceKey::name(), Default::default(), ); + let mut caches = Caches::new(&store); let ent_path = "point"; let timepoint = [build_frame_nr(123.into())]; @@ -114,10 +118,10 @@ fn missing_column_join_query() { // Create some positions with an implicit instance let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; let row = DataRow::from_cells1_sized(RowId::new(), ent_path, timepoint, 2, positions).unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); let query = re_data_store::LatestAtQuery::new(timepoint[0].0, timepoint[0].1); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); } #[test] @@ -127,6 +131,7 @@ fn splatted_query() { InstanceKey::name(), Default::default(), ); + let mut caches = Caches::new(&store); let ent_path = "point"; let timepoint = [build_frame_nr(123.into())]; @@ -134,7 +139,7 @@ fn splatted_query() { // Create some positions with implicit instances let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; let row = DataRow::from_cells1_sized(RowId::new(), ent_path, timepoint, 2, positions).unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); // Assign all of them a color via splat let color_instances = vec![InstanceKey::SPLAT]; @@ -147,10 +152,10 @@ fn splatted_query() { (color_instances, colors), ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); let query = re_data_store::LatestAtQuery::new(timepoint[0].0, timepoint[0].1); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); } #[test] @@ -166,6 +171,7 @@ fn invalidation() { InstanceKey::name(), Default::default(), ); + let mut caches = Caches::new(&store); // Create some positions 
with implicit instances let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; @@ -177,7 +183,7 @@ fn invalidation() { positions, ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); // Assign one of them a color with an explicit instance let color_instances = vec![InstanceKey(1)]; @@ -190,9 +196,9 @@ fn invalidation() { (color_instances, colors), ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); // --- Modify present --- @@ -206,18 +212,18 @@ fn invalidation() { positions, ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); // Modify the optional component let colors = vec![MyColor::from_rgb(4, 5, 6), MyColor::from_rgb(7, 8, 9)]; let row = DataRow::from_cells1_sized(RowId::new(), ent_path, present_data_timepoint, 2, colors) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); // --- Modify past --- @@ -231,18 +237,18 @@ fn invalidation() { positions, ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); // Modify the optional component let colors = vec![MyColor::from_rgb(10, 11, 12), MyColor::from_rgb(13, 14, 15)]; let row = DataRow::from_cells1_sized(RowId::new(), ent_path, past_data_timepoint, 2, colors) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); 
+ query_and_compare(&caches, &store, &query, &ent_path.into()); // --- Modify future --- @@ -256,18 +262,18 @@ fn invalidation() { positions, ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); // Modify the optional component let colors = vec![MyColor::from_rgb(16, 17, 18)]; let row = DataRow::from_cells1_sized(RowId::new(), ent_path, future_data_timepoint, 1, colors) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); }; let timeless = TimePoint::timeless(); @@ -328,6 +334,7 @@ fn invalidation_of_future_optionals() { InstanceKey::name(), Default::default(), ); + let mut caches = Caches::new(&store); let ent_path = "points"; @@ -339,40 +346,40 @@ fn invalidation_of_future_optionals() { let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; let row = DataRow::from_cells1_sized(RowId::new(), ent_path, timeless, 2, positions).unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); let query = re_data_store::LatestAtQuery::new(query_time[0].0, query_time[0].1); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); let color_instances = vec![InstanceKey::SPLAT]; let colors = vec![MyColor::from_rgb(255, 0, 0)]; let row = DataRow::from_cells2_sized(RowId::new(), ent_path, frame2, 1, (color_instances, colors)) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); let query = re_data_store::LatestAtQuery::new(query_time[0].0, query_time[0].1); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); let color_instances = 
vec![InstanceKey::SPLAT]; let colors = vec![MyColor::from_rgb(0, 0, 255)]; let row = DataRow::from_cells2_sized(RowId::new(), ent_path, frame3, 1, (color_instances, colors)) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); let query = re_data_store::LatestAtQuery::new(query_time[0].0, query_time[0].1); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); let color_instances = vec![InstanceKey::SPLAT]; let colors = vec![MyColor::from_rgb(0, 255, 0)]; let row = DataRow::from_cells2_sized(RowId::new(), ent_path, frame3, 1, (color_instances, colors)) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); let query = re_data_store::LatestAtQuery::new(query_time[0].0, query_time[0].1); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); } #[test] @@ -382,6 +389,7 @@ fn invalidation_timeless() { InstanceKey::name(), Default::default(), ); + let mut caches = Caches::new(&store); let ent_path = "points"; @@ -392,10 +400,10 @@ fn invalidation_timeless() { let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; let row = DataRow::from_cells1_sized(RowId::new(), ent_path, timeless.clone(), 2, positions).unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); let query = re_data_store::LatestAtQuery::new(query_time[0].0, query_time[0].1); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); let color_instances = vec![InstanceKey::SPLAT]; let colors = vec![MyColor::from_rgb(255, 0, 0)]; @@ -407,10 +415,10 @@ fn invalidation_timeless() { (color_instances, colors), ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); let query = re_data_store::LatestAtQuery::new(query_time[0].0, query_time[0].1); - 
query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); let color_instances = vec![InstanceKey::SPLAT]; let colors = vec![MyColor::from_rgb(0, 0, 255)]; @@ -422,51 +430,62 @@ fn invalidation_timeless() { (color_instances, colors), ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); let query = re_data_store::LatestAtQuery::new(query_time[0].0, query_time[0].1); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); } // --- -fn query_and_compare(store: &DataStore, query: &LatestAtQuery, ent_path: &EntityPath) { +fn insert_and_react(store: &mut DataStore, caches: &mut Caches, row: &DataRow) { + caches.on_events(&[store.insert_row(row).unwrap()]); +} + +fn query_and_compare( + caches: &Caches, + store: &DataStore, + query: &LatestAtQuery, + ent_path: &EntityPath, +) { for _ in 0..3 { let mut uncached_data_time = None; let mut uncached_instance_keys = Vec::new(); let mut uncached_positions = Vec::new(); let mut uncached_colors = Vec::new(); - query_archetype_pov1_comp1::( - false, // cached? - store, - &query.clone().into(), - ent_path, - |((data_time, _), instance_keys, positions, colors)| { - uncached_data_time = data_time; - uncached_instance_keys.extend(instance_keys.iter().copied()); - uncached_positions.extend(positions.iter().copied()); - uncached_colors.extend(colors.iter().copied()); - }, - ) - .unwrap(); + caches + .query_archetype_pov1_comp1::( + false, // cached? 
+ store, + &query.clone().into(), + ent_path, + |((data_time, _), instance_keys, positions, colors)| { + uncached_data_time = data_time; + uncached_instance_keys.extend(instance_keys.iter().copied()); + uncached_positions.extend(positions.iter().copied()); + uncached_colors.extend(colors.iter().copied()); + }, + ) + .unwrap(); let mut cached_data_time = None; let mut cached_instance_keys = Vec::new(); let mut cached_positions = Vec::new(); let mut cached_colors = Vec::new(); - query_archetype_pov1_comp1::( - true, // cached? - store, - &query.clone().into(), - ent_path, - |((data_time, _), instance_keys, positions, colors)| { - cached_data_time = data_time; - cached_instance_keys.extend(instance_keys.iter().copied()); - cached_positions.extend(positions.iter().copied()); - cached_colors.extend(colors.iter().copied()); - }, - ) - .unwrap(); + caches + .query_archetype_pov1_comp1::( + true, // cached? + store, + &query.clone().into(), + ent_path, + |((data_time, _), instance_keys, positions, colors)| { + cached_data_time = data_time; + cached_instance_keys.extend(instance_keys.iter().copied()); + cached_positions.extend(positions.iter().copied()); + cached_colors.extend(colors.iter().copied()); + }, + ) + .unwrap(); let expected = re_query::query_archetype::(store, query, ent_path).unwrap(); let expected_data_time = expected.data_time(); diff --git a/crates/re_query_cache/tests/range.rs b/crates/re_query_cache/tests/range.rs index 86bb7af77f16..dd91cb9c77fa 100644 --- a/crates/re_query_cache/tests/range.rs +++ b/crates/re_query_cache/tests/range.rs @@ -4,13 +4,13 @@ use itertools::Itertools as _; -use re_data_store::{DataStore, RangeQuery}; +use re_data_store::{DataStore, RangeQuery, StoreSubscriber}; use re_log_types::{ build_frame_nr, example_components::{MyColor, MyLabel, MyPoint, MyPoints}, DataRow, EntityPath, RowId, TimeInt, TimePoint, TimeRange, }; -use re_query_cache::query_archetype_pov1_comp2; +use re_query_cache::Caches; use 
re_types::components::InstanceKey; use re_types_core::Loggable as _; @@ -23,6 +23,7 @@ fn simple_range() { InstanceKey::name(), Default::default(), ); + let mut caches = Caches::new(&store); let ent_path: EntityPath = "point".into(); @@ -33,7 +34,7 @@ fn simple_range() { let row = DataRow::from_cells1_sized(RowId::new(), ent_path.clone(), timepoint1, 2, positions) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); // Assign one of them a color with an explicit instance let color_instances = vec![InstanceKey(1)]; @@ -46,7 +47,7 @@ fn simple_range() { (color_instances, colors), ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); } let timepoint2 = [build_frame_nr(223.into())]; @@ -62,7 +63,7 @@ fn simple_range() { (color_instances, colors), ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); } let timepoint3 = [build_frame_nr(323.into())]; @@ -72,7 +73,7 @@ fn simple_range() { let row = DataRow::from_cells1_sized(RowId::new(), ent_path.clone(), timepoint3, 2, positions) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); } // --- First test: `(timepoint1, timepoint3]` --- @@ -82,7 +83,7 @@ fn simple_range() { TimeRange::new((timepoint1[0].1.as_i64() + 1).into(), timepoint3[0].1), ); - query_and_compare(&store, &query, &ent_path); + query_and_compare(&caches, &store, &query, &ent_path); // --- Second test: `[timepoint1, timepoint3]` --- @@ -93,7 +94,7 @@ fn simple_range() { TimeRange::new(timepoint1[0].1, timepoint3[0].1), ); - query_and_compare(&store, &query, &ent_path); + query_and_compare(&caches, &store, &query, &ent_path); } #[test] @@ -103,6 +104,7 @@ fn timeless_range() { InstanceKey::name(), Default::default(), ); + let mut caches = Caches::new(&store); let ent_path: EntityPath = "point".into(); @@ -114,12 +116,12 @@ fn timeless_range() { DataRow::from_cells1(RowId::new(), 
ent_path.clone(), timepoint1, 2, &positions) .unwrap(); row.compute_all_size_bytes(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); // Insert timelessly too! let row = DataRow::from_cells1_sized(RowId::new(), ent_path.clone(), [], 2, &positions).unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); // Assign one of them a color with an explicit instance let color_instances = vec![InstanceKey(1)]; @@ -132,7 +134,7 @@ fn timeless_range() { (color_instances.clone(), colors.clone()), ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); // Insert timelessly too! let row = DataRow::from_cells2_sized( @@ -143,7 +145,7 @@ fn timeless_range() { (color_instances, colors), ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); } let timepoint2 = [build_frame_nr(223.into())]; @@ -159,7 +161,7 @@ fn timeless_range() { (color_instances.clone(), colors.clone()), ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); // Insert timelessly too! let row = DataRow::from_cells2_sized( @@ -170,7 +172,7 @@ fn timeless_range() { (color_instances, colors), ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); } let timepoint3 = [build_frame_nr(323.into())]; @@ -180,12 +182,12 @@ fn timeless_range() { let row = DataRow::from_cells1_sized(RowId::new(), ent_path.clone(), timepoint3, 2, &positions) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); // Insert timelessly too! 
let row = DataRow::from_cells1_sized(RowId::new(), ent_path.clone(), [], 2, &positions).unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); } // --- First test: `(timepoint1, timepoint3]` --- @@ -195,7 +197,7 @@ fn timeless_range() { TimeRange::new((timepoint1[0].1.as_i64() + 1).into(), timepoint3[0].1), ); - query_and_compare(&store, &query, &ent_path); + query_and_compare(&caches, &store, &query, &ent_path); // --- Second test: `[timepoint1, timepoint3]` --- @@ -206,14 +208,14 @@ fn timeless_range() { TimeRange::new(timepoint1[0].1, timepoint3[0].1), ); - query_and_compare(&store, &query, &ent_path); + query_and_compare(&caches, &store, &query, &ent_path); // --- Third test: `[-inf, +inf]` --- let query = re_data_store::RangeQuery::new(timepoint1[0].0, TimeRange::new(TimeInt::MIN, TimeInt::MAX)); - query_and_compare(&store, &query, &ent_path); + query_and_compare(&caches, &store, &query, &ent_path); } #[test] @@ -223,6 +225,7 @@ fn simple_splatted_range() { InstanceKey::name(), Default::default(), ); + let mut caches = Caches::new(&store); let ent_path: EntityPath = "point".into(); @@ -233,7 +236,7 @@ fn simple_splatted_range() { let row = DataRow::from_cells1_sized(RowId::new(), ent_path.clone(), timepoint1, 2, positions) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); // Assign one of them a color with an explicit instance let color_instances = vec![InstanceKey(1)]; @@ -246,7 +249,7 @@ fn simple_splatted_range() { (color_instances, colors), ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); } let timepoint2 = [build_frame_nr(223.into())]; @@ -262,7 +265,7 @@ fn simple_splatted_range() { (color_instances, colors), ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); } let timepoint3 = [build_frame_nr(323.into())]; @@ -272,7 +275,7 @@ fn simple_splatted_range() { let row = 
DataRow::from_cells1_sized(RowId::new(), ent_path.clone(), timepoint3, 2, positions) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); } // --- First test: `(timepoint1, timepoint3]` --- @@ -282,7 +285,7 @@ fn simple_splatted_range() { TimeRange::new((timepoint1[0].1.as_i64() + 1).into(), timepoint3[0].1), ); - query_and_compare(&store, &query, &ent_path); + query_and_compare(&caches, &store, &query, &ent_path); // --- Second test: `[timepoint1, timepoint3]` --- @@ -293,7 +296,7 @@ fn simple_splatted_range() { TimeRange::new(timepoint1[0].1, timepoint3[0].1), ); - query_and_compare(&store, &query, &ent_path); + query_and_compare(&caches, &store, &query, &ent_path); } #[test] @@ -309,6 +312,7 @@ fn invalidation() { InstanceKey::name(), Default::default(), ); + let mut caches = Caches::new(&store); // Create some positions with implicit instances let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; @@ -320,7 +324,7 @@ fn invalidation() { positions, ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); // Assign one of them a color with an explicit instance let color_instances = vec![InstanceKey(1)]; @@ -333,9 +337,9 @@ fn invalidation() { (color_instances, colors), ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); // --- Modify present --- @@ -349,18 +353,18 @@ fn invalidation() { positions, ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); // Modify the optional component let colors = vec![MyColor::from_rgb(4, 5, 6), MyColor::from_rgb(7, 8, 9)]; let row = DataRow::from_cells1_sized(RowId::new(), ent_path, present_data_timepoint, 
2, colors) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); // --- Modify past --- @@ -374,9 +378,9 @@ fn invalidation() { positions, ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); // Modify the optional component let colors = vec![MyColor::from_rgb(10, 11, 12), MyColor::from_rgb(13, 14, 15)]; @@ -388,9 +392,9 @@ fn invalidation() { colors, ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); // --- Modify future --- @@ -404,18 +408,18 @@ fn invalidation() { positions, ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); // Modify the optional component let colors = vec![MyColor::from_rgb(16, 17, 18)]; let row = DataRow::from_cells1_sized(RowId::new(), ent_path, future_data_timepoint, 1, colors) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); }; let timeless = TimePoint::timeless(); @@ -470,6 +474,7 @@ fn invalidation_of_future_optionals() { InstanceKey::name(), Default::default(), ); + let mut caches = Caches::new(&store); let ent_path = "points"; @@ -481,36 +486,36 @@ fn invalidation_of_future_optionals() { let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; let row = DataRow::from_cells1_sized(RowId::new(), ent_path, 
timeless, 2, positions).unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); let color_instances = vec![InstanceKey::SPLAT]; let colors = vec![MyColor::from_rgb(255, 0, 0)]; let row = DataRow::from_cells2_sized(RowId::new(), ent_path, frame2, 1, (color_instances, colors)) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); let color_instances = vec![InstanceKey::SPLAT]; let colors = vec![MyColor::from_rgb(0, 0, 255)]; let row = DataRow::from_cells2_sized(RowId::new(), ent_path, frame3, 1, (color_instances, colors)) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); let color_instances = vec![InstanceKey::SPLAT]; let colors = vec![MyColor::from_rgb(0, 255, 0)]; let row = DataRow::from_cells2_sized(RowId::new(), ent_path, frame3, 1, (color_instances, colors)) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); } #[test] @@ -520,6 +525,7 @@ fn invalidation_timeless() { InstanceKey::name(), Default::default(), ); + let mut caches = Caches::new(&store); let ent_path = "points"; @@ -531,9 +537,9 @@ fn invalidation_timeless() { let positions = vec![MyPoint::new(1.0, 2.0), MyPoint::new(3.0, 4.0)]; let row = DataRow::from_cells1_sized(RowId::new(), ent_path, timeless.clone(), 2, positions).unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, 
&ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); let color_instances = vec![InstanceKey::SPLAT]; let colors = vec![MyColor::from_rgb(255, 0, 0)]; @@ -545,9 +551,9 @@ fn invalidation_timeless() { (color_instances, colors), ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); let color_instances = vec![InstanceKey::SPLAT]; let colors = vec![MyColor::from_rgb(0, 0, 255)]; @@ -559,50 +565,61 @@ fn invalidation_timeless() { (color_instances, colors), ) .unwrap(); - store.insert_row(&row).unwrap(); + insert_and_react(&mut store, &mut caches, &row); - query_and_compare(&store, &query, &ent_path.into()); + query_and_compare(&caches, &store, &query, &ent_path.into()); } // --- -fn query_and_compare(store: &DataStore, query: &RangeQuery, ent_path: &EntityPath) { +fn insert_and_react(store: &mut DataStore, caches: &mut Caches, row: &DataRow) { + caches.on_events(&[store.insert_row(row).unwrap()]); +} + +fn query_and_compare( + caches: &Caches, + store: &DataStore, + query: &RangeQuery, + ent_path: &EntityPath, +) { for _ in 0..3 { let mut uncached_data_times = Vec::new(); let mut uncached_instance_keys = Vec::new(); let mut uncached_positions = Vec::new(); let mut uncached_colors = Vec::new(); - query_archetype_pov1_comp2::( - false, // cached? - store, - &query.clone().into(), - ent_path, - |((data_time, _), instance_keys, positions, colors, _)| { - uncached_data_times.push(data_time); - uncached_instance_keys.push(instance_keys.to_vec()); - uncached_positions.push(positions.to_vec()); - uncached_colors.push(colors.to_vec()); - }, - ) - .unwrap(); + caches + .query_archetype_pov1_comp2::( + false, // cached? 
+ store, + &query.clone().into(), + ent_path, + |((data_time, _), instance_keys, positions, colors, _)| { + uncached_data_times.push(data_time); + uncached_instance_keys.push(instance_keys.to_vec()); + uncached_positions.push(positions.to_vec()); + uncached_colors.push(colors.to_vec()); + }, + ) + .unwrap(); let mut cached_data_times = Vec::new(); let mut cached_instance_keys = Vec::new(); let mut cached_positions = Vec::new(); let mut cached_colors = Vec::new(); - query_archetype_pov1_comp2::( - true, // cached? - store, - &query.clone().into(), - ent_path, - |((data_time, _), instance_keys, positions, colors, _)| { - cached_data_times.push(data_time); - cached_instance_keys.push(instance_keys.to_vec()); - cached_positions.push(positions.to_vec()); - cached_colors.push(colors.to_vec()); - }, - ) - .unwrap(); + caches + .query_archetype_pov1_comp2::( + true, // cached? + store, + &query.clone().into(), + ent_path, + |((data_time, _), instance_keys, positions, colors, _)| { + cached_data_times.push(data_time); + cached_instance_keys.push(instance_keys.to_vec()); + cached_positions.push(positions.to_vec()); + cached_colors.push(colors.to_vec()); + }, + ) + .unwrap(); let mut expected_data_times = Vec::new(); let mut expected_instance_keys = Vec::new(); diff --git a/crates/re_space_view_spatial/benches/bench_points.rs b/crates/re_space_view_spatial/benches/bench_points.rs index 46cff79b6cab..5e63ad0e0bef 100644 --- a/crates/re_space_view_spatial/benches/bench_points.rs +++ b/crates/re_space_view_spatial/benches/bench_points.rs @@ -2,6 +2,7 @@ use re_data_store::{DataStore, LatestAtQuery}; use re_log_types::{DataRow, EntityPath, RowId, TimeInt, TimePoint, Timeline}; +use re_query_cache::Caches; use re_space_view_spatial::{LoadedPoints, Points3DComponentData}; use re_types::{ archetypes::Points3D, @@ -58,6 +59,7 @@ fn bench_points(c: &mut criterion::Criterion) { store.insert_row(&data_row).unwrap(); store }; + let caches = Caches::new(&store); let latest_at = 
LatestAtQuery::latest(timeline); let at = latest_at.at; @@ -72,7 +74,7 @@ fn bench_points(c: &mut criterion::Criterion) { let mut group = c.benchmark_group("Points3D"); group.bench_function(bench_name(*cached, "query_archetype"), |b| { b.iter(|| { - re_query_cache::query_archetype_pov1_comp5::< + caches.query_archetype_pov1_comp5::< Points3D, Position3D, Color, @@ -96,7 +98,7 @@ fn bench_points(c: &mut criterion::Criterion) { } for cached in CACHED { - re_query_cache::query_archetype_pov1_comp5::< + caches.query_archetype_pov1_comp5::< Points3D, Position3D, Color, diff --git a/crates/re_space_view_spatial/src/visualizers/entity_iterator.rs b/crates/re_space_view_spatial/src/visualizers/entity_iterator.rs index 2ca7c8e366bd..9e34213dce07 100644 --- a/crates/re_space_view_spatial/src/visualizers/entity_iterator.rs +++ b/crates/re_space_view_spatial/src/visualizers/entity_iterator.rs @@ -183,7 +183,7 @@ macro_rules! impl_process_archetype { space_view_class_identifier: view_ctx.space_view_class_identifier(), }; - ::re_query_cache::[]::( + ctx.entity_db.query_caches().[]::( ctx.app_options.experimental_primary_caching_latest_at, ctx.app_options.experimental_primary_caching_range, ctx.entity_db.store(), diff --git a/crates/re_space_view_text_log/src/visualizer_system.rs b/crates/re_space_view_text_log/src/visualizer_system.rs index e8bd9290af43..4da792f1f71d 100644 --- a/crates/re_space_view_text_log/src/visualizer_system.rs +++ b/crates/re_space_view_text_log/src/visualizer_system.rs @@ -58,6 +58,7 @@ impl VisualizerSystem for TextLogSystem { query: &ViewQuery<'_>, _view_ctx: &ViewContextCollection, ) -> Result, SpaceViewSystemExecutionError> { + let query_caches = ctx.entity_db.query_caches(); let store = ctx.entity_db.store(); for data_result in query.iter_visible_data_results(Self::identifier()) { @@ -67,7 +68,7 @@ impl VisualizerSystem for TextLogSystem { let timeline_query = re_data_store::RangeQuery::new(query.timeline, TimeRange::EVERYTHING); - 
re_query_cache::query_archetype_pov1_comp2::( + query_caches.query_archetype_pov1_comp2::( ctx.app_options.experimental_primary_caching_range, store, &timeline_query.clone().into(), diff --git a/crates/re_space_view_time_series/src/visualizer_system.rs b/crates/re_space_view_time_series/src/visualizer_system.rs index 21e0616c55c3..f18feaf0a2d3 100644 --- a/crates/re_space_view_time_series/src/visualizer_system.rs +++ b/crates/re_space_view_time_series/src/visualizer_system.rs @@ -121,6 +121,7 @@ impl TimeSeriesSystem { ) -> Result<(), QueryError> { re_tracing::profile_function!(); + let query_caches = ctx.entity_db.query_caches(); let store = ctx.entity_db.store(); for data_result in query.iter_visible_data_results(Self::identifier()) { @@ -159,7 +160,7 @@ impl TimeSeriesSystem { let query = re_data_store::RangeQuery::new(query.timeline, TimeRange::new(from, to)); - re_query_cache::query_archetype_pov1_comp4::< + query_caches.query_archetype_pov1_comp4::< TimeSeriesScalar, Scalar, ScalarScattering, diff --git a/crates/re_viewer/src/app.rs b/crates/re_viewer/src/app.rs index 446fe48f973b..7d6d41b21fde 100644 --- a/crates/re_viewer/src/app.rs +++ b/crates/re_viewer/src/app.rs @@ -691,7 +691,6 @@ impl App { ui: &mut egui::Ui, gpu_resource_stats: &WgpuResourcePoolStatistics, store_stats: &StoreHubStats, - caches_stats: &re_query_cache::CachesStats, ) { let frame = egui::Frame { fill: ui.visuals().panel_fill, @@ -709,7 +708,6 @@ impl App { &self.startup_options.memory_limit, gpu_resource_stats, store_stats, - caches_stats, ); }); } @@ -738,7 +736,6 @@ impl App { gpu_resource_stats: &WgpuResourcePoolStatistics, store_context: Option<&StoreContext<'_>>, store_stats: &StoreHubStats, - caches_stats: &re_query_cache::CachesStats, ) { let mut main_panel_frame = egui::Frame::default(); if re_ui::CUSTOM_WINDOW_DECORATIONS { @@ -762,7 +759,7 @@ impl App { ui, ); - self.memory_panel_ui(ui, gpu_resource_stats, store_stats, caches_stats); + self.memory_panel_ui(ui, 
gpu_resource_stats, store_stats); self.style_panel_ui(egui_ctx, ui); @@ -1158,13 +1155,10 @@ impl eframe::App for App { render_ctx.gpu_resources.statistics() }; - let store_stats = store_hub.stats(); - let caches_stats = - re_query_cache::Caches::stats(self.memory_panel.primary_cache_detailed_stats_enabled()); + let store_stats = store_hub.stats(self.memory_panel.primary_cache_detailed_stats_enabled()); // do early, before doing too many allocations - self.memory_panel - .update(&gpu_resource_stats, &store_stats, &caches_stats); + self.memory_panel.update(&gpu_resource_stats, &store_stats); self.check_keyboard_shortcuts(egui_ctx); @@ -1203,7 +1197,6 @@ impl eframe::App for App { &gpu_resource_stats, store_context.as_ref(), &store_stats, - &caches_stats, ); if re_ui::CUSTOM_WINDOW_DECORATIONS { diff --git a/crates/re_viewer/src/store_hub.rs b/crates/re_viewer/src/store_hub.rs index 33f27d15b989..eee549099311 100644 --- a/crates/re_viewer/src/store_hub.rs +++ b/crates/re_viewer/src/store_hub.rs @@ -1,14 +1,14 @@ use ahash::{HashMap, HashMapExt}; use itertools::Itertools; +use re_data_store::StoreGeneration; use re_data_store::{DataStoreConfig, DataStoreStats}; use re_entity_db::EntityDb; use re_log_encoding::decoder::VersionPolicy; use re_log_types::{ApplicationId, StoreId, StoreKind}; +use re_query_cache::CachesStats; use re_viewer_context::{AppOptions, StoreContext}; -use re_data_store::StoreGeneration; - #[cfg(not(target_arch = "wasm32"))] use crate::{ loading::load_blueprint_file, @@ -41,7 +41,9 @@ pub struct StoreHub { pub struct StoreHubStats { pub blueprint_stats: DataStoreStats, pub blueprint_config: DataStoreConfig, + pub recording_stats: DataStoreStats, + pub recording_cached_stats: CachesStats, pub recording_config: DataStoreConfig, } @@ -399,9 +401,10 @@ impl StoreHub { } /// Populate a [`StoreHubStats`] based on the selected app. + // // TODO(jleibs): We probably want stats for all recordings, not just // the currently selected recording. 
- pub fn stats(&self) -> StoreHubStats { + pub fn stats(&self, detailed_cache_stats: bool) -> StoreHubStats { // If we have an app-id, then use it to look up the blueprint. let blueprint = self .selected_application_id @@ -426,6 +429,10 @@ impl StoreHub { .map(|entity_db| DataStoreStats::from_store(entity_db.store())) .unwrap_or_default(); + let recording_cached_stats = recording + .map(|entity_db| entity_db.query_caches().stats(detailed_cache_stats)) + .unwrap_or_default(); + let recording_config = recording .map(|entity_db| entity_db.store().config().clone()) .unwrap_or_default(); @@ -434,6 +441,7 @@ impl StoreHub { blueprint_stats, blueprint_config, recording_stats, + recording_cached_stats, recording_config, } } diff --git a/crates/re_viewer/src/ui/memory_panel.rs b/crates/re_viewer/src/ui/memory_panel.rs index 156e875826e4..fe865e595fdc 100644 --- a/crates/re_viewer/src/ui/memory_panel.rs +++ b/crates/re_viewer/src/ui/memory_panel.rs @@ -29,7 +29,6 @@ impl MemoryPanel { &mut self, gpu_resource_stats: &WgpuResourcePoolStatistics, store_stats: &StoreHubStats, - caches_stats: &CachesStats, ) { re_tracing::profile_function!(); self.history.capture( @@ -38,7 +37,7 @@ impl MemoryPanel { + gpu_resource_stats.total_texture_size_in_bytes) as _, ), Some(store_stats.recording_stats.total.num_bytes as _), - Some(caches_stats.total_size_bytes() as _), + Some(store_stats.recording_cached_stats.total_size_bytes() as _), Some(store_stats.blueprint_stats.total.num_bytes as _), ); } @@ -63,7 +62,6 @@ impl MemoryPanel { limit: &MemoryLimit, gpu_resource_stats: &WgpuResourcePoolStatistics, store_stats: &StoreHubStats, - caches_stats: &CachesStats, ) { re_tracing::profile_function!(); @@ -75,14 +73,7 @@ impl MemoryPanel { .min_width(250.0) .default_width(300.0) .show_inside(ui, |ui| { - self.left_side( - ui, - re_ui, - limit, - gpu_resource_stats, - store_stats, - caches_stats, - ); + self.left_side(ui, re_ui, limit, gpu_resource_stats, store_stats); }); 
egui::CentralPanel::default().show_inside(ui, |ui| { @@ -98,7 +89,6 @@ impl MemoryPanel { limit: &MemoryLimit, gpu_resource_stats: &WgpuResourcePoolStatistics, store_stats: &StoreHubStats, - caches_stats: &CachesStats, ) { ui.strong("Rerun Viewer resource usage"); @@ -123,7 +113,7 @@ impl MemoryPanel { ui.separator(); ui.collapsing("Primary Cache Resources", |ui| { - self.caches_stats(ui, re_ui, caches_stats); + self.caches_stats(ui, re_ui, &store_stats.recording_cached_stats); }); ui.separator();