diff --git a/.vscode/settings.json b/.vscode/settings.json index a1a7f0abc714..b9ea6558d528 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -71,5 +71,6 @@ // INCLUDING attempts to publish a new release! "rust-analyzer.cargo.buildScripts.enable": false, "python.analysis.extraPaths": ["rerun_py/"], - "ruff.args": ["--config", "rerun_py/pyproject.toml"] + "ruff.args": ["--config", "rerun_py/pyproject.toml"], + "stm32-for-vscode.openOCDPath": false } diff --git a/CHANGELOG.md b/CHANGELOG.md index 268c14c09115..56cdd155abba 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,27 @@ # Depthai Viewer changelog +## 0.0.7 + +- Install depthai_sdk from artifactory +- Change logos + +## 0.0.6 + +- App startup bugfixes + +## 0.0.5 + +- App startup bugfixes +- Better default focusing in 3d views + +## 0.0.4 + +- Disable depth settings if intrinsics aren't available. +- App startup bugfixes. + +## 0.0.3 + +- Added support for all devices. ## 0.0.2 diff --git a/Cargo.lock b/Cargo.lock index b0369291c38a..32f70da3af73 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -124,7 +124,7 @@ checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" [[package]] name = "api_demo" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "anyhow", "clap 4.1.4", @@ -1265,9 +1265,19 @@ dependencies = [ "syn 1.0.103", ] +[[package]] +name = "dcv-color-primitives" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6404eb429533f00a9430a015daca9235593068a1080860aa5cfbde6a8d9f7ca8" +dependencies = [ + "paste", + "wasm-bindgen", +] + [[package]] name = "depthai-viewer" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "anyhow", "backtrace", @@ -1382,7 +1392,7 @@ dependencies = [ [[package]] name = "dna" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "depthai-viewer", "itertools", @@ -2822,7 +2832,7 @@ dependencies = [ [[package]] name = "minimal" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "depthai-viewer", ] @@ -2835,7 +2845,7 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "minimal_options" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "anyhow", "clap 4.1.4", @@ -3253,7 +3263,7 @@ dependencies = [ [[package]] name = "objectron" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "anyhow", "clap 4.1.4", @@ -3871,7 +3881,7 @@ checksum = "f2ff9a1f06a88b01621b7ae906ef0211290d1c8a168a15542486a8f61c0833b9" [[package]] name = "raw_mesh" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "anyhow", "bytes", @@ -3911,7 +3921,7 @@ dependencies = [ [[package]] name = "re_analytics" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "anyhow", "crossbeam", @@ -3932,7 +3942,7 @@ dependencies = [ [[package]] name = "re_arrow_store" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "ahash 0.8.2", "anyhow", @@ -3959,7 +3969,7 @@ dependencies = [ [[package]] name = "re_build_build_info" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "anyhow", "time 0.3.20", @@ -3967,18 +3977,18 @@ dependencies = [ [[package]] name = "re_build_info" -version = "0.0.2" +version = "0.0.8-alpha.0" [[package]] name = "re_build_web_viewer" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "cargo_metadata", ] [[package]] name = "re_data_store" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "ahash 0.8.2", "criterion", @@ -4001,14 +4011,14 @@ dependencies = [ [[package]] 
name = "re_error" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "anyhow", ] [[package]] name = "re_format" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "arrow2", "arrow2_convert", @@ -4018,7 +4028,7 @@ dependencies = [ [[package]] name = "re_int_histogram" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "criterion", "insta", @@ -4029,7 +4039,7 @@ dependencies = [ [[package]] name = "re_log" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "env_logger", "js-sys", @@ -4042,7 +4052,7 @@ dependencies = [ [[package]] name = "re_log_encoding" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "criterion", "ehttp", @@ -4067,7 +4077,7 @@ dependencies = [ [[package]] name = "re_log_types" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "ahash 0.8.2", "array-init", @@ -4091,6 +4101,7 @@ dependencies = [ "rand", "re_format", "re_log", + "re_renderer", "re_string_interner", "re_tuid", "rmp-serde", @@ -4105,7 +4116,7 @@ dependencies = [ [[package]] name = "re_memory" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "ahash 0.8.2", "backtrace", @@ -4125,7 +4136,7 @@ dependencies = [ [[package]] name = "re_query" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "arrow2", "criterion", @@ -4143,7 +4154,7 @@ dependencies = [ [[package]] name = "re_renderer" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "ahash 0.8.2", "anyhow", @@ -4196,7 +4207,7 @@ dependencies = [ [[package]] name = "re_sdk" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "arrow2_convert", "document-features", @@ -4216,7 +4227,7 @@ dependencies = [ [[package]] name = "re_sdk_comms" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "ahash 0.8.2", "anyhow", @@ -4232,7 +4243,7 @@ dependencies = [ [[package]] name = "re_smart_channel" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "crossbeam", "instant", @@ -4240,7 +4251,7 @@ dependencies = [ [[package]] name = "re_string_interner" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "ahash 0.8.2", "nohash-hasher", @@ -4251,7 +4262,7 @@ dependencies = [ [[package]] name = "re_tensor_ops" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "ahash 0.8.2", "ndarray", @@ -4261,7 +4272,7 @@ dependencies = [ [[package]] name = "re_tuid" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "arrow2", "arrow2_convert", @@ -4275,7 +4286,7 @@ dependencies = [ [[package]] name = "re_ui" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "eframe", "egui", @@ -4293,7 +4304,7 @@ dependencies = [ [[package]] name = "re_viewer" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "ahash 0.8.2", "anyhow", @@ -4303,6 +4314,7 @@ dependencies = [ "cocoa", "console_error_panic_hook", "crossbeam-channel", + "dcv-color-primitives", "eframe", "egui", "egui-wgpu", @@ -4363,7 +4375,7 @@ dependencies = [ [[package]] name = "re_web_viewer_server" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "cargo_metadata", "ctrlc", @@ -4380,7 +4392,7 @@ dependencies = [ [[package]] name = "re_ws_comms" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "anyhow", "bincode", @@ -4452,7 +4464,7 @@ checksum = "216080ab382b992234dda86873c18d4c48358f5cfcb70fd693d7f6f2131b628b" [[package]] name = "rerun_py" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "arrow2", "depthai-viewer", @@ -4556,7 +4568,7 @@ dependencies = [ [[package]] name = "run_wasm" -version = "0.0.2" 
+version = "0.0.8-alpha.0" dependencies = [ "cargo-run-wasm", "pico-args", @@ -5110,7 +5122,7 @@ dependencies = [ [[package]] name = "test_image_memory" -version = "0.0.2" +version = "0.0.8-alpha.0" dependencies = [ "depthai-viewer", "mimalloc", diff --git a/Cargo.toml b/Cargo.toml index 920cb2836f2c..5e1442a43535 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,39 +16,39 @@ include = ["../../LICENSE-APACHE", "../../LICENSE-MIT", "**/*.rs", "Cargo.toml"] license = "MIT OR Apache-2.0" repository = "https://github.com/rerun-io/rerun" rust-version = "1.67" -version = "0.0.2" +version = "0.0.8-alpha.0" [workspace.dependencies] # When using alpha-release, always use exact version, e.g. `version = "=0.x.y-alpha.z" # This is because we treat alpha-releases as incompatible, but semver doesn't. # In particular: if we compile rerun 0.3.0-alpha.0 we only want it to use # re_log_types 0.3.0-alpha.0, NOT 0.3.0-alpha.4 even though it is newer and semver-compatible. -re_sdk_comms = { path = "crates/re_sdk_comms", version = "0.0.2" } -re_analytics = { path = "crates/re_analytics", version = "0.0.2" } -re_arrow_store = { path = "crates/re_arrow_store", version = "0.0.2" } -re_build_build_info = { path = "crates/re_build_build_info", version = "0.0.2" } -re_build_info = { path = "crates/re_build_info", version = "0.0.2" } -re_build_web_viewer = { path = "crates/re_build_web_viewer", version = "0.0.2" } -re_data_store = { path = "crates/re_data_store", version = "0.0.2" } -re_error = { path = "crates/re_error", version = "0.0.2" } -re_format = { path = "crates/re_format", version = "0.0.2" } -re_int_histogram = { path = "crates/re_int_histogram", version = "0.0.2" } -re_log = { path = "crates/re_log", version = "0.0.2" } -re_log_encoding = { path = "crates/re_log_encoding", version = "0.0.2" } -re_log_types = { path = "crates/re_log_types", version = "0.0.2" } -re_memory = { path = "crates/re_memory", version = "0.0.2" } -re_query = { path = "crates/re_query", version = "0.0.2" } -re_renderer = { path = "crates/re_renderer", version = "0.0.2", default-features = false } -re_sdk = { path = "crates/re_sdk", version = "0.0.2" } -re_smart_channel = { path = "crates/re_smart_channel", version = "0.0.2" } -re_string_interner = { path = "crates/re_string_interner", version = "0.0.2" } -re_tensor_ops = { path = "crates/re_tensor_ops", version = "0.0.2" } -re_tuid = { path = "crates/re_tuid", version = "0.0.2" } -re_ui = { path = "crates/re_ui", version = "0.0.2" } -re_viewer = { path = "crates/re_viewer", version = "0.0.2", default-features = false } -re_web_viewer_server = { path = "crates/re_web_viewer_server", version = "0.0.2" } -re_ws_comms = { path = "crates/re_ws_comms", version = "0.0.2" } -depthai-viewer = { path = "crates/rerun", version = "0.0.2" } +re_sdk_comms = { path = "crates/re_sdk_comms", version = "0.0.8-alpha.0" } +re_analytics = { path = "crates/re_analytics", version = "0.0.8-alpha.0" } +re_arrow_store = { path = "crates/re_arrow_store", version = "0.0.8-alpha.0" } +re_build_build_info = { path = "crates/re_build_build_info", version = "0.0.8-alpha.0" } +re_build_info = { path = "crates/re_build_info", version = "0.0.8-alpha.0" } +re_build_web_viewer = { path = "crates/re_build_web_viewer", version = "0.0.8-alpha.0" } +re_data_store = { path = "crates/re_data_store", version = "0.0.8-alpha.0" } +re_error = { path = "crates/re_error", version = "0.0.8-alpha.0" } +re_format = { path = "crates/re_format", version = "0.0.8-alpha.0" } +re_int_histogram = { path = "crates/re_int_histogram", version = 
"0.0.8-alpha.0" } +re_log = { path = "crates/re_log", version = "0.0.8-alpha.0" } +re_log_encoding = { path = "crates/re_log_encoding", version = "0.0.8-alpha.0" } +re_log_types = { path = "crates/re_log_types", version = "0.0.8-alpha.0" } +re_memory = { path = "crates/re_memory", version = "0.0.8-alpha.0" } +re_query = { path = "crates/re_query", version = "0.0.8-alpha.0" } +re_renderer = { path = "crates/re_renderer", version = "0.0.8-alpha.0", default-features = false } +re_sdk = { path = "crates/re_sdk", version = "0.0.8-alpha.0" } +re_smart_channel = { path = "crates/re_smart_channel", version = "0.0.8-alpha.0" } +re_string_interner = { path = "crates/re_string_interner", version = "0.0.8-alpha.0" } +re_tensor_ops = { path = "crates/re_tensor_ops", version = "0.0.8-alpha.0" } +re_tuid = { path = "crates/re_tuid", version = "0.0.8-alpha.0" } +re_ui = { path = "crates/re_ui", version = "0.0.8-alpha.0" } +re_viewer = { path = "crates/re_viewer", version = "0.0.8-alpha.0", default-features = false } +re_web_viewer_server = { path = "crates/re_web_viewer_server", version = "0.0.8-alpha.0" } +re_ws_comms = { path = "crates/re_ws_comms", version = "0.0.8-alpha.0" } +depthai-viewer = { path = "crates/rerun", version = "0.0.8-alpha.0" } ahash = "0.8" anyhow = "1.0" diff --git a/crates/re_arrow_store/src/store_gc.rs b/crates/re_arrow_store/src/store_gc.rs index e418763eac33..638a6b691fb4 100644 --- a/crates/re_arrow_store/src/store_gc.rs +++ b/crates/re_arrow_store/src/store_gc.rs @@ -1,4 +1,5 @@ -use re_log_types::{RowId, SizeBytes as _, TimeInt, TimeRange}; +use ahash::HashSetExt; +use re_log_types::{RowId, SizeBytes as _, Time, TimeInt, TimeRange}; use crate::{ store::{IndexedBucketInner, IndexedTable}, @@ -157,6 +158,31 @@ impl DataStore { row_ids } + + pub fn gc_drop_by_cutoff_time(&mut self, cutoff_time: i64) -> ahash::HashSet { + let mut row_ids = ahash::HashSet::new(); + + for (_, table) in &mut self.tables.iter_mut() { + let mut row_ids_to_remove = Vec::new(); + { + let (_, bucket) = table.find_bucket(cutoff_time.into()); + for row_id in bucket.inner.write().col_row_id.iter() { + for time in self.metadata_registry.get(row_id).unwrap().times() { + if time.as_i64() < cutoff_time { + row_ids_to_remove.push((*row_id, time)); + if !row_ids.contains(row_id) { + row_ids.insert(*row_id); + } + } + } + } + } + for (row_id, time) in row_ids_to_remove { + table.try_drop_row(row_id, time.as_i64()); + } + } + row_ids + } } impl IndexedTable { diff --git a/crates/re_build_info/src/lib.rs b/crates/re_build_info/src/lib.rs index 484289120cda..954cf78143ae 100644 --- a/crates/re_build_info/src/lib.rs +++ b/crates/re_build_info/src/lib.rs @@ -14,7 +14,7 @@ pub use crate_version::CrateVersion; macro_rules! 
build_info { () => { $crate::BuildInfo { - crate_name: env!("CARGO_PKG_NAME"), + crate_name: "depthai-viewer", //env!("CARGO_PKG_NAME"), version: $crate::CrateVersion::parse(env!("CARGO_PKG_VERSION")), rustc_version: env!("RE_BUILD_RUSTC_VERSION"), llvm_version: env!("RE_BUILD_LLVM_VERSION"), diff --git a/crates/re_data_store/src/log_db.rs b/crates/re_data_store/src/log_db.rs index 9af248b9c8e0..2308af4cc59f 100644 --- a/crates/re_data_store/src/log_db.rs +++ b/crates/re_data_store/src/log_db.rs @@ -1,12 +1,13 @@ use std::collections::BTreeMap; +use ahash::{HashSet, HashSetExt}; use nohash_hasher::IntMap; use re_arrow_store::{DataStoreConfig, TimeInt}; use re_log_types::{ component_types::InstanceKey, ArrowMsg, BeginRecordingMsg, Component as _, ComponentPath, DataCell, DataRow, DataTable, EntityPath, EntityPathHash, EntityPathOpMsg, LogMsg, PathOp, - RecordingId, RecordingInfo, RowId, TimePoint, Timeline, + RecordingId, RecordingInfo, RowId, Time, TimePoint, Timeline, }; use crate::{Error, TimesPerTimeline}; @@ -277,4 +278,16 @@ impl LogDb { entity_db.purge(&cutoff_times, &drop_row_ids); } + + /// Free up some RAM by forgetting parts of the time that are more than `cutoff` ns in the past. + #[cfg(not(target_arch = "wasm32"))] + pub fn clear_by_cutoff(&mut self, cutoff: i64) { + let cutoff_time = Time::now().nanos_since_epoch() - cutoff; + let oldest = self.entity_db.data_store.oldest_time_per_timeline(); + let row_ids = self + .entity_db + .data_store + .gc_drop_by_cutoff_time(cutoff_time); + self.entity_db.purge(&oldest, &row_ids); + } } diff --git a/crates/re_log_types/Cargo.toml b/crates/re_log_types/Cargo.toml index 3af826321b61..f456ac8ad321 100644 --- a/crates/re_log_types/Cargo.toml +++ b/crates/re_log_types/Cargo.toml @@ -45,6 +45,7 @@ serde = [ [dependencies] # Rerun +re_renderer.workspace = true re_format.workspace = true re_log.workspace = true re_string_interner.workspace = true @@ -80,7 +81,6 @@ time = { workspace = true, default-features = false, features = [ typenum = "1.15" uuid = { version = "1.1", features = ["serde", "v4", "js"] } - # Optional dependencies: ecolor = { workspace = true, optional = true } glam = { workspace = true, optional = true } diff --git a/crates/re_log_types/src/component_types/tensor.rs b/crates/re_log_types/src/component_types/tensor.rs index cf02a3bc2b8e..06c1d8401ca7 100644 --- a/crates/re_log_types/src/component_types/tensor.rs +++ b/crates/re_log_types/src/component_types/tensor.rs @@ -1,14 +1,16 @@ -use arrow2::array::{FixedSizeBinaryArray, MutableFixedSizeBinaryArray}; +use arrow2::array::{ FixedSizeBinaryArray, MutableFixedSizeBinaryArray }; use arrow2::buffer::Buffer; use arrow2_convert::deserialize::ArrowDeserialize; use arrow2_convert::field::ArrowField; -use arrow2_convert::{serialize::ArrowSerialize, ArrowDeserialize, ArrowField, ArrowSerialize}; +use arrow2_convert::{ serialize::ArrowSerialize, ArrowDeserialize, ArrowField, ArrowSerialize }; use crate::Component; -use crate::{TensorDataType, TensorElement}; +use crate::{ TensorDataType, TensorElement }; use super::arrow_convert_shims::BinaryBuffer; +use re_renderer::renderer::TextureEncoding; + // ---------------------------------------------------------------------------- /// A unique id per [`Tensor`]. 
@@ -57,7 +59,7 @@ impl ArrowSerialize for TensorId { #[inline] fn arrow_serialize( v: &<Self as ArrowField>::Type, - array: &mut Self::MutableArrayType, + array: &mut Self::MutableArrayType ) -> arrow2::error::Result<()> { array.try_push(Some(v.0.as_bytes())) } @@ -68,10 +70,9 @@ impl ArrowDeserialize for TensorId { #[inline] fn arrow_deserialize( - v: <&Self::ArrayType as IntoIterator>::Item, + v: <&Self::ArrayType as IntoIterator>::Item ) -> Option<<Self as ArrowField>::Type> { - v.and_then(|bytes| uuid::Uuid::from_slice(bytes).ok()) - .map(Self) + v.and_then(|bytes| uuid::Uuid::from_slice(bytes).ok()).map(Self) } } @@ -160,12 +161,22 @@ pub enum TensorData { F32(Buffer<f32>), F64(Buffer<f64>), JPEG(BinaryBuffer), + NV12(BinaryBuffer), +} + +impl Into<Option<TextureEncoding>> for &TensorData { + fn into(self) -> Option<TextureEncoding> { + match self { + &TensorData::NV12(_) => Some(TextureEncoding::Nv12), + _ => None, + } + } } impl TensorData { pub fn dtype(&self) -> TensorDataType { match self { - Self::U8(_) | Self::JPEG(_) => TensorDataType::U8, + Self::U8(_) | Self::JPEG(_) | Self::NV12(_) => TensorDataType::U8, Self::U16(_) => TensorDataType::U16, Self::U32(_) => TensorDataType::U32, Self::U64(_) => TensorDataType::U64, @@ -180,7 +191,7 @@ impl TensorData { pub fn size_in_bytes(&self) -> usize { match self { - Self::U8(buf) | Self::JPEG(buf) => buf.0.len(), + Self::U8(buf) | Self::JPEG(buf) | Self::NV12(buf) => buf.0.len(), Self::U16(buf) => buf.len(), Self::U32(buf) => buf.len(), Self::U64(buf) => buf.len(), @@ -199,7 +210,7 @@ impl TensorData { pub fn is_compressed_image(&self) -> bool { match self { - Self::U8(_) + | Self::U8(_) | Self::U16(_) | Self::U32(_) | Self::U64(_) @@ -210,7 +221,7 @@ impl TensorData { | Self::F32(_) | Self::F64(_) => false, - Self::JPEG(_) => true, + Self::JPEG(_) | Self::NV12(_) => true, } } } @@ -229,6 +240,7 @@ impl std::fmt::Debug for TensorData { Self::F32(_) => write!(f, "F32({} bytes)", self.size_in_bytes()), Self::F64(_) => write!(f, "F64({} bytes)", self.size_in_bytes()), Self::JPEG(_) => write!(f, "JPEG({} bytes)", self.size_in_bytes()), + Self::NV12(_) => write!(f, "NV12({} bytes)", self.size_in_bytes()), } } } @@ -401,43 +413,77 @@ impl Tensor { self.shape.as_slice() } + /// Calculates the real dimensions of the tensor, taking into account the encoding. + #[inline] + pub fn real_shape(&self) -> Vec<TensorDimension> { + match &self.data { + &TensorData::NV12(_) => { + let shape = self.shape.as_slice(); + match shape { + [y, x] => { + vec![ + TensorDimension::height((((y.size as f64) * 2.0) / 3.0) as u64), + TensorDimension::width(x.size) + ] + } + _ => panic!("Invalid shape for NV12 encoding: {:?}", shape), + } + } + _ => self.shape().into(), + } + } + #[inline] pub fn num_dim(&self) -> usize { self.shape.len() } /// If this tensor is shaped as an image, return the height, width, and channels/depth of it.
+ /// Takes into account the encoding. pub fn image_height_width_channels(&self) -> Option<[u64; 3]> { - if self.shape.len() == 2 { - Some([self.shape[0].size, self.shape[1].size, 1]) - } else if self.shape.len() == 3 { - let channels = self.shape[2].size; - // gray, rgb, rgba - if matches!(channels, 1 | 3 | 4) { - Some([self.shape[0].size, self.shape[1].size, channels]) - } else { - None + match &self.data { + &TensorData::NV12(_) => { + let shape = self.real_shape(); + if let [y, x] = shape.as_slice() { + Some([y.size, x.size, 1]) + } else { + None + } + } + _ => { + if self.shape.len() == 2 { + Some([self.shape[0].size, self.shape[1].size, 1]) + } else if self.shape.len() == 3 { + let channels = self.shape[2].size; + // gray, rgb, rgba + if matches!(channels, 1 | 3 | 4) { + Some([self.shape[0].size, self.shape[1].size, channels]) + } else { + None + } + } else { + None + } } - } else { - None } } pub fn is_shaped_like_an_image(&self) -> bool { - self.num_dim() == 2 - || self.num_dim() == 3 && { - matches!( - self.shape.last().unwrap().size, - // gray, rgb, rgba - 1 | 3 | 4 - ) - } + self.num_dim() == 2 || + (self.num_dim() == 3 && + ({ + matches!( + self.shape.last().unwrap().size, + // gray, rgb, rgba + 1 | 3 | 4 + ) + })) } #[inline] pub fn is_vector(&self) -> bool { let shape = &self.shape; - shape.len() == 1 || { shape.len() == 2 && (shape[0].size == 1 || shape[1].size == 1) } + shape.len() == 1 || ({ shape.len() == 2 && (shape[0].size == 1 || shape[1].size == 1) }) } #[inline] @@ -452,7 +498,7 @@ impl Tensor { if size <= index { return None; } - offset += *index as usize * stride; + offset += (*index as usize) * stride; stride *= *size as usize; } @@ -467,10 +513,47 @@ impl Tensor { TensorData::I64(buf) => Some(TensorElement::I64(buf[offset])), TensorData::F32(buf) => Some(TensorElement::F32(buf[offset])), TensorData::F64(buf) => Some(TensorElement::F64(buf[offset])), + // Doesn't make sense to get a single value for NV12, use get_nv12_pixel instead. + // You would need to call get once for each channel. + // That would mean that you have to manually supply the channel, so using get_nv12_pixel is easier. + TensorData::NV12(_) => None, TensorData::JPEG(_) => None, // Too expensive to unpack here.
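The `real_shape` / `image_height_width_channels` pair above undoes the NV12 packing: an NV12 frame stores a full-resolution luma (Y) plane followed by a half-height plane of interleaved chroma (UV) pairs, so an h x w image occupies 3h/2 x w bytes and arrives as a single-channel tensor of that shape. A quick sketch of the arithmetic:

```rust
/// Recover the image size from the stored NV12 tensor shape: the
/// buffer holds h rows of Y plus h/2 rows of interleaved UV, i.e.
/// 3h/2 rows in total, so the image height is 2/3 of the stored rows.
fn nv12_image_size(stored_rows: u64, stored_cols: u64) -> (u64, u64) {
    ((stored_rows * 2) / 3, stored_cols)
}

fn main() {
    // A 640x480 frame arrives as a 720x640 single-channel tensor.
    assert_eq!(nv12_image_size(720, 640), (480, 640));
}
```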
} } + pub fn get_nv12_pixel(&self, index: &[u64; 2]) -> Option<[TensorElement; 3]> { + let [row, col] = index; + match self.real_shape().as_slice() { + [h, w] => { + match &self.data { + TensorData::NV12(buf) => { + let uv_offset = (w.size * h.size) as u64; + let y = ((buf[(*row * w.size + *col) as usize] as f64) - 16.0) / 219.0; + let u = + ((buf[(uv_offset + (*row / 2) * w.size + (*col / 2) * 2) as usize] as f64) - + 128.0) / + 224.0; + let v = + ((buf[((uv_offset + (*row / 2) * w.size + (*col / 2) * 2) as usize) + 1] as f64) - + 128.0) / + 224.0; + let r = y + 1.402 * v; + let g = y - 0.344 * u - 0.714 * v; + let b = y + 1.772 * u; + + Some([ + TensorElement::U8(f64::clamp(r * 255.0, 0.0, 255.0) as u8), + TensorElement::U8(f64::clamp(g * 255.0, 0.0, 255.0) as u8), + TensorElement::U8(f64::clamp(b * 255.0, 0.0, 255.0) as u8), + ]) + } + _ => None, + } + } + _ => None, + } + } + pub fn dtype(&self) -> TensorDataType { self.data.dtype() } @@ -492,8 +575,7 @@ pub enum TensorCastError { #[error("ndarray type mismatch with tensor storage")] TypeMismatch, - #[error("tensor shape did not match storage length")] - BadTensorShape { + #[error("tensor shape did not match storage length")] BadTensorShape { #[from] source: ndarray::ShapeError, }, @@ -611,20 +693,21 @@ impl<'a> TryFrom<&'a Tensor> for ::ndarray::ArrayViewD<'a, half::f16> { #[cfg(feature = "image")] #[derive(thiserror::Error, Clone, Debug)] pub enum TensorImageLoadError { - #[error(transparent)] - Image(std::sync::Arc<image::ImageError>), + #[error(transparent)] Image(std::sync::Arc<image::ImageError>), - #[error("Unsupported JPEG color type: {0:?}. Only RGB Jpegs are supported")] - UnsupportedJpegColorType(image::ColorType), + #[error( + "Unsupported JPEG color type: {0:?}. Only RGB Jpegs are supported" + )] UnsupportedJpegColorType(image::ColorType), - #[error("Unsupported color type: {0:?}. We support 8-bit, 16-bit, and f32 images, and RGB, RGBA, Luminance, and Luminance-Alpha.")] - UnsupportedImageColorType(image::ColorType), + #[error( + "Unsupported color type: {0:?}. We support 8-bit, 16-bit, and f32 images, and RGB, RGBA, Luminance, and Luminance-Alpha." + )] UnsupportedImageColorType(image::ColorType), - #[error("Failed to load file: {0}")] - ReadError(std::sync::Arc<std::io::Error>), + #[error("Failed to load file: {0}")] ReadError(std::sync::Arc<std::io::Error>), - #[error("The encoded tensor did not match its metadata {expected:?} != {found:?}")] - InvalidMetaData { + #[error( + "The encoded tensor did not match its metadata {expected:?} != {found:?}" + )] InvalidMetaData { expected: Vec<TensorDimension>, found: Vec<TensorDimension>, }, @@ -650,11 +733,11 @@ impl From<std::io::Error> for TensorImageLoadError { #[cfg(feature = "image")] #[derive(thiserror::Error, Debug)] pub enum TensorImageSaveError { - #[error("Expected image-shaped tensor, got {0:?}")] - ShapeNotAnImage(Vec<TensorDimension>), + #[error("Expected image-shaped tensor, got {0:?}")] ShapeNotAnImage(Vec<TensorDimension>), - #[error("Cannot convert tensor with {0} channels and datatype {1} to an image")] - UnsupportedChannelsDtype(u64, TensorDataType), + #[error( + "Cannot convert tensor with {0} channels and datatype {1} to an image" + )] UnsupportedChannelsDtype(u64, TensorDataType), #[error("The tensor data did not match tensor dimensions")] BadData, @@ -666,7 +749,7 @@ impl Tensor { shape: Vec<TensorDimension>, data: TensorData, meaning: TensorDataMeaning, - meter: Option<f32>, + meter: Option<f32> ) -> Self { Self { tensor_id, @@ -685,7 +768,7 @@ impl Tensor { /// Requires the `image` feature.
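`get_nv12_pixel` above (and `decode_nv12` in the new WGSL file further down) both apply the BT.601 limited-range ("video range") conversion: Y spans 16..=235 (range 219), Cb/Cr span 16..=240 centered at 128 (range 224). A standalone sketch of just the color math, with the same constants the NV12 paths in this diff use:

```rust
/// BT.601 limited-range YCbCr -> RGB.
fn yuv_to_rgb(y: u8, u: u8, v: u8) -> [u8; 3] {
    let y = (y as f32 - 16.0) / 219.0;
    let u = (u as f32 - 128.0) / 224.0;
    let v = (v as f32 - 128.0) / 224.0;
    let r = y + 1.402 * v;
    let g = y - 0.344 * u - 0.714 * v;
    let b = y + 1.772 * u;
    [r, g, b].map(|c| (c * 255.0).clamp(0.0, 255.0) as u8)
}

fn main() {
    assert_eq!(yuv_to_rgb(16, 128, 128), [0, 0, 0]);         // video black
    assert_eq!(yuv_to_rgb(235, 128, 128), [255, 255, 255]);  // video white
}
```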
#[cfg(not(target_arch = "wasm32"))] pub fn tensor_from_jpeg_file( - image_path: impl AsRef, + image_path: impl AsRef ) -> Result { let jpeg_bytes = std::fs::read(image_path)?; Self::tensor_from_jpeg_bytes(jpeg_bytes) @@ -699,9 +782,7 @@ impl Tensor { let jpeg = image::codecs::jpeg::JpegDecoder::new(std::io::Cursor::new(&jpeg_bytes))?; if jpeg.color_type() != image::ColorType::Rgb8 { // TODO(emilk): support gray-scale jpeg as well - return Err(TensorImageLoadError::UnsupportedJpegColorType( - jpeg.color_type(), - )); + return Err(TensorImageLoadError::UnsupportedJpegColorType(jpeg.color_type())); } let (w, h) = jpeg.dimensions(); @@ -710,7 +791,7 @@ impl Tensor { shape: vec![ TensorDimension::height(h as _), TensorDimension::width(w as _), - TensorDimension::depth(3), + TensorDimension::depth(3) ], data: TensorData::JPEG(jpeg_bytes.into()), meaning: TensorDataMeaning::Unknown, @@ -724,7 +805,7 @@ impl Tensor { /// /// This is a convenience function that calls [`DecodedTensor::from_image`]. pub fn from_image( - image: impl Into, + image: impl Into ) -> Result { Self::from_dynamic_image(image.into()) } @@ -740,21 +821,21 @@ impl Tensor { /// Predicts if [`Self::to_dynamic_image`] is likely to succeed, without doing anything expensive pub fn could_be_dynamic_image(&self) -> bool { - self.is_shaped_like_an_image() - && matches!( + self.is_shaped_like_an_image() && + matches!( self.dtype(), - TensorDataType::U8 - | TensorDataType::U16 - | TensorDataType::F16 - | TensorDataType::F32 - | TensorDataType::F64 + TensorDataType::U8 | + TensorDataType::U16 | + TensorDataType::F16 | + TensorDataType::F32 | + TensorDataType::F64 ) } /// Try to convert an image-like tensor into an [`image::DynamicImage`]. pub fn to_dynamic_image(&self) -> Result { - use ecolor::{gamma_u8_from_linear_f32, linear_u8_from_linear_f32}; - use image::{DynamicImage, GrayImage, RgbImage, RgbaImage}; + use ecolor::{ gamma_u8_from_linear_f32, linear_u8_from_linear_f32 }; + use image::{ DynamicImage, GrayImage, RgbImage, RgbaImage }; type Rgb16Image = image::ImageBuffer, Vec>; type Rgba16Image = image::ImageBuffer, Vec>; @@ -766,87 +847,85 @@ impl Tensor { let w = w as u32; let h = h as u32; - let dyn_img_result = - match (channels, &self.data) { - (1, TensorData::U8(buf)) => { - GrayImage::from_raw(w, h, buf.as_slice().to_vec()).map(DynamicImage::ImageLuma8) - } - (1, TensorData::U16(buf)) => Gray16Image::from_raw(w, h, buf.as_slice().to_vec()) - .map(DynamicImage::ImageLuma16), - // TODO(emilk) f16 - (1, TensorData::F32(buf)) => { - let pixels = buf - .iter() - .map(|pixel| gamma_u8_from_linear_f32(*pixel)) - .collect(); - GrayImage::from_raw(w, h, pixels).map(DynamicImage::ImageLuma8) - } - (1, TensorData::F64(buf)) => { - let pixels = buf - .iter() - .map(|&pixel| gamma_u8_from_linear_f32(pixel as f32)) - .collect(); - GrayImage::from_raw(w, h, pixels).map(DynamicImage::ImageLuma8) - } + let dyn_img_result = match (channels, &self.data) { + (1, TensorData::U8(buf)) => { + GrayImage::from_raw(w, h, buf.as_slice().to_vec()).map(DynamicImage::ImageLuma8) + } + (1, TensorData::U16(buf)) => + Gray16Image::from_raw(w, h, buf.as_slice().to_vec()).map(DynamicImage::ImageLuma16), + // TODO(emilk) f16 + (1, TensorData::F32(buf)) => { + let pixels = buf + .iter() + .map(|pixel| gamma_u8_from_linear_f32(*pixel)) + .collect(); + GrayImage::from_raw(w, h, pixels).map(DynamicImage::ImageLuma8) + } + (1, TensorData::F64(buf)) => { + let pixels = buf + .iter() + .map(|&pixel| gamma_u8_from_linear_f32(pixel as f32)) + .collect(); + 
GrayImage::from_raw(w, h, pixels).map(DynamicImage::ImageLuma8) + } - (3, TensorData::U8(buf)) => { - RgbImage::from_raw(w, h, buf.as_slice().to_vec()).map(DynamicImage::ImageRgb8) - } - (3, TensorData::U16(buf)) => Rgb16Image::from_raw(w, h, buf.as_slice().to_vec()) - .map(DynamicImage::ImageRgb16), - (3, TensorData::F32(buf)) => { - let pixels = buf.iter().copied().map(gamma_u8_from_linear_f32).collect(); - RgbImage::from_raw(w, h, pixels).map(DynamicImage::ImageRgb8) - } - (3, TensorData::F64(buf)) => { - let pixels = buf - .iter() - .map(|&comp| gamma_u8_from_linear_f32(comp as f32)) - .collect(); - RgbImage::from_raw(w, h, pixels).map(DynamicImage::ImageRgb8) - } + (3, TensorData::U8(buf)) => { + RgbImage::from_raw(w, h, buf.as_slice().to_vec()).map(DynamicImage::ImageRgb8) + } + (3, TensorData::U16(buf)) => + Rgb16Image::from_raw(w, h, buf.as_slice().to_vec()).map(DynamicImage::ImageRgb16), + (3, TensorData::F32(buf)) => { + let pixels = buf.iter().copied().map(gamma_u8_from_linear_f32).collect(); + RgbImage::from_raw(w, h, pixels).map(DynamicImage::ImageRgb8) + } + (3, TensorData::F64(buf)) => { + let pixels = buf + .iter() + .map(|&comp| gamma_u8_from_linear_f32(comp as f32)) + .collect(); + RgbImage::from_raw(w, h, pixels).map(DynamicImage::ImageRgb8) + } - (4, TensorData::U8(buf)) => { - RgbaImage::from_raw(w, h, buf.as_slice().to_vec()).map(DynamicImage::ImageRgba8) - } - (4, TensorData::U16(buf)) => Rgba16Image::from_raw(w, h, buf.as_slice().to_vec()) - .map(DynamicImage::ImageRgba16), - (4, TensorData::F32(buf)) => { - let rgba: &[[f32; 4]] = bytemuck::cast_slice(buf.as_slice()); - let pixels: Vec = rgba - .iter() - .flat_map(|&[r, g, b, a]| { - let r = gamma_u8_from_linear_f32(r); - let g = gamma_u8_from_linear_f32(g); - let b = gamma_u8_from_linear_f32(b); - let a = linear_u8_from_linear_f32(a); - [r, g, b, a] - }) - .collect(); - RgbaImage::from_raw(w, h, pixels).map(DynamicImage::ImageRgba8) - } - (4, TensorData::F64(buf)) => { - let rgba: &[[f64; 4]] = bytemuck::cast_slice(buf.as_slice()); - let pixels: Vec = rgba - .iter() - .flat_map(|&[r, g, b, a]| { - let r = gamma_u8_from_linear_f32(r as _); - let g = gamma_u8_from_linear_f32(g as _); - let b = gamma_u8_from_linear_f32(b as _); - let a = linear_u8_from_linear_f32(a as _); - [r, g, b, a] - }) - .collect(); - RgbaImage::from_raw(w, h, pixels).map(DynamicImage::ImageRgba8) - } + (4, TensorData::U8(buf)) => { + RgbaImage::from_raw(w, h, buf.as_slice().to_vec()).map(DynamicImage::ImageRgba8) + } + (4, TensorData::U16(buf)) => + Rgba16Image::from_raw(w, h, buf.as_slice().to_vec()).map(DynamicImage::ImageRgba16), + (4, TensorData::F32(buf)) => { + let rgba: &[[f32; 4]] = bytemuck::cast_slice(buf.as_slice()); + let pixels: Vec = rgba + .iter() + .flat_map(|&[r, g, b, a]| { + let r = gamma_u8_from_linear_f32(r); + let g = gamma_u8_from_linear_f32(g); + let b = gamma_u8_from_linear_f32(b); + let a = linear_u8_from_linear_f32(a); + [r, g, b, a] + }) + .collect(); + RgbaImage::from_raw(w, h, pixels).map(DynamicImage::ImageRgba8) + } + (4, TensorData::F64(buf)) => { + let rgba: &[[f64; 4]] = bytemuck::cast_slice(buf.as_slice()); + let pixels: Vec = rgba + .iter() + .flat_map(|&[r, g, b, a]| { + let r = gamma_u8_from_linear_f32(r as _); + let g = gamma_u8_from_linear_f32(g as _); + let b = gamma_u8_from_linear_f32(b as _); + let a = linear_u8_from_linear_f32(a as _); + [r, g, b, a] + }) + .collect(); + RgbaImage::from_raw(w, h, pixels).map(DynamicImage::ImageRgba8) + } - (_, _) => { - return 
Err(TensorImageSaveError::UnsupportedChannelsDtype( - channels, - self.data.dtype(), - )) - } - }; + (_, _) => { + return Err( + TensorImageSaveError::UnsupportedChannelsDtype(channels, self.data.dtype()) + ); + } + }; dyn_img_result.ok_or(TensorImageSaveError::BadData) } @@ -877,7 +956,7 @@ impl TryFrom<Tensor> for DecodedTensor { fn try_from(tensor: Tensor) -> Result<Self, Self::Error> { match &tensor.data { - TensorData::U8(_) + | TensorData::U8(_) | TensorData::U16(_) | TensorData::U32(_) | TensorData::U64(_) @@ -886,7 +965,8 @@ impl TryFrom<Tensor> for DecodedTensor { | TensorData::I32(_) | TensorData::I64(_) | TensorData::F32(_) - | TensorData::F64(_) => Ok(Self(tensor)), + | TensorData::F64(_) + | TensorData::NV12(_) => Ok(Self(tensor)), TensorData::JPEG(_) => Err(tensor), } @@ -899,7 +979,7 @@ impl DecodedTensor { /// /// Requires the `image` feature. pub fn from_image( - image: impl Into<image::DynamicImage>, + image: impl Into<image::DynamicImage> ) -> Result<DecodedTensor, TensorImageLoadError> { Self::from_dynamic_image(image.into()) } @@ -908,7 +988,7 @@ impl DecodedTensor { /// /// Requires the `image` feature. pub fn from_dynamic_image( - image: image::DynamicImage, + image: image::DynamicImage ) -> Result<DecodedTensor, TensorImageLoadError> { let (w, h) = (image.width(), image.height()); @@ -943,9 +1023,7 @@ impl DecodedTensor { } _ => { // It is very annoying that DynamicImage is #[non_exhaustive] - return Err(TensorImageLoadError::UnsupportedImageColorType( - image.color(), - )); + return Err(TensorImageLoadError::UnsupportedImageColorType(image.color())); } }; let tensor = Tensor { @@ -953,7 +1031,7 @@ impl DecodedTensor { shape: vec![ TensorDimension::height(h as _), TensorDimension::width(w as _), - TensorDimension::depth(depth), + TensorDimension::depth(depth) ], data, meaning: TensorDataMeaning::Unknown, @@ -964,9 +1042,9 @@ impl DecodedTensor { pub fn try_decode(maybe_encoded_tensor: Tensor) -> Result<DecodedTensor, TensorImageLoadError> { crate::profile_function!(); - + // NV12 gets decoded in the shader, so we don't need to do anything here.
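Note the division of labor settled here: JPEG is the only format expanded on the CPU, while NV12 stays encoded all the way to the fragment shader. Besides skipping a decode pass, this keeps frames at NV12's 12 bits per pixel in RAM and across the upload. A rough, purely illustrative sizing sketch:

```rust
/// Per-frame payload sizes: NV12 (Y plane + half-resolution
/// interleaved UV plane) versus CPU-decoded RGB8/RGBA8.
fn frame_bytes(w: usize, h: usize) -> (usize, usize, usize) {
    (w * h + w * h / 2, w * h * 3, w * h * 4)
}

fn main() {
    let (nv12, rgb8, rgba8) = frame_bytes(1920, 1080);
    assert_eq!(nv12, 3_110_400);  // ~3.0 MiB
    assert_eq!(rgb8, 6_220_800);  // ~5.9 MiB
    assert_eq!(rgba8, 8_294_400); // ~7.9 MiB
}
```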
match &maybe_encoded_tensor.data { - TensorData::U8(_) + | TensorData::U8(_) | TensorData::U16(_) | TensorData::U32(_) | TensorData::U64(_) @@ -975,7 +1053,8 @@ impl DecodedTensor { | TensorData::I32(_) | TensorData::I64(_) | TensorData::F32(_) - | TensorData::F64(_) => Ok(Self(maybe_encoded_tensor)), + | TensorData::F64(_) + | TensorData::NV12(_) => Ok(Self(maybe_encoded_tensor)), TensorData::JPEG(buf) => { use image::io::Reader as ImageReader; @@ -1037,7 +1116,7 @@ fn test_ndarray() { TensorDimension { size: 2, name: None, - }, + } ], data: TensorData::U16(vec![1, 2, 3, 4].into()), meaning: TensorDataMeaning::Unknown, @@ -1053,7 +1132,7 @@ fn test_ndarray() { #[test] fn test_arrow() { - use arrow2_convert::{deserialize::TryIntoCollection, serialize::TryIntoArrow}; + use arrow2_convert::{ deserialize::TryIntoCollection, serialize::TryIntoArrow }; let tensors_in = vec![ Tensor { @@ -1075,7 +1154,7 @@ fn test_arrow() { data: TensorData::F32(vec![1.23, 2.45].into()), meaning: TensorDataMeaning::Unknown, meter: None, - }, + } ]; let array: Box = tensors_in.iter().try_into_arrow().unwrap(); diff --git a/crates/re_log_types/src/component_types/xlink_stats.rs b/crates/re_log_types/src/component_types/xlink_stats.rs index 82b5f3b5522c..8b6da1b10d2e 100644 --- a/crates/re_log_types/src/component_types/xlink_stats.rs +++ b/crates/re_log_types/src/component_types/xlink_stats.rs @@ -5,13 +5,16 @@ use arrow2_convert::{field::I128, ArrowDeserialize, ArrowField, ArrowSerialize}; // TODO(filip): Convert to use i128 /// Stats about the XLink connection throughput -#[derive(Clone, Debug, PartialEq, ArrowField, ArrowSerialize, ArrowDeserialize)] +#[derive(Clone, Copy, Debug, PartialEq, ArrowField, ArrowSerialize, ArrowDeserialize)] pub struct XlinkStats { /// Bytes read from the XLink by the host (PC) pub bytes_read: i64, /// Bytes written to the XLink by the host (PC) pub bytes_written: i64, + + /// Time in s from epoch when the stats were collected + pub timestamp: f64, } impl XlinkStats { diff --git a/crates/re_log_types/src/lib.rs b/crates/re_log_types/src/lib.rs index af76f0b2c4a8..b23bc29b4ab0 100644 --- a/crates/re_log_types/src/lib.rs +++ b/crates/re_log_types/src/lib.rs @@ -273,6 +273,7 @@ impl std::fmt::Display for PythonVersion { } type SysExePath = String; +type VenvSitePackages = String; #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))] @@ -280,7 +281,7 @@ pub enum RecordingSource { Unknown, /// The official Rerun Python Logging SDK - PythonSdk(PythonVersion, SysExePath), + PythonSdk(PythonVersion, SysExePath, VenvSitePackages), /// The official Rerun Rust Logging SDK RustSdk { @@ -296,7 +297,7 @@ impl std::fmt::Display for RecordingSource { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Unknown => "Unknown".fmt(f), - Self::PythonSdk(version, _) => write!(f, "Python {version} SDK"), + Self::PythonSdk(version, _, _) => write!(f, "Python {version} SDK"), Self::RustSdk { rustc_version: rust_version, llvm_version: _, diff --git a/crates/re_memory/src/memory_limit.rs b/crates/re_memory/src/memory_limit.rs index ec59cb66e48f..1c54aabbe008 100644 --- a/crates/re_memory/src/memory_limit.rs +++ b/crates/re_memory/src/memory_limit.rs @@ -5,14 +5,14 @@ pub struct MemoryLimit { /// This is primarily compared to what is reported by [`crate::AccountingAllocator`] ('counted'). 
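The new `timestamp` field on `XlinkStats` makes the cumulative `bytes_read`/`bytes_written` counters self-describing: two samples are enough to derive a throughput. A sketch of the presumable intended use (the helper is hypothetical, not part of the diff):

```rust
struct XlinkStats {
    bytes_read: i64,    // running total read from the XLink by the host
    bytes_written: i64, // running total written to the XLink by the host
    timestamp: f64,     // seconds since epoch at collection time
}

/// (read, write) throughput in MB/s between two consecutive samples.
fn throughput_mb_s(prev: &XlinkStats, cur: &XlinkStats) -> (f64, f64) {
    let dt = cur.timestamp - prev.timestamp;
    let rate = |delta: i64| (delta as f64) / dt / 1e6;
    (
        rate(cur.bytes_read - prev.bytes_read),
        rate(cur.bytes_written - prev.bytes_written),
    )
}
```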
/// We limit based on this instead of `resident` (RSS) because `counted` is what we have immediate /// control over, while RSS depends on what our allocator (MiMalloc) decides to do. - /// Default is Some(100MB) + /// Default is Some(500MB) pub limit: Option<i64>, } impl Default for MemoryLimit { fn default() -> Self { Self { - limit: re_format::parse_bytes("100MB"), + limit: re_format::parse_bytes("500MB"), } } } diff --git a/crates/re_renderer/shader/decodings.wgsl b/crates/re_renderer/shader/decodings.wgsl new file mode 100644 index 000000000000..4e774a135f9a --- /dev/null +++ b/crates/re_renderer/shader/decodings.wgsl @@ -0,0 +1,19 @@ +#import <./types.wgsl> + +fn decode_nv12(texture: texture_2d<u32>, in_tex_coords: Vec2) -> Vec4 { + let texture_dim = Vec2(textureDimensions(texture).xy); + let uv_offset = u32(floor(texture_dim.y / 1.5)); + let uv_row = u32(floor(in_tex_coords.y * texture_dim.y) / 2.0); + var uv_col = u32(floor(in_tex_coords.x * texture_dim.x / 2.0)) * 2u; + + let coords = UVec2(in_tex_coords * Vec2(texture_dim.x, texture_dim.y)); + let y = (f32(textureLoad(texture, coords, 0).r) - 16.0) / 219.0; + let u = (f32(textureLoad(texture, UVec2(u32(uv_col), uv_offset + uv_row), 0).r) - 128.0) / 224.0; + let v = (f32(textureLoad(texture, UVec2((u32(uv_col) + 1u), uv_offset + uv_row), 0).r) - 128.0) / 224.0; + + // Get RGB values and apply reverse gamma correction since we are rendering to sRGB framebuffer + let r = pow(y + 1.402 * v, 2.2); + let g = pow(y - (0.344 * u + 0.714 * v), 2.2); + let b = pow(y + 1.772 * u, 2.2); + return Vec4(r, g, b, 1.0); +} diff --git a/crates/re_renderer/shader/depth_cloud.wgsl b/crates/re_renderer/shader/depth_cloud.wgsl index db39dd6d20d2..fbd49c85bb4f 100644 --- a/crates/re_renderer/shader/depth_cloud.wgsl +++ b/crates/re_renderer/shader/depth_cloud.wgsl @@ -10,6 +10,16 @@ #import <./utils/size.wgsl> #import <./utils/sphere_quad.wgsl> #import <./utils/srgb.wgsl> +#import <./decodings.wgsl> + +const SAMPLE_TYPE_FLOAT_FILTER = 1u; +const SAMPLE_TYPE_FLOAT_NOFILTER = 2u; +const SAMPLE_TYPE_SINT_NOFILTER = 3u; +const SAMPLE_TYPE_UINT_NOFILTER = 4u; +// ------------------- +// Encoded textures +// ------------------- +const SAMPLE_TYPE_NV12 = 5u; // --- @@ -43,8 +53,11 @@ struct DepthCloudInfo { /// Configures color mapping mode, see `colormap.wgsl`. colormap: u32, - /// Is the albedo texture rgb or mono - albedo_color_space: u32, + /// Configures the sample type for the albedo texture. + albedo_sample_type: u32, + + /// Uint or filterable float. + depth_sample_type: u32, /// Changes between the opaque and outline draw-phases. radius_boost_in_ui_points: f32, @@ -57,11 +70,27 @@ const ALBEDO_COLOR_MONO: u32 = 1u; var<uniform> depth_cloud_info: DepthCloudInfo; @group(1) @binding(1) -var depth_texture: texture_2d<f32>; +var depth_texture_float: texture_2d<f32>; -/// Only sampled if `DepthCloudInfo::colormap == ALBEDO_TEXTURE`.
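`decode_nv12` reads the whole NV12 buffer through a single `texture_2d<u32>` of height 3h/2: rows [0, h) are luma, rows [h, 3h/2) hold the interleaved CbCr pairs, which is why `uv_offset` is `texture_dim.y / 1.5` and the chroma column is forced to an even index. A CPU mirror of the same addressing, over a flat buffer for clarity:

```rust
/// Flat-buffer indices of the Y, U and V samples for pixel (x, y) of a
/// w x h NV12 image (mirrors the texel math in decode_nv12 above).
fn nv12_indices(x: usize, y: usize, w: usize, h: usize) -> (usize, usize, usize) {
    let y_idx = y * w + x;
    let uv_row = h + y / 2;   // the UV plane starts right after the Y plane
    let uv_col = (x / 2) * 2; // U and V are interleaved in even/odd pairs
    (y_idx, uv_row * w + uv_col, uv_row * w + uv_col + 1)
}

fn main() {
    // Pixel (5, 3) of an 8x4 image: Y in row 3; its UV pair in row 4 + 1.
    assert_eq!(nv12_indices(5, 3, 8, 4), (29, 44, 45));
}
```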
@group(1) @binding(2) -var albedo_texture: texture_2d<f32>; +var depth_texture_uint: texture_2d<u32>; + +// ----------------------------------------------- +// Different kinds of albedo textures +// Only sampled when colormap == ALBEDO_TEXTURE +// ----------------------------------------------- +@group(1) @binding(3) +var albedo_texture_float_nofilter: texture_2d<f32>; + +@group(1) @binding(4) +var albedo_texture_sint: texture_2d<i32>; + +@group(1) @binding(5) +var albedo_texture_uint: texture_2d<u32>; + +@group(1) @binding(6) +var albedo_texture_float_filterable: texture_2d<f32>; + struct VertexOut { @builtin(position) @@ -93,11 +122,19 @@ struct PointData { // Backprojects the depth texture using the intrinsics passed in the uniform buffer. fn compute_point_data(quad_idx: u32) -> PointData { - let wh = textureDimensions(depth_texture); - let texcoords = UVec2(quad_idx % wh.x, quad_idx / wh.x); - - // TODO(cmc): expose knobs to linearize/normalize/flip/cam-to-plane depth. - let world_space_depth = depth_cloud_info.world_depth_from_texture_value * textureLoad(depth_texture, texcoords, 0).x; + var wh: UVec2; + var texcoords: UVec2; + var world_space_depth: f32 = depth_cloud_info.world_depth_from_texture_value; + if depth_cloud_info.depth_sample_type == SAMPLE_TYPE_FLOAT_FILTER { + wh = textureDimensions(depth_texture_float); + // TODO(cmc): expose knobs to linearize/normalize/flip/cam-to-plane depth. + texcoords = UVec2(quad_idx % wh.x, quad_idx / wh.x); + world_space_depth = world_space_depth * textureLoad(depth_texture_float, texcoords, 0).x; + } else { + wh = textureDimensions(depth_texture_uint); + texcoords = UVec2(quad_idx % wh.x, quad_idx / wh.x); + world_space_depth = world_space_depth * f32(textureLoad(depth_texture_uint, texcoords, 0).x); + } var data: PointData; if 0.0 < world_space_depth && world_space_depth < f32max { @@ -106,15 +143,24 @@ fn compute_point_data(quad_idx: u32) -> PointData { var color: Vec4; if depth_cloud_info.colormap == ALBEDO_TEXTURE { - color = textureSampleLevel( - albedo_texture, - trilinear_sampler, - Vec2(texcoords) / Vec2(textureDimensions(albedo_texture)), - 0.0 - ); - if depth_cloud_info.albedo_color_space == ALBEDO_COLOR_MONO { - color = Vec4(linear_from_srgb(Vec3(color.r)), 1.0); - } + if depth_cloud_info.albedo_sample_type == SAMPLE_TYPE_NV12 { + color = decode_nv12(albedo_texture_uint, Vec2(f32(texcoords.x), f32(texcoords.y)) / Vec2(f32(wh.x), f32(wh.y))); + } else { // TODO(filip): Support all sample types like in rectangle_fs.wgsl + if depth_cloud_info.depth_sample_type == SAMPLE_TYPE_FLOAT_FILTER { + color = textureSampleLevel( + depth_texture_float, + trilinear_sampler, + Vec2(texcoords) / Vec2(textureDimensions(depth_texture_float)), + 0.0 + ); + } else { + color = Vec4(textureLoad( + depth_texture_uint, + texcoords, + 0 + )) / 255.0; + } + } } else { color = Vec4(colormap_srgb(depth_cloud_info.colormap, world_space_depth), 1.0); } diff --git a/crates/re_renderer/shader/rectangle.wgsl b/crates/re_renderer/shader/rectangle.wgsl index afd0de119ca2..8459532fd753 100644 --- a/crates/re_renderer/shader/rectangle.wgsl +++ b/crates/re_renderer/shader/rectangle.wgsl @@ -10,6 +10,11 @@ const SAMPLE_TYPE_FLOAT_FILTER = 1u; const SAMPLE_TYPE_FLOAT_NOFILTER = 2u; const SAMPLE_TYPE_SINT_NOFILTER = 3u; const SAMPLE_TYPE_UINT_NOFILTER = 4u; +// ------------------ +// Encoded textures +// ------------------ +const SAMPLE_TYPE_NV12 = 5u; + // How do we do colormapping?
const COLOR_MAPPER_OFF = 1u; diff --git a/crates/re_renderer/shader/rectangle_fs.wgsl b/crates/re_renderer/shader/rectangle_fs.wgsl index 62f65952fe21..8cb214285d93 100644 --- a/crates/re_renderer/shader/rectangle_fs.wgsl +++ b/crates/re_renderer/shader/rectangle_fs.wgsl @@ -1,4 +1,5 @@ #import <./rectangle.wgsl> +#import <./decodings.wgsl> fn is_magnifying(pixel_coord: Vec2) -> bool { return fwidth(pixel_coord.x) < 1.0; @@ -64,6 +65,8 @@ fn fs_main(in: VertexOut) -> @location(0) Vec4 { let bottom = mix(v01, v11, fract(coord.x)); sampled_value = mix(top, bottom, fract(coord.y)); } + } else if rect_info.sample_type == SAMPLE_TYPE_NV12 { + sampled_value = decode_nv12(texture_uint, in.texcoord); } else { return ERROR_RGBA; // unknown sample type } diff --git a/crates/re_renderer/shader/rectangle_vs.wgsl b/crates/re_renderer/shader/rectangle_vs.wgsl index e0758c17c23a..3d30c441ba0a 100644 --- a/crates/re_renderer/shader/rectangle_vs.wgsl +++ b/crates/re_renderer/shader/rectangle_vs.wgsl @@ -4,11 +4,14 @@ fn vs_main(@builtin(vertex_index) v_idx: u32) -> VertexOut { let texcoord = Vec2(f32(v_idx / 2u), f32(v_idx % 2u)); let pos = texcoord.x * rect_info.extent_u + texcoord.y * rect_info.extent_v + - rect_info.top_left_corner_position; + rect_info.top_left_corner_position; var out: VertexOut; out.position = apply_depth_offset(frame.projection_from_world * Vec4(pos, 1.0), rect_info.depth_offset); + // out.texcoord = (texcoord.x * rect_info.extent_u + texcoord.y * rect_info.extent_v).xy; out.texcoord = texcoord; - + if rect_info.sample_type == SAMPLE_TYPE_NV12 { + out.texcoord.y *= (2.0 / 3.0); + } return out; } diff --git a/crates/re_renderer/src/renderer/depth_cloud.rs b/crates/re_renderer/src/renderer/depth_cloud.rs index 676d37e45391..3d2bce3114f6 100644 --- a/crates/re_renderer/src/renderer/depth_cloud.rs +++ b/crates/re_renderer/src/renderer/depth_cloud.rs @@ -16,34 +16,60 @@ use smallvec::smallvec; use crate::{ allocator::create_and_fill_uniform_buffer_batch, - draw_phases::{DrawPhase, OutlineMaskProcessor}, + draw_phases::{ DrawPhase, OutlineMaskProcessor }, include_shader_module, - resource_managers::{GpuTexture2D, ResourceManagerError}, + resource_managers::{ GpuTexture2D, ResourceManagerError }, view_builder::ViewBuilder, wgpu_resources::{ - BindGroupDesc, BindGroupEntry, BindGroupLayoutDesc, GpuBindGroup, GpuBindGroupLayoutHandle, - GpuRenderPipelineHandle, GpuTexture, PipelineLayoutDesc, RenderPipelineDesc, TextureDesc, + BindGroupDesc, + BindGroupEntry, + BindGroupLayoutDesc, + GpuBindGroup, + GpuBindGroupLayoutHandle, + GpuRenderPipelineHandle, + GpuTexture, + PipelineLayoutDesc, + RenderPipelineDesc, + TextureDesc, }, - Colormap, OutlineMaskPreference, PickingLayerObjectId, PickingLayerProcessor, + Colormap, + OutlineMaskPreference, + PickingLayerObjectId, + PickingLayerProcessor, + texture_info, }; use super::{ - DrawData, FileResolver, FileSystem, RenderContext, Renderer, SharedRendererData, + DrawData, + FileResolver, + FileSystem, + RenderContext, + Renderer, + SharedRendererData, WgpuResourcePools, + ColormappedTexture, }; // --- - -#[derive(Debug, Clone, Copy)] -enum AlbedoColorSpace { - Rgb, - Mono, -} - mod gpu_data { - use crate::{wgpu_buffer_types, PickingLayerObjectId}; + use crate::{ + wgpu_buffer_types::{ self, U32RowPadded }, + PickingLayerObjectId, + renderer::TextureEncoding, + texture_info, + }; + + // Keep in sync with mirror in depth_cloud.wgsl - use super::{AlbedoColorSpace, DepthCloudAlbedoData}; + // Which texture to read from? 
+ const SAMPLE_TYPE_FLOAT_FILTER: u32 = 1; + const SAMPLE_TYPE_FLOAT_NOFILTER: u32 = 2; + const SAMPLE_TYPE_SINT_NOFILTER: u32 = 3; + const SAMPLE_TYPE_UINT_NOFILTER: u32 = 4; + // ------------------ + // Encoded textures + // ------------------ + const SAMPLE_TYPE_NV12: u32 = 5; /// Keep in sync with mirror in `depth_cloud.wgsl.` #[repr(C, align(256))] @@ -69,19 +95,23 @@ mod gpu_data { /// Which colormap should be used. pub colormap: u32, - /// Is the albedo texture rgb or mono - pub albedo_color_space: wgpu_buffer_types::U32RowPadded, + /// Which texture sample to use + pub albedo_sample_type: U32RowPadded, + + /// Which texture sample to use + pub depth_sample_type: U32RowPadded, /// Changes over different draw-phases. pub radius_boost_in_ui_points: wgpu_buffer_types::F32RowPadded, - pub end_padding: [wgpu_buffer_types::PaddingRow; 16 - 4 - 3 - 1 - 1 - 1 - 1], + pub end_padding: [wgpu_buffer_types::PaddingRow; 16 - 4 - 3 - 1 - 1 - 1 - 1 - 1], } impl DepthCloudInfoUBO { pub fn from_depth_cloud( radius_boost_in_ui_points: f32, depth_cloud: &super::DepthCloud, + device_features: wgpu::Features ) -> Self { let super::DepthCloud { world_from_obj, @@ -90,14 +120,62 @@ mod gpu_data { point_radius_from_world_depth, max_depth_in_world, depth_dimensions: _, - depth_texture: _, + depth_texture, colormap, outline_mask_id, picking_object_id, - albedo_dimensions: _, - albedo_data: _, + albedo_texture, } = depth_cloud; + let albedo_sample_type = match albedo_texture { + Some(colormapped_texture) => { + match colormapped_texture.texture.format().sample_type(None) { + Some(wgpu::TextureSampleType::Float { .. }) => + match colormapped_texture.encoding { + Some(TextureEncoding::Nv12) => SAMPLE_TYPE_NV12, + _ => { + if + texture_info::is_float_filterable( + colormapped_texture.texture.format(), + device_features + ) + { + SAMPLE_TYPE_FLOAT_FILTER + } else { + SAMPLE_TYPE_FLOAT_NOFILTER + } + } + } + Some(wgpu::TextureSampleType::Uint) => { + match colormapped_texture.encoding { + Some(TextureEncoding::Nv12) => SAMPLE_TYPE_NV12, + _ => SAMPLE_TYPE_UINT_NOFILTER, + } + } + Some(wgpu::TextureSampleType::Sint) => SAMPLE_TYPE_SINT_NOFILTER, + _ => 0, + } + } + _ => { 0 } + }; + + let depth_sample_type = match depth_texture.texture.format().sample_type(None) { + Some(wgpu::TextureSampleType::Float { .. }) => { + if + texture_info::is_float_filterable( + depth_texture.texture.format(), + device_features + ) + { + SAMPLE_TYPE_FLOAT_FILTER + } else { + SAMPLE_TYPE_FLOAT_NOFILTER + } + } + Some(wgpu::TextureSampleType::Uint) => SAMPLE_TYPE_UINT_NOFILTER, + _ => panic!("Depth texture must be float or uint"), + }; + Self { world_from_obj: (*world_from_obj).into(), depth_camera_intrinsics: (*depth_camera_intrinsics).into(), @@ -106,36 +184,16 @@ mod gpu_data { point_radius_from_world_depth: *point_radius_from_world_depth, max_depth_in_world: *max_depth_in_world, colormap: *colormap as u32, - albedo_color_space: (depth_cloud - .albedo_data - .as_ref() - .map(|albedo_data| match albedo_data { - DepthCloudAlbedoData::Mono8(_) => AlbedoColorSpace::Mono, - _ => AlbedoColorSpace::Rgb, - }) - .unwrap_or(AlbedoColorSpace::Rgb) as u32) - .into(), radius_boost_in_ui_points: radius_boost_in_ui_points.into(), picking_layer_object_id: *picking_object_id, + albedo_sample_type: albedo_sample_type.into(), + depth_sample_type: depth_sample_type.into(), end_padding: Default::default(), } } } } -/// The raw data for the (optional) albedo texture. -// -// TODO(cmc): support more albedo data types. -// TODO(cmc): arrow buffers for u8... 
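A note on the UBO layout above: `DepthCloudInfoUBO` is `#[repr(C, align(256))]` because the batched uniform buffers are bound at dynamic offsets, which must be multiples of the device's `min_uniform_buffer_offset_alignment` (commonly 256 bytes). The `16 - 4 - 3 - 1 ...` expression is row bookkeeping that keeps the struct at exactly 16 rows of 16 bytes; since `align(256)` silently rounds the size up, a miscounted `- 1` would bloat every instance to 512 bytes rather than fail to compile. A compile-time guard is a cheap safety net (a sketch, not part of the diff):

```rust
// Would live next to the struct definition; fails the build instead of
// silently doubling the uniform buffer stride.
const _: () = assert!(std::mem::size_of::<DepthCloudInfoUBO>() == 256);
```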
-#[derive(Debug, Clone)] -pub enum DepthCloudAlbedoData { - Rgb8(Vec), - Rgb8Srgb(Vec), - Rgba8(Vec), - Rgba8Srgb(Vec), - Mono8(Vec), -} - pub struct DepthCloud { /// The extrinsics of the camera used for the projection. pub world_from_obj: glam::Mat4, @@ -160,7 +218,7 @@ pub struct DepthCloud { /// The actual data for the depth texture. /// /// Only textures with sample type `Float` are supported. - pub depth_texture: GpuTexture2D, + pub depth_texture: ColormappedTexture, /// Configures color mapping mode. pub colormap: Colormap, @@ -171,15 +229,7 @@ pub struct DepthCloud { /// Picking object id that applies for the entire depth cloud. pub picking_object_id: PickingLayerObjectId, - /// The dimensions of the (optional) albedo texture in pixels. - /// - /// Irrelevant if [`Self::albedo_data`] isn't set. - pub albedo_dimensions: glam::UVec2, - - /// The actual data for the (optional) albedo texture. - /// - /// If set, takes precedence over [`Self::colormap`]. - pub albedo_data: Option, + pub albedo_texture: Option, } impl DepthCloud { @@ -237,35 +287,33 @@ impl DrawData for DepthCloudDrawData { #[derive(thiserror::Error, Debug)] pub enum DepthCloudDrawDataError { - #[error("Depth texture format was {0:?}, only formats with sample type float are supported")] - InvalidDepthTextureFormat(wgpu::TextureFormat), + #[error( + "Depth texture format was {0:?}, only formats with sample type float are supported" + )] InvalidDepthTextureFormat(wgpu::TextureFormat), - #[error(transparent)] - ResourceManagerError(#[from] ResourceManagerError), + #[error("Invalid albedo texture format {0:?}")] InvalidAlbedoTextureFormat(wgpu::TextureFormat), + + #[error(transparent)] ResourceManagerError(#[from] ResourceManagerError), } impl DepthCloudDrawData { pub fn new( ctx: &mut RenderContext, - depth_clouds: &DepthClouds, + depth_clouds: &DepthClouds ) -> Result { crate::profile_function!(); - let DepthClouds { - clouds: depth_clouds, - radius_boost_in_ui_points_for_outlines, - } = depth_clouds; + let DepthClouds { clouds: depth_clouds, radius_boost_in_ui_points_for_outlines } = + depth_clouds; - let bg_layout = ctx - .renderers + let bg_layout = ctx.renderers .write() .get_or_create::<_, DepthCloudRenderer>( &ctx.shared_renderer_data, &mut ctx.gpu_resources, &ctx.device, - &mut ctx.resolver, - ) - .bind_group_layout; + &mut ctx.resolver + ).bind_group_layout; if depth_clouds.is_empty() { return Ok(DepthCloudDrawData { @@ -276,91 +324,84 @@ impl DepthCloudDrawData { let depth_cloud_ubo_binding_outlines = create_and_fill_uniform_buffer_batch( ctx, "depth_cloud_ubos".into(), - depth_clouds.iter().map(|dc| { - gpu_data::DepthCloudInfoUBO::from_depth_cloud( - *radius_boost_in_ui_points_for_outlines, - dc, - ) - }), + depth_clouds + .iter() + .map(|dc| { + gpu_data::DepthCloudInfoUBO::from_depth_cloud( + *radius_boost_in_ui_points_for_outlines, + dc, + ctx.device.features() + ) + }) ); let depth_cloud_ubo_binding_opaque = create_and_fill_uniform_buffer_batch( ctx, "depth_cloud_ubos".into(), depth_clouds .iter() - .map(|dc| gpu_data::DepthCloudInfoUBO::from_depth_cloud(0.0, dc)), + .map(|dc| + gpu_data::DepthCloudInfoUBO::from_depth_cloud(0.0, dc, ctx.device.features()) + ) ); let mut instances = Vec::with_capacity(depth_clouds.len()); + let mut albedo_texture_float_filterable = + ctx.texture_manager_2d.zeroed_texture_float().handle; + let mut albedo_texture_float_nofilter = + ctx.texture_manager_2d.zeroed_texture_float().handle; + let mut albedo_texture_sint = ctx.texture_manager_2d.zeroed_texture_sint().handle; + let 
mut albedo_texture_uint = ctx.texture_manager_2d.zeroed_texture_uint().handle; + + let mut depth_texture_float = ctx.texture_manager_2d.zeroed_texture_float().handle; + let mut depth_texture_uint = ctx.texture_manager_2d.zeroed_texture_uint().handle; + for (depth_cloud, ubo_outlines, ubo_opaque) in itertools::izip!( depth_clouds, depth_cloud_ubo_binding_outlines, depth_cloud_ubo_binding_opaque ) { - if !matches!( - depth_cloud.depth_texture.format().sample_type(None), - Some(wgpu::TextureSampleType::Float { filterable: _ }) - ) { - return Err(DepthCloudDrawDataError::InvalidDepthTextureFormat( - depth_cloud.depth_texture.format(), - )); + let depth_texture = &depth_cloud.depth_texture.texture; + let depth_texture_format = depth_texture.creation_desc.format; + match depth_texture_format.sample_type(None) { + Some(wgpu::TextureSampleType::Float { .. }) => { + depth_texture_float = depth_texture.handle; + } + Some(wgpu::TextureSampleType::Uint) => { + depth_texture_uint = depth_texture.handle; + } + _ => { + return Err( + DepthCloudDrawDataError::InvalidDepthTextureFormat(depth_texture_format) + ); + } } - let albedo_texture = depth_cloud - .albedo_data - .as_ref() - .map_or_else(|| { - create_and_upload_texture( - ctx, - (1, 1).into(), - wgpu::TextureFormat::Rgba8Unorm, - [0u8; 4].as_slice(), - ) - }, |data| match data { - DepthCloudAlbedoData::Rgba8(data) => create_and_upload_texture( - ctx, - depth_cloud.albedo_dimensions, - wgpu::TextureFormat::Rgba8Unorm, - data.as_slice(), - ), - DepthCloudAlbedoData::Rgba8Srgb(data) => create_and_upload_texture( - ctx, - depth_cloud.albedo_dimensions, - wgpu::TextureFormat::Rgba8UnormSrgb, - data.as_slice(), - ), - DepthCloudAlbedoData::Rgb8(data) => { - let data = data - .chunks(3) - .into_iter() - .flat_map(|c| [c[0], c[1], c[2], 255]) - .collect_vec(); - create_and_upload_texture( - ctx, - depth_cloud.albedo_dimensions, - wgpu::TextureFormat::Rgba8Unorm, - data.as_slice(), - ) + if + let Some(albedo_texture) = depth_cloud.albedo_texture + .as_ref() + .and_then(|t| Some(&t.texture)) + { + let texture_format = albedo_texture.creation_desc.format; + match texture_format.sample_type(None) { + Some(wgpu::TextureSampleType::Float { .. 
}) => { + if texture_info::is_float_filterable(texture_format, ctx.device.features()) { + albedo_texture_float_filterable = albedo_texture.handle; + } else { + albedo_texture_float_nofilter = albedo_texture.handle; + } } - DepthCloudAlbedoData::Rgb8Srgb(data) => { - let data = data - .chunks(3) - .into_iter() - .flat_map(|c| [c[0], c[1], c[2], 255]) - .collect_vec(); - create_and_upload_texture( - ctx, - depth_cloud.albedo_dimensions, - wgpu::TextureFormat::Rgba8UnormSrgb, - data.as_slice(), - ) + Some(wgpu::TextureSampleType::Sint) => { + albedo_texture_sint = albedo_texture.handle; + } + Some(wgpu::TextureSampleType::Uint) => { + albedo_texture_uint = albedo_texture.handle; + } + _ => { + return Err( + DepthCloudDrawDataError::InvalidAlbedoTextureFormat(texture_format) + ); } - DepthCloudAlbedoData::Mono8(data) => create_and_upload_texture( - ctx, - depth_cloud.albedo_dimensions, - wgpu::TextureFormat::R8Unorm, - data.as_slice(), - ), - }); + } + } let mk_bind_group = |label, ubo: BindGroupEntry| { ctx.gpu_resources.bind_groups.alloc( @@ -370,11 +411,15 @@ impl DepthCloudDrawData { label, entries: smallvec![ ubo, - BindGroupEntry::DefaultTextureView(depth_cloud.depth_texture.handle), - BindGroupEntry::DefaultTextureView(albedo_texture.handle) + BindGroupEntry::DefaultTextureView(depth_texture_float), + BindGroupEntry::DefaultTextureView(depth_texture_uint), + BindGroupEntry::DefaultTextureView(albedo_texture_float_nofilter), + BindGroupEntry::DefaultTextureView(albedo_texture_sint), + BindGroupEntry::DefaultTextureView(albedo_texture_uint), + BindGroupEntry::DefaultTextureView(albedo_texture_float_filterable) ], layout: bg_layout, - }), + }) ) }; @@ -393,54 +438,6 @@ impl DepthCloudDrawData { } } -fn create_and_upload_texture( - ctx: &RenderContext, - dimensions: glam::UVec2, - format: wgpu::TextureFormat, - data: &[T], -) -> GpuTexture { - crate::profile_function!(); - - let texture_size = wgpu::Extent3d { - width: dimensions.x, - height: dimensions.y, - depth_or_array_layers: 1, - }; - let texture_desc = TextureDesc { - label: "texture".into(), - size: texture_size, - mip_level_count: 1, - sample_count: 1, - dimension: wgpu::TextureDimension::D2, - format, - usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST, - }; - let texture = ctx.gpu_resources.textures.alloc(&ctx.device, &texture_desc); - - let format_info = texture_desc.format; - let width_blocks = dimensions.x / (format_info.block_dimensions().0 as u32); - - let mut texture_staging = ctx.cpu_write_gpu_read_belt.lock().allocate::( - &ctx.device, - &ctx.gpu_resources.buffers, - data.len(), - ); - texture_staging.extend_from_slice(data); - - texture_staging.copy_to_texture2d( - ctx.active_frame.before_view_builder_encoder.lock().get(), - wgpu::ImageCopyTexture { - texture: &texture.inner.texture, - mip_level: 0, - origin: wgpu::Origin3d::ZERO, - aspect: wgpu::TextureAspect::All, - }, - glam::UVec2::new(texture_size.width, texture_size.height), - ); - - texture -} - pub struct DepthCloudRenderer { render_pipeline_color: GpuRenderPipelineHandle, render_pipeline_picking_layer: GpuRenderPipelineHandle, @@ -452,18 +449,14 @@ impl Renderer for DepthCloudRenderer { type RendererDrawData = DepthCloudDrawData; fn participated_phases() -> &'static [DrawPhase] { - &[ - DrawPhase::Opaque, - DrawPhase::PickingLayer, - DrawPhase::OutlineMask, - ] + &[DrawPhase::Opaque, DrawPhase::PickingLayer, DrawPhase::OutlineMask] } fn create_renderer( shared_data: &SharedRendererData, pools: &mut WgpuResourcePools, device: 
&wgpu::Device, - resolver: &mut FileResolver, + resolver: &mut FileResolver ) -> Self { crate::profile_function!(); @@ -478,8 +471,9 @@ impl Renderer for DepthCloudRenderer { ty: wgpu::BindingType::Buffer { ty: wgpu::BufferBindingType::Uniform, has_dynamic_offset: false, - min_binding_size: (std::mem::size_of::() - as u64) + min_binding_size: ( + std::mem::size_of::() as u64 + ) .try_into() .ok(), }, @@ -489,7 +483,7 @@ impl Renderer for DepthCloudRenderer { binding: 1, visibility: wgpu::ShaderStages::VERTEX, ty: wgpu::BindingType::Texture { - sample_type: wgpu::TextureSampleType::Float { filterable: false }, + sample_type: wgpu::TextureSampleType::Float { filterable: true }, view_dimension: wgpu::TextureViewDimension::D2, multisampled: false, }, @@ -499,14 +493,54 @@ impl Renderer for DepthCloudRenderer { binding: 2, visibility: wgpu::ShaderStages::VERTEX, ty: wgpu::BindingType::Texture { - sample_type: wgpu::TextureSampleType::Float { filterable: true }, + sample_type: wgpu::TextureSampleType::Uint, + view_dimension: wgpu::TextureViewDimension::D2, + multisampled: false, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 3, + visibility: wgpu::ShaderStages::VERTEX, + ty: wgpu::BindingType::Texture { + sample_type: wgpu::TextureSampleType::Float { filterable: false }, view_dimension: wgpu::TextureViewDimension::D2, multisampled: false, }, count: None, }, + wgpu::BindGroupLayoutEntry { + binding: 4, + visibility: wgpu::ShaderStages::VERTEX, + ty: wgpu::BindingType::Texture { + sample_type: wgpu::TextureSampleType::Sint, + view_dimension: wgpu::TextureViewDimension::D2, + multisampled: false, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 5, + visibility: wgpu::ShaderStages::VERTEX, + ty: wgpu::BindingType::Texture { + sample_type: wgpu::TextureSampleType::Uint, + view_dimension: wgpu::TextureViewDimension::D2, + multisampled: false, + }, + count: None, + }, + wgpu::BindGroupLayoutEntry { + binding: 6, + visibility: wgpu::ShaderStages::VERTEX, + ty: wgpu::BindingType::Texture { + sample_type: wgpu::TextureSampleType::Float { filterable: true }, + view_dimension: wgpu::TextureViewDimension::D2, + multisampled: false, + }, + count: None, + } ], - }), + }) ); let pipeline_layout = pools.pipeline_layouts.get_or_create( @@ -515,13 +549,13 @@ impl Renderer for DepthCloudRenderer { label: "depth_cloud_rp_layout".into(), entries: vec![shared_data.global_bindings.layout, bind_group_layout], }), - &pools.bind_group_layouts, + &pools.bind_group_layouts ); let shader_module = pools.shader_modules.get_or_create( device, resolver, - &include_shader_module!("../../shader/depth_cloud.wgsl"), + &include_shader_module!("../../shader/depth_cloud.wgsl") ); let render_pipeline_desc_color = RenderPipelineDesc { @@ -549,7 +583,7 @@ impl Renderer for DepthCloudRenderer { device, &render_pipeline_desc_color, &pools.pipeline_layouts, - &pools.shader_modules, + &pools.shader_modules ); let render_pipeline_picking_layer = pools.render_pipelines.get_or_create( device, @@ -562,7 +596,7 @@ impl Renderer for DepthCloudRenderer { ..render_pipeline_desc_color.clone() }), &pools.pipeline_layouts, - &pools.shader_modules, + &pools.shader_modules ); let render_pipeline_outline_mask = pools.render_pipelines.get_or_create( device, @@ -573,12 +607,12 @@ impl Renderer for DepthCloudRenderer { depth_stencil: OutlineMaskProcessor::MASK_DEPTH_STATE, // Alpha to coverage doesn't work with the mask integer target. 
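
The layout needs one statically typed texture slot per sample type because a WGSL binding cannot accept textures of differing sample types. A hedged sketch of a helper that could generate the repetitive entries above (`d2_texture_entry` is hypothetical, not part of the PR):

```rust
// Hypothetical helper: every texture slot in the layout above differs only
// in binding index and sample type, so bindings 1..=6 could come from one
// constructor.
fn d2_texture_entry(
    binding: u32,
    sample_type: wgpu::TextureSampleType,
) -> wgpu::BindGroupLayoutEntry {
    wgpu::BindGroupLayoutEntry {
        binding,
        visibility: wgpu::ShaderStages::VERTEX,
        ty: wgpu::BindingType::Texture {
            sample_type,
            view_dimension: wgpu::TextureViewDimension::D2,
            multisampled: false,
        },
        count: None,
    }
}
```

For instance, binding 2 (the uint depth texture) would be `d2_texture_entry(2, wgpu::TextureSampleType::Uint)`.
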
multisample: OutlineMaskProcessor::mask_default_msaa_state( - shared_data.config.hardware_tier, + shared_data.config.hardware_tier ), ..render_pipeline_desc_color }), &pools.pipeline_layouts, - &pools.shader_modules, + &pools.shader_modules ); DepthCloudRenderer { @@ -594,7 +628,7 @@ impl Renderer for DepthCloudRenderer { pools: &'a WgpuResourcePools, phase: DrawPhase, pass: &mut wgpu::RenderPass<'a>, - draw_data: &'a Self::RendererDrawData, + draw_data: &'a Self::RendererDrawData ) -> anyhow::Result<()> { crate::profile_function!(); if draw_data.instances.is_empty() { diff --git a/crates/re_renderer/src/renderer/mod.rs b/crates/re_renderer/src/renderer/mod.rs index b8b2bc508968..9e900498944c 100644 --- a/crates/re_renderer/src/renderer/mod.rs +++ b/crates/re_renderer/src/renderer/mod.rs @@ -15,7 +15,7 @@ pub use point_cloud::{ mod depth_cloud; pub use self::depth_cloud::{ - DepthCloud, DepthCloudAlbedoData, DepthCloudDrawData, DepthCloudRenderer, DepthClouds, + DepthCloud, DepthCloudDrawData, DepthCloudRenderer, DepthClouds, }; mod test_triangle; @@ -24,7 +24,7 @@ pub use test_triangle::TestTriangleDrawData; mod rectangles; pub use rectangles::{ ColorMapper, ColormappedTexture, RectangleDrawData, RectangleOptions, TextureFilterMag, - TextureFilterMin, TexturedRect, + TextureFilterMin, TexturedRect, TextureEncoding }; mod mesh_renderer; diff --git a/crates/re_renderer/src/renderer/rectangles.rs b/crates/re_renderer/src/renderer/rectangles.rs index 8b380a30a2bd..3a84ab0057ca 100644 --- a/crates/re_renderer/src/renderer/rectangles.rs +++ b/crates/re_renderer/src/renderer/rectangles.rs @@ -10,26 +10,41 @@ //! Since we're not allowed to bind many textures at once (no widespread bindless support!), //! we are forced to have individual bind groups per rectangle and thus a draw call per rectangle. -use itertools::{izip, Itertools as _}; +use itertools::{ izip, Itertools as _ }; use smallvec::smallvec; use crate::{ allocator::create_and_fill_uniform_buffer_batch, depth_offset::DepthOffset, - draw_phases::{DrawPhase, OutlineMaskProcessor}, + draw_phases::{ DrawPhase, OutlineMaskProcessor }, include_shader_module, - resource_managers::{GpuTexture2D, ResourceManagerError}, + resource_managers::{ GpuTexture2D, ResourceManagerError }, texture_info, view_builder::ViewBuilder, wgpu_resources::{ - BindGroupDesc, BindGroupEntry, BindGroupLayoutDesc, GpuBindGroup, GpuBindGroupLayoutHandle, - GpuRenderPipelineHandle, PipelineLayoutDesc, RenderPipelineDesc, SamplerDesc, + BindGroupDesc, + BindGroupEntry, + BindGroupLayoutDesc, + GpuBindGroup, + GpuBindGroupLayoutHandle, + GpuRenderPipelineHandle, + PipelineLayoutDesc, + RenderPipelineDesc, + SamplerDesc, }, - Colormap, OutlineMaskPreference, PickingLayerProcessor, Rgba, + Colormap, + OutlineMaskPreference, + PickingLayerProcessor, + Rgba, }; use super::{ - DrawData, FileResolver, FileSystem, RenderContext, Renderer, SharedRendererData, + DrawData, + FileResolver, + FileSystem, + RenderContext, + Renderer, + SharedRendererData, WgpuResourcePools, }; @@ -49,11 +64,21 @@ pub enum TextureFilterMin { // TODO(andreas): Offer mipmapping here? } +#[derive(Clone, Debug, PartialEq)] +pub enum TextureEncoding { + Mono, + Rgb, + Rgba, + Nv12, +} + /// Describes a texture and how to map it to a color. #[derive(Clone)] pub struct ColormappedTexture { pub texture: GpuTexture2D, + pub encoding: Option, + /// Min/max range of the values in the texture. /// Used to normalize the input values (squash them to the 0-1 range). 
pub range: [f32; 2], @@ -95,6 +120,22 @@ impl ColormappedTexture { range: [0.0, 1.0], gamma: 1.0, color_mapper: None, + encoding: None, + } + } + + /// Calculate the real texture width and height, + /// taking into account the texture's encoding. + pub fn width_height(&self) -> [u32; 2] { + let texture_dim = self.texture.width_height(); + match &self.encoding { + &Some(TextureEncoding::Nv12) => { + let real_dim = + glam::Vec2::new(texture_dim[0] as f32, texture_dim[1] as f32) * + glam::Vec2::new(1.0, 2.0 / 3.0); + [real_dim.x as u32, real_dim.y as u32] + } + _ => texture_dim, } } } @@ -144,11 +185,9 @@ impl Default for RectangleOptions { #[derive(thiserror::Error, Debug)] pub enum RectangleError { - #[error(transparent)] - ResourceManagerError(#[from] ResourceManagerError), + #[error(transparent)] ResourceManagerError(#[from] ResourceManagerError), - #[error("Texture required special features: {0:?}")] - SpecialFeatures(wgpu::Features), + #[error("Texture required special features: {0:?}")] SpecialFeatures(wgpu::Features), // There's really no need for users to be able to sample depth textures. // We don't get filtering of depth textures any way. @@ -158,20 +197,21 @@ pub enum RectangleError { #[error("Color mapping is being applied to a four-component RGBA texture")] ColormappingRgbaTexture, - #[error("Only 1 and 4 component textures are supported, got {0} components")] - UnsupportedComponentCount(u8), + #[error( + "Only 1 and 4 component textures are supported, got {0} components" + )] UnsupportedComponentCount(u8), #[error("No color mapper was supplied for this 1-component texture")] MissingColorMapper, - #[error("Invalid color map texture format: {0:?}")] - UnsupportedColormapTextureFormat(wgpu::TextureFormat), + #[error("Invalid color map texture format: {0:?}")] UnsupportedColormapTextureFormat( + wgpu::TextureFormat, + ), } mod gpu_data { - use crate::{texture_info, wgpu_buffer_types}; - - use super::{ColorMapper, RectangleError, TexturedRect}; + use super::{ ColorMapper, RectangleError, TextureEncoding, TexturedRect }; + use crate::{ texture_info, wgpu_buffer_types }; // Keep in sync with mirror in rectangle.wgsl @@ -180,6 +220,10 @@ mod gpu_data { const SAMPLE_TYPE_FLOAT_NOFILTER: u32 = 2; const SAMPLE_TYPE_SINT_NOFILTER: u32 = 3; const SAMPLE_TYPE_UINT_NOFILTER: u32 = 4; + // ------------------ + // Encoded textures + // ------------------ + const SAMPLE_TYPE_NV12: u32 = 5; // How do we do colormapping? const COLOR_MAPPER_OFF: u32 = 1; @@ -219,7 +263,7 @@ mod gpu_data { impl UniformBuffer { pub fn from_textured_rect( rectangle: &super::TexturedRect, - device_features: wgpu::Features, + device_features: wgpu::Features ) -> Result { let texture_format = rectangle.colormapped_texture.texture.format(); @@ -236,6 +280,7 @@ mod gpu_data { range, gamma, color_mapper, + encoding, } = colormapped_texture; let super::RectangleOptions { @@ -247,15 +292,20 @@ mod gpu_data { } = options; let sample_type = match texture_format.sample_type(None) { - Some(wgpu::TextureSampleType::Float { .. }) => { - if texture_info::is_float_filterable(texture_format, device_features) { - SAMPLE_TYPE_FLOAT_FILTER - } else { - SAMPLE_TYPE_FLOAT_NOFILTER - } + Some(wgpu::TextureSampleType::Float { .. 
}) => if + texture_info::is_float_filterable(texture_format, device_features) + { + SAMPLE_TYPE_FLOAT_FILTER + } else { + SAMPLE_TYPE_FLOAT_NOFILTER } Some(wgpu::TextureSampleType::Sint) => SAMPLE_TYPE_SINT_NOFILTER, - Some(wgpu::TextureSampleType::Uint) => SAMPLE_TYPE_UINT_NOFILTER, + Some(wgpu::TextureSampleType::Uint) => { + match encoding { + Some(TextureEncoding::Nv12) => SAMPLE_TYPE_NV12, + _ => SAMPLE_TYPE_UINT_NOFILTER, + } + } _ => { return Err(RectangleError::DepthTexturesNotSupported); } @@ -265,18 +315,22 @@ mod gpu_data { let color_mapper_int; match texture_info::num_texture_components(texture_format) { - 1 => match color_mapper { - Some(ColorMapper::Function(colormap)) => { - color_mapper_int = COLOR_MAPPER_FUNCTION; - colormap_function = *colormap as u32; - } - Some(ColorMapper::Texture(_)) => { - color_mapper_int = COLOR_MAPPER_TEXTURE; - } - None => { - return Err(RectangleError::MissingColorMapper); + 1 => + match color_mapper { + Some(ColorMapper::Function(colormap)) => { + color_mapper_int = COLOR_MAPPER_FUNCTION; + colormap_function = *colormap as u32; + } + Some(ColorMapper::Texture(_)) => { + color_mapper_int = COLOR_MAPPER_TEXTURE; + } + None => { + if encoding != &Some(TextureEncoding::Nv12) { + return Err(RectangleError::MissingColorMapper); + } + color_mapper_int = COLOR_MAPPER_OFF; + } } - }, 4 => { if color_mapper.is_some() { return Err(RectangleError::ColormappingRgbaTexture); @@ -285,7 +339,7 @@ mod gpu_data { } } num_components => { - return Err(RectangleError::UnsupportedComponentCount(num_components)) + return Err(RectangleError::UnsupportedComponentCount(num_components)); } } @@ -297,7 +351,6 @@ mod gpu_data { super::TextureFilterMag::Linear => FILTER_BILINEAR, super::TextureFilterMag::Nearest => FILTER_NEAREST, }; - Ok(Self { top_left_corner_position: (*top_left_corner_position).into(), colormap_function, @@ -336,7 +389,7 @@ impl DrawData for RectangleDrawData { impl RectangleDrawData { pub fn new( ctx: &mut RenderContext, - rectangles: &[TexturedRect], + rectangles: &[TexturedRect] ) -> Result { crate::profile_function!(); @@ -345,7 +398,7 @@ impl RectangleDrawData { &ctx.shared_renderer_data, &mut ctx.gpu_resources, &ctx.device, - &mut ctx.resolver, + &mut ctx.resolver ); if rectangles.is_empty() { @@ -363,7 +416,7 @@ impl RectangleDrawData { let uniform_buffer_bindings = create_and_fill_uniform_buffer_batch( ctx, "rectangle uniform buffers".into(), - uniform_buffers.into_iter(), + uniform_buffers.into_iter() ); let mut instances = Vec::with_capacity(rectangles.len()); @@ -371,12 +424,12 @@ impl RectangleDrawData { let options = &rectangle.options; let sampler = ctx.gpu_resources.samplers.get_or_create( &ctx.device, - &SamplerDesc { + &(SamplerDesc { label: format!( "rectangle sampler mag {:?} min {:?}", - options.texture_filter_magnification, options.texture_filter_minification - ) - .into(), + options.texture_filter_magnification, + options.texture_filter_minification + ).into(), mag_filter: match options.texture_filter_magnification { TextureFilterMag::Linear => wgpu::FilterMode::Linear, TextureFilterMag::Nearest => wgpu::FilterMode::Nearest, @@ -387,15 +440,13 @@ impl RectangleDrawData { }, mipmap_filter: wgpu::FilterMode::Nearest, ..Default::default() - }, + }) ); let texture = &rectangle.colormapped_texture.texture; let texture_format = texture.creation_desc.format; if texture_format.required_features() != Default::default() { - return Err(RectangleError::SpecialFeatures( - texture_format.required_features(), - )); + return 
Err(RectangleError::SpecialFeatures(texture_format.required_features())); } // We set up several texture sources, then instruct the shader to read from at most one of them. @@ -424,8 +475,8 @@ impl RectangleDrawData { } // We also set up an optional colormap texture. - let colormap_texture = if let Some(ColorMapper::Texture(handle)) = - &rectangle.colormapped_texture.color_mapper + let colormap_texture = if + let Some(ColorMapper::Texture(handle)) = &rectangle.colormapped_texture.color_mapper { let format = handle.format(); if format != wgpu::TextureFormat::Rgba8UnormSrgb { @@ -440,7 +491,7 @@ impl RectangleDrawData { bind_group: ctx.gpu_resources.bind_groups.alloc( &ctx.device, &ctx.gpu_resources, - &BindGroupDesc { + &(BindGroupDesc { label: "RectangleInstance::bind_group".into(), entries: smallvec![ uniform_buffer, @@ -449,10 +500,10 @@ impl RectangleDrawData { BindGroupEntry::DefaultTextureView(texture_sint), BindGroupEntry::DefaultTextureView(texture_uint), BindGroupEntry::DefaultTextureView(colormap_texture), - BindGroupEntry::DefaultTextureView(texture_float_filterable), + BindGroupEntry::DefaultTextureView(texture_float_filterable) ], layout: rectangle_renderer.bind_group_layout, - }, + }) ), draw_outline_mask: rectangle.options.outline_mask.is_some(), }); @@ -476,13 +527,13 @@ impl Renderer for RectangleRenderer { shared_data: &SharedRendererData, pools: &mut WgpuResourcePools, device: &wgpu::Device, - resolver: &mut FileResolver, + resolver: &mut FileResolver ) -> Self { crate::profile_function!(); let bind_group_layout = pools.bind_group_layouts.get_or_create( device, - &BindGroupLayoutDesc { + &(BindGroupLayoutDesc { label: "RectangleRenderer::bind_group_layout".into(), entries: vec![ wgpu::BindGroupLayoutEntry { @@ -493,8 +544,9 @@ impl Renderer for RectangleRenderer { // We could use dynamic offset here into a single large buffer. // But we have to set a new texture anyways and its doubtful that splitting the bind group is of any use. has_dynamic_offset: false, - min_binding_size: (std::mem::size_of::() - as u64) + min_binding_size: ( + std::mem::size_of::() as u64 + ) .try_into() .ok(), }, @@ -561,29 +613,29 @@ impl Renderer for RectangleRenderer { multisampled: false, }, count: None, - }, + } ], - }, + }) ); let pipeline_layout = pools.pipeline_layouts.get_or_create( device, - &PipelineLayoutDesc { + &(PipelineLayoutDesc { label: "RectangleRenderer::pipeline_layout".into(), entries: vec![shared_data.global_bindings.layout, bind_group_layout], - }, - &pools.bind_group_layouts, + }), + &pools.bind_group_layouts ); let shader_module_vs = pools.shader_modules.get_or_create( device, resolver, - &include_shader_module!("../../shader/rectangle_vs.wgsl"), + &include_shader_module!("../../shader/rectangle_vs.wgsl") ); let shader_module_fs = pools.shader_modules.get_or_create( device, resolver, - &include_shader_module!("../../shader/rectangle_fs.wgsl"), + &include_shader_module!("../../shader/rectangle_fs.wgsl") ); let render_pipeline_desc_color = RenderPipelineDesc { @@ -594,12 +646,14 @@ impl Renderer for RectangleRenderer { fragment_entrypoint: "fs_main".into(), fragment_handle: shader_module_fs, vertex_buffers: smallvec![], - render_targets: smallvec![Some(wgpu::ColorTargetState { - format: ViewBuilder::MAIN_TARGET_COLOR_FORMAT, - // TODO(andreas): have two render pipelines, an opaque one and a transparent one. Transparent shouldn't write depth! 
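
For context on the `width_height()` helper added to `ColormappedTexture` earlier in this file: NV12 stores a W×H image as a W×(3H/2) texture (a full-resolution Y plane stacked on a half-height interleaved UV plane), so the real image height is the texture height scaled by 2/3. A self-contained sketch of that arithmetic:

```rust
// NV12: W x H luma rows followed by W x H/2 interleaved chroma rows, so the
// backing texture is W x (3H/2) and the image height is texture_height * 2/3.
fn nv12_image_size(texture_dim: [u32; 2]) -> [u32; 2] {
    [texture_dim[0], ((texture_dim[1] as f32) * (2.0 / 3.0)) as u32]
}

#[test]
fn nv12_1080p_roundtrip() {
    // A 1920x1080 image occupies 1080 + 540 = 1620 texture rows.
    assert_eq!(nv12_image_size([1920, 1620]), [1920, 1080]);
}
```
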
- blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING), - write_mask: wgpu::ColorWrites::ALL, - })], + render_targets: smallvec![ + Some(wgpu::ColorTargetState { + format: ViewBuilder::MAIN_TARGET_COLOR_FORMAT, + // TODO(andreas): have two render pipelines, an opaque one and a transparent one. Transparent shouldn't write depth! + blend: Some(wgpu::BlendState::PREMULTIPLIED_ALPHA_BLENDING), + write_mask: wgpu::ColorWrites::ALL, + }) + ], primitive: wgpu::PrimitiveState { topology: wgpu::PrimitiveTopology::TriangleStrip, cull_mode: None, @@ -612,35 +666,35 @@ impl Renderer for RectangleRenderer { device, &render_pipeline_desc_color, &pools.pipeline_layouts, - &pools.shader_modules, + &pools.shader_modules ); let render_pipeline_picking_layer = pools.render_pipelines.get_or_create( device, - &RenderPipelineDesc { + &(RenderPipelineDesc { label: "RectangleRenderer::render_pipeline_picking_layer".into(), fragment_entrypoint: "fs_main_picking_layer".into(), render_targets: smallvec![Some(PickingLayerProcessor::PICKING_LAYER_FORMAT.into())], depth_stencil: PickingLayerProcessor::PICKING_LAYER_DEPTH_STATE, multisample: PickingLayerProcessor::PICKING_LAYER_MSAA_STATE, ..render_pipeline_desc_color.clone() - }, + }), &pools.pipeline_layouts, - &pools.shader_modules, + &pools.shader_modules ); let render_pipeline_outline_mask = pools.render_pipelines.get_or_create( device, - &RenderPipelineDesc { + &(RenderPipelineDesc { label: "RectangleRenderer::render_pipeline_outline_mask".into(), fragment_entrypoint: "fs_main_outline_mask".into(), render_targets: smallvec![Some(OutlineMaskProcessor::MASK_FORMAT.into())], depth_stencil: OutlineMaskProcessor::MASK_DEPTH_STATE, multisample: OutlineMaskProcessor::mask_default_msaa_state( - shared_data.config.hardware_tier, + shared_data.config.hardware_tier ), ..render_pipeline_desc_color - }, + }), &pools.pipeline_layouts, - &pools.shader_modules, + &pools.shader_modules ); RectangleRenderer { @@ -656,7 +710,7 @@ impl Renderer for RectangleRenderer { pools: &'a WgpuResourcePools, phase: DrawPhase, pass: &mut wgpu::RenderPass<'a>, - draw_data: &'a Self::RendererDrawData, + draw_data: &'a Self::RendererDrawData ) -> anyhow::Result<()> { crate::profile_function!(); if draw_data.instances.is_empty() { @@ -686,10 +740,6 @@ impl Renderer for RectangleRenderer { fn participated_phases() -> &'static [DrawPhase] { // TODO(andreas): This a hack. We have both opaque and transparent. 
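
The sample-type selection in `from_textured_rect` above boils down to a small decision table; a condensed sketch, with constants mirroring the `gpu_data` module (`SAMPLE_TYPE_FLOAT_FILTER = 1` is assumed from the numbering, the others appear in the diff):

```rust
const SAMPLE_TYPE_FLOAT_FILTER: u32 = 1; // assumed from the numbering
const SAMPLE_TYPE_FLOAT_NOFILTER: u32 = 2;
const SAMPLE_TYPE_SINT_NOFILTER: u32 = 3;
const SAMPLE_TYPE_UINT_NOFILTER: u32 = 4;
const SAMPLE_TYPE_NV12: u32 = 5;

// A Uint texture tagged as NV12 is rerouted to the NV12 decode path; depth
// formats yield None here (the real code returns DepthTexturesNotSupported).
fn pick_sample_type(
    sample_type: Option<wgpu::TextureSampleType>,
    is_nv12: bool,
    float_filterable: bool,
) -> Option<u32> {
    match sample_type? {
        wgpu::TextureSampleType::Float { .. } if float_filterable => {
            Some(SAMPLE_TYPE_FLOAT_FILTER)
        }
        wgpu::TextureSampleType::Float { .. } => Some(SAMPLE_TYPE_FLOAT_NOFILTER),
        wgpu::TextureSampleType::Sint => Some(SAMPLE_TYPE_SINT_NOFILTER),
        wgpu::TextureSampleType::Uint if is_nv12 => Some(SAMPLE_TYPE_NV12),
        wgpu::TextureSampleType::Uint => Some(SAMPLE_TYPE_UINT_NOFILTER),
        _ => None,
    }
}
```
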
- &[ - DrawPhase::OutlineMask, - DrawPhase::Opaque, - DrawPhase::PickingLayer, - ] + &[DrawPhase::OutlineMask, DrawPhase::Opaque, DrawPhase::PickingLayer] } } diff --git a/crates/re_renderer/src/workspace_shaders.rs b/crates/re_renderer/src/workspace_shaders.rs index 7d999da73645..845b90623493 100644 --- a/crates/re_renderer/src/workspace_shaders.rs +++ b/crates/re_renderer/src/workspace_shaders.rs @@ -37,6 +37,12 @@ pub fn init() { fs.create_file(virtpath, content).unwrap(); } + { + let virtpath = Path::new("shader/decodings.wgsl"); + let content = include_str!("../shader/decodings.wgsl").into(); + fs.create_file(virtpath, content).unwrap(); + } + { let virtpath = Path::new("shader/depth_cloud.wgsl"); let content = include_str!("../shader/depth_cloud.wgsl").into(); diff --git a/crates/re_sdk/src/lib.rs b/crates/re_sdk/src/lib.rs index a7e7fe27607f..bddf42773f19 100644 --- a/crates/re_sdk/src/lib.rs +++ b/crates/re_sdk/src/lib.rs @@ -77,7 +77,8 @@ pub mod components { EncodedMesh3D, InstanceKey, KeypointId, Label, LineStrip2D, LineStrip3D, Mat3x3, Mesh3D, MeshFormat, MeshId, Pinhole, Point2D, Point3D, Quaternion, Radius, RawMesh3D, Rect2D, Rigid3, Scalar, ScalarPlotProps, Size3D, Tensor, TensorData, TensorDataMeaning, - TensorDimension, TensorId, TextEntry, Transform, Vec2D, Vec3D, Vec4D, ViewCoordinates, + TensorDimension, TensorId, TextEntry, Transform, Vec2D, Vec3D, Vec4D, + ViewCoordinates, }; } diff --git a/crates/re_ui/data/icons/app_icon_mac.png b/crates/re_ui/data/icons/app_icon_mac.png index 80df0b007d75..1b951089cf2a 100644 Binary files a/crates/re_ui/data/icons/app_icon_mac.png and b/crates/re_ui/data/icons/app_icon_mac.png differ diff --git a/crates/re_ui/data/icons/app_icon_windows.png b/crates/re_ui/data/icons/app_icon_windows.png index fd3787d6dce8..1b951089cf2a 100644 Binary files a/crates/re_ui/data/icons/app_icon_windows.png and b/crates/re_ui/data/icons/app_icon_windows.png differ diff --git a/crates/re_ui/data/icons/rerun_menu.png b/crates/re_ui/data/icons/rerun_menu.png index b74059bf3dfa..decd05d31ed4 100644 Binary files a/crates/re_ui/data/icons/rerun_menu.png and b/crates/re_ui/data/icons/rerun_menu.png differ diff --git a/crates/re_ui/data/logo_dark_mode.png b/crates/re_ui/data/logo_dark_mode.png index 1b951089cf2a..decd05d31ed4 100644 Binary files a/crates/re_ui/data/logo_dark_mode.png and b/crates/re_ui/data/logo_dark_mode.png differ diff --git a/crates/re_ui/data/logo_light_mode.png b/crates/re_ui/data/logo_light_mode.png index 1b951089cf2a..decd05d31ed4 100644 Binary files a/crates/re_ui/data/logo_light_mode.png and b/crates/re_ui/data/logo_light_mode.png differ diff --git a/crates/re_ui/src/command.rs b/crates/re_ui/src/command.rs index f41bf4fa43b0..f38ae44e7110 100644 --- a/crates/re_ui/src/command.rs +++ b/crates/re_ui/src/command.rs @@ -40,12 +40,11 @@ pub enum Command { SelectionNext, ToggleCommandPalette, - // Playback: - PlaybackTogglePlayPause, - PlaybackStepBack, - PlaybackStepForward, - PlaybackRestart, + // PlaybackTogglePlayPause, + // PlaybackStepBack, + // PlaybackStepForward, + // PlaybackRestart, } impl Command { @@ -114,18 +113,18 @@ impl Command { ("Command palette…", "Toggle the command palette window") } - Command::PlaybackTogglePlayPause => { - ("Toggle play/pause", "Either play or pause the time") - } - Command::PlaybackStepBack => ( - "Step time back", - "Move the time marker back to the previous point in time with any data", - ), - Command::PlaybackStepForward => ( - "Step time forward", - "Move the time marker to the next point in time 
with any data", - ), - Command::PlaybackRestart => ("Restart", "Restart from beginning of timeline"), + // Command::PlaybackTogglePlayPause => { + // ("Toggle play/pause", "Either play or pause the time") + // } + // Command::PlaybackStepBack => ( + // "Step time back", + // "Move the time marker back to the previous point in time with any data", + // ), + // Command::PlaybackStepForward => ( + // "Step time forward", + // "Move the time marker to the next point in time with any data", + // ), + // Command::PlaybackRestart => ("Restart", "Restart from beginning of timeline"), } } @@ -181,11 +180,10 @@ impl Command { Command::SelectionPrevious => Some(ctrl_shift(Key::ArrowLeft)), Command::SelectionNext => Some(ctrl_shift(Key::ArrowRight)), Command::ToggleCommandPalette => Some(cmd(Key::P)), - - Command::PlaybackTogglePlayPause => Some(key(Key::Space)), - Command::PlaybackStepBack => Some(key(Key::ArrowLeft)), - Command::PlaybackStepForward => Some(key(Key::ArrowRight)), - Command::PlaybackRestart => Some(cmd(Key::ArrowLeft)), + // Command::PlaybackTogglePlayPause => Some(key(Key::Space)), + // Command::PlaybackStepBack => Some(key(Key::ArrowLeft)), + // Command::PlaybackStepForward => Some(key(Key::ArrowRight)), + // Command::PlaybackRestart => Some(cmd(Key::ArrowLeft)), } } diff --git a/crates/re_ui/src/lib.rs b/crates/re_ui/src/lib.rs index a823ec04dfdd..69324b333f85 100644 --- a/crates/re_ui/src/lib.rs +++ b/crates/re_ui/src/lib.rs @@ -171,6 +171,7 @@ impl ReUi { egui::ComboBox::from_id_source(label) .selected_text(selected_text) .width(Self::box_width()) + .wrap(true) .show_ui(ui, menu_contents) .response }, diff --git a/crates/re_viewer/Cargo.toml b/crates/re_viewer/Cargo.toml index 645f75e77749..0904e3683c31 100644 --- a/crates/re_viewer/Cargo.toml +++ b/crates/re_viewer/Cargo.toml @@ -69,6 +69,7 @@ re_analytics = { workspace = true, optional = true } # External +dcv-color-primitives = "0.5.3" ahash.workspace = true anyhow.workspace = true bytemuck = { version = "1.11", features = ["extern_crate_alloc"] } diff --git a/crates/re_viewer/src/app.rs b/crates/re_viewer/src/app.rs index 07ddc2cbe37b..58a0fac35025 100644 --- a/crates/re_viewer/src/app.rs +++ b/crates/re_viewer/src/app.rs @@ -50,6 +50,12 @@ pub struct StartupOptions { pub persist_state: bool, } +#[derive(Clone, Default)] +pub struct BackendEnvironment { + pub python_path: String, + pub venv_site_packages: String, +} + // ---------------------------------------------------------------------------- #[cfg(not(target_arch = "wasm32"))] @@ -103,7 +109,7 @@ pub struct App { icon_status: AppIconStatus, #[cfg(not(target_arch = "wasm32"))] - python_path: Option, + backend_environment: Option, #[cfg(not(target_arch = "wasm32"))] backend_handle: Option, @@ -111,13 +117,13 @@ pub struct App { impl App { #[cfg(not(target_arch = "wasm32"))] - fn spawn_backend(python_path: &Option) -> Option { - // TODO(filip): Is there some way I can know for sure where depthai_viewer_backend is? 
- let Some(py_path) = python_path else { - panic!("Python path is missing, exiting..."); + fn spawn_backend(environment: &Option) -> Option { + let Some(environment) = environment else { + panic!("Backend environment is missing, exiting..."); }; - let backend_handle = match std::process::Command::new(py_path) + let backend_handle = match std::process::Command::new(environment.python_path.clone()) .args(["-m", "depthai_viewer._backend.main"]) + .env("PYTHONPATH", environment.venv_site_packages.clone()) .spawn() { Ok(child) => { @@ -125,14 +131,10 @@ impl App { Some(child) } Err(err) => { - eprintln!("Failed to start depthai viewer: {err}"); + eprintln!("Failed to start depthai viewer backend: {err}."); None } }; - // assert!( - // backend_handle.is_some(), - // "Couldn't start backend, exiting..." - // ); backend_handle } @@ -166,8 +168,11 @@ impl App { analytics.on_viewer_started(&build_info, app_env); #[cfg(not(target_arch = "wasm32"))] - let python_path = match app_env { - AppEnvironment::PythonSdk(_, py_path) => Some(py_path.clone()), + let backend_environment = match app_env { + AppEnvironment::PythonSdk(_, py_path, venv_site_packages) => Some(BackendEnvironment { + python_path: py_path.clone(), + venv_site_packages: venv_site_packages.clone(), + }), _ => None, }; @@ -199,9 +204,9 @@ impl App { icon_status: AppIconStatus::NotSetTryAgain, #[cfg(not(target_arch = "wasm32"))] - python_path: python_path.clone(), + backend_environment: backend_environment.clone(), #[cfg(not(target_arch = "wasm32"))] - backend_handle: App::spawn_backend(&python_path), + backend_handle: App::spawn_backend(&backend_environment), } } @@ -308,7 +313,7 @@ impl App { Command::Quit => { self.state.depthai_state.shutdown(); if let Some(backend_handle) = &mut self.backend_handle { - backend_handle.kill(); + backend_handle.kill().expect("Failed to kill backend"); } _frame.close(); } @@ -368,20 +373,18 @@ impl App { } Command::ToggleCommandPalette => { self.cmd_palette.toggle(); - } - - Command::PlaybackTogglePlayPause => { - self.run_time_control_command(TimeControlCommand::TogglePlayPause); - } - Command::PlaybackStepBack => { - self.run_time_control_command(TimeControlCommand::StepBack); - } - Command::PlaybackStepForward => { - self.run_time_control_command(TimeControlCommand::StepForward); - } - Command::PlaybackRestart => { - self.run_time_control_command(TimeControlCommand::Restart); - } + } // Command::PlaybackTogglePlayPause => { + // self.run_time_control_command(TimeControlCommand::TogglePlayPause); + // } + // Command::PlaybackStepBack => { + // self.run_time_control_command(TimeControlCommand::StepBack); + // } + // Command::PlaybackStepForward => { + // self.run_time_control_command(TimeControlCommand::StepForward); + // } + // Command::PlaybackRestart => { + // self.run_time_control_command(TimeControlCommand::Restart); + // } } } @@ -489,19 +492,19 @@ impl eframe::App for App { handle.kill(); self.state.depthai_state.reset(); re_log::debug!("Backend process has exited, restarting!"); - self.backend_handle = App::spawn_backend(&self.python_path); + self.backend_handle = App::spawn_backend(&self.backend_environment); } } Err(_) => {} }, - None => self.backend_handle = App::spawn_backend(&self.python_path), + None => self.backend_handle = App::spawn_backend(&self.backend_environment), }; } #[cfg(not(target_arch = "wasm32"))] { if self.backend_handle.is_none() { - self.backend_handle = App::spawn_backend(&self.python_path); + self.backend_handle = App::spawn_backend(&self.backend_environment); }; } @@ 
-850,12 +853,15 @@ impl App { } } + #[cfg(not(target_arch = "wasm32"))] + for log_db in self.log_dbs.values_mut() { + log_db.clear_by_cutoff(2e9 as i64); + } + use re_format::format_bytes; use re_memory::MemoryUse; - let limit = self.startup_options.memory_limit; let mem_use_before = MemoryUse::capture(); - if let Some(minimum_fraction_to_purge) = limit.is_exceeded_by(&mem_use_before) { let fraction_to_purge = (minimum_fraction_to_purge + 0.2).clamp(0.25, 1.0); @@ -880,23 +886,23 @@ log_db.purge_fraction_of_ram(fraction_to_purge); } self.state.cache.purge_memory(); - } - let mem_use_after = MemoryUse::capture(); + let mem_use_after = MemoryUse::capture(); - let freed_memory = mem_use_before - mem_use_after; + let freed_memory = mem_use_before - mem_use_after; - if let (Some(counted_before), Some(counted_diff)) = - (mem_use_before.counted, freed_memory.counted) - { - re_log::debug!( - "Freed up {} ({:.1}%)", - format_bytes(counted_diff as _), - 100.0 * counted_diff as f32 / counted_before as f32 - ); - } + if let (Some(counted_before), Some(counted_diff)) = + (mem_use_before.counted, freed_memory.counted) + { + re_log::debug!( + "Freed up {} ({:.1}%)", + format_bytes(counted_diff as _), + 100.0 * counted_diff as f32 / counted_before as f32 + ); + } - self.memory_panel.note_memory_purge(); + self.memory_panel.note_memory_purge(); + } } } @@ -1017,7 +1023,7 @@ struct AppState { /// Configuration for the current recording (found in [`LogDb`]). recording_configs: IntMap, - #[serde(skip)] // Quick fix for subscriptions setting, just don't remembet space views + #[serde(skip)] // Quick fix for subscriptions setting, just don't remember space views blueprints: HashMap, /// Which view panel is currently being shown diff --git a/crates/re_viewer/src/depthai/api.rs b/crates/re_viewer/src/depthai/api.rs index cea17653b1a8..32b83793c5bd 100644 --- a/crates/re_viewer/src/depthai/api.rs +++ b/crates/re_viewer/src/depthai/api.rs @@ -1,5 +1,5 @@ use super::depthai; -use super::ws::{BackWsMessage as WsMessage, WebSocket, WsMessageData, WsMessageType}; +use super::ws::{ BackWsMessage as WsMessage, WebSocket, WsMessageData, WsMessageType }; #[derive(Clone, serde::Serialize, serde::Deserialize)] pub struct ApiError { @@ -26,25 +26,29 @@ impl BackendCommChannel { pub fn set_subscriptions(&mut self, subscriptions: &Vec<depthai::ChannelId>) { self.ws.send( - serde_json::to_string( - &(WsMessage { - kind: WsMessageType::Subscriptions, - data: WsMessageData::Subscriptions(subscriptions.clone()), - }), - ) - .unwrap(), + serde_json + ::to_string( + &(WsMessage { + kind: WsMessageType::Subscriptions, + data: WsMessageData::Subscriptions(subscriptions.clone()), + ..Default::default() + }) + ) + .unwrap() ); } pub fn set_pipeline(&mut self, config: &depthai::DeviceConfig, runtime_only: bool) { self.ws.send( - serde_json::to_string( - &(WsMessage { - kind: WsMessageType::Pipeline, - data: WsMessageData::Pipeline((config.clone(), runtime_only)), - }), - ) - .unwrap(), + serde_json + ::to_string( + &(WsMessage { + kind: WsMessageType::Pipeline, + data: WsMessageData::Pipeline((config.clone(), runtime_only)), + ..Default::default() + }) + ) + .unwrap() ); } @@ -54,28 +58,32 @@ impl BackendCommChannel { pub fn get_devices(&mut self) { self.ws.send( - serde_json::to_string( - &(WsMessage { - kind: WsMessageType::Devices, - data: WsMessageData::Devices(Vec::new()), - }), - ) - .unwrap(), + serde_json + ::to_string( + &(WsMessage { + kind: WsMessageType::Devices, + data: WsMessageData::Devices(Vec::new()), + ..Default::default() + }) +
) + .unwrap() ); } - pub fn set_device(&mut self, device_id: depthai::DeviceId) { + pub fn select_device(&mut self, device_id: depthai::DeviceId) { self.ws.send( - serde_json::to_string( - &(WsMessage { - kind: WsMessageType::Device, - data: WsMessageData::Device(depthai::Device { - id: device_id, + serde_json + ::to_string( + &(WsMessage { + kind: WsMessageType::DeviceProperties, + data: WsMessageData::DeviceProperties(depthai::DeviceProperties { + id: device_id, + ..Default::default() + }), ..Default::default() - }), - }), - ) - .unwrap(), + }) + ) + .unwrap() ); } } diff --git a/crates/re_viewer/src/depthai/depthai.rs b/crates/re_viewer/src/depthai/depthai.rs index 48c8a8f1de2a..ed3b2b0482f5 100644 --- a/crates/re_viewer/src/depthai/depthai.rs +++ b/crates/re_viewer/src/depthai/depthai.rs @@ -3,103 +3,52 @@ use re_log_types::EntityPath; use super::api::BackendCommChannel; use super::ws::WsMessageData; +use crate::ViewerContext; use instant::Instant; use std::fmt; - use strum::EnumIter; use strum::IntoEnumIterator; -#[derive(serde::Deserialize, serde::Serialize, fmt::Debug, PartialEq, Clone, Copy, EnumIter)] -#[allow(non_camel_case_types)] -pub enum ColorCameraResolution { - THE_720_P, - THE_800_P, - THE_1440X1080, - THE_1080_P, - THE_1200_P, - THE_5_MP, - THE_4_K, - THE_12_MP, - THE_4000X3000, - THE_13_MP, - THE_48_MP, -} - -#[derive( - serde::Deserialize, - serde::Serialize, - fmt::Debug, - PartialEq, - Eq, - Clone, - Copy, - EnumIter, - PartialOrd, - Ord, -)] -#[allow(non_camel_case_types)] -pub enum MonoCameraResolution { - THE_400_P, - THE_720_P, - THE_800_P, - THE_1200_P, +#[derive(serde::Deserialize, serde::Serialize, Clone, PartialEq, Debug)] +pub struct CameraConfig { + pub fps: u8, + pub resolution: CameraSensorResolution, + pub kind: CameraSensorKind, + pub board_socket: CameraBoardSocket, + pub name: String, + pub stream_enabled: bool, } -// fmt::Display is used in UI while fmt::Debug is used with the depthai backend api -impl fmt::Display for ColorCameraResolution { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::THE_1080_P => write!(f, "1080p"), - Self::THE_4_K => write!(f, "4k"), - Self::THE_720_P => write!(f, "720p"), - Self::THE_800_P => write!(f, "800p"), - Self::THE_1200_P => write!(f, "1200p"), - Self::THE_5_MP => write!(f, "5MP"), - Self::THE_12_MP => write!(f, "12MP"), - Self::THE_13_MP => write!(f, "13MP"), - Self::THE_4000X3000 => write!(f, "4000x3000"), - Self::THE_48_MP => write!(f, "48MP"), - Self::THE_1440X1080 => write!(f, "1440x1080"), +impl Default for CameraConfig { + fn default() -> Self { + Self { + fps: 30, + resolution: CameraSensorResolution::THE_1080_P, + kind: CameraSensorKind::COLOR, + board_socket: CameraBoardSocket::CAM_A, + name: String::from("Color"), + stream_enabled: true, } } } -impl fmt::Display for MonoCameraResolution { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::THE_400_P => write!(f, "400p"), - Self::THE_720_P => write!(f, "720p"), - Self::THE_800_P => write!(f, "800p"), - Self::THE_1200_P => write!(f, "1200p"), +impl CameraConfig { + pub fn left() -> Self { + Self { + board_socket: CameraBoardSocket::CAM_B, + ..Default::default() } } -} - -#[derive(serde::Deserialize, serde::Serialize, Clone, Copy, PartialEq)] -pub struct ColorCameraConfig { - pub fps: u8, - pub resolution: ColorCameraResolution, - #[serde(rename = "xout_video")] - pub stream_enabled: bool, -} -impl Default for ColorCameraConfig { - fn default() -> Self { + pub fn right() -> Self { Self { - 
fps: 30, - resolution: ColorCameraResolution::THE_1080_P, - stream_enabled: true, + board_socket: CameraBoardSocket::CAM_C, + ..Default::default() } } -} -impl fmt::Debug for ColorCameraConfig { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "Color camera config: fps: {}, resolution: {:?}", - self.fps, self.resolution - ) + pub fn is_color_camera(&self) -> bool { + self.name == "Color" } } @@ -121,6 +70,27 @@ pub enum CameraBoardSocket { CAM_H, } +impl CameraBoardSocket { + pub fn from(socket: String) -> Option<CameraBoardSocket> { + match socket.as_str() { + "AUTO" => Some(CameraBoardSocket::AUTO), + "RGB" => Some(CameraBoardSocket::RGB), + "LEFT" => Some(CameraBoardSocket::LEFT), + "RIGHT" => Some(CameraBoardSocket::RIGHT), + "CENTER" => Some(CameraBoardSocket::CENTER), + "CAM_A" => Some(CameraBoardSocket::CAM_A), + "CAM_B" => Some(CameraBoardSocket::CAM_B), + "CAM_C" => Some(CameraBoardSocket::CAM_C), + "CAM_D" => Some(CameraBoardSocket::CAM_D), + "CAM_E" => Some(CameraBoardSocket::CAM_E), + "CAM_F" => Some(CameraBoardSocket::CAM_F), + "CAM_G" => Some(CameraBoardSocket::CAM_G), + "CAM_H" => Some(CameraBoardSocket::CAM_H), + _ => None, + } + } +} + impl Default for CameraBoardSocket { fn default() -> Self { Self::AUTO @@ -128,54 +98,105 @@ } impl CameraBoardSocket { - pub fn depth_align_options() -> Vec<CameraBoardSocket> { - return vec![Self::RGB, Self::LEFT, Self::RIGHT]; + pub fn display_name(&self, ctx: &ViewerContext<'_>) -> String { + let camera_features = ctx.depthai_state.get_connected_cameras(); + if let Some(cam) = camera_features.iter().find(|cam| cam.board_socket == *self) { + if !cam.name.is_empty() { + return format!("{} ({self:?})", cam.name); + } + } + format!("{self:?}") } } -#[derive(serde::Deserialize, serde::Serialize, Clone, Copy, PartialEq)] -pub struct MonoCameraConfig { - pub fps: u8, - pub resolution: MonoCameraResolution, - pub board_socket: CameraBoardSocket, - #[serde(rename = "xout")] - pub stream_enabled: bool, +#[derive(serde::Serialize, serde::Deserialize, Clone, Copy, PartialEq, Debug)] +#[allow(non_camel_case_types)] +pub enum ImuKind { + SIX_AXIS, + NINE_AXIS, } -impl Default for MonoCameraConfig { - fn default() -> Self { - Self { - fps: 30, - resolution: MonoCameraResolution::THE_400_P, - board_socket: CameraBoardSocket::AUTO, - stream_enabled: true, +#[derive(serde::Serialize, serde::Deserialize, Clone, Copy, PartialEq, Debug)] +#[allow(non_camel_case_types)] +pub enum CameraSensorResolution { + THE_400_P, + THE_720_P, + THE_800_P, + THE_1440X1080, + THE_1080_P, + THE_1200_P, + THE_5_MP, + THE_4_K, + THE_12_MP, + THE_4000X3000, + THE_13_MP, + THE_48_MP, +} + +// fmt::Display is used in UI while fmt::Debug is used with the depthai backend api +impl fmt::Display for CameraSensorResolution { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::THE_1080_P => write!(f, "1080p"), + Self::THE_4_K => write!(f, "4k"), + Self::THE_720_P => write!(f, "720p"), + Self::THE_800_P => write!(f, "800p"), + Self::THE_1200_P => write!(f, "1200p"), + Self::THE_5_MP => write!(f, "5MP"), + Self::THE_12_MP => write!(f, "12MP"), + Self::THE_13_MP => write!(f, "13MP"), + Self::THE_4000X3000 => write!(f, "4000x3000"), + Self::THE_48_MP => write!(f, "48MP"), + Self::THE_1440X1080 => write!(f, "1440x1080"), + Self::THE_400_P => write!(f, "400p"), } } } -impl MonoCameraConfig { - fn left() -> Self { - Self { - board_socket: CameraBoardSocket::LEFT, - ..Default::default() - }
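
A usage sketch for the string-to-socket parser above, written as a test; the variant names and the `None` fallback come straight from the match:

```rust
#[test]
fn parses_board_socket_names() {
    assert_eq!(
        CameraBoardSocket::from("CAM_A".to_string()),
        Some(CameraBoardSocket::CAM_A)
    );
    // Unknown socket names are rejected rather than defaulted.
    assert_eq!(CameraBoardSocket::from("BOGUS".to_string()), None);
}
```
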
+#[derive(serde::Deserialize, serde::Serialize, Clone, Copy, PartialEq, Debug)] +#[allow(non_camel_case_types)] +pub enum CameraSensorKind { + COLOR, + MONO, + TOF, + THERMAL, +} + +impl Default for CameraSensorKind { + fn default() -> Self { + Self::COLOR } +} - fn right() -> Self { - Self { - board_socket: CameraBoardSocket::RIGHT, - ..Default::default() - } +#[derive(serde::Serialize, serde::Deserialize, Clone, PartialEq, Debug)] +pub struct CameraFeatures { + pub resolutions: Vec, + pub max_fps: u8, + pub board_socket: CameraBoardSocket, + pub supported_types: Vec, + pub stereo_pairs: Vec, // Which cameras can be paired with this one + pub name: String, + pub intrinsics: Option<[[f32; 3]; 3]>, +} + +impl CameraFeatures { + pub fn is_color_camera(&self) -> bool { + self.name == "Color" } } -impl fmt::Debug for MonoCameraConfig { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "Mono camera config: fps: {}, resolution: {:?}", - self.fps, self.resolution - ) +#[derive(serde::Serialize, serde::Deserialize, Clone, PartialEq, Debug, Default)] +pub struct DeviceProperties { + pub id: String, + pub cameras: Vec, + pub imu: Option, + pub stereo_pairs: Vec<(CameraBoardSocket, CameraBoardSocket)>, + pub default_stereo_pair: Option<(CameraBoardSocket, CameraBoardSocket)>, +} + +impl DeviceProperties { + pub fn has_stereo_pairs(&self) -> bool { + !self.stereo_pairs.is_empty() } } @@ -226,6 +248,7 @@ pub struct DepthConfig { pub sigma: i64, pub confidence: i64, pub align: CameraBoardSocket, + pub stereo_pair: (CameraBoardSocket, CameraBoardSocket), } impl Default for DepthConfig { @@ -239,6 +262,7 @@ impl Default for DepthConfig { sigma: 0, confidence: 230, align: CameraBoardSocket::RGB, + stereo_pair: (CameraBoardSocket::CAM_A, CameraBoardSocket::CAM_C), } } } @@ -249,19 +273,42 @@ impl DepthConfig { } pub fn only_runtime_configs_differ(&self, other: &DepthConfig) -> bool { - self.lr_check == other.lr_check - && self.align == other.align - && self.extended_disparity == other.extended_disparity - && self.subpixel_disparity == other.subpixel_disparity - && self != other + self.lr_check == other.lr_check && + self.align == other.align && + self.extended_disparity == other.extended_disparity && + self.subpixel_disparity == other.subpixel_disparity && + self != other + } +} + +impl From<&DeviceProperties> for Option { + fn from(props: &DeviceProperties) -> Self { + let mut config = DepthConfig::default(); + let Some(cam_with_stereo_pair) = props.cameras + .iter() + .find(|feat| !feat.stereo_pairs.is_empty()) else { + return None; + }; + if let Some((cam_a, cam_b)) = props.default_stereo_pair { + config.stereo_pair = (cam_a, cam_b); + } else { + let stereo_pair = cam_with_stereo_pair.stereo_pairs[0]; + config.stereo_pair = (cam_with_stereo_pair.board_socket, stereo_pair); + } + config.align = if + let Some(color_cam) = props.cameras.iter().find(|cam| cam.is_color_camera()) + { + color_cam.board_socket + } else { + config.stereo_pair.0 + }; + Some(config) } } #[derive(serde::Deserialize, serde::Serialize, Clone)] pub struct DeviceConfig { - pub color_camera: ColorCameraConfig, - pub left_camera: Option, - pub right_camera: Option, + pub cameras: Vec, #[serde(default = "bool_true")] pub depth_enabled: bool, // Much easier to have an explicit bool for checkbox #[serde(default = "DepthConfig::default_as_option")] @@ -272,9 +319,7 @@ pub struct DeviceConfig { impl Default for DeviceConfig { fn default() -> Self { Self { - color_camera: ColorCameraConfig::default(), - left_camera: 
Some(MonoCameraConfig::left()), - right_camera: Some(MonoCameraConfig::right()), + cameras: Vec::new(), depth_enabled: true, depth: Some(DepthConfig::default()), ai_model: AiModel::default(), @@ -282,6 +327,39 @@ impl Default for DeviceConfig { } } +impl From<&DeviceProperties> for DeviceConfig { + fn from(props: &DeviceProperties) -> Self { + let mut config = Self::default(); + + let has_color_cam = props.cameras.iter().any(|cam| cam.is_color_camera()); + config.cameras = props.cameras + .iter() + .map(|cam| CameraConfig { + name: cam.name.clone(), + fps: 30, // TODO(filip): Do performance improvements to allow higher fps + resolution: *cam.resolutions + .iter() + .filter(|res| { + res != &&CameraSensorResolution::THE_4_K && + res != &&CameraSensorResolution::THE_12_MP + }) + .last() + .unwrap_or(&CameraSensorResolution::THE_800_P), + board_socket: cam.board_socket, + stream_enabled: if has_color_cam { + cam.is_color_camera() + } else { + true + }, + kind: *cam.supported_types.first().unwrap(), + }) + .collect(); + config.depth = Option::::from(props); + config.ai_model = AiModel::from(props); + config + } +} + #[derive(serde::Serialize, serde::Deserialize)] #[allow(non_camel_case_types)] pub enum CameraSensorType { @@ -303,22 +381,6 @@ pub struct CameraSensorConfig { width: i64, } -#[derive(serde::Serialize, serde::Deserialize)] -pub struct CameraFeatures { - configs: Vec, - #[serde(rename = "hasAutofocus")] - has_autofocus: bool, - height: i64, - name: String, - orientation: CameraImageOrientation, - #[serde(rename = "sensorName")] - sensor_name: String, - socket: CameraBoardSocket, - #[serde(rename = "supportedTypes")] - supported_types: Vec, - width: i64, -} - #[derive(serde::Serialize, serde::Deserialize)] #[allow(non_camel_case_types)] pub enum CameraImageOrientation { @@ -335,12 +397,10 @@ impl PartialEq for DeviceConfig { (Some(a), Some(b)) => a == b, _ => true, // If one is None, it's only different if depth_enabled is different }; - self.color_camera == other.color_camera - && self.left_camera == other.left_camera - && self.right_camera == other.right_camera - && depth_eq - && self.depth_enabled == other.depth_enabled - && self.ai_model == other.ai_model + self.cameras == other.cameras && + depth_eq && + self.depth_enabled == other.depth_enabled && + self.ai_model == other.ai_model } } @@ -361,10 +421,8 @@ impl fmt::Debug for DeviceConfig { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, - "Device config: {:?} {:?} {:?}, depth: {:?}, ai_model: {:?}, depth_enabled: {:?}", - self.color_camera, - self.left_camera, - self.right_camera, + "Device config: cams: {:?}, depth: {:?}, ai_model: {:?}, depth_enabled: {:?}", + self.cameras, self.depth, self.ai_model, self.depth_enabled @@ -391,6 +449,7 @@ pub enum ErrorAction { FullReset, } +// TODO(filip): Move to a more appropriate place, refactor depthai.rs in general #[derive(serde::Deserialize, serde::Serialize, Clone, PartialEq, fmt::Debug)] pub struct Error { pub action: ErrorAction, @@ -406,30 +465,16 @@ impl Default for Error { } } -#[derive(serde::Deserialize, serde::Serialize, Clone, PartialEq, fmt::Debug)] -pub struct Device { - pub id: DeviceId, - pub supported_color_resolutions: Vec, - pub supported_left_mono_resolutions: Vec, - pub supported_right_mono_resolutions: Vec, -} - -// Sensible default for when no device is connected -impl Default for Device { - fn default() -> Self { - Self { - id: DeviceId::default(), - supported_color_resolutions: vec![ColorCameraResolution::THE_1080_P], - 
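
`DeviceConfig::from(&DeviceProperties)` above turns the reported capabilities into a usable default pipeline; the subtle part is the resolution pick. A hedged sketch, assuming the backend lists a camera's resolutions in ascending order (the `.last()` in the PR relies on the same ordering):

```rust
// Skip the very large sensor modes (4K, 12MP), take the highest remaining
// resolution, and fall back to 800p if the filter removed everything.
fn pick_default_resolution(resolutions: &[CameraSensorResolution]) -> CameraSensorResolution {
    resolutions
        .iter()
        .copied()
        .filter(|res| {
            !matches!(
                res,
                CameraSensorResolution::THE_4_K | CameraSensorResolution::THE_12_MP
            )
        })
        .last()
        .unwrap_or(CameraSensorResolution::THE_800_P)
}
```
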
supported_left_mono_resolutions: vec![MonoCameraResolution::THE_400_P], - supported_right_mono_resolutions: vec![MonoCameraResolution::THE_400_P], - } - } +#[derive(serde::Deserialize, serde::Serialize, Clone, PartialEq, fmt::Debug, Default)] +pub struct Info { + pub message: String, } #[derive(serde::Deserialize, serde::Serialize, Clone, fmt::Debug)] pub struct AiModel { pub path: String, pub display_name: String, + pub camera: CameraBoardSocket, } impl Default for AiModel { @@ -443,13 +488,26 @@ impl AiModel { Self { path: String::new(), display_name: String::from("No model selected"), + camera: CameraBoardSocket::CAM_A, + } + } +} + +impl From<&DeviceProperties> for AiModel { + fn from(props: &DeviceProperties) -> Self { + let mut model = Self::default(); + if let Some(cam) = props.cameras.iter().find(|cam| cam.is_color_camera()) { + model.camera = cam.board_socket; + } else if let Some(cam) = props.cameras.first() { + model.camera = cam.board_socket; } + model } } impl PartialEq for AiModel { fn eq(&self, other: &Self) -> bool { - self.path == other.path + self.path == other.path && self.camera == other.camera } } @@ -458,7 +516,7 @@ pub struct State { #[serde(skip)] devices_available: Option>, #[serde(skip)] - pub selected_device: Device, + pub selected_device: DeviceProperties, #[serde(skip)] pub applied_device_config: DeviceConfigState, #[serde(skip)] @@ -474,6 +532,8 @@ pub struct State { poll_instant: Option, #[serde(default = "default_neural_networks")] pub neural_networks: Vec, + #[serde(skip)] + update_timeout_timer: Option, } #[inline] @@ -488,19 +548,23 @@ fn default_neural_networks() -> Vec { AiModel { path: String::from("yolov8n_coco_640x352"), display_name: String::from("Yolo V8"), + camera: CameraBoardSocket::CAM_A, }, AiModel { path: String::from("mobilenet-ssd"), display_name: String::from("MobileNet SSD"), + camera: CameraBoardSocket::CAM_A, }, AiModel { path: String::from("face-detection-retail-0004"), display_name: String::from("Face Detection"), + camera: CameraBoardSocket::CAM_A, }, AiModel { path: String::from("age-gender-recognition-retail-0013"), display_name: String::from("Age gender recognition"), - }, + camera: CameraBoardSocket::CAM_A, + } ] } @@ -508,7 +572,7 @@ impl Default for State { fn default() -> Self { Self { devices_available: None, - selected_device: Device::default(), + selected_device: DeviceProperties::default(), applied_device_config: DeviceConfigState::default(), modified_device_config: DeviceConfig::default(), subscriptions: ChannelId::iter().collect(), @@ -516,13 +580,22 @@ impl Default for State { backend_comms: BackendCommChannel::default(), poll_instant: Some(Instant::now()), // No default for Instant neural_networks: default_neural_networks(), + update_timeout_timer: None, } } } #[repr(u8)] #[derive( - serde::Serialize, serde::Deserialize, Copy, Clone, PartialEq, Eq, fmt::Debug, Hash, EnumIter, + serde::Serialize, + serde::Deserialize, + Copy, + Clone, + PartialEq, + Eq, + fmt::Debug, + Hash, + EnumIter )] pub enum ChannelId { ColorImage, @@ -533,125 +606,24 @@ pub enum ChannelId { ImuData, } -/// Entity paths for depthai-viewer space views -/// !---- These have to match with EntityPath in rerun_py/rerun_sdk/depthai_viewer_backend/sdk_callbacks.py ----! -pub mod entity_paths { - use lazy_static::lazy_static; - use re_log_types::EntityPath; - - lazy_static! 
{ - pub static ref RGB_PINHOLE_CAMERA: EntityPath = EntityPath::from("color/camera/rgb"); - pub static ref LEFT_PINHOLE_CAMERA: EntityPath = EntityPath::from("mono/camera/left_mono"); - pub static ref LEFT_CAMERA_IMAGE: EntityPath = - EntityPath::from("mono/camera/left_mono/Left mono"); - pub static ref RIGHT_PINHOLE_CAMERA: EntityPath = - EntityPath::from("mono/camera/right_mono"); - pub static ref RIGHT_CAMERA_IMAGE: EntityPath = - EntityPath::from("mono/camera/right_mono/Right mono"); - pub static ref RGB_CAMERA_IMAGE: EntityPath = - EntityPath::from("color/camera/rgb/Color camera"); - pub static ref DETECTIONS: EntityPath = EntityPath::from("color/camera/rgb/Detections"); - pub static ref DETECTION: EntityPath = EntityPath::from("color/camera/rgb/Detection"); - pub static ref RGB_CAMERA_TRANSFORM: EntityPath = EntityPath::from("color/camera"); - pub static ref MONO_CAMERA_TRANSFORM: EntityPath = EntityPath::from("mono/camera"); - - // --- These are extra for the depthai-viewer --- - pub static ref COLOR_CAM_3D: EntityPath = EntityPath::from("color"); - pub static ref MONO_CAM_3D: EntityPath = EntityPath::from("mono"); - pub static ref DEPTH_RGB: EntityPath = EntityPath::from("color/camera/rgb/Depth"); - pub static ref DEPTH_LEFT_MONO: EntityPath = - EntityPath::from("mono/camera/left_mono/Depth"); - pub static ref DEPTH_RIGHT_MONO: EntityPath = - EntityPath::from("mono/camera/right_mono/Depth"); - } -} - impl State { pub fn only_runtime_configs_changed( old_config: &DeviceConfig, - new_config: &DeviceConfig, + new_config: &DeviceConfig ) -> bool { - let any_runtime_conf_changed = old_config.depth.is_some() - && new_config.depth.is_some() - && old_config - .depth - .unwrap() - .only_runtime_configs_differ(&new_config.depth.unwrap()); // || others to be added - any_runtime_conf_changed - && old_config.left_camera == new_config.left_camera - && old_config.right_camera == new_config.right_camera - && old_config.color_camera == new_config.color_camera - && old_config.ai_model == new_config.ai_model - } - - /// Get the entities that should be removed based on UI (e.g. 
if depth is disabled, remove the depth image) - pub fn get_entities_to_remove(&mut self) -> Vec { - let mut remove_entities = Vec::new(); - let Some(applied_device_config) = &self.applied_device_config.config else { - return vec![entity_paths::DEPTH_LEFT_MONO.clone(), - entity_paths::DEPTH_RIGHT_MONO.clone(), - entity_paths::DEPTH_RGB.clone(), - entity_paths::MONO_CAM_3D.clone(), - entity_paths::MONO_CAMERA_TRANSFORM.clone(), - entity_paths::RIGHT_PINHOLE_CAMERA.clone(), - entity_paths::RIGHT_CAMERA_IMAGE.clone(), - entity_paths::LEFT_PINHOLE_CAMERA.clone(), - entity_paths::LEFT_CAMERA_IMAGE.clone(), - entity_paths::RGB_PINHOLE_CAMERA.clone(), - entity_paths::RGB_CAMERA_IMAGE.clone(), - entity_paths::RGB_CAMERA_TRANSFORM.clone(), - entity_paths::COLOR_CAM_3D.clone(), - entity_paths::DETECTIONS.clone(), - entity_paths::DETECTION.clone()] - }; - if applied_device_config.depth.is_none() { - remove_entities.push(entity_paths::DEPTH_LEFT_MONO.clone()); - remove_entities.push(entity_paths::DEPTH_RIGHT_MONO.clone()); - remove_entities.push(entity_paths::DEPTH_RGB.clone()); - } - if let Some(right_cam) = applied_device_config.right_camera { - if !right_cam.stream_enabled { - remove_entities.push(entity_paths::RIGHT_PINHOLE_CAMERA.clone()); - remove_entities.push(entity_paths::RIGHT_CAMERA_IMAGE.clone()); - if let Some(left_cam) = applied_device_config.left_camera { - if !left_cam.stream_enabled { - remove_entities.push(entity_paths::MONO_CAM_3D.clone()); - remove_entities.push(entity_paths::MONO_CAMERA_TRANSFORM.clone()); - } - } else { - remove_entities.push(entity_paths::LEFT_PINHOLE_CAMERA.clone()); - remove_entities.push(entity_paths::LEFT_CAMERA_IMAGE.clone()); - } - } - } else { - remove_entities.push(entity_paths::RIGHT_PINHOLE_CAMERA.clone()); - remove_entities.push(entity_paths::RIGHT_CAMERA_IMAGE.clone()); - } - if let Some(left_cam) = applied_device_config.left_camera { - if !left_cam.stream_enabled { - remove_entities.push(entity_paths::LEFT_PINHOLE_CAMERA.clone()); - remove_entities.push(entity_paths::LEFT_CAMERA_IMAGE.clone()); - } - } - if !applied_device_config.color_camera.stream_enabled { - remove_entities.push(entity_paths::RGB_PINHOLE_CAMERA.clone()); - remove_entities.push(entity_paths::RGB_CAMERA_IMAGE.clone()); - remove_entities.push(entity_paths::COLOR_CAM_3D.clone()); - remove_entities.push(entity_paths::RGB_CAMERA_TRANSFORM.clone()); - } - if applied_device_config.ai_model.path.is_empty() { - remove_entities.push(entity_paths::DETECTIONS.clone()); - remove_entities.push(entity_paths::DETECTION.clone()); - } - remove_entities + let any_runtime_conf_changed = + old_config.depth.is_some() && + new_config.depth.is_some() && + old_config.depth.unwrap().only_runtime_configs_differ(&new_config.depth.unwrap()); // || others to be added + any_runtime_conf_changed && + old_config.cameras == new_config.cameras && + old_config.ai_model == new_config.ai_model } pub fn set_subscriptions(&mut self, subscriptions: &Vec) { - if self.subscriptions.len() == subscriptions.len() - && self - .subscriptions - .iter() - .all(|channel_id| subscriptions.contains(channel_id)) + if + self.subscriptions.len() == subscriptions.len() && + self.subscriptions.iter().all(|channel_id| subscriptions.contains(channel_id)) { return; } @@ -659,6 +631,7 @@ impl State { self.subscriptions = subscriptions.clone(); } + /// Returns available devices pub fn get_devices(&mut self) -> Vec { // Return stored available devices or fetch them from the api (they get fetched every 30s via poller) if let Some(devices) = 
self.devices_available.clone() { @@ -667,11 +640,30 @@ impl State { Vec::new() } + /// Returns cameras connected to the selected device + pub fn get_connected_cameras(&self) -> &Vec { + &self.selected_device.cameras + } + pub fn shutdown(&mut self) { self.backend_comms.shutdown(); } + fn set_update_in_progress(&mut self, in_progress: bool) { + self.update_timeout_timer = None; + if in_progress { + self.update_timeout_timer = Some(Instant::now()); + } + self.applied_device_config.update_in_progress = in_progress; + } + pub fn update(&mut self) { + if let Some(update_timeout) = self.update_timeout_timer { + if update_timeout.elapsed().as_secs() > 30 { + self.set_update_in_progress(false); + } + } + if let Some(ws_message) = self.backend_comms.receive() { re_log::debug!("Received message: {:?}", ws_message); match ws_message.data { @@ -688,50 +680,63 @@ impl State { if config.depth.is_some() { subs.push(ChannelId::DepthImage); } - if config.color_camera.stream_enabled { - subs.push(ChannelId::ColorImage); + if + let Some(color_camera) = &config.cameras + .iter() + .find(|cam| cam.is_color_camera()) + { + if color_camera.stream_enabled { + subs.push(ChannelId::ColorImage); + } } - if let Some(left_cam) = config.left_camera { + if let Some(left_cam) = &config.cameras.iter().find(|cam| cam.name == "left") { if left_cam.stream_enabled { subs.push(ChannelId::LeftMono); } } - if let Some(right_cam) = config.right_camera { + if let Some(right_cam) = &config.cameras.iter().find(|cam| cam.name == "right") { if right_cam.stream_enabled { subs.push(ChannelId::RightMono); } } self.applied_device_config.config = Some(config.clone()); self.modified_device_config = config.clone(); - let Some(applied_device_config) = self.applied_device_config.config.as_mut() else { + let Some(applied_device_config) = + self.applied_device_config.config.as_mut() else { self.reset(); - self.applied_device_config.update_in_progress = false; - return; + self.applied_device_config.update_in_progress = false; + return; }; applied_device_config.depth_enabled = config.depth.is_some(); self.modified_device_config.depth_enabled = self.modified_device_config.depth.is_some(); self.set_subscriptions(&subs); - self.applied_device_config.update_in_progress = false; + self.set_update_in_progress(false); } - WsMessageData::Device(device) => { + WsMessageData::DeviceProperties(device) => { re_log::debug!("Setting device: {device:?}"); - self.selected_device = device; - self.backend_comms.set_subscriptions(&self.subscriptions); - // self.backend_comms - // .set_pipeline(&self.applied_device_config.config, false); - self.applied_device_config.update_in_progress = false; + self.set_device(device); + if !self.selected_device.id.is_empty() { + // Apply default pipeline + self.set_pipeline(&mut self.modified_device_config.clone(), false); + } } WsMessageData::Error(error) => { re_log::error!("Error: {:}", error.message); - self.applied_device_config.update_in_progress = false; + self.set_update_in_progress(false); match error.action { ErrorAction::None => (), ErrorAction::FullReset => { - self.set_device(String::new()); + self.select_device(String::new()); } } } + WsMessageData::Info(info) => { + if info.message.is_empty() { + return; + } + re_log::info!("{}", info.message); + } } } @@ -748,43 +753,33 @@ impl State { } } - pub fn set_device(&mut self, device_id: DeviceId) { + fn set_device(&mut self, device_properties: DeviceProperties) { + self.selected_device = device_properties; + self.backend_comms.set_subscriptions(&self.subscriptions); + 
self.modified_device_config = DeviceConfig::from(&self.selected_device); + self.set_update_in_progress(false); + } + + pub fn select_device(&mut self, device_id: DeviceId) { re_log::debug!("Setting device: {:?}", device_id); self.applied_device_config.config = None; - self.backend_comms.set_device(device_id); - self.applied_device_config.update_in_progress = true; + self.backend_comms.select_device(device_id); + self.set_update_in_progress(true); } - pub fn set_device_config(&mut self, config: &mut DeviceConfig, runtime_only: bool) { - // Don't try to set pipeline in ws not connected - if !self - .backend_comms - .ws - .connected - .load(std::sync::atomic::Ordering::SeqCst) - { + pub fn set_pipeline(&mut self, config: &mut DeviceConfig, runtime_only: bool) { + // Don't try to set pipeline if ws isn't connected + if !self.backend_comms.ws.is_connected() { return; } if !config.depth_enabled { config.depth = None; } - if self - .selected_device - .supported_left_mono_resolutions - .is_empty() - { - config.left_camera = None; - config.depth = None; - } - if self - .selected_device - .supported_right_mono_resolutions - .is_empty() - { - config.right_camera = None; + if !self.selected_device.has_stereo_pairs() { config.depth = None; } + if self.selected_device.id.is_empty() { self.applied_device_config.config = Some(config.clone()); return; @@ -792,9 +787,9 @@ impl State { self.backend_comms.set_pipeline(config, runtime_only); if runtime_only { self.applied_device_config.config = Some(config.clone()); - self.applied_device_config.update_in_progress = false; + self.set_update_in_progress(false); } else { - self.applied_device_config.update_in_progress = true; + self.set_update_in_progress(true); } } diff --git a/crates/re_viewer/src/depthai/ws.rs b/crates/re_viewer/src/depthai/ws.rs index c6570deba36e..3a81824e7c91 100644 --- a/crates/re_viewer/src/depthai/ws.rs +++ b/crates/re_viewer/src/depthai/ws.rs @@ -1,6 +1,6 @@ use crossbeam_channel; -use ewebsock::{WsEvent, WsMessage}; -use serde::{Deserialize, Serialize}; +use ewebsock::{ WsEvent, WsMessage }; +use serde::{ Deserialize, Serialize }; use std::fmt; use std::ops::ControlFlow; use std::process::exit; @@ -13,7 +13,7 @@ async fn spawn_ws_client( recv_tx: crossbeam_channel::Sender, send_rx: crossbeam_channel::Receiver, shutdown: Arc, - connected: Arc, + connected: Arc ) { let (error_tx, error_rx) = crossbeam_channel::unbounded(); // Retry connection until successful @@ -21,35 +21,37 @@ async fn spawn_ws_client( let recv_tx = recv_tx.clone(); let error_tx = error_tx.clone(); let connected = connected.clone(); - if let Ok(sender) = ewebsock::ws_connect( - String::from("ws://localhost:9001"), - Box::new(move |event| { - match event { - WsEvent::Opened => { - re_log::info!("Websocket opened"); - connected.store(true, std::sync::atomic::Ordering::SeqCst); - ControlFlow::Continue(()) - } - WsEvent::Message(message) => { - // re_log::debug!("Websocket message"); - recv_tx.send(message); - ControlFlow::Continue(()) - } - WsEvent::Error(e) => { - // re_log::info!("Websocket Error: {:?}", e); - connected.store(false, std::sync::atomic::Ordering::SeqCst); - error_tx.send(e); - ControlFlow::Break(()) - } - WsEvent::Closed => { - // re_log::info!("Websocket Closed"); - error_tx.send(String::from("Websocket Closed")); - ControlFlow::Break(()) - } - } - }), - ) - .as_mut() + if + let Ok(sender) = ewebsock + ::ws_connect( + String::from("ws://localhost:9001"), + Box::new(move |event| { + match event { + WsEvent::Opened => { + re_log::info!("Websocket opened"); 
+ connected.store(true, std::sync::atomic::Ordering::SeqCst); + ControlFlow::Continue(()) + } + WsEvent::Message(message) => { + // re_log::debug!("Websocket message"); + recv_tx.send(message); + ControlFlow::Continue(()) + } + WsEvent::Error(e) => { + // re_log::info!("Websocket Error: {:?}", e); + connected.store(false, std::sync::atomic::Ordering::SeqCst); + error_tx.send(e); + ControlFlow::Break(()) + } + WsEvent::Closed => { + // re_log::info!("Websocket Closed"); + error_tx.send(String::from("Websocket Closed")); + ControlFlow::Break(()) + } + } + }) + ) + .as_mut() { while error_rx.is_empty() { if shutdown.load(std::sync::atomic::Ordering::SeqCst) { @@ -81,18 +83,20 @@ type RuntimeOnly = bool; pub enum WsMessageData { Subscriptions(Vec), Devices(Vec), - Device(depthai::Device), + DeviceProperties(depthai::DeviceProperties), Pipeline((depthai::DeviceConfig, RuntimeOnly)), Error(depthai::Error), + Info(depthai::Info), } #[derive(Deserialize, Serialize, fmt::Debug)] pub enum WsMessageType { Subscriptions, Devices, - Device, + DeviceProperties, Pipeline, Error, + Info, } impl Default for WsMessageType { @@ -101,48 +105,51 @@ impl Default for WsMessageType { } } -// TODO(filip): Perhaps add a "message" field to all messages to display toasts #[derive(Serialize, fmt::Debug)] pub struct BackWsMessage { #[serde(rename = "type")] pub kind: WsMessageType, pub data: WsMessageData, + pub message: Option<String>, } impl<'de> Deserialize<'de> for BackWsMessage { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> { #[derive(Deserialize)] pub struct Message { #[serde(rename = "type")] pub kind: WsMessageType, pub data: serde_json::Value, + pub message: Option<String>, } let message = Message::deserialize(deserializer)?; let data = match message.kind { - WsMessageType::Subscriptions => WsMessageData::Subscriptions( - serde_json::from_value(message.data).unwrap_or_default(), - ), + WsMessageType::Subscriptions => + WsMessageData::Subscriptions( + serde_json::from_value(message.data).unwrap_or_default() + ), WsMessageType::Devices => { WsMessageData::Devices(serde_json::from_value(message.data).unwrap_or_default()) } - WsMessageType::Device => { - WsMessageData::Device(serde_json::from_value(message.data).unwrap_or_default()) + WsMessageType::DeviceProperties => { + WsMessageData::DeviceProperties(serde_json::from_value(message.data).unwrap()) } WsMessageType::Pipeline => { WsMessageData::Pipeline(serde_json::from_value(message.data).unwrap()) - // TODO(filip) change to unwrap_or_default when pipeline config api is more stable } WsMessageType::Error => { WsMessageData::Error(serde_json::from_value(message.data).unwrap_or_default()) } + WsMessageType::Info => { + WsMessageData::Info(serde_json::from_value(message.data).unwrap_or_default()) + } }; + Ok(Self { kind: message.kind, data, + message: message.message, }) } } @@ -152,6 +159,7 @@ impl Default for BackWsMessage { Self { kind: WsMessageType::Error.into(), data: WsMessageData::Error(depthai::Error::default()), + message: None, } } } @@ -161,7 +169,7 @@ pub struct WebSocket { sender: crossbeam_channel::Sender<WsMessage>, shutdown: Arc<std::sync::atomic::AtomicBool>, task: tokio::task::JoinHandle<()>, - pub connected: Arc<std::sync::atomic::AtomicBool>, + connected: Arc<std::sync::atomic::AtomicBool>, } impl Default for WebSocket { @@ -182,23 +190,14 @@ impl WebSocket { let task; if let Ok(handle) = tokio::runtime::Handle::try_current() { re_log::debug!("Using current tokio runtime"); - task = handle.spawn(spawn_ws_client( - recv_tx, - send_rx, -
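// Example of the wire format the custom `Deserialize` above accepts. The payload
// shape of `depthai::Info` is an assumption here (a struct with a
// `message: String` field, matching how `info.message` is used in `State::update`).
#[cfg(test)]
mod wire_format_sketch {
    #[test]
    fn parses_an_info_message() {
        let json = r#"{"type": "Info", "data": {"message": "Device connected"}, "message": null}"#;
        let msg: super::BackWsMessage = serde_json::from_str(json).unwrap();
        assert!(matches!(msg.data, super::WsMessageData::Info(_)));
        assert!(msg.message.is_none());
    }
}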
shutdown_clone, - connected_clone, - )); + task = handle.spawn(spawn_ws_client(recv_tx, send_rx, shutdown_clone, connected_clone)); } else { re_log::debug!("Creating new tokio runtime"); - task = tokio::runtime::Builder::new_current_thread() + task = tokio::runtime::Builder + ::new_current_thread() .build() .unwrap() - .spawn(spawn_ws_client( - recv_tx, - send_rx, - shutdown_clone, - connected_clone, - )); + .spawn(spawn_ws_client(recv_tx, send_rx, shutdown_clone, connected_clone)); } Self { receiver: recv_rx, @@ -209,9 +208,12 @@ impl WebSocket { } } + pub fn is_connected(&self) -> bool { + self.connected.load(std::sync::atomic::Ordering::SeqCst) + } + pub fn shutdown(&mut self) { - self.shutdown - .store(true, std::sync::atomic::Ordering::SeqCst); + self.shutdown.store(true, std::sync::atomic::Ordering::SeqCst); } pub fn receive(&self) -> Option { @@ -240,7 +242,7 @@ impl WebSocket { pub fn send(&self, message: String) { self.sender.send(WsMessage::Text(message)); // TODO(filip): This is a hotfix for the websocket not sending the message - // This doesn't actually send any message, but it makes the websocket actually send the message previous msg + // This makes the websocket actually send the previous msg // It has to be something related to tokio::spawn, because it works fine when just running in the current thread self.sender.send(WsMessage::Text("".to_string())); } diff --git a/crates/re_viewer/src/gpu_bridge/mod.rs b/crates/re_viewer/src/gpu_bridge/mod.rs index 9cf35e1d4c01..a853d72bf638 100644 --- a/crates/re_viewer/src/gpu_bridge/mod.rs +++ b/crates/re_viewer/src/gpu_bridge/mod.rs @@ -8,11 +8,15 @@ pub use tensor_to_gpu::tensor_to_gpu; use egui::mutex::Mutex; use re_renderer::{ - renderer::{ColormappedTexture, RectangleOptions}, + renderer::{ ColormappedTexture, RectangleOptions, TextureEncoding }, resource_managers::{ - GpuTexture2D, Texture2DCreationDesc, TextureCreationError, TextureManager2DError, + GpuTexture2D, + Texture2DCreationDesc, + TextureCreationError, + TextureManager2DError, }, - RenderContext, ViewBuilder, + RenderContext, + ViewBuilder, }; // ---------------------------------------------------------------------------- @@ -57,24 +61,24 @@ pub fn viewport_resolution_in_pixels(clip_rect: egui::Rect, pixels_from_point: f pub fn try_get_or_create_texture<'a, Err: std::fmt::Display>( render_ctx: &mut RenderContext, texture_key: u64, - try_create_texture_desc: impl FnOnce() -> Result, Err>, + try_create_texture_desc: impl FnOnce() -> Result, Err> ) -> Result> { render_ctx.texture_manager_2d.get_or_try_create_with( texture_key, &mut render_ctx.gpu_resources.textures, - try_create_texture_desc, + try_create_texture_desc ) } pub fn get_or_create_texture<'a>( render_ctx: &mut RenderContext, texture_key: u64, - create_texture_desc: impl FnOnce() -> Texture2DCreationDesc<'a>, + create_texture_desc: impl FnOnce() -> Texture2DCreationDesc<'a> ) -> Result { render_ctx.texture_manager_2d.get_or_create_with( texture_key, &mut render_ctx.gpu_resources.textures, - create_texture_desc, + create_texture_desc ) } @@ -84,20 +88,20 @@ pub fn renderer_paint_callback( command_buffer: wgpu::CommandBuffer, view_builder: re_renderer::ViewBuilder, clip_rect: egui::Rect, - pixels_from_point: f32, + pixels_from_point: f32 ) -> egui::PaintCallback { crate::profile_function!(); - slotmap::new_key_type! { pub struct ViewBuilderHandle; } + slotmap::new_key_type! 
{ + pub struct ViewBuilderHandle; + } type ViewBuilderMap = slotmap::SlotMap; // egui paint callback are copyable / not a FnOnce (this in turn is because egui primitives can be callbacks and are copyable) let command_buffer = std::sync::Arc::new(Mutex::new(Some(command_buffer))); - let composition_view_builder_map = render_ctx - .active_frame - .per_frame_data_helper + let composition_view_builder_map = render_ctx.active_frame.per_frame_data_helper .entry::() .or_insert_with(Default::default); let view_builder_handle = composition_view_builder_map.insert(view_builder); @@ -108,14 +112,16 @@ pub fn renderer_paint_callback( egui::PaintCallback { rect: clip_rect, callback: std::sync::Arc::new( - egui_wgpu::CallbackFn::new() - .prepare( - move |_device, _queue, _encoder, _paint_callback_resources| { - let mut command_buffer = command_buffer.lock(); - vec![std::mem::replace(&mut *command_buffer, None) - .expect("egui_wgpu prepare callback called more than once")] - }, - ) + egui_wgpu::CallbackFn + ::new() + .prepare(move |_device, _queue, _encoder, _paint_callback_resources| { + let mut command_buffer = command_buffer.lock(); + vec![ + std::mem + ::replace(&mut *command_buffer, None) + .expect("egui_wgpu prepare callback called more than once") + ] + }) .paint(move |_info, render_pass, paint_callback_resources| { crate::profile_scope!("paint"); // TODO(andreas): This should work as well but doesn't work in the 3d view. @@ -123,12 +129,11 @@ pub fn renderer_paint_callback( //let clip_rect = info.clip_rect_in_pixels(); let ctx = paint_callback_resources.get::().unwrap(); - ctx.active_frame - .per_frame_data_helper + ctx.active_frame.per_frame_data_helper .get::() - .unwrap()[view_builder_handle] - .composite(ctx, render_pass, screen_position); - }), + .unwrap() + [view_builder_handle].composite(ctx, render_pass, screen_position); + }) ), } } @@ -140,11 +145,11 @@ pub fn render_image( image_rect_on_screen: egui::Rect, colormapped_texture: ColormappedTexture, texture_options: egui::TextureOptions, - debug_name: &str, + debug_name: &str ) -> anyhow::Result<()> { crate::profile_function!(); - use re_renderer::renderer::{TextureFilterMag, TextureFilterMin}; + use re_renderer::renderer::{ TextureFilterMag, TextureFilterMin }; let clip_rect = painter.clip_rect().intersect(image_rect_on_screen); if !clip_rect.is_positive() { @@ -181,9 +186,11 @@ pub fn render_image( let space_from_points = space_from_ui.scale().y; let points_from_pixels = 1.0 / painter.ctx().pixels_per_point(); let space_from_pixel = space_from_points * points_from_pixels; + let resolution_in_pixel = crate::gpu_bridge::viewport_resolution_in_pixels( + clip_rect, + pixels_from_points + ); - let resolution_in_pixel = - crate::gpu_bridge::viewport_resolution_in_pixels(clip_rect, pixels_from_points); anyhow::ensure!(resolution_in_pixel[0] > 0 && resolution_in_pixel[1] > 0); let camera_position_space = space_from_ui.transform_pos(clip_rect.min); @@ -195,7 +202,7 @@ pub fn render_image( view_from_world: macaw::IsoTransform::from_translation(-top_left_position.extend(0.0)), projection_from_view: re_renderer::view_builder::Projection::Orthographic { camera_mode: re_renderer::view_builder::OrthographicCameraMode::TopLeftCornerAndExtendZ, - vertical_world_size: space_from_pixel * resolution_in_pixel[1] as f32, + vertical_world_size: space_from_pixel * (resolution_in_pixel[1] as f32), far_plane_distance: 1000.0, }, pixels_from_point: pixels_from_points, @@ -205,20 +212,21 @@ pub fn render_image( let mut view_builder = ViewBuilder::new(render_ctx, 
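// Worked example of the scale chain set up above (illustrative numbers): with
// the UI at 2 physical pixels per point and the space transform scaling 1.5
// space units per point, one pixel spans 0.75 space units, so an 800-pixel-tall
// viewport yields a vertical_world_size of 600.
fn vertical_world_size_example() {
    let pixels_per_point = 2.0_f32;
    let space_from_points = 1.5_f32;
    let space_from_pixel = space_from_points * (1.0 / pixels_per_point); // 0.75
    assert_eq!(space_from_pixel * 800.0, 600.0);
}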
target_config); - view_builder.queue_draw(&re_renderer::renderer::RectangleDrawData::new( - render_ctx, - &[textured_rectangle], - )?); + view_builder.queue_draw( + &re_renderer::renderer::RectangleDrawData::new(render_ctx, &[textured_rectangle])? + ); let command_buffer = view_builder.draw(render_ctx, re_renderer::Rgba::TRANSPARENT)?; - painter.add(crate::gpu_bridge::renderer_paint_callback( - render_ctx, - command_buffer, - view_builder, - clip_rect, - painter.ctx().pixels_per_point(), - )); + painter.add( + crate::gpu_bridge::renderer_paint_callback( + render_ctx, + command_buffer, + view_builder, + clip_rect, + painter.ctx().pixels_per_point() + ) + ); Ok(()) } diff --git a/crates/re_viewer/src/gpu_bridge/tensor_to_gpu.rs b/crates/re_viewer/src/gpu_bridge/tensor_to_gpu.rs index 7787f6c50f15..f9d5b6a2a83b 100644 --- a/crates/re_viewer/src/gpu_bridge/tensor_to_gpu.rs +++ b/crates/re_viewer/src/gpu_bridge/tensor_to_gpu.rs @@ -2,19 +2,20 @@ use anyhow::Context; use std::borrow::Cow; +use std::mem; -use bytemuck::{allocation::pod_collect_to_vec, cast_slice, Pod}; +use bytemuck::{ allocation::pod_collect_to_vec, cast_slice, Pod }; use egui::util::hash; use wgpu::TextureFormat; -use re_log_types::component_types::{DecodedTensor, Tensor, TensorData}; +use re_log_types::component_types::{ DecodedTensor, Tensor, TensorData }; use re_renderer::{ - renderer::{ColorMapper, ColormappedTexture}, + renderer::{ ColorMapper, ColormappedTexture, TextureEncoding }, resource_managers::Texture2DCreationDesc, RenderContext, }; -use crate::{gpu_bridge::get_or_create_texture, misc::caches::TensorStats}; +use crate::{ gpu_bridge::get_or_create_texture, misc::caches::TensorStats }; use super::try_get_or_create_texture; @@ -31,14 +32,16 @@ pub fn tensor_to_gpu( debug_name: &str, tensor: &DecodedTensor, tensor_stats: &TensorStats, - annotations: &crate::ui::Annotations, + annotations: &crate::ui::Annotations ) -> anyhow::Result { - crate::profile_function!(format!( - "meaning: {:?}, dtype: {}, shape: {:?}", - tensor.meaning, - tensor.dtype(), - tensor.shape() - )); + crate::profile_function!( + format!( + "meaning: {:?}, dtype: {}, shape: {:?}", + tensor.meaning, + tensor.dtype(), + tensor.shape() + ) + ); use re_log_types::component_types::TensorDataMeaning; @@ -55,6 +58,30 @@ pub fn tensor_to_gpu( } } +/// Pad and cast a slice of RGB values to RGBA with only one copy. +fn pad_and_cast_rgb(data: &[u8], alpha: u8) -> Cow<'static, [u8]> { + crate::profile_function!(); + ( + if cfg!(debug_assertions) { + // fastest version in debug builds. 
+ // 5x faster in debug builds, but 2x slower in release + let mut padded = vec![alpha; data.len() / 3 * 4]; + for i in 0..data.len() / 3 { + padded[4 * i] = data[3 * i]; + padded[4 * i + 1] = data[3 * i + 1]; + padded[4 * i + 2] = data[3 * i + 2]; + } + padded + } else { + // fastest version in optimized builds + data.chunks_exact(3) + .flat_map(|chunk| [chunk[0], chunk[1], chunk[2], alpha]) + .collect::<Vec<u8>>() + .into() + } + ).into() +} + // ---------------------------------------------------------------------------- // Color textures: @@ -62,29 +89,35 @@ fn color_tensor_to_gpu( render_ctx: &mut RenderContext, debug_name: &str, tensor: &DecodedTensor, - tensor_stats: &TensorStats, + tensor_stats: &TensorStats ) -> anyhow::Result<ColormappedTexture> { + let [height, width, depth] = height_width_depth(tensor)?; let texture_handle = try_get_or_create_texture(render_ctx, hash(tensor.id()), || { - let [height, width, depth] = height_width_depth(tensor)?; - let (data, format) = match (depth, &tensor.data) { - // Use R8Unorm and R8Snorm to get filtering on the GPU: - (1, TensorData::U8(buf)) => (cast_slice_to_cow(buf.as_slice()), TextureFormat::R8Unorm), - (1, TensorData::I8(buf)) => (cast_slice_to_cow(buf), TextureFormat::R8Snorm), - - // Special handling for sRGB(A) textures: - (3, TensorData::U8(buf)) => ( - pad_and_cast(buf.as_slice(), 255), - TextureFormat::Rgba8UnormSrgb, - ), - (4, TensorData::U8(buf)) => ( - // TODO(emilk): premultiply alpha - cast_slice_to_cow(buf.as_slice()), - TextureFormat::Rgba8UnormSrgb, - ), - - _ => { - // Fallback to general case: - return general_texture_creation_desc_from_tensor(debug_name, tensor); + let (data, format) = { + match (depth, &tensor.data) { + // Use R8Unorm and R8Snorm to get filtering on the GPU: + (1, TensorData::U8(buf)) => { + (cast_slice_to_cow(buf.as_slice()), TextureFormat::R8Unorm) + } + (1, TensorData::NV12(buf)) => { + (cast_slice_to_cow(buf.as_slice()), TextureFormat::R8Uint) + } + (1, TensorData::I8(buf)) => (cast_slice_to_cow(buf), TextureFormat::R8Snorm), + + // Special handling for sRGB(A) textures: + (3, TensorData::U8(buf)) => + (pad_and_cast_rgb(buf.as_slice(), 255), TextureFormat::Rgba8UnormSrgb), + (4, TensorData::U8(buf)) => + ( + // TODO(emilk): premultiply alpha + cast_slice_to_cow(buf.as_slice()), + TextureFormat::Rgba8UnormSrgb, + ), + + _ => { + // Fallback to general case: + return general_texture_creation_desc_from_tensor(debug_name, tensor); + } } }; @@ -95,24 +128,26 @@ fn color_tensor_to_gpu( width, height, }) - }) - .map_err(|err| anyhow::anyhow!("Failed to create texture for color tensor: {err}"))?; + }).map_err(|err| anyhow::anyhow!("Failed to create texture for color tensor: {err}"))?; let texture_format = texture_handle.format(); + let encoding: Option<TextureEncoding> = (&tensor.data).into(); // Special casing for normalized textures used above: - let range = if matches!( - texture_format, - TextureFormat::R8Unorm | TextureFormat::Rgba8UnormSrgb - ) { + let range = if matches!(texture_format, TextureFormat::R8Unorm | TextureFormat::Rgba8UnormSrgb) { [0.0, 1.0] } else if texture_format == TextureFormat::R8Snorm { [-1.0, 1.0] + } else if encoding == Some(TextureEncoding::Nv12) { + [0.0, 1.0] } else { crate::gpu_bridge::range(tensor_stats)?
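// Usage sketch for `pad_and_cast_rgb` above: two RGB pixels become two RGBA
// texels, each with the requested alpha appended.
fn pad_and_cast_rgb_example() {
    let rgb: [u8; 6] = [10, 20, 30, 40, 50, 60];
    let rgba = pad_and_cast_rgb(&rgb, 255);
    assert_eq!(rgba.as_ref(), &[10, 20, 30, 255, 40, 50, 60, 255][..]);
}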
}; - let color_mapper = if re_renderer::texture_info::num_texture_components(texture_format) == 1 { + let color_mapper = if + encoding != Some(TextureEncoding::Nv12) && + re_renderer::texture_info::num_texture_components(texture_format) == 1 + { // Single-channel images = luminance = grayscale Some(ColorMapper::Function(re_renderer::Colormap::Grayscale)) } else { @@ -124,6 +159,7 @@ fn color_tensor_to_gpu( range, gamma: 1.0, color_mapper, + encoding, }) } @@ -135,22 +171,13 @@ fn class_id_tensor_to_gpu( debug_name: &str, tensor: &DecodedTensor, tensor_stats: &TensorStats, - annotations: &crate::ui::Annotations, + annotations: &crate::ui::Annotations ) -> anyhow::Result { let [_height, _width, depth] = height_width_depth(tensor)?; - anyhow::ensure!( - depth == 1, - "Cannot apply annotations to tensor of shape {:?}", - tensor.shape - ); - anyhow::ensure!( - tensor.dtype().is_integer(), - "Only integer tensors can be annotated" - ); + anyhow::ensure!(depth == 1, "Cannot apply annotations to tensor of shape {:?}", tensor.shape); + anyhow::ensure!(tensor.dtype().is_integer(), "Only integer tensors can be annotated"); - let (min, max) = tensor_stats - .range - .ok_or_else(|| anyhow::anyhow!("compressed_tensor!?"))?; + let (min, max) = tensor_stats.range.ok_or_else(|| anyhow::anyhow!("compressed_tensor!?"))?; anyhow::ensure!(0.0 <= min, "Negative class id"); anyhow::ensure!(max <= 65535.0, "Too many class ids"); // we only support u8 and u16 tensors @@ -161,95 +188,83 @@ fn class_id_tensor_to_gpu( let colormap_width = 256; let colormap_height = (num_colors + colormap_width - 1) / colormap_width; - let colormap_texture_handle = - get_or_create_texture(render_ctx, hash(annotations.row_id), || { - let data: Vec = (0..(colormap_width * colormap_height)) - .flat_map(|id| { - let color = annotations - .class_description(Some(re_log_types::component_types::ClassId(id as u16))) - .annotation_info() - .color(None, crate::ui::DefaultColor::TransparentBlack); - color.to_array() // premultiplied! - }) - .collect(); - - Texture2DCreationDesc { - label: "class_id_colormap".into(), - data: data.into(), - format: TextureFormat::Rgba8UnormSrgb, - width: colormap_width as u32, - height: colormap_height as u32, - } - }) - .context("Failed to create class_id_colormap.")?; + let colormap_texture_handle = get_or_create_texture(render_ctx, hash(annotations.row_id), || { + let data: Vec = (0..colormap_width * colormap_height) + .flat_map(|id| { + let color = annotations + .class_description(Some(re_log_types::component_types::ClassId(id as u16))) + .annotation_info() + .color(None, crate::ui::DefaultColor::TransparentBlack); + color.to_array() // premultiplied! 
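// Why NV12 gets range [0.0, 1.0] and no grayscale colormap above: the upload is
// a single R8 plane (full-resolution Y followed by half-resolution interleaved
// UV) that is decoded to RGB at sampling time. Sketch of the per-texel decode,
// assuming BT.601 full-range coefficients (the actual shader may differ):
fn nv12_texel_to_rgb(y: f32, u: f32, v: f32) -> [f32; 3] {
    let (u, v) = (u - 0.5, v - 0.5); // center the chroma samples
    [
        (y + 1.402 * v).clamp(0.0, 1.0),
        (y - 0.344 * u - 0.714 * v).clamp(0.0, 1.0),
        (y + 1.772 * u).clamp(0.0, 1.0),
    ]
}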
+ }) + .collect(); + + Texture2DCreationDesc { + label: "class_id_colormap".into(), + data: data.into(), + format: TextureFormat::Rgba8UnormSrgb, + width: colormap_width as u32, + height: colormap_height as u32, + } + }).context("Failed to create class_id_colormap.")?; let main_texture_handle = try_get_or_create_texture(render_ctx, hash(tensor.id()), || { general_texture_creation_desc_from_tensor(debug_name, tensor) - }) - .map_err(|err| anyhow::anyhow!("Failed to create texture for class id tensor: {err}"))?; + }).map_err(|err| anyhow::anyhow!("Failed to create texture for class id tensor: {err}"))?; Ok(ColormappedTexture { texture: main_texture_handle, range: [0.0, (colormap_width * colormap_height) as f32], gamma: 1.0, color_mapper: Some(ColorMapper::Texture(colormap_texture_handle)), + encoding: None, }) } // ---------------------------------------------------------------------------- -// Depth textures: +// Depth textures +// ---------------------------------------------------------------------------- fn depth_tensor_to_gpu( render_ctx: &mut RenderContext, debug_name: &str, tensor: &DecodedTensor, - tensor_stats: &TensorStats, + tensor_stats: &TensorStats ) -> anyhow::Result { let [_height, _width, depth] = height_width_depth(tensor)?; - anyhow::ensure!( - depth == 1, - "Depth tensor of weird shape: {:?}", - tensor.shape - ); + anyhow::ensure!(depth == 1, "Depth tensor of weird shape: {:?}", tensor.shape); let (min, max) = depth_tensor_range(tensor, tensor_stats)?; let texture = try_get_or_create_texture(render_ctx, hash(tensor.id()), || { general_texture_creation_desc_from_tensor(debug_name, tensor) - }) - .map_err(|err| anyhow::anyhow!("Failed to create depth tensor texture: {err}"))?; + }).map_err(|err| anyhow::anyhow!("Failed to create depth tensor texture: {err}"))?; Ok(ColormappedTexture { texture, range: [min as f32, max as f32], gamma: 1.0, color_mapper: Some(ColorMapper::Function(re_renderer::Colormap::Turbo)), + encoding: None, }) } fn depth_tensor_range( tensor: &DecodedTensor, - tensor_stats: &TensorStats, + tensor_stats: &TensorStats ) -> anyhow::Result<(f64, f64)> { - let range = tensor_stats.range.ok_or(anyhow::anyhow!( - "Tensor has no range!? Was this compressed?" - ))?; + let range = tensor_stats.range.ok_or( + anyhow::anyhow!("Tensor has no range!? Was this compressed?") + )?; let (mut min, mut max) = range; - anyhow::ensure!( - min.is_finite() && max.is_finite(), - "Tensor has non-finite values" - ); + anyhow::ensure!(min.is_finite() && max.is_finite(), "Tensor has non-finite values"); min = min.min(0.0); // Depth usually start at zero. if min == max { // Uniform image. We can't remap it to a 0-1 range, so do whatever: min = 0.0; - max = if tensor.dtype().is_float() { - 1.0 - } else { - tensor.dtype().max_value() - }; + max = if tensor.dtype().is_float() { 1.0 } else { tensor.dtype().max_value() }; } Ok((min, max)) @@ -261,7 +276,7 @@ fn depth_tensor_range( /// Uses no `Unorm/Snorm` formats. fn general_texture_creation_desc_from_tensor<'a>( debug_name: &str, - tensor: &'a DecodedTensor, + tensor: &'a DecodedTensor ) -> anyhow::Result> { let [height, width, depth] = height_width_depth(tensor)?; @@ -282,8 +297,9 @@ fn general_texture_creation_desc_from_tensor<'a>( TensorData::F32(buf) => (cast_slice_to_cow(buf), TextureFormat::R32Float), TensorData::F64(buf) => (narrow_f64_to_f32s(buf), TextureFormat::R32Float), // narrowing to f32! 
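// Worked examples of `depth_tensor_range` above: a uniform u16 image of 1000s
// first clamps min down to 0.0 and keeps max, giving (0.0, 1000.0); an all-zero
// image hits the min == max fallback and widens to the dtype maximum.
fn sketch_depth_range(min: f64, max: f64, integer_dtype_max: f64) -> (f64, f64) {
    let min = min.min(0.0); // depth usually starts at zero
    if min == max { (0.0, integer_dtype_max) } else { (min, max) }
}

fn depth_range_examples() {
    assert_eq!(sketch_depth_range(1000.0, 1000.0, u16::MAX as f64), (0.0, 1000.0));
    assert_eq!(sketch_depth_range(0.0, 0.0, u16::MAX as f64), (0.0, 65535.0));
}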
- TensorData::JPEG(_) => { - unreachable!("DecodedTensor cannot contain a JPEG") + TensorData::JPEG(_) => { unreachable!("DecodedTensor cannot contain a JPEG") } + TensorData::NV12(buf) => { + (cast_slice_to_cow(buf.as_slice()), TextureFormat::R8Unorm) } } } @@ -304,8 +320,9 @@ fn general_texture_creation_desc_from_tensor<'a>( TensorData::F32(buf) => (cast_slice_to_cow(buf), TextureFormat::Rg32Float), TensorData::F64(buf) => (narrow_f64_to_f32s(buf), TextureFormat::Rg32Float), // narrowing to f32! - TensorData::JPEG(_) => { - unreachable!("DecodedTensor cannot contain a JPEG") + TensorData::JPEG(_) => { unreachable!("DecodedTensor cannot contain a JPEG") } + TensorData::NV12(_) => { + panic!("NV12 cannot be a two channel tensor!"); } } } @@ -315,34 +332,36 @@ fn general_texture_creation_desc_from_tensor<'a>( // To be safe, we pad with the MAX value of integers, and with 1.0 for floats. // TODO(emilk): tell the shader to ignore the alpha channel instead! match &tensor.data { - TensorData::U8(buf) => ( - pad_and_cast(buf.as_slice(), u8::MAX), - TextureFormat::Rgba8Uint, - ), + TensorData::U8(buf) => + (pad_and_cast(buf.as_slice(), u8::MAX), TextureFormat::Rgba8Uint), TensorData::U16(buf) => (pad_and_cast(buf, u16::MAX), TextureFormat::Rgba16Uint), TensorData::U32(buf) => (pad_and_cast(buf, u32::MAX), TextureFormat::Rgba32Uint), - TensorData::U64(buf) => ( - pad_and_narrow_and_cast(buf, 1.0, |x: u64| x as f32), - TextureFormat::Rgba32Float, - ), + TensorData::U64(buf) => + ( + pad_and_narrow_and_cast(buf, 1.0, |x: u64| x as f32), + TextureFormat::Rgba32Float, + ), TensorData::I8(buf) => (pad_and_cast(buf, i8::MAX), TextureFormat::Rgba8Sint), TensorData::I16(buf) => (pad_and_cast(buf, i16::MAX), TextureFormat::Rgba16Sint), TensorData::I32(buf) => (pad_and_cast(buf, i32::MAX), TextureFormat::Rgba32Sint), - TensorData::I64(buf) => ( - pad_and_narrow_and_cast(buf, 1.0, |x: i64| x as f32), - TextureFormat::Rgba32Float, - ), + TensorData::I64(buf) => + ( + pad_and_narrow_and_cast(buf, 1.0, |x: i64| x as f32), + TextureFormat::Rgba32Float, + ), // TensorData::F16(buf) => (pad_and_cast(buf, 1.0), TextureFormat::Rgba16Float), TODO(#854) TensorData::F32(buf) => (pad_and_cast(buf, 1.0), TextureFormat::Rgba32Float), - TensorData::F64(buf) => ( - pad_and_narrow_and_cast(buf, 1.0, |x: f64| x as f32), - TextureFormat::Rgba32Float, - ), - - TensorData::JPEG(_) => { - unreachable!("DecodedTensor cannot contain a JPEG") + TensorData::F64(buf) => + ( + pad_and_narrow_and_cast(buf, 1.0, |x: f64| x as f32), + TextureFormat::Rgba32Float, + ), + + TensorData::JPEG(_) => { unreachable!("DecodedTensor cannot contain a JPEG") } + TensorData::NV12(_) => { + panic!("NV12 cannot be a three channel tensor!"); } } } @@ -365,8 +384,9 @@ fn general_texture_creation_desc_from_tensor<'a>( TensorData::F32(buf) => (cast_slice_to_cow(buf), TextureFormat::Rgba32Float), TensorData::F64(buf) => (narrow_f64_to_f32s(buf), TextureFormat::Rgba32Float), // narrowing to f32! - TensorData::JPEG(_) => { - unreachable!("DecodedTensor cannot contain a JPEG") + TensorData::JPEG(_) => { unreachable!("DecodedTensor cannot contain a JPEG") } + TensorData::NV12(_) => { + panic!("NV12 cannot be a four channel tensor!"); } } } @@ -424,7 +444,7 @@ fn pad_to_four_elements(data: &[T], pad: T) -> Vec { // fastest version in debug builds. 
// 5x faster in debug builds, but 2x slower in release let mut padded = vec![pad; data.len() / 3 * 4]; - for i in 0..(data.len() / 3) { + for i in 0..data.len() / 3 { padded[4 * i] = data[3 * i]; padded[4 * i + 1] = data[3 * i + 1]; padded[4 * i + 2] = data[3 * i + 2]; @@ -448,7 +468,7 @@ fn pad_and_cast(data: &[T], pad: T) -> Cow<'static, [u8]> { fn pad_and_narrow_and_cast( data: &[T], pad: f32, - narrow: impl Fn(T) -> f32, + narrow: impl Fn(T) -> f32 ) -> Cow<'static, [u8]> { crate::profile_function!(); @@ -468,7 +488,7 @@ fn height_width_depth(tensor: &Tensor) -> anyhow::Result<[u32; 3]> { anyhow::ensure!( shape.len() == 2 || shape.len() == 3, - "Expected a 2D or 3D tensor, got {shape:?}", + "Expected a 2D or 3D tensor, got {shape:?}" ); let [height, width] = [ diff --git a/crates/re_viewer/src/lib.rs b/crates/re_viewer/src/lib.rs index 3d78fa141c47..cee2e90c80f8 100644 --- a/crates/re_viewer/src/lib.rs +++ b/crates/re_viewer/src/lib.rs @@ -71,12 +71,13 @@ macro_rules! profile_scope { // --------------------------------------------------------------------------- type SysExePath = String; +type VenvSitePackages = String; /// Where is this App running in? #[derive(Clone, Debug, PartialEq, Eq)] pub enum AppEnvironment { /// Created from the Rerun Python SDK. - PythonSdk(PythonVersion, SysExePath), + PythonSdk(PythonVersion, SysExePath, VenvSitePackages), /// Created from the Rerun Rust SDK. RustSdk { @@ -98,8 +99,8 @@ impl AppEnvironment { pub fn from_recording_source(source: &re_log_types::RecordingSource) -> Self { use re_log_types::RecordingSource; match source { - RecordingSource::PythonSdk(python_version, sys_exe) => { - Self::PythonSdk(python_version.clone(), sys_exe.clone()) + RecordingSource::PythonSdk(python_version, sys_exe, venv_site) => { + Self::PythonSdk(python_version.clone(), sys_exe.clone(), venv_site.clone()) } RecordingSource::RustSdk { rustc_version: rust_version, diff --git a/crates/re_viewer/src/misc/time_control_ui.rs b/crates/re_viewer/src/misc/time_control_ui.rs index f35b23bb14d0..f20bdaf3f64b 100644 --- a/crates/re_viewer/src/misc/time_control_ui.rs +++ b/crates/re_viewer/src/misc/time_control_ui.rs @@ -184,9 +184,10 @@ impl TimeControl { } fn toggle_playback_text(egui_ctx: &egui::Context) -> String { - if let Some(shortcut) = re_ui::Command::PlaybackTogglePlayPause.kb_shortcut() { - format!(" Toggle with {}", egui_ctx.format_shortcut(&shortcut)) - } else { - Default::default() - } + // if let Some(shortcut) = re_ui::Command::PlaybackTogglePlayPause.kb_shortcut() { + // format!(" Toggle with {}", egui_ctx.format_shortcut(&shortcut)) + // } else { + // Default::default() + // } + Default::default() } diff --git a/crates/re_viewer/src/ui/auto_layout.rs b/crates/re_viewer/src/ui/auto_layout.rs index d158ce0d25ca..40fa7d4af6e7 100644 --- a/crates/re_viewer/src/ui/auto_layout.rs +++ b/crates/re_viewer/src/ui/auto_layout.rs @@ -10,7 +10,7 @@ // TODO(emilk): fix O(N^2) execution time (where N = number of spaces) use core::panic; -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::{BTreeMap, BTreeSet, VecDeque}; use ahash::HashMap; use egui::Vec2; @@ -44,12 +44,42 @@ pub struct SpaceMakeInfo { pub kind: SpaceViewKind, } -enum LayoutSplit { +pub(crate) enum LayoutSplit { LeftRight(Box, f32, Box), TopBottom(Box, f32, Box), Leaf(Vec), } +impl LayoutSplit { + pub fn is_empty(&self) -> bool { + match self { + LayoutSplit::Leaf(spaces) => spaces.is_empty(), + LayoutSplit::LeftRight(left, _, right) => left.is_empty() && right.is_empty(), + 
LayoutSplit::TopBottom(top, _, bottom) => top.is_empty() && bottom.is_empty(), + } + } +} + +impl std::fmt::Debug for LayoutSplit { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + LayoutSplit::LeftRight(left, fraction, right) => { + write!(f, "LeftRight({:?}, {}, {:?})", left, fraction, right) + } + LayoutSplit::TopBottom(top, fraction, bottom) => { + write!(f, "TopBottom({:?}, {}, {:?})", top, fraction, bottom) + } + LayoutSplit::Leaf(spaces) => { + write!( + f, + "Leaf({:?})", + spaces.iter().map(|s| s.path.clone()).collect_vec() + ) + } + } + } +} + enum SplitDirection { LeftRight { left: Vec2, t: f32, right: Vec2 }, TopBottom { top: Vec2, t: f32, bottom: Vec2 }, @@ -90,7 +120,7 @@ lazy_static! { static ref CONSTANT_SPACE_VIEWS: Vec = vec![ CONFIG_SPACE_VIEW.id, STATS_SPACE_VIEW.id, - SELECTION_SPACE_VIEW.id, + SELECTION_SPACE_VIEW.id ]; } @@ -110,8 +140,8 @@ fn find_space_path_in_tree( tree.tabs() .find(|tab| { let Some(path) = &tab.space_path else { - return false; - }; + return false; + }; path == space_view_path }) .cloned() @@ -121,263 +151,227 @@ fn find_top_left_leaf(tree: &egui_dock::Tree) -> NodeIndex { let mut node = NodeIndex::root(); loop { if tree[node].is_leaf() { - println!("Node: {node:?}"); return node; } node = node.right(); } } -/// Is it possible to create a quad of left top 3d color left bottom 2d color -/// right top 3d mono right bottom 2d mono, based on the current tree -fn can_create_color_mono_quad(tree: &egui_dock::Tree, space_views: Vec) -> bool { - let Some(color3d_tab) = find_space_path_in_tree(tree, &depthai::entity_paths::COLOR_CAM_3D) else { - return false; - }; - let Some((color3d_node_index, _)) = tree.find_tab(&color3d_tab) else { - return false; - }; - let Some(mono3d_tab) = find_space_path_in_tree(tree, &depthai::entity_paths::MONO_CAM_3D) else { - return false; - }; - let Some((mono3d_node_index, mono3d_tab_index)) = tree.find_tab(&mono3d_tab) else { - return false; - }; - mono3d_node_index == color3d_node_index.right() -} +// /// Layout `CAM_B` `CAM_A` | `CAM_C` with 3d views on top and 2d views on the bottom. 
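// The grouping rule used by the layout code below (sketch): the first
// entity-path part is the camera socket; a one-part path is that socket's 3d
// view, anything deeper is a 2d stream, e.g. "CAM_A" vs. "CAM_A/image".
fn is_3d_view(path_parts: &[&str]) -> bool {
    path_parts.len() == 1
}

fn grouping_examples() {
    assert!(is_3d_view(&["CAM_A"])); // socket root -> 3d view
    assert!(!is_3d_view(&["CAM_A", "image"])); // deeper path -> 2d stream
}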
+// fn create_inner_viewport_layout(spaces: &Vec) -> LayoutSplit { +// let mut groups: HashMap, Vec)> = +// HashMap::default(); + +// for space in spaces { +// if let Some(path) = &space.path { +// let base_path = match path.as_slice().first() { +// Some(part) => part.clone(), +// None => continue, +// }; + +// let (views_2d, views_3d) = groups.entry(base_path).or_default(); + +// if path.len() > 1 { +// views_2d.push(space.clone()); +// } else { +// views_3d.push(space.clone()); +// } +// } +// } + +// let mut sorted_groups: BTreeMap, Vec)> = +// BTreeMap::new(); +// for (key, value) in groups { +// sorted_groups.insert(key, value); +// } + +// let mut layouts: VecDeque = VecDeque::new(); + +// for (_base_path, (views_2d, views_3d)) in sorted_groups { +// let layout_2d = LayoutSplit::Leaf(views_2d); +// let layout_3d = LayoutSplit::Leaf(views_3d); + +// layouts.push_back(LayoutSplit::TopBottom( +// Box::new(layout_3d), +// 0.5, +// Box::new(layout_2d), +// )); +// } +// if layouts.len() > 1 { +// create_horizontal_layout(&mut layouts).1 +// } else { +// LayoutSplit::Leaf(spaces.clone()) +// } +// } + +// fn create_horizontal_layout(vertical_splits: &mut VecDeque) -> (f32, LayoutSplit) { +// if vertical_splits.len() == 1 { +// return (1.0, vertical_splits.pop_front().unwrap()); +// } +// let left = vertical_splits.pop_front().unwrap(); +// let (mut n_splits, mut right) = create_horizontal_layout(vertical_splits); +// n_splits += 1.0; +// right = LayoutSplit::LeftRight(Box::new(left), 1.0 / n_splits, Box::new(right)); +// (n_splits, right) +// } + +/// Layout `CAM_A` `CAM_B` | `CAM_C` with 3d views on top and 2d views on the bottom in the same group. (only one 2d and one 3d view visible from the start) +fn create_inner_viewport_layout( + viewport_size: egui::Vec2, + spaces: &Vec, +) -> LayoutSplit { + let mut groups: HashMap, Vec)> = + HashMap::default(); -/// Insert new space views and remove space views that aren't available anymore. -/// Tries to layout the viewport as intuitively as possible -/// TODO(filip): Reduce the size of this code. 
A lot of it is repetitive and can be refactored -/// TODO(filip): Improve code functionally: detect when you can group mono and color 3d + 2d views into a 4 way split -pub(crate) fn update_tree( - tree: &mut egui_dock::Tree, - visible_space_views: &BTreeSet, - space_views: &HashMap, - is_maximized: bool, -) { - // One view is maximized - if is_maximized { - let tab: Tab; - let space_view_id = visible_space_views.first().unwrap(); - if let Some(space_view) = space_views.get(space_view_id) { - tab = space_view.into(); - } else { - tab = if space_view_id == &STATS_SPACE_VIEW.id { - Tab { - space_path: None, - space_view_id: *space_view_id, - space_view_kind: SpaceViewKind::Stats, + for space in spaces { + if let Some(path) = &space.path { + let base_path = match path.as_slice().first() { + Some(part) => part.clone(), + None => { + continue; } + }; + + let (views_2d, views_3d) = groups.entry(base_path).or_default(); + + if path.len() > 1 { + views_2d.push(space.clone()); } else { - re_log::warn_once!("Can't maximize this space view"); - return; + views_3d.push(space.clone()); } } - *tree = egui_dock::Tree::new(vec![tab]); - return; } - for tab in tree.clone().tabs().filter(|tab| { - !CONSTANT_SPACE_VIEWS.contains(&tab.space_view_id) - && !visible_space_views - .iter() - .any(|sv_id| sv_id == &tab.space_view_id) - }) { - tree.remove_tab(tree.find_tab(tab).unwrap()); + let mut sorted_groups: BTreeMap, Vec)> = + BTreeMap::new(); + for (key, value) in groups { + sorted_groups.insert(key, value); + } + let mut all_2d = Vec::new(); + let mut all_3d = Vec::new(); + for (_base_path, (views_2d, views_3d)) in sorted_groups { + all_2d.extend(views_2d); + all_3d.extend(views_3d); } - // If there aren't any "data" space views, we show the config, stats and selection panel on the right. 
- // With an empty leaf on the left (aka middle if you take into account the blueprint panel) - if visible_space_views.is_empty() { - *tree = egui_dock::Tree::new(vec![]); - - tree_from_split( - tree, - NodeIndex::root(), - &LayoutSplit::LeftRight( - LayoutSplit::Leaf(Vec::new()).into(), - 0.7, - right_panel_split().into(), - ), - ); - let (config_node, config_tab) = tree - .find_tab( - tree.tabs() - .find(|tab| tab.space_view_id == CONFIG_SPACE_VIEW.id) - .unwrap(), // CONFIG_SPACE_VIEW is always present - ) - .unwrap(); - tree.set_active_tab(config_node, config_tab); + let monos_2d = all_2d + .iter() + .filter(|space| { + if let Some(last) = space.path.as_ref().and_then(|path| path.as_slice().last()) { + last == &EntityPathPart::from("mono_cam") + } else { + false + } + }) + .cloned() + .collect_vec(); - return; - } + let colors_2d = all_2d + .iter() + .filter(|space| { + if let Some(last) = space.path.as_ref().and_then(|path| path.as_slice().last()) { + last == &EntityPathPart::from("color_cam") + } else { + false + } + }) + .cloned() + .collect_vec(); - let visible_space_views = visible_space_views + let monos_3d = all_3d .iter() - .map(|sv| space_views.get(sv).unwrap()); - // Insert new space views - for space_view in visible_space_views { - // println!("Space view: {:?}", space_view.space_path.clone()); - if tree - .find_tab(&Tab { - space_view_id: space_view.id, - space_view_kind: SpaceViewKind::Data, - space_path: Some(space_view.space_path.clone()), - }) - .is_none() - { - // Insert space view into the tree, taking into account the following: - // * If the space view is a 3d view, try to find the corresponding 2d view and place the 3d on top of the 2d view - // * If the space view is a 2d view, try to find the corresponding 3d view and place the 2d view on top of the 3d view - // * If the space view is a duplicate of an existing view (entity path is the same space_view_id differs), place it within the same leaf as the existing view - // * else if none of the above, just place the view in the top left corner as a new tab, (don't insert it into a leaf, create a new leaf) - // println!("Space view getting inserted: {:?}", space_view.space_path); - - match space_view.space_path { - ref space_path - if space_path.hash() == depthai::entity_paths::COLOR_CAM_3D.hash() => - { - if let Some(existing_3d) = - find_space_path_in_tree(tree, &depthai::entity_paths::COLOR_CAM_3D) - { - let (leaf, _) = tree.find_tab(&existing_3d).unwrap(); - push_space_view_to_leaf(tree, leaf, space_view); - } else if let Some(existing_2d) = - find_space_path_in_tree(tree, &depthai::entity_paths::RGB_PINHOLE_CAMERA) + .filter(|space_3d| { + if let Some(socket_3d) = space_3d + .path + .as_ref() + .and_then(|path| path.as_slice().first()) + { + monos_2d.iter().any(|space_2d| { + if let Some(socket_2d) = space_2d + .path + .as_ref() + .and_then(|path| path.as_slice().first()) { - let (node_index, _) = tree.find_tab(&existing_2d).unwrap(); - tree.split_above(node_index, 0.5, vec![space_view.into()]); - } else if let Some(existing_mono3d) = - find_space_path_in_tree(tree, &depthai::entity_paths::MONO_CAM_3D) - { - let (leaf, _) = tree.find_tab(&existing_mono3d).unwrap(); - tree.split_left(leaf, 0.5, vec![space_view.into()]); - } else { - let top_left = find_top_left_leaf(tree); - push_space_view_to_leaf(tree, top_left, space_view); - } - } - ref space_path - if space_path.hash() == depthai::entity_paths::RGB_PINHOLE_CAMERA.hash() => - { - if let Some(existing_2d) = - find_space_path_in_tree(tree, 
&depthai::entity_paths::RGB_PINHOLE_CAMERA) - { - let (leaf, _) = tree.find_tab(&existing_2d).unwrap(); - push_space_view_to_leaf(tree, leaf, space_view); - } else if let Some(existing_left) = - find_space_path_in_tree(tree, &depthai::entity_paths::LEFT_PINHOLE_CAMERA) - { - let (node_index, _) = tree.find_tab(&existing_left).unwrap(); - tree.split_left(node_index, 0.5, vec![space_view.into()]); - } else if let Some(existing_right) = - find_space_path_in_tree(tree, &depthai::entity_paths::RIGHT_PINHOLE_CAMERA) - { - let (node_index, _) = tree.find_tab(&existing_right).unwrap(); - tree.split_left(node_index, 0.5, vec![space_view.into()]); - } else if let Some(existing_3d) = - find_space_path_in_tree(tree, &depthai::entity_paths::COLOR_CAM_3D) - { - let (node_index, _) = tree.find_tab(&existing_3d).unwrap(); - tree.split_below(node_index, 0.5, vec![space_view.into()]); - } else { - let top_left = find_top_left_leaf(tree); - push_space_view_to_leaf(tree, top_left, space_view); - } - } - ref space_path - if space_path.hash() == depthai::entity_paths::MONO_CAM_3D.hash() => - { - if let Some(existing_3d) = - find_space_path_in_tree(tree, &depthai::entity_paths::MONO_CAM_3D) - { - let (leaf, _) = tree.find_tab(&existing_3d).unwrap(); - push_space_view_to_leaf(tree, leaf, space_view); - } else if let Some(existing_3d_color) = - find_space_path_in_tree(tree, &depthai::entity_paths::COLOR_CAM_3D) - { - let (leaf, _) = tree.find_tab(&existing_3d_color).unwrap(); - tree.split_right(leaf, 0.5, vec![space_view.into()]); - } else if let Some(existing_left) = - find_space_path_in_tree(tree, &depthai::entity_paths::LEFT_PINHOLE_CAMERA) - { - let (leaf, _) = tree.find_tab(&existing_left).unwrap(); - tree.split_above(leaf, 0.5, vec![space_view.into()]); - } else if let Some(existing_right) = - find_space_path_in_tree(tree, &depthai::entity_paths::RIGHT_PINHOLE_CAMERA) - { - let (leaf, _) = tree.find_tab(&existing_right).unwrap(); - tree.split_above(leaf, 0.5, vec![space_view.into()]); - } else if let Some(existing_color) = - find_space_path_in_tree(tree, &depthai::entity_paths::RGB_PINHOLE_CAMERA) - { - let (leaf, _) = tree.find_tab(&existing_color).unwrap(); - tree.split_right(leaf, 0.5, vec![space_view.into()]); + socket_2d == socket_3d } else { - let top_left = find_top_left_leaf(tree); - push_space_view_to_leaf(tree, top_left, space_view); + false } - } - ref space_path - if space_path.hash() == depthai::entity_paths::LEFT_PINHOLE_CAMERA.hash() => - { - if let Some(existing_left) = - find_space_path_in_tree(tree, &depthai::entity_paths::LEFT_PINHOLE_CAMERA) - { - let (leaf, _) = tree.find_tab(&existing_left).unwrap(); - push_space_view_to_leaf(tree, leaf, space_view); - } else if let Some(existing_right) = - find_space_path_in_tree(tree, &depthai::entity_paths::RIGHT_PINHOLE_CAMERA) - { - let (leaf, _) = tree.find_tab(&existing_right).unwrap(); - push_space_view_to_leaf(tree, leaf, space_view); - } else if let Some(existing_3d) = - find_space_path_in_tree(tree, &depthai::entity_paths::MONO_CAM_3D) - { - let (node_index, _) = tree.find_tab(&existing_3d).unwrap(); - tree.split_below(node_index, 0.5, vec![space_view.into()]); - } else if let Some(existing_2d_color) = - find_space_path_in_tree(tree, &depthai::entity_paths::RGB_PINHOLE_CAMERA) - { - let (node_index, _) = tree.find_tab(&existing_2d_color).unwrap(); - tree.split_right(node_index, 0.5, vec![space_view.into()]); - } else { - let top_left = find_top_left_leaf(tree); - push_space_view_to_leaf(tree, top_left, space_view); - } - } - ref space_path - 
if space_path.hash() == depthai::entity_paths::RIGHT_PINHOLE_CAMERA.hash() => - { - if let Some(existing_right) = - find_space_path_in_tree(tree, &depthai::entity_paths::RIGHT_PINHOLE_CAMERA) - { - let (leaf, _) = tree.find_tab(&existing_right).unwrap(); - push_space_view_to_leaf(tree, leaf, space_view); - } else if let Some(existing_left) = - find_space_path_in_tree(tree, &depthai::entity_paths::LEFT_PINHOLE_CAMERA) - { - let (leaf, _) = tree.find_tab(&existing_left).unwrap(); - push_space_view_to_leaf(tree, leaf, space_view); - } else if let Some(existing_3d) = - find_space_path_in_tree(tree, &depthai::entity_paths::MONO_CAM_3D) - { - let (node_index, _) = tree.find_tab(&existing_3d).unwrap(); - tree.split_below(node_index, 0.5, vec![space_view.into()]); - } else if let Some(existing_2d_color) = - find_space_path_in_tree(tree, &depthai::entity_paths::RGB_PINHOLE_CAMERA) + }) + } else { + false + } + }) + .cloned() + .collect_vec(); + + let colors_3d = all_3d + .iter() + .filter(|space_3d| { + if let Some(socket_3d) = space_3d + .path + .as_ref() + .and_then(|path| path.as_slice().first()) + { + colors_2d.iter().any(|space_2d| { + if let Some(socket_2d) = space_2d + .path + .as_ref() + .and_then(|path| path.as_slice().first()) { - let (node_index, _) = tree.find_tab(&existing_2d_color).unwrap(); - tree.split_right(node_index, 0.5, vec![space_view.into()]); + socket_2d == socket_3d } else { - let top_left = find_top_left_leaf(tree); - push_space_view_to_leaf(tree, top_left, space_view); + false } - } - _ => {} - }; - } + }) + } else { + false + } + }) + .cloned() + .collect_vec(); + + let color_split_2d = LayoutSplit::Leaf(colors_2d.clone().into()); + let color_split_3d = LayoutSplit::Leaf(colors_3d.clone().into()); + let mono_split_2d = LayoutSplit::Leaf(monos_2d.clone().into()); + let mono_split_3d = LayoutSplit::Leaf(monos_3d.clone().into()); + + let mono_split = if monos_2d.is_empty() && monos_3d.is_empty() { + LayoutSplit::Leaf(vec![]) + } else if monos_2d.is_empty() && !monos_3d.is_empty() { + mono_split_3d + } else if !monos_2d.is_empty() && monos_3d.is_empty() { + mono_split_2d + } else { + LayoutSplit::TopBottom(mono_split_3d.into(), 0.5, mono_split_2d.into()) + }; + + let color_split = if colors_2d.is_empty() && colors_3d.is_empty() { + LayoutSplit::Leaf(vec![]) + } else if colors_2d.is_empty() && !colors_3d.is_empty() { + color_split_3d + } else if !colors_2d.is_empty() && colors_3d.is_empty() { + color_split_2d + } else { + LayoutSplit::TopBottom(color_split_3d.into(), 0.5, color_split_2d.into()) + }; + + if color_split.is_empty() && mono_split.is_empty() { + stock_rerun_split(viewport_size, spaces) + } else if color_split.is_empty() && !mono_split.is_empty() { + mono_split + } else if !color_split.is_empty() && mono_split.is_empty() { + color_split + } else { + LayoutSplit::LeftRight(color_split.into(), 0.5, mono_split.into()) + } +} + +fn stock_rerun_split(viewport_size: egui::Vec2, spaces: &Vec) -> LayoutSplit { + if !spaces.is_empty() { + // Users often organize by path prefix, so we start by splitting along that + layout_by_path_prefix(viewport_size, &mut spaces.to_owned()) + } else { + LayoutSplit::Leaf(vec![]) } } @@ -441,7 +435,6 @@ pub(crate) fn default_tree_from_space_views( let space_view_id = visible.first().unwrap(); if space_views.get(space_view_id).is_none() { if space_view_id == &STATS_SPACE_VIEW.id { - println!("Space view is stats space view!"); LayoutSplit::Leaf(vec![SpaceMakeInfo { id: *space_view_id, path: None, @@ -457,67 +450,7 @@ pub(crate) fn 
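// The if/else chains above implement this combine rule (sketch): never produce
// a split with an empty side; fall back to the non-empty half, or to an empty
// leaf when both halves are empty.
fn combine_top_bottom(top: LayoutSplit, bottom: LayoutSplit) -> LayoutSplit {
    match (top.is_empty(), bottom.is_empty()) {
        (true, true) => LayoutSplit::Leaf(vec![]),
        (false, true) => top,
        (true, false) => bottom,
        (false, false) => LayoutSplit::TopBottom(Box::new(top), 0.5, Box::new(bottom)),
    }
}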
default_tree_from_space_views( } } else { LayoutSplit::LeftRight( - { - // Split space views: - // - Color stream available: Split top 3d, bottom 2d - // - if mono available split it right from color streams into 3d top and both 2d in a tab group on the bottom - let mut top_left_spaces = Vec::new(); - let mut top_right_spaces = Vec::new(); - let mut bottom_left_spaces = Vec::new(); - let mut bottom_right_spaces = Vec::new(); - spaces.iter().cloned().for_each(|space| { - let Some(space_path) = &space.path else { - return; - }; - if space_path.hash() == depthai::entity_paths::COLOR_CAM_3D.hash() { - top_left_spaces.push(space); - } else if space_path.hash() - == depthai::entity_paths::RGB_PINHOLE_CAMERA.hash() - { - top_right_spaces.push(space); - } else if space_path.hash() == depthai::entity_paths::MONO_CAM_3D.hash() - { - bottom_left_spaces.push(space); - } else { - bottom_right_spaces.push(space); - } - }); - - let color_empty = top_left_spaces.is_empty() && top_right_spaces.is_empty(); - let mono_empty = - bottom_left_spaces.is_empty() && bottom_right_spaces.is_empty(); - let mut color_split = LayoutSplit::TopBottom( - LayoutSplit::Leaf(top_left_spaces.clone()).into(), - 0.5, - LayoutSplit::Leaf(top_right_spaces.clone()).into(), - ); - let mut mono_split = LayoutSplit::TopBottom( - LayoutSplit::Leaf(bottom_left_spaces.clone()).into(), - 0.5, - LayoutSplit::Leaf(bottom_right_spaces.clone()).into(), - ); - - if !color_empty && mono_empty { - color_split - } else if !color_empty && !mono_empty { - if top_left_spaces.is_empty() { - color_split = LayoutSplit::Leaf(top_right_spaces); - } else if top_right_spaces.is_empty() { - color_split = LayoutSplit::Leaf(top_left_spaces); - } - if bottom_left_spaces.is_empty() { - mono_split = LayoutSplit::Leaf(bottom_right_spaces); - } else if bottom_right_spaces.is_empty() { - mono_split = LayoutSplit::Leaf(bottom_left_spaces); - } - LayoutSplit::LeftRight(color_split.into(), 0.5, mono_split.into()) - } else if color_empty && !mono_empty { - mono_split - } else { - LayoutSplit::Leaf(spaces) - } - } - .into(), + create_inner_viewport_layout(viewport_size, &spaces).into(), 0.7, right_panel_split().into(), ) @@ -536,7 +469,24 @@ pub(crate) fn default_tree_from_space_views( ); } if !is_maximized { - // Always set the config tab as the active tab + // Always set the color cam (if available - currently the approach is really bad as I just check for CAM_A, + // should be improved upon to search for camera name in connected_cameras) as the active tab and then the config tab as the active tab + let tree_clone = tree.clone(); + let color_tabs = tree_clone.tabs().filter(|tab| { + if let Some(space_path) = tab.space_path.clone() { + if let Some(first_part) = space_path.as_slice().first() { + first_part == &EntityPathPart::from("CAM_A") + } else { + false + } + } else { + false + } + }); + for color_tab in color_tabs { + let (node_index, tab_index) = tree.find_tab(color_tab).unwrap(); + tree.set_active_tab(node_index, tab_index); + } let (config_node, config_tab) = tree .find_tab( tree.tabs() @@ -633,14 +583,14 @@ fn find_group_split_point(groups: Vec>) -> (Vec String { format!("[{}]", shape.iter().join(", ")) } @@ -28,15 +26,11 @@ impl EntityDataUi for Tensor { ui: &mut egui::Ui, verbosity: crate::ui::UiVerbosity, entity_path: &re_log_types::EntityPath, - query: &re_arrow_store::LatestAtQuery, + query: &re_arrow_store::LatestAtQuery ) { crate::profile_function!(); - match ctx - .cache - .decode - .try_decode_tensor_if_necessary(self.clone()) - { + match 
ctx.cache.decode.try_decode_tensor_if_necessary(self.clone()) { Ok(decoded) => { tensor_ui(ctx, ui, verbosity, entity_path, query, self, &decoded); } @@ -54,21 +48,16 @@ fn tensor_ui( entity_path: &re_data_store::EntityPath, query: &re_arrow_store::LatestAtQuery, _encoded_tensor: &Tensor, - tensor: &DecodedTensor, + tensor: &DecodedTensor ) { // See if we can convert the tensor to a GPU texture. // Even if not, we will show info about the tensor. let tensor_stats = *ctx.cache.tensor_stats(tensor); let annotations = annotations(ctx, query, entity_path); let debug_name = entity_path.to_string(); - let texture_result = crate::gpu_bridge::tensor_to_gpu( - ctx.render_ctx, - &debug_name, - tensor, - &tensor_stats, - &annotations, - ) - .ok(); + let texture_result = crate::gpu_bridge + ::tensor_to_gpu(ctx.render_ctx, &debug_name, tensor, &tensor_stats, &annotations) + .ok(); match verbosity { UiVerbosity::Small => { @@ -82,9 +71,8 @@ fn tensor_ui( ui, texture.clone(), &debug_name, - max_size, - ) - .on_hover_ui(|ui| { + max_size + ).on_hover_ui(|ui| { // Show larger image on hover let max_size = Vec2::splat(400.0); show_image_at_max_size( @@ -93,17 +81,18 @@ fn tensor_ui( ui, texture.clone(), &debug_name, - max_size, + max_size ); }); } - ui.label(format!( - "{} x {}", - tensor.dtype(), - format_tensor_shape_single_line(tensor.shape()) - )) - .on_hover_ui(|ui| tensor_summary_ui(ctx.re_ui, ui, tensor, &tensor_stats)); + ui.label( + format!( + "{} x {}", + tensor.dtype(), + format_tensor_shape_single_line(tensor.real_shape().as_slice()) + ) + ).on_hover_ui(|ui| tensor_summary_ui(ctx.re_ui, ui, tensor, &tensor_stats)); }); } @@ -123,7 +112,7 @@ fn tensor_ui( ui, texture.clone(), &debug_name, - max_size, + max_size ); if let Some(pointer_pos) = ui.ctx().pointer_latest_pos() { @@ -138,21 +127,24 @@ fn tensor_ui( tensor.meter, &debug_name, image_rect, - pointer_pos, + pointer_pos ); } // TODO(emilk): support copying and saving images on web #[cfg(not(target_arch = "wasm32"))] - if _encoded_tensor.data.is_compressed_image() || tensor.could_be_dynamic_image() + if + _encoded_tensor.data.is_compressed_image() || + tensor.could_be_dynamic_image() { copy_and_save_image_ui(ui, tensor, _encoded_tensor); } if let Some([_h, _w, channels]) = tensor.image_height_width_channels() { if channels == 3 { - if let re_log_types::component_types::TensorData::U8(data) = - &tensor.data + if + let re_log_types::component_types::TensorData::U8(data) = + &tensor.data { ui.collapsing("Histogram", |ui| { rgb8_histogram_ui(ui, data.as_slice()); @@ -169,7 +161,7 @@ fn tensor_ui( fn annotations( ctx: &mut ViewerContext<'_>, query: &re_arrow_store::LatestAtQuery, - entity_path: &re_data_store::EntityPath, + entity_path: &re_data_store::EntityPath ) -> std::sync::Arc { let mut annotation_map = AnnotationMap::default(); let entity_paths: nohash_hasher::IntSet<_> = std::iter::once(entity_path.clone()).collect(); @@ -185,7 +177,7 @@ fn annotations( } fn texture_size(colormapped_texture: &ColormappedTexture) -> Vec2 { - let [w, h] = colormapped_texture.texture.width_height(); + let [w, h] = colormapped_texture.width_height(); egui::vec2(w as f32, h as f32) } @@ -195,7 +187,7 @@ fn show_image_at_max_size( ui: &mut egui::Ui, colormapped_texture: ColormappedTexture, debug_name: &str, - max_size: Vec2, + max_size: Vec2 ) -> egui::Response { let desired_size = { let mut desired_size = texture_size(&colormapped_texture); @@ -205,14 +197,16 @@ fn show_image_at_max_size( }; let (response, painter) = ui.allocate_painter(desired_size, 
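// Why `texture_size` above now asks the ColormappedTexture instead of the raw
// GPU texture: for NV12 the texture holds the Y plane plus half-height
// interleaved UV, so the raw texture is 3/2 of the displayed image height;
// `width_height()` is assumed to report the decoded image size. Row math:
fn nv12_texture_rows(image_height: u32) -> u32 {
    image_height + image_height / 2 // Y plane rows + UV plane rows
}
// e.g. nv12_texture_rows(480) == 720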
egui::Sense::hover()); - if let Err(err) = crate::gpu_bridge::render_image( - render_ctx, - &painter, - response.rect, - colormapped_texture, - egui::TextureOptions::LINEAR, - debug_name, - ) { + if + let Err(err) = crate::gpu_bridge::render_image( + render_ctx, + &painter, + response.rect, + colormapped_texture, + egui::TextureOptions::LINEAR, + debug_name + ) + { let label_response = ui.label(re_ui.error_text(err.to_string())); response.union(label_response) } else { @@ -224,15 +218,9 @@ pub fn tensor_summary_ui_grid_contents( re_ui: &re_ui::ReUi, ui: &mut egui::Ui, tensor: &Tensor, - tensor_stats: &TensorStats, + tensor_stats: &TensorStats ) { - let Tensor { - tensor_id: _, - shape, - data, - meaning, - meter, - } = tensor; + let Tensor { tensor_id: _, shape, data, meaning, meter } = tensor; re_ui .grid_left_hand_label(ui, "Data type") @@ -240,14 +228,17 @@ pub fn tensor_summary_ui_grid_contents( ui.label(tensor.dtype().to_string()); ui.end_row(); - re_ui - .grid_left_hand_label(ui, "Shape") - .on_hover_text("Extent of every dimension."); + re_ui.grid_left_hand_label(ui, "Shape").on_hover_text("Extent of every dimension."); ui.vertical(|ui| { // For unnamed tensor dimension more than a single line usually doesn't make sense! // But what if some are named and some are not? // -> If more than 1 is named, make it a column! - if shape.iter().filter(|d| d.name.is_some()).count() > 1 { + if + shape + .iter() + .filter(|d| d.name.is_some()) + .count() > 1 + { for dim in shape { ui.label(dim.to_string()); } @@ -276,7 +267,7 @@ pub fn tensor_summary_ui_grid_contents( } match data { - re_log_types::component_types::TensorData::U8(_) + | re_log_types::component_types::TensorData::U8(_) | re_log_types::component_types::TensorData::U16(_) | re_log_types::component_types::TensorData::U32(_) | re_log_types::component_types::TensorData::U64(_) @@ -285,13 +276,11 @@ pub fn tensor_summary_ui_grid_contents( | re_log_types::component_types::TensorData::I32(_) | re_log_types::component_types::TensorData::I64(_) | re_log_types::component_types::TensorData::F32(_) - | re_log_types::component_types::TensorData::F64(_) => {} + | re_log_types::component_types::TensorData::F64(_) + | re_log_types::component_types::TensorData::NV12(_) => {} re_log_types::component_types::TensorData::JPEG(jpeg_bytes) => { re_ui.grid_left_hand_label(ui, "Encoding"); - ui.label(format!( - "{} JPEG", - re_format::format_bytes(jpeg_bytes.num_bytes() as _), - )); + ui.label(format!("{} JPEG", re_format::format_bytes(jpeg_bytes.num_bytes() as _))); ui.end_row(); } } @@ -299,13 +288,10 @@ pub fn tensor_summary_ui_grid_contents( let TensorStats { range } = tensor_stats; if let Some((min, max)) = range { - ui.label("Data range") - .on_hover_text("All values of the tensor range within these bounds."); - ui.monospace(format!( - "[{} - {}]", - re_format::format_f64(*min), - re_format::format_f64(*max) - )); + ui.label("Data range").on_hover_text("All values of the tensor range within these bounds."); + ui.monospace( + format!("[{} - {}]", re_format::format_f64(*min), re_format::format_f64(*max)) + ); ui.end_row(); } } @@ -314,9 +300,10 @@ pub fn tensor_summary_ui( re_ui: &re_ui::ReUi, ui: &mut egui::Ui, tensor: &Tensor, - tensor_stats: &TensorStats, + tensor_stats: &TensorStats ) { - egui::Grid::new("tensor_summary_ui") + egui::Grid + ::new("tensor_summary_ui") .num_columns(2) .show(ui, |ui| { tensor_summary_ui_grid_contents(re_ui, ui, tensor, tensor_stats); @@ -334,39 +321,32 @@ fn show_zoomed_image_region_tooltip( meter: Option, debug_name: 
&str, image_rect: egui::Rect, - pointer_pos: egui::Pos2, + pointer_pos: egui::Pos2 ) -> egui::Response { - response - .on_hover_cursor(egui::CursorIcon::Crosshair) - .on_hover_ui_at_pointer(|ui| { - ui.set_max_width(320.0); - ui.horizontal(|ui| { - if let Some([h, w, _]) = tensor.image_height_width_channels() { - use egui::remap_clamp; - - let center_texel = [ - (remap_clamp(pointer_pos.x, image_rect.x_range(), 0.0..=w as f32) as isize), - (remap_clamp(pointer_pos.y, image_rect.y_range(), 0.0..=h as f32) as isize), - ]; - show_zoomed_image_region_area_outline( - parent_ui, - tensor, - center_texel, - image_rect, - ); - show_zoomed_image_region( - render_ctx, - ui, - tensor, - tensor_stats, - annotations, - meter, - debug_name, - center_texel, - ); - } - }); - }) + response.on_hover_cursor(egui::CursorIcon::Crosshair).on_hover_ui_at_pointer(|ui| { + ui.set_max_width(320.0); + ui.horizontal(|ui| { + if let Some([h, w, _]) = tensor.image_height_width_channels() { + use egui::remap_clamp; + + let center_texel = [ + remap_clamp(pointer_pos.x, image_rect.x_range(), 0.0..=w as f32) as isize, + remap_clamp(pointer_pos.y, image_rect.y_range(), 0.0..=h as f32) as isize, + ]; + show_zoomed_image_region_area_outline(parent_ui, tensor, center_texel, image_rect); + show_zoomed_image_region( + render_ctx, + ui, + tensor, + tensor_stats, + annotations, + meter, + debug_name, + center_texel + ); + } + }); + }) } // Show the surrounding pixels: @@ -376,11 +356,13 @@ pub fn show_zoomed_image_region_area_outline( ui: &mut egui::Ui, tensor: &Tensor, [center_x, center_y]: [isize; 2], - image_rect: egui::Rect, + image_rect: egui::Rect ) { - use egui::{pos2, remap, Rect}; + use egui::{ pos2, remap, Rect }; - let Some([height, width, _]) = tensor.image_height_width_channels() else {return;}; + let Some([height, width, _]) = tensor.image_height_width_channels() else { + return; + }; let width = width as f32; let height = height as f32; @@ -413,18 +395,20 @@ pub fn show_zoomed_image_region( annotations: &crate::ui::Annotations, meter: Option, debug_name: &str, - center_texel: [isize; 2], + center_texel: [isize; 2] ) { - if let Err(err) = try_show_zoomed_image_region( - render_ctx, - ui, - tensor, - tensor_stats, - annotations, - meter, - debug_name, - center_texel, - ) { + if + let Err(err) = try_show_zoomed_image_region( + render_ctx, + ui, + tensor, + tensor_stats, + annotations, + meter, + debug_name, + center_texel + ) + { ui.label(format!("Error: {err}")); } } @@ -439,20 +423,22 @@ fn try_show_zoomed_image_region( annotations: &crate::ui::Annotations, meter: Option, debug_name: &str, - center_texel: [isize; 2], + center_texel: [isize; 2] ) -> anyhow::Result<()> { let texture = crate::gpu_bridge::tensor_to_gpu( render_ctx, debug_name, tensor, tensor_stats, - annotations, + annotations )?; - let Some([height, width, _]) = tensor.image_height_width_channels() else { return Ok(()); }; + let Some([height, width, _]) = tensor.image_height_width_channels() else { + return Ok(()); + }; const POINTS_PER_TEXEL: f32 = 5.0; - let size = Vec2::splat((ZOOMED_IMAGE_TEXEL_RADIUS * 2 + 1) as f32 * POINTS_PER_TEXEL); + let size = Vec2::splat(((ZOOMED_IMAGE_TEXEL_RADIUS * 2 + 1) as f32) * POINTS_PER_TEXEL); let (_id, zoom_rect) = ui.allocate_space(size); let painter = ui.painter(); @@ -461,10 +447,10 @@ fn try_show_zoomed_image_region( { let image_rect_on_screen = egui::Rect::from_min_size( - zoom_rect.center() - - POINTS_PER_TEXEL - * egui::vec2(center_texel[0] as f32 + 0.5, center_texel[1] as f32 + 0.5), - POINTS_PER_TEXEL * 
egui::vec2(width as f32, height as f32), + zoom_rect.center() - + POINTS_PER_TEXEL * + egui::vec2((center_texel[0] as f32) + 0.5, (center_texel[1] as f32) + 0.5), + POINTS_PER_TEXEL * egui::vec2(width as f32, height as f32) ); crate::gpu_bridge::render_image( @@ -473,14 +459,16 @@ fn try_show_zoomed_image_region( image_rect_on_screen, texture.clone(), egui::TextureOptions::NEAREST, - debug_name, + debug_name )?; } // Show the center text, to indicate which texel we're printing the values of: { - let center_texel_rect = - egui::Rect::from_center_size(zoom_rect.center(), Vec2::splat(POINTS_PER_TEXEL)); + let center_texel_rect = egui::Rect::from_center_size( + zoom_rect.center(), + Vec2::splat(POINTS_PER_TEXEL) + ); painter.rect_stroke(center_texel_rect.expand(1.0), 0.0, (1.0, Color32::BLACK)); painter.rect_stroke(center_texel_rect, 0.0, (1.0, Color32::WHITE)); } @@ -493,14 +481,17 @@ fn try_show_zoomed_image_region( tensor_pixel_value_ui(ui, tensor, annotations, [x as _, y as _], meter); // Show a big sample of the color of the middle texel: - let (rect, _) = - ui.allocate_exact_size(Vec2::splat(ui.available_height()), egui::Sense::hover()); + let (rect, _) = ui.allocate_exact_size( + Vec2::splat(ui.available_height()), + egui::Sense::hover() + ); // Position texture so that the center texel is at the center of the rect: let zoom = rect.width(); let image_rect_on_screen = egui::Rect::from_min_size( - rect.center() - - zoom * egui::vec2(center_texel[0] as f32 + 0.5, center_texel[1] as f32 + 0.5), - zoom * egui::vec2(width as f32, height as f32), + rect.center() - + zoom * + egui::vec2((center_texel[0] as f32) + 0.5, (center_texel[1] as f32) + 0.5), + zoom * egui::vec2(width as f32, height as f32) ); crate::gpu_bridge::render_image( render_ctx, @@ -508,10 +499,9 @@ fn try_show_zoomed_image_region( image_rect_on_screen, texture, egui::TextureOptions::NEAREST, - debug_name, + debug_name ) - }) - .inner?; + }).inner?; } Ok(()) } @@ -521,7 +511,7 @@ fn tensor_pixel_value_ui( tensor: &Tensor, annotations: &crate::ui::Annotations, [x, y]: [u64; 2], - meter: Option, + meter: Option ) { egui::Grid::new("hovered pixel properties").show(ui, |ui| { ui.label("Position:"); @@ -530,8 +520,11 @@ fn tensor_pixel_value_ui( if tensor.num_dim() == 2 { if let Some(raw_value) = tensor.get(&[y, x]) { - if let (TensorDataMeaning::ClassId, Some(u16_val)) = - (tensor.meaning(), raw_value.try_as_u16()) + if + let (TensorDataMeaning::ClassId, Some(u16_val)) = ( + tensor.meaning(), + raw_value.try_as_u16(), + ) { ui.label("Label:"); ui.label( @@ -539,7 +532,7 @@ fn tensor_pixel_value_ui( .class_description(Some(ClassId(u16_val))) .annotation_info() .label(None) - .unwrap_or_else(|| u16_val.to_string()), + .unwrap_or_else(|| u16_val.to_string()) ); ui.end_row(); }; @@ -549,7 +542,7 @@ fn tensor_pixel_value_ui( // This is a depth map if let Some(raw_value) = tensor.get(&[y, x]) { let raw_value = raw_value.as_f64(); - let meters = raw_value / meter as f64; + let meters = raw_value / (meter as f64); ui.label("Depth:"); if meters < 1.0 { ui.monospace(format!("{:.1} mm", meters * 1e3)); @@ -561,52 +554,72 @@ fn tensor_pixel_value_ui( }); let text = match tensor.num_dim() { - 2 => tensor.get(&[y, x]).map(|v| format!("Val: {v}")), - 3 => match tensor.shape()[2].size { - 0 => Some("Cannot preview 0-size channel".to_owned()), - 1 => tensor.get(&[y, x, 0]).map(|v| format!("Val: {v}")), - 3 => { - // TODO(jleibs): Track RGB ordering somehow -- don't just assume it - if let (Some(r), Some(g), Some(b)) = ( - tensor.get(&[y, x, 0]), - 
tensor.get(&[y, x, 1]), - tensor.get(&[y, x, 2]), - ) { - match (r, g, b) { - (TensorElement::U8(r), TensorElement::U8(g), TensorElement::U8(b)) => { - Some(format!("R: {r}, G: {g}, B: {b}, #{r:02X}{g:02X}{b:02X}")) + 2 => + match &tensor.data { + TensorData::NV12(_) => + tensor.get_nv12_pixel(&[y, x]).map(|[r, g, b]| { + match (r, g, b) { + (TensorElement::U8(r), TensorElement::U8(g), TensorElement::U8(b)) => { + format!("R: {r}, G: {g}, B: {b}, #{r:02X}{g:02X}{b:02X}") + } + _ => unreachable!("NV12 should only contain u8"), + } + }), + _ => tensor.get(&[y, x]).map(|v| format!("Val: {v}")), + } + 3 => + match tensor.real_shape().as_slice()[2].size { + 0 => Some("Cannot preview 0-size channel".to_owned()), + 1 => tensor.get(&[y, x, 0]).map(|v| format!("Val: {v}")), + 3 => { + // TODO(jleibs): Track RGB ordering somehow -- don't just assume it + if + let (Some(r), Some(g), Some(b)) = ( + tensor.get(&[y, x, 0]), + tensor.get(&[y, x, 1]), + tensor.get(&[y, x, 2]), + ) + { + match (r, g, b) { + (TensorElement::U8(r), TensorElement::U8(g), TensorElement::U8(b)) => { + Some(format!("R: {r}, G: {g}, B: {b}, #{r:02X}{g:02X}{b:02X}")) + } + _ => Some(format!("R: {r}, G: {g}, B: {b}")), } - _ => Some(format!("R: {r}, G: {g}, B: {b}")), + } else { + None } - } else { - None } - } - 4 => { - // TODO(jleibs): Track RGB ordering somehow -- don't just assume it - if let (Some(r), Some(g), Some(b), Some(a)) = ( - tensor.get(&[y, x, 0]), - tensor.get(&[y, x, 1]), - tensor.get(&[y, x, 2]), - tensor.get(&[y, x, 3]), - ) { - match (r, g, b, a) { - ( - TensorElement::U8(r), - TensorElement::U8(g), - TensorElement::U8(b), - TensorElement::U8(a), - ) => Some(format!( - "R: {r}, G: {g}, B: {b}, A: {a}, #{r:02X}{g:02X}{b:02X}{a:02X}" - )), - _ => Some(format!("R: {r}, G: {g}, B: {b}, A: {a}")), + 4 => { + // TODO(jleibs): Track RGB ordering somehow -- don't just assume it + if + let (Some(r), Some(g), Some(b), Some(a)) = ( + tensor.get(&[y, x, 0]), + tensor.get(&[y, x, 1]), + tensor.get(&[y, x, 2]), + tensor.get(&[y, x, 3]), + ) + { + match (r, g, b, a) { + ( + TensorElement::U8(r), + TensorElement::U8(g), + TensorElement::U8(b), + TensorElement::U8(a), + ) => + Some( + format!( + "R: {r}, G: {g}, B: {b}, A: {a}, #{r:02X}{g:02X}{b:02X}{a:02X}" + ) + ), + _ => Some(format!("R: {r}, G: {g}, B: {b}, A: {a}")), + } + } else { + None } - } else { - None } + channels => Some(format!("Cannot preview {channels}-channel image")), } - channels => Some(format!("Cannot preview {channels}-channel image")), - }, dims => Some(format!("Cannot preview {dims}-dimensional image")), }; @@ -631,7 +644,7 @@ fn rgb8_histogram_ui(ui: &mut egui::Ui, rgb: &[u8]) -> egui::Response { } } - use egui::plot::{Bar, BarChart, Legend, Plot}; + use egui::plot::{ Bar, BarChart, Legend, Plot }; let names = ["R", "G", "B"]; let colors = [Color32::RED, Color32::GREEN, Color32::BLUE]; @@ -653,10 +666,10 @@ fn rgb8_histogram_ui(ui: &mut egui::Ui, rgb: &[u8]) -> egui::Response { .vertical() .stroke(egui::Stroke::NONE) }) - .collect(), + .collect() ) - .color(colors[component]) - .name(names[component]) + .color(colors[component]) + .name(names[component]) }) .collect_vec(); @@ -669,8 +682,7 @@ fn rgb8_histogram_ui(ui: &mut egui::Ui, rgb: &[u8]) -> egui::Response { for chart in charts { plot_ui.bar_chart(chart); } - }) - .response + }).response } #[cfg(not(target_arch = "wasm32"))] @@ -683,7 +695,7 @@ fn copy_and_save_image_ui(ui: &mut egui::Ui, tensor: &Tensor, _encoded_tensor: & crate::misc::Clipboard::with(|clipboard| { clipboard.set_image( 
[rgba.width() as _, rgba.height() as _], - bytemuck::cast_slice(rgba.as_raw()), + bytemuck::cast_slice(rgba.as_raw()) ); }); } @@ -712,10 +724,7 @@ fn save_image(tensor: &re_log_types::component_types::Tensor, dynamic_image: &im match &tensor.data { TensorData::JPEG(bytes) => { - if let Some(path) = rfd::FileDialog::new() - .set_file_name("image.jpg") - .save_file() - { + if let Some(path) = rfd::FileDialog::new().set_file_name("image.jpg").save_file() { match write_binary(&path, bytes.as_slice()) { Ok(()) => { re_log::info!("Image saved to {path:?}"); @@ -730,10 +739,7 @@ fn save_image(tensor: &re_log_types::component_types::Tensor, dynamic_image: &im } } _ => { - if let Some(path) = rfd::FileDialog::new() - .set_file_name("image.png") - .save_file() - { + if let Some(path) = rfd::FileDialog::new().set_file_name("image.png").save_file() { match dynamic_image.save(&path) { // TODO(emilk): show a popup instead of logging result Ok(()) => { diff --git a/crates/re_viewer/src/ui/device_settings_panel.rs b/crates/re_viewer/src/ui/device_settings_panel.rs index a90be8c24894..86fba5181006 100644 --- a/crates/re_viewer/src/ui/device_settings_panel.rs +++ b/crates/re_viewer/src/ui/device_settings_panel.rs @@ -1,4 +1,4 @@ -use crate::{depthai::depthai, misc::ViewerContext}; +use crate::{ depthai::depthai::{ self, CameraBoardSocket }, misc::ViewerContext }; use strum::IntoEnumIterator; // Needed for enum::iter() @@ -20,18 +20,18 @@ impl DeviceSettingsPanel { } let mut show_device_config = true; - egui::CentralPanel::default() + egui::CentralPanel + ::default() .frame(egui::Frame { inner_margin: egui::Margin::same(0.0), fill: egui::Color32::WHITE, ..Default::default() }) .show_inside(ui, |ui| { - egui::Frame { + (egui::Frame { inner_margin: egui::Margin::same(re_ui::ReUi::view_padding()), ..Default::default() - } - .show(ui, |ui| { + }).show(ui, |ui| { ui.horizontal(|ui| { // Use up all the horizontal space (color) ui.add_sized( @@ -48,33 +48,39 @@ impl DeviceSettingsPanel { }, true, |ui: &mut egui::Ui| { - if ui - .selectable_value( - &mut combo_device, - String::new(), - "No device", - ) - .changed() - { - ctx.depthai_state.set_device(combo_device.clone()); - } - for device in available_devices { - if ui + if + ui .selectable_value( &mut combo_device, - device.clone(), - device, + String::new(), + "No device" ) .changed() + { + ctx.depthai_state.select_device( + combo_device.clone() + ); + } + for device in available_devices { + if + ui + .selectable_value( + &mut combo_device, + device.clone(), + device + ) + .changed() { - ctx.depthai_state - .set_device(combo_device.clone()); + ctx.depthai_state.select_device( + combo_device.clone() + ); } } - }, + } ); - if !currently_selected_device.id.is_empty() - && !ctx.depthai_state.is_update_in_progress() + if + !currently_selected_device.id.is_empty() && + !ctx.depthai_state.is_update_in_progress() { ui.add_sized( [ @@ -87,10 +93,8 @@ impl DeviceSettingsPanel { // TODO(filip): Create a re_ui bound button with this style let color = ctx.re_ui.design_tokens.error_bg_color; - let hover_color = ctx - .re_ui - .design_tokens - .error_hover_bg_color; + let hover_color = + ctx.re_ui.design_tokens.error_hover_bg_color; style.visuals.widgets.hovered.bg_fill = hover_color; style.visuals.widgets.hovered.weak_bg_fill = @@ -98,27 +102,23 @@ impl DeviceSettingsPanel { style.visuals.widgets.inactive.bg_fill = color; style.visuals.widgets.inactive.weak_bg_fill = color; - style - .visuals - .widgets - .inactive - .fg_stroke - .color = egui::Color32::WHITE; + 
style.visuals.widgets.inactive.fg_stroke.color = + egui::Color32::WHITE; style.visuals.widgets.hovered.fg_stroke.color = egui::Color32::WHITE; ui.set_style(style); if ui.button("Disconnect").clicked() { - ctx.depthai_state.set_device(String::new()); + ctx.depthai_state.select_device( + String::new() + ); } - }) - .response - }, + }).response + } ); } - }) - .response - }, + }).response + } ); }); @@ -127,14 +127,15 @@ impl DeviceSettingsPanel { if pipeline_update_in_progress { ui.add_sized([CONFIG_UI_WIDTH, 10.0], |ui: &mut egui::Ui| { ui.with_layout(egui::Layout::left_to_right(egui::Align::Center), |ui| { - ui.label(if device_selected { - "Updating Pipeline" - } else { - "Selecting Device" - }); + ui.label( + if device_selected { + "Updating Pipeline" + } else { + "Selecting Device" + } + ); ui.add(egui::Spinner::new()) - }) - .response + }).response }); show_device_config = false; } @@ -150,383 +151,304 @@ impl DeviceSettingsPanel { }); } + fn camera_config_ui( + ctx: &mut ViewerContext<'_>, + ui: &mut egui::Ui, + camera_features: &depthai::CameraFeatures, + camera_config: &mut depthai::CameraConfig + ) { + let primary_700 = ctx.re_ui.design_tokens.primary_700; + egui::CollapsingHeader + ::new( + egui::RichText + ::new(camera_features.board_socket.display_name(ctx)) + .color(primary_700) + ) + .default_open(true) + .show(ui, |ui| { + ui.vertical(|ui| { + ui.set_width(CONFIG_UI_WIDTH); + ctx.re_ui.labeled_combo_box( + ui, + "Resolution", + format!("{}", camera_config.resolution), + false, + |ui| { + for res in camera_features.resolutions.clone() { + let disabled = false; + ui.add_enabled_ui(!disabled, |ui| { + ui.selectable_value( + &mut camera_config.resolution, + res, + format!("{res}") + ).on_disabled_hover_ui(|ui| { + ui.label( + format!("{res} will be available in a future release!") + ); + }); + }); + } + } + ); + ctx.re_ui.labeled_dragvalue( + ui, + "FPS", + &mut camera_config.fps, + 0..=camera_features.max_fps + ); + ctx.re_ui.labeled_checkbox(ui, "Stream", &mut camera_config.stream_enabled); + }); + }); + } + fn device_configuration_ui(ctx: &mut ViewerContext<'_>, ui: &mut egui::Ui) { let mut device_config = ctx.depthai_state.modified_device_config.clone(); let primary_700 = ctx.re_ui.design_tokens.primary_700; + let connected_cameras = ctx.depthai_state.get_connected_cameras().clone(); - ctx.re_ui.styled_scrollbar( - ui, re_ui::ScrollAreaDirection::Vertical, - [false; 2], - |ui| { - egui::Frame { + ctx.re_ui.styled_scrollbar(ui, re_ui::ScrollAreaDirection::Vertical, [false; 2], |ui| { + (egui::Frame { fill: ctx.re_ui.design_tokens.gray_50, inner_margin: egui::Margin::symmetric(30.0, 21.0), ..Default::default() - } - .show(ui, |ui| { - ui.horizontal(|ui| { - ui.vertical(|ui| { - ui.collapsing( - egui::RichText::new("Color Camera").color(primary_700), - |ui| { - ui.vertical(|ui| { - ui.set_width(CONFIG_UI_WIDTH); - ctx.re_ui.labeled_combo_box( - ui, - "Resolution", - format!("{}", device_config.color_camera.resolution), - false, - |ui| { - for res in &ctx - .depthai_state - .selected_device - .supported_color_resolutions - { - let disabled = res == &depthai::ColorCameraResolution::THE_4_K || res == &depthai::ColorCameraResolution::THE_12_MP; - ui.add_enabled_ui(!disabled, |ui| { - ui.selectable_value( - &mut device_config.color_camera.resolution, - *res, - format!("{res}"), - ).on_disabled_hover_ui(|ui| { - ui.label(format!("{res} will be available in a future release!")); - }); - }); - } - }, - ); - ctx.re_ui.labeled_dragvalue( - ui, - "FPS", - &mut 
device_config.color_camera.fps, - 0..=120, - ); - ctx.re_ui.labeled_checkbox( - ui, - "Stream", - &mut device_config.color_camera.stream_enabled, - ); - }); - }, - ); - let mut left_mono_config = device_config.left_camera.unwrap_or_else(|| depthai::MonoCameraConfig { - board_socket: depthai::CameraBoardSocket::LEFT, - ..Default::default() - }); - let mut right_mono_config = device_config.right_camera.unwrap_or_else(|| depthai::MonoCameraConfig { - board_socket: depthai::CameraBoardSocket::RIGHT, - ..Default::default() + }).show(ui, |ui| { + ui.horizontal(|ui| { + ui.vertical(|ui| { + for cam in connected_cameras.clone() { + let Some(config) = device_config.cameras + .iter_mut() + .find(|conf| conf.board_socket == cam.board_socket) else { + continue; + }; + Self::camera_config_ui(ctx, ui, &cam, config); + } + + ui.collapsing(egui::RichText::new("AI settings").color(primary_700), |ui| { + ui.vertical(|ui| { + ui.set_width(CONFIG_UI_WIDTH); + ctx.re_ui.labeled_combo_box( + ui, + "AI Model", + device_config.ai_model.display_name.clone(), + false, + |ui| { + for nn in &ctx.depthai_state.neural_networks { + ui.selectable_value( + &mut device_config.ai_model, + nn.clone(), + &nn.display_name + ); + } + } + ); + ctx.re_ui.labeled_combo_box( + ui, + "Run on", + device_config.ai_model.camera.display_name(ctx), + false, + |ui| { + for cam in &connected_cameras { + ui.selectable_value( + &mut device_config.ai_model.camera, + cam.board_socket, + cam.board_socket.display_name(ctx) + ); + } + } + ); }); - let has_left_mono = !ctx.depthai_state.selected_device.supported_left_mono_resolutions.is_empty(); - ui.add_enabled_ui(has_left_mono, |ui| { - egui::CollapsingHeader::new(egui::RichText::new("Left Mono Camera").color(primary_700)).default_open(true).open(if !has_left_mono { - Some(false) - } else {None}).show( - ui, |ui| { - ui.vertical(|ui| { - ui.set_width(CONFIG_UI_WIDTH); - ctx.re_ui.labeled_combo_box( - ui, - "Resolution", - format!("{}", left_mono_config.resolution), - false, - |ui| { - let highest_res = ctx.depthai_state.selected_device.supported_left_mono_resolutions.iter().max().unwrap(); - for res in depthai::MonoCameraResolution::iter() { - if &res > highest_res { - continue; - } - if ui - .selectable_value( - &mut left_mono_config - .resolution, - res, - format!("{res}"), - ) - .changed() - { - right_mono_config.resolution = - res; + }); + + let mut depth = device_config.depth.unwrap_or_default(); + ui.add_enabled_ui( + ctx.depthai_state.selected_device.has_stereo_pairs(), + |ui| { + egui::CollapsingHeader + ::new(egui::RichText::new("Depth Settings").color(primary_700)) + .open( + if !ctx.depthai_state.selected_device.has_stereo_pairs() { + Some(false) + } else { + None + } + ) + .show(ui, |ui| { + ui.vertical(|ui| { + ui.set_width(CONFIG_UI_WIDTH); + let (cam1, cam2) = depth.stereo_pair; + ctx.re_ui.labeled_combo_box( + ui, + "Camera Pair", + format!( + "{}, {}", + cam1.display_name(ctx), + cam2.display_name(ctx) + ), + false, + |ui| { + for pair in &ctx.depthai_state.selected_device.stereo_pairs { + ui.selectable_value( + &mut depth.stereo_pair, + *pair, + format!( + "{} {}", + pair.0.display_name(ctx), + pair.1.display_name(ctx) + ) + ); } } - }, - ); - if ctx - .re_ui - .labeled_dragvalue( + ); + ctx.re_ui.labeled_checkbox( ui, - "FPS", - &mut left_mono_config.fps, - 0..=120, - ) - .changed() - { - right_mono_config.fps = - left_mono_config.fps; - } - ctx.re_ui.labeled_checkbox( - ui, - "Stream", - &mut left_mono_config.stream_enabled, - ); - }) - }, - 
).header_response.on_disabled_hover_ui(|ui| { - ui.label("Selected device doesn't have a left mono camera."); - }); - }); - let has_right_mono = !ctx.depthai_state.selected_device.supported_right_mono_resolutions.is_empty(); - ui.add_enabled_ui(has_right_mono, |ui| { - egui::CollapsingHeader::new(egui::RichText::new("Right Mono Camera").color(primary_700)).default_open(true).open(if !has_right_mono { - Some(false) - } else {None}).show( - ui, |ui| { - ui.vertical(|ui| { - ui.set_width(CONFIG_UI_WIDTH); - ctx.re_ui.labeled_combo_box( - ui, - "Resolution", - format!("{}", right_mono_config.resolution), - false, - |ui| { - let highest_res = ctx.depthai_state.selected_device.supported_right_mono_resolutions.iter().max().unwrap(); - for res in depthai::MonoCameraResolution::iter() { - if &res > highest_res { - continue; + "LR Check", + &mut depth.lr_check + ); + ctx.re_ui.labeled_combo_box( + ui, + "Align to", + depth.align.display_name(ctx), + false, + |ui| { + for align in &connected_cameras { + ui.selectable_value( + &mut depth.align, + align.board_socket, + align.board_socket.display_name(ctx) + ); } - if ui - .selectable_value( - &mut right_mono_config - .resolution, - res, - format!("{res}"), - ) - .changed() - { - left_mono_config.resolution = - res; + } + ); + ctx.re_ui.labeled_combo_box( + ui, + "Median Filter", + format!("{:?}", depth.median), + false, + |ui| { + for filter in depthai::DepthMedianFilter::iter() { + ui.selectable_value( + &mut depth.median, + filter, + format!("{filter:?}") + ); } } - }, - ); - if ctx - .re_ui - .labeled_dragvalue( + ); + ctx.re_ui.labeled_dragvalue( ui, - "FPS", - &mut right_mono_config.fps, - 0..=120, - ) - .changed() - { - left_mono_config.fps = - right_mono_config.fps; - } - ctx.re_ui.labeled_checkbox( - ui, - "Stream", - &mut right_mono_config.stream_enabled, - ); + "LR Threshold", + &mut depth.lrc_threshold, + 0..=10 + ); + ctx.re_ui.labeled_checkbox( + ui, + "Extended Disparity", + &mut depth.extended_disparity + ); + ctx.re_ui.labeled_checkbox( + ui, + "Subpixel Disparity", + &mut depth.subpixel_disparity + ); + ctx.re_ui.labeled_dragvalue( + ui, + "Sigma", + &mut depth.sigma, + 0..=65535 + ); + ctx.re_ui.labeled_dragvalue( + ui, + "Confidence", + &mut depth.confidence, + 0..=255 + ); + ctx.re_ui.labeled_toggle_switch( + ui, + "Depth enabled", + &mut device_config.depth_enabled + ); + }); }) - }).header_response.on_disabled_hover_ui(|ui| { - ui.label("Selected device doesn't have a right mono camera."); - }); - }); - - // This is a hack, I wanted AI settings at the bottom, but some depth settings names - // are too long and it messes up the width of the ui layout somehow. 
- ui.collapsing( - egui::RichText::new("AI settings").color(primary_700), - |ui| { - ui.vertical(|ui| { - ui.set_width(CONFIG_UI_WIDTH); - ctx.re_ui.labeled_combo_box( - ui, - "AI Model", - device_config.ai_model.display_name.clone(), - false, - |ui| { - for nn in &ctx.depthai_state.neural_networks { - ui.selectable_value( - &mut device_config.ai_model, - nn.clone(), - &nn.display_name, - ); - } - }, - ); + .header_response.on_disabled_hover_ui(|ui| { + ui.label("Selected device doesn't have any stereo pairs!"); }); - }, - ); - - let mut depth = device_config.depth.unwrap_or_default(); - if depth.align == depthai::CameraBoardSocket::CENTER && !depth.lr_check - { - depth.align = depthai::CameraBoardSocket::AUTO; } + ); - - ui.add_enabled_ui(has_right_mono && has_left_mono, |ui| { - egui::CollapsingHeader::new(egui::RichText::new("Depth Settings").color(primary_700)).open(if !(has_right_mono && has_left_mono) { - Some(false) - } else {None}).show( - ui, |ui| { - ui.vertical(|ui| { - ui.set_width(CONFIG_UI_WIDTH); - ctx.re_ui.labeled_checkbox( - ui, - "LR Check", - &mut depth.lr_check, - ); - ctx.re_ui.labeled_combo_box( - ui, - "Align to", - format!("{:?}", depth.align), - false, - |ui| { - for align in - depthai::CameraBoardSocket::depth_align_options( - ) - { - if align == depthai::CameraBoardSocket::CENTER - && !depth.lr_check - { - continue; - } - ui.selectable_value( - &mut depth.align, - align, - format!("{align:?}"), - ); - } - }, - ); - ctx.re_ui.labeled_combo_box( - ui, - "Median Filter", - format!("{:?}", depth.median), - false, - |ui| { - for filter in depthai::DepthMedianFilter::iter() { - ui.selectable_value( - &mut depth.median, - filter, - format!("{filter:?}"), - ); - } - }, - ); - ctx.re_ui.labeled_dragvalue( - ui, - "LR Threshold", - &mut depth.lrc_threshold, - 0..=10, - ); - ctx.re_ui.labeled_checkbox( - ui, - "Extended Disparity", - &mut depth.extended_disparity, - ); - ctx.re_ui.labeled_checkbox( - ui, - "Subpixel Disparity", - &mut depth.subpixel_disparity, - ); - ctx.re_ui.labeled_dragvalue( - ui, - "Sigma", - &mut depth.sigma, - 0..=65535, - ); - ctx.re_ui.labeled_dragvalue( - ui, - "Confidence", - &mut depth.confidence, - 0..=255, - ); - ctx.re_ui.labeled_toggle_switch( - ui, - "Depth enabled", - &mut device_config.depth_enabled, - ); - }); - }, - ).header_response.on_disabled_hover_ui(|ui| { - ui.label("Selected device doesn't support depth!"); - }); - }); - - device_config.left_camera = Some(left_mono_config); - device_config.right_camera = Some(right_mono_config); - device_config.depth = Some(depth); - ctx.depthai_state.modified_device_config = device_config.clone(); - ui.vertical(|ui| { - ui.horizontal(|ui| { - let apply_enabled = { - if let Some(applied_config) = &ctx.depthai_state.applied_device_config.config { - let only_runtime_configs_changed = + device_config.depth = Some(depth); + ctx.depthai_state.modified_device_config = device_config.clone(); + ui.vertical(|ui| { + ui.horizontal(|ui| { + let apply_enabled = { + if + let Some(applied_config) = + &ctx.depthai_state.applied_device_config.config + { + let only_runtime_configs_changed = depthai::State::only_runtime_configs_changed( applied_config, - &device_config, + &device_config ); - let apply_enabled = !only_runtime_configs_changed - && ctx.depthai_state.applied_device_config.config.is_some() - && device_config - != applied_config.clone() - && !ctx.depthai_state.selected_device.id.is_empty() && !ctx.depthai_state.is_update_in_progress(); + let apply_enabled = + !only_runtime_configs_changed && + 
ctx.depthai_state.applied_device_config.config.is_some() && + device_config != applied_config.clone() && + !ctx.depthai_state.selected_device.id.is_empty() && + !ctx.depthai_state.is_update_in_progress(); - if !apply_enabled && only_runtime_configs_changed { - ctx.depthai_state - .set_device_config(&mut device_config, true); - } - apply_enabled - } else { - !ctx.depthai_state.applied_device_config.update_in_progress + if !apply_enabled && only_runtime_configs_changed { + ctx.depthai_state.set_pipeline( + &mut device_config, + true + ); } + apply_enabled + } else { + !ctx.depthai_state.applied_device_config.update_in_progress + } + }; - }; - - ui.add_enabled_ui(apply_enabled, |ui| { - ui.scope(|ui| { - let mut style = ui.style_mut().clone(); - if apply_enabled { - let color = - ctx.re_ui.design_tokens.primary_bg_color; - let hover_color = - ctx.re_ui.design_tokens.primary_hover_bg_color; - style.visuals.widgets.hovered.bg_fill = hover_color; - style.visuals.widgets.hovered.weak_bg_fill = - hover_color; - style.visuals.widgets.inactive.bg_fill = color; - style.visuals.widgets.inactive.weak_bg_fill = color; - style.visuals.widgets.inactive.fg_stroke.color = - egui::Color32::WHITE; - style.visuals.widgets.hovered.fg_stroke.color = - egui::Color32::WHITE; - } - style.spacing.button_padding = - egui::Vec2::new(24.0, 4.0); - ui.set_style(style); - if ui + ui.add_enabled_ui(apply_enabled, |ui| { + ui.scope(|ui| { + let mut style = ui.style_mut().clone(); + if apply_enabled { + let color = ctx.re_ui.design_tokens.primary_bg_color; + let hover_color = + ctx.re_ui.design_tokens.primary_hover_bg_color; + style.visuals.widgets.hovered.bg_fill = hover_color; + style.visuals.widgets.hovered.weak_bg_fill = + hover_color; + style.visuals.widgets.inactive.bg_fill = color; + style.visuals.widgets.inactive.weak_bg_fill = color; + style.visuals.widgets.inactive.fg_stroke.color = + egui::Color32::WHITE; + style.visuals.widgets.hovered.fg_stroke.color = + egui::Color32::WHITE; + } + style.spacing.button_padding = egui::Vec2::new(24.0, 4.0); + ui.set_style(style); + if + ui .add_sized( [CONFIG_UI_WIDTH, re_ui::ReUi::box_height()], - egui::Button::new("Apply"), + egui::Button::new("Apply") ) .clicked() - { - ctx.depthai_state - .set_device_config(&mut device_config, false); - } - }); + { + ctx.depthai_state.set_pipeline( + &mut device_config, + false + ); + } }); }); }); }); - ui.allocate_space(ui.available_size()); }); + ui.allocate_space(ui.available_size()); }); - } - ); + }); + }); // Set a more visible scroll bar color } } diff --git a/crates/re_viewer/src/ui/mod.rs b/crates/re_viewer/src/ui/mod.rs index 9bb817412638..a372cea1787d 100644 --- a/crates/re_viewer/src/ui/mod.rs +++ b/crates/re_viewer/src/ui/mod.rs @@ -31,11 +31,11 @@ pub mod view_spatial; use self::scene::SceneQuery; pub(crate) use self::blueprint::Blueprint; -pub(crate) use self::space_view::{SpaceView, SpaceViewId, SpaceViewKind}; +pub(crate) use self::space_view::{ SpaceView, SpaceViewId, SpaceViewKind }; -pub use self::annotations::{Annotations, DefaultColor, MISSING_ANNOTATIONS}; +pub use self::annotations::{ Annotations, DefaultColor, MISSING_ANNOTATIONS }; pub use self::data_blueprint::DataBlueprintGroupHandle; -pub use self::selection_history::{HistoricalSelection, SelectionHistory}; +pub use self::selection_history::{ HistoricalSelection, SelectionHistory }; pub use self::view_category::ViewCategory; pub use self::viewport::Viewport; diff --git a/crates/re_viewer/src/ui/selection_panel.rs b/crates/re_viewer/src/ui/selection_panel.rs 
index da92463e2719..9b774a7ce084 100644 --- a/crates/re_viewer/src/ui/selection_panel.rs +++ b/crates/re_viewer/src/ui/selection_panel.rs @@ -218,32 +218,6 @@ fn blueprint_ui( } Item::SpaceView(space_view_id) => { - ui.horizontal(|ui| { - if ui - .button("Add/remove entities") - .on_hover_text("Manually add or remove entities from the Space View.") - .clicked() - { - viewport - .show_add_remove_entities_window(*space_view_id); - } - - if ui - .button("Clone view") - .on_hover_text("Create an exact duplicate of this Space View including all blueprint settings") - .clicked() - { - if let Some(space_view) = viewport.space_view(space_view_id) { - let mut new_space_view = space_view.clone(); - new_space_view.id = super::SpaceViewId::random(); - viewport.add_space_view(new_space_view); - viewport.mark_user_interaction(); - } - } - }); - - ui.add_space(ui.spacing().item_spacing.y); - if let Some(space_view) = viewport.space_view_mut(space_view_id) { space_view.selection_ui(ctx, ui); } diff --git a/crates/re_viewer/src/ui/space_view.rs b/crates/re_viewer/src/ui/space_view.rs index 365c151cfd6b..df5c7111f3c8 100644 --- a/crates/re_viewer/src/ui/space_view.rs +++ b/crates/re_viewer/src/ui/space_view.rs @@ -1,5 +1,6 @@ use re_arrow_store::Timeline; use re_data_store::{EntityPath, EntityPropertyMap, EntityTree, InstancePath, TimeInt}; +use re_log_types::EntityPathPart; use re_renderer::{GpuReadbackIdentifier, ScreenshotProcessor}; use crate::{ @@ -81,6 +82,7 @@ pub struct SpaceView { impl SpaceView { pub fn new( + ctx: &ViewerContext<'_>, category: ViewCategory, space_path: &EntityPath, queries_entities: &[EntityPath], @@ -89,31 +91,41 @@ impl SpaceView { // this led to somewhat confusing and inconsistent behavior. See https://github.com/rerun-io/rerun/issues/1220 // Spaces are now always named after the final element of the space-path (or the root), independent of the // query entities. 
- let mut is_depthai_spaceview = true; - let display_name = match space_path { - ep if ep.hash() == depthai::entity_paths::RGB_PINHOLE_CAMERA.hash() => { - "Color camera (2D)".into() + let mut is_depthai_spaceview = false; + let display_name = if let Some(board_socket_part) = space_path.as_slice().first() { + let is_3d = space_path.len() == 1; + let mut is_2d = false; + if !is_3d { + let last_part = space_path.iter().last().unwrap(); + is_2d = (last_part == &EntityPathPart::from("mono_cam") + || last_part == &EntityPathPart::from("color_cam")) + && last_part != &EntityPathPart::from("transform"); } - ep if ep.hash() == depthai::entity_paths::COLOR_CAM_3D.hash() => { - "Color camera (3D)".into() - } - ep if ep.hash() == depthai::entity_paths::RIGHT_PINHOLE_CAMERA.hash() => { - "Right mono camera (2D)".into() - } - ep if ep.hash() == depthai::entity_paths::LEFT_PINHOLE_CAMERA.hash() => { - "Left mono camera (2D)".into() - } - ep if ep.hash() == depthai::entity_paths::MONO_CAM_3D.hash() => { - "Mono cameras (3D)".into() - } - _ => { - is_depthai_spaceview = false; - if let Some(entity_path_part) = space_path.iter().last() { - entity_path_part.to_string() + if let Some(board_socket) = + depthai::CameraBoardSocket::from(board_socket_part.to_string()) + { + let camera_features = ctx.depthai_state.get_connected_cameras(); + if let Some(camera) = camera_features + .iter() + .find(|camera| camera.board_socket == board_socket) + { + if is_3d { + is_depthai_spaceview = true; + format!("{} ({})", camera.board_socket.display_name(ctx), "3D") + } else if is_2d { + is_depthai_spaceview = true; + format!("{} ({})", camera.board_socket.display_name(ctx), "2D") + } else { + space_path.iter().last().unwrap().to_string() + } } else { - format!("/ ({category})") + space_path.iter().last().unwrap().to_string() } + } else { + space_path.iter().last().unwrap().to_string() } + } else { + format!("/ ({category})") }; let mut data_blueprint_tree = DataBlueprintTree::default(); @@ -309,10 +321,12 @@ impl SpaceView { pub fn remove_entity_subtree(&mut self, tree: &EntityTree) { crate::profile_function!(); - tree.visit_children_recursively(&mut |path: &EntityPath| { - self.data_blueprint.remove_entity(path); - self.entities_determined_by_user = true; - }); + tree.visit_children_recursively( + &mut (|path: &EntityPath| { + self.data_blueprint.remove_entity(path); + self.entities_determined_by_user = true; + }), + ); } /// Adds a subtree of entities to the blueprint tree and creates groups as needed. 
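Note on the `SpaceView::new` hunk above: the display name is no longer matched against hard-coded entity-path hashes; it is derived from the path itself. The rule reduces to the following standalone sketch (illustration only: plain `&str` parts stand in for `EntityPathPart`, `socket_display` stands in for `CameraBoardSocket::display_name(ctx)`, and the real code additionally requires the first path part to resolve to a connected camera's board socket before using the socket name):

fn display_name_for_space(parts: &[&str], socket_display: &str, category: &str) -> String {
    match parts {
        // Root path: fall back to the category, as before.
        [] => format!("/ ({category})"),
        // A bare board-socket path is that camera's 3D view.
        [_socket] => format!("{socket_display} (3D)"),
        // A trailing color_cam/mono_cam marks the camera's 2D image space.
        [.., last] if *last == "color_cam" || *last == "mono_cam" => {
            format!("{socket_display} (2D)")
        }
        // Anything else keeps the old behavior: name after the last path part.
        [.., last] => (*last).to_string(),
    }
}

fn main() {
    assert_eq!(display_name_for_space(&["CAM_A"], "Color camera", "Spatial"), "Color camera (3D)");
    assert_eq!(display_name_for_space(&["CAM_A", "color_cam"], "Color camera", "Spatial"), "Color camera (2D)");
    assert_eq!(display_name_for_space(&["CAM_A", "transform"], "Color camera", "Spatial"), "transform");
}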
@@ -327,19 +341,21 @@ impl SpaceView { crate::profile_function!(); let mut entities = Vec::new(); - tree.visit_children_recursively(&mut |entity_path: &EntityPath| { - let entity_categories = - categorize_entity_path(Timeline::log_time(), log_db, entity_path); - - if entity_categories.contains(self.category) - && !self.data_blueprint.contains_entity(entity_path) - && spaces_info - .is_reachable_by_transform(entity_path, &self.space_path) - .is_ok() - { - entities.push(entity_path.clone()); - } - }); + tree.visit_children_recursively( + &mut (|entity_path: &EntityPath| { + let entity_categories = + categorize_entity_path(Timeline::log_time(), log_db, entity_path); + + if entity_categories.contains(self.category) + && !self.data_blueprint.contains_entity(entity_path) + && spaces_info + .is_reachable_by_transform(entity_path, &self.space_path) + .is_ok() + { + entities.push(entity_path.clone()); + } + }), + ); if !entities.is_empty() { self.data_blueprint @@ -440,10 +456,10 @@ impl ViewState { ui: &mut egui::Ui, scene: &view_text::SceneText, ) { - egui::Frame { + (egui::Frame { inner_margin: re_ui::ReUi::view_padding().into(), ..egui::Frame::default() - } + }) .show(ui, |ui| { view_text::view_text(ctx, ui, &mut self.state_text, scene); }); @@ -455,10 +471,10 @@ impl ViewState { ui: &mut egui::Ui, scene: &view_node_graph::SceneNodeGraph, ) { - egui::Frame { + (egui::Frame { inner_margin: re_ui::ReUi::view_padding().into(), ..egui::Frame::default() - } + }) .show(ui, |ui| { view_node_graph::view_node_graph(ctx, ui, &mut self.state_node_graph, scene) }); diff --git a/crates/re_viewer/src/ui/space_view_heuristics.rs b/crates/re_viewer/src/ui/space_view_heuristics.rs index 88181e47cd75..040a76e49096 100644 --- a/crates/re_viewer/src/ui/space_view_heuristics.rs +++ b/crates/re_viewer/src/ui/space_view_heuristics.rs @@ -40,6 +40,7 @@ pub fn all_possible_space_views( total_msgs != 0 }) .collect::<Vec<_>>(); + let candidate_space_paths = spaces_info .iter() .map(|info| &info.path) @@ -52,7 +53,7 @@ pub fn all_possible_space_views( default_queried_entities_by_category(ctx, candidate_space_path, spaces_info) .iter() .map(|(category, entity_paths)| { - SpaceView::new(*category, candidate_space_path, entity_paths) + SpaceView::new(ctx, *category, candidate_space_path, entity_paths) }) .collect::<Vec<_>>() }) @@ -138,10 +139,11 @@ pub fn default_created_space_views( spaces_info: &SpaceInfoCollection, ) -> Vec<SpaceView> { let candidates = all_possible_space_views(ctx, spaces_info); - default_created_space_views_from_candidates(&ctx.log_db.entity_db, candidates) + default_created_space_views_from_candidates(ctx, &ctx.log_db.entity_db, candidates) } fn default_created_space_views_from_candidates( + ctx: &ViewerContext<'_>, entity_db: &EntityDb, candidates: Vec<SpaceView>, ) -> Vec<SpaceView> { @@ -181,8 +183,12 @@ fn default_created_space_views_from_candidates( // For tensors create one space view for each tensor (even though we're able to stack them in one view) if candidate.category == ViewCategory::Tensor { for entity_path in candidate.data_blueprint.entity_paths() { - let mut space_view = - SpaceView::new(ViewCategory::Tensor, entity_path, &[entity_path.clone()]); + let mut space_view = SpaceView::new( + ctx, + ViewCategory::Tensor, + entity_path, + &[entity_path.clone()], + ); space_view.entities_determined_by_user = true; // Suppress auto adding of entities.
space_views.push(space_view); } @@ -234,7 +240,7 @@ fn default_created_space_views_from_candidates( .collect_vec(); let mut space_view = - SpaceView::new(candidate.category, &candidate.space_path, &entities); + SpaceView::new(ctx, candidate.category, &candidate.space_path, &entities); space_view.entities_determined_by_user = true; // Suppress auto adding of entities. space_views.push(space_view); } diff --git a/crates/re_viewer/src/ui/stats_panel.rs b/crates/re_viewer/src/ui/stats_panel.rs index 0e1caf6aa80b..30fcfb4b6437 100644 --- a/crates/re_viewer/src/ui/stats_panel.rs +++ b/crates/re_viewer/src/ui/stats_panel.rs @@ -77,7 +77,7 @@ impl Default for StatsPanelState { magnetometer_history: History::new(0..1000, 5.0), start_time: instant::Instant::now(), imu_tab_visible: false, - xlink_stats_history: History::new(0..1000, 1.0), + xlink_stats_history: History::new(0..1000, 5.0), avg_xlink_stats_plot_history: History::new(0..1000, 5.0), } } @@ -126,15 +126,18 @@ impl StatsPanelState { (xlink_stats.bytes_written / 1e6 as i64) as f64, (xlink_stats.bytes_read / 1e6 as i64) as f64, ); - if let Some((time, [_, _, total_written, total_read])) = + if let Some((then, [_, _, total_written, total_read])) = self.xlink_stats_history.iter().last() { - written = (written - total_written) / (now - time); - read = (read - total_read) / (now - time); + if xlink_stats.timestamp == then { + return; + } + written = (written - total_written) / (xlink_stats.timestamp - then); + read = (read - total_read) / (xlink_stats.timestamp - then); } self.xlink_stats_history.add( - now, + xlink_stats.timestamp, [ written, read, @@ -143,7 +146,7 @@ impl StatsPanelState { ], ); self.avg_xlink_stats_plot_history.add( - now, + xlink_stats.timestamp, [ self.xlink_stats_history .iter() @@ -240,7 +243,11 @@ impl<'a, 'b> StatsTabs<'a, 'b> { "{display_name}: avg. Sent from device {:.2} MB/s, avg. 
Sent to Device: {:.2} MB/s", latest[0], latest[1] )); - Plot::new(display_name).show(ui, |plot_ui| { + Plot::new(display_name) + .allow_drag(false) + .allow_scroll(false) + .allow_zoom(false) + .show(ui, |plot_ui| { plot_ui.line( Line::new(PlotPoints::new( history @@ -248,13 +255,13 @@ .map(|(t, [written, _])| [t, written]) .collect_vec(), )) - .color(egui::Color32::BLUE), + .color(egui::Color32::RED), ); plot_ui.line( Line::new(PlotPoints::new( history.iter().map(|(t, [_, read])| [t, read]).collect_vec(), )) - .color(egui::Color32::RED), + .color(egui::Color32::GREEN), ); }); }); diff --git a/crates/re_viewer/src/ui/view_bar_chart/ui.rs b/crates/re_viewer/src/ui/view_bar_chart/ui.rs index 685fc58c3c27..0fd8d42bad0f 100644 --- a/crates/re_viewer/src/ui/view_bar_chart/ui.rs +++ b/crates/re_viewer/src/ui/view_bar_chart/ui.rs @@ -4,15 +4,16 @@ use egui::util::hash; use re_data_store::EntityPath; use re_log::warn_once; -use re_log_types::component_types::{self, InstanceKey}; +use re_log_types::component_types::{ self, InstanceKey }; -use crate::{misc::ViewerContext, ui::annotations::auto_color}; +use crate::{ misc::ViewerContext, ui::annotations::auto_color }; use super::SceneBarChart; // --- -pub(crate) const HELP_TEXT: &str = "\ +pub(crate) const HELP_TEXT: &str = + "\ Pan by dragging, or scroll (+ shift = horizontal).\n\ Box zooming: Right click to zoom in and zoom out using a selection.\n\ Reset view with double-click."; @@ -24,9 +25,9 @@ pub(crate) fn view_bar_chart( _ctx: &mut ViewerContext<'_>, ui: &mut egui::Ui, _state: &mut BarChartState, - scene: &SceneBarChart, + scene: &SceneBarChart ) -> egui::Response { - use egui::plot::{Bar, BarChart, Legend, Plot}; + use egui::plot::{ Bar, BarChart, Legend, Plot }; Plot::new("bar_chart_plot") .legend(Legend::default()) @@ -35,7 +36,7 @@ fn create_bar_chart<N: Into<f64>>( ent_path: &EntityPath, instance_key: &InstanceKey, - values: impl Iterator<Item = N>, + values: impl Iterator<Item = N> ) -> BarChart { let color = auto_color(hash((ent_path, instance_key)) as _); let fill = color.gamma_multiply(0.75).additive(); // make sure overlapping bars are obvious @@ -43,16 +44,16 @@ pub(crate) fn view_bar_chart( values .enumerate() .map(|(i, value)| { - Bar::new(i as f64 + 0.5, value.into()) + Bar::new((i as f64) + 0.5, value.into()) .width(0.95) .name(format!("{ent_path}[#{instance_key}] #{i}")) .fill(fill) .stroke(egui::Stroke::NONE) }) - .collect(), + .collect() ) - .name(format!("{ent_path}[#{instance_key}]")) - .color(color) + .name(format!("{ent_path}[#{instance_key}]")) + .color(color) } for ((ent_path, instance_key), tensor) in &scene.charts { @@ -66,11 +67,15 @@ pub(crate) fn view_bar_chart( component_types::TensorData::U32(data) => { create_bar_chart(ent_path, instance_key, data.iter().copied()) } - component_types::TensorData::U64(data) => create_bar_chart( - ent_path, - instance_key, - data.iter().copied().map(|v| v as f64), - ), + component_types::TensorData::U64(data) => + create_bar_chart( + ent_path, + instance_key, + data + .iter() + .copied() + .map(|v| v as f64) + ), component_types::TensorData::I8(data) => { create_bar_chart(ent_path, instance_key, data.iter().copied()) } @@ -80,20 +85,25 @@ pub(crate) fn view_bar_chart( component_types::TensorData::I32(data) => { create_bar_chart(ent_path, instance_key, data.iter().copied()) } - component_types::TensorData::I64(data)
=> + create_bar_chart( + ent_path, + instance_key, + data + .iter() + .copied() + .map(|v| v as f64) + ), component_types::TensorData::F32(data) => { create_bar_chart(ent_path, instance_key, data.iter().copied()) } component_types::TensorData::F64(data) => { create_bar_chart(ent_path, instance_key, data.iter().copied()) } - component_types::TensorData::JPEG(_) => { + component_types::TensorData::JPEG(_) | component_types::TensorData::NV12(_) => { warn_once!( - "trying to display JPEG data as a bar chart ({:?})", + "trying to display {:?} data as a bar chart ({:?})", + tensor.data, ent_path ); continue; @@ -102,6 +112,5 @@ pub(crate) fn view_bar_chart( plot_ui.bar_chart(chart); } - }) - .response + }).response } diff --git a/crates/re_viewer/src/ui/view_spatial/scene/picking.rs b/crates/re_viewer/src/ui/view_spatial/scene/picking.rs index 8d8325fdff1a..b478de883ddb 100644 --- a/crates/re_viewer/src/ui/view_spatial/scene/picking.rs +++ b/crates/re_viewer/src/ui/view_spatial/scene/picking.rs @@ -261,15 +261,16 @@ fn picking_textured_rects(context: &PickingContext, images: &[Image]) -> Vec Option { crate::profile_function!(); - let Some([height, width, _]) = tensor.image_height_width_channels() else { return None; }; + let Some([height, width, _]) = tensor.image_height_width_channels() else { + return None; + }; let debug_name = ent_path.to_string(); let tensor_stats = ctx.cache.tensor_stats(tensor); - match crate::gpu_bridge::tensor_to_gpu( - ctx.render_ctx, - &debug_name, - tensor, - tensor_stats, - annotations, - ) { + match + crate::gpu_bridge::tensor_to_gpu( + ctx.render_ctx, + &debug_name, + tensor, + tensor_stats, + annotations + ) + { Ok(colormapped_texture) => { // TODO(emilk): let users pick texture filtering. // Always use nearest for magnification: let users see crisp individual pixels when they zoom @@ -65,11 +68,10 @@ fn to_textured_rect( } else { re_renderer::renderer::TextureFilterMin::Linear }; - Some(re_renderer::renderer::TexturedRect { top_left_corner_position: world_from_obj.transform_point3(glam::Vec3::ZERO), - extent_u: world_from_obj.transform_vector3(glam::Vec3::X * width as f32), - extent_v: world_from_obj.transform_vector3(glam::Vec3::Y * height as f32), + extent_u: world_from_obj.transform_vector3(glam::Vec3::X * (width as f32)), + extent_v: world_from_obj.transform_vector3(glam::Vec3::Y * (height as f32)), colormapped_texture, options: RectangleOptions { texture_filter_magnification, @@ -93,41 +95,44 @@ fn handle_image_layering(scene: &mut SceneSpatial) { // Handle layered rectangles that are on (roughly) the same plane and were logged in sequence. // First, group by similar plane. // TODO(andreas): Need planes later for picking as well! - let images_grouped_by_plane = { - let mut cur_plane = macaw::Plane3::from_normal_dist(Vec3::NAN, std::f32::NAN); - let mut rectangle_group = Vec::new(); - scene - .primitives - .images - .drain(..) // We rebuild the list as we might reorder as well! - .batching(move |it| { - for image in it { - let rect = &image.textured_rect; - - let prev_plane = cur_plane; - cur_plane = macaw::Plane3::from_normal_point( - rect.extent_u.cross(rect.extent_v).normalize(), - rect.top_left_corner_position, - ); + let images_grouped_by_plane = ( + { + let mut cur_plane = macaw::Plane3::from_normal_dist(Vec3::NAN, std::f32::NAN); + let mut rectangle_group = Vec::new(); + scene.primitives.images + .drain(..) // We rebuild the list as we might reorder as well! 
+ .batching(move |it| { + for image in it { + let rect = &image.textured_rect; + + let prev_plane = cur_plane; + cur_plane = macaw::Plane3::from_normal_point( + rect.extent_u.cross(rect.extent_v).normalize(), + rect.top_left_corner_position + ); - // Are the image planes too unsimilar? Then this is a new group. - if !rectangle_group.is_empty() - && prev_plane.normal.dot(cur_plane.normal) < 0.99 - && (prev_plane.d - cur_plane.d) < 0.01 - { - let previous_group = std::mem::replace(&mut rectangle_group, vec![image]); - return Some(previous_group); + // Are the image planes too unsimilar? Then this is a new group. + if + !rectangle_group.is_empty() && + prev_plane.normal.dot(cur_plane.normal) < 0.99 && + prev_plane.d - cur_plane.d < 0.01 + { + let previous_group = std::mem::replace( + &mut rectangle_group, + vec![image] + ); + return Some(previous_group); + } + rectangle_group.push(image); } - rectangle_group.push(image); - } - if !rectangle_group.is_empty() { - Some(rectangle_group.drain(..).collect()) - } else { - None - } - }) - } - .collect_vec(); + if !rectangle_group.is_empty() { + Some(rectangle_group.drain(..).collect()) + } else { + None + } + }) + } + ).collect_vec(); // Then, for each planar group do resorting and change transparency. for mut grouped_images in images_grouped_by_plane { @@ -138,20 +143,13 @@ fn handle_image_layering(scene: &mut SceneSpatial) { for (idx, image) in grouped_images.iter_mut().enumerate() { // Set depth offset for correct order and avoid z fighting when there is a 3d camera. // Keep behind depth offset 0 for correct picking order. - image.textured_rect.options.depth_offset = - (idx as isize - total_num_images as isize) as re_renderer::DepthOffset; + image.textured_rect.options.depth_offset = ((idx as isize) - + (total_num_images as isize)) as re_renderer::DepthOffset; // make top images transparent - let opacity = if idx == 0 { - 1.0 - } else { - 1.0 / total_num_images.at_most(20) as f32 - }; // avoid precision problems in framebuffer - image.textured_rect.options.multiplicative_tint = image - .textured_rect - .options - .multiplicative_tint - .multiply(opacity); + let opacity = if idx == 0 { 1.0 } else { 1.0 / (total_num_images.at_most(20) as f32) }; // avoid precision problems in framebuffer + image.textured_rect.options.multiplicative_tint = + image.textured_rect.options.multiplicative_tint.multiply(opacity); } scene.primitives.images.extend(grouped_images); @@ -170,7 +168,7 @@ impl ImagesPart { properties: &mut EntityProperties, ent_path: &EntityPath, world_from_obj: glam::Mat4, - highlights: &SpaceViewHighlights, + highlights: &SpaceViewHighlights ) -> Result<(), QueryError> { crate::profile_function!(); @@ -180,7 +178,9 @@ impl ImagesPart { entity_view.iter_component::()? ) { crate::profile_scope!("loop_iter"); - let Some(tensor) = tensor else { continue; }; + let Some(tensor) = tensor else { + continue; + }; if !tensor.is_shaped_like_an_image() { return Ok(()); @@ -201,45 +201,54 @@ impl ImagesPart { if *properties.backproject_depth.get() && tensor.meaning == TensorDataMeaning::Depth { let query = ctx.current_query(); - let pinhole_ent_path = - crate::misc::queries::closest_pinhole_transform(ctx, ent_path, &query); + let pinhole_ent_path = crate::misc::queries::closest_pinhole_transform( + ctx, + ent_path, + &query + ); if let Some(pinhole_ent_path) = pinhole_ent_path { // NOTE: we don't pass in `world_from_obj` because this corresponds to the // transform of the projection plane, which is of no use to us here. 
// What we want are the extrinsics of the depth camera! - match Self::process_entity_view_as_depth_cloud( - scene, - ctx, - transforms, - properties, - &tensor, - ent_path, - &pinhole_ent_path, - entity_highlight, - ) { - Ok(()) => return Ok(()), + match + Self::process_entity_view_as_depth_cloud( + scene, + ctx, + transforms, + properties, + &tensor, + ent_path, + &pinhole_ent_path, + entity_highlight + ) + { + Ok(()) => { + return Ok(()); + } Err(err) => { re_log::warn_once!("{err}"); } } - }; + } } - let color = annotations.class_description(None).annotation_info().color( - color.map(|c| c.to_array()).as_ref(), - DefaultColor::OpaqueWhite, - ); - - if let Some(textured_rect) = to_textured_rect( - ctx, - &annotations, - world_from_obj, - ent_path, - &tensor, - color.into(), - entity_highlight.overall, - ) { + let color = annotations + .class_description(None) + .annotation_info() + .color(color.map(|c| c.to_array()).as_ref(), DefaultColor::OpaqueWhite); + + if + let Some(textured_rect) = to_textured_rect( + ctx, + &annotations, + world_from_obj, + ent_path, + &tensor, + color.into(), + entity_highlight.overall + ) + { scene.primitives.images.push(Image { ent_path: ent_path.clone(), tensor, @@ -260,14 +269,14 @@ impl ImagesPart { tensor: &DecodedTensor, ent_path: &EntityPath, pinhole_ent_path: &EntityPath, - entity_highlight: &SpaceViewOutlineMasks, + entity_highlight: &SpaceViewOutlineMasks ) -> Result<(), String> { crate::profile_function!(); let Some(re_log_types::Transform::Pinhole(intrinsics)) = query_latest_single::( &ctx.log_db.entity_db, pinhole_ent_path, - &ctx.current_query(), + &ctx.current_query() ) else { return Err(format!("Couldn't fetch pinhole intrinsics at {pinhole_ent_path:?}")); }; @@ -284,96 +293,51 @@ impl ImagesPart { return Err(format!("Tensor at {ent_path:?} is not an image")); }; let dimensions = glam::UVec2::new(width as _, height as _); + let annotations = scene.annotation_map.find(ent_path); + let tensor_stats = ctx.cache.tensor_stats(tensor).clone(); + let debug_name = ent_path.to_string(); - let depth_texture = { - // Ideally, we'd use the same key as for displaying the texture, but we might make other compromises regarding formats etc.! - // So to not couple this, we use a different key here - let texture_key = egui::util::hash((tensor.id(), "depth_cloud")); - let mut data_f32 = Vec::new(); - ctx.render_ctx - .texture_manager_2d - .get_or_try_create_with( - texture_key, - &mut ctx.render_ctx.gpu_resources.textures, - || { - // TODO(andreas/cmc): Ideally we'd upload the u16 data as-is. - // However, R16Unorm is behind a feature flag and Depth16Unorm doesn't work on WebGL (and is awkward as this is a depth buffer format!). - let data = match &tensor.data { - TensorData::U16(data) => { - data_f32.extend(data.as_slice().iter().map(|d| *d as f32)); - bytemuck::cast_slice(&data_f32).into() - } - TensorData::F32(data) => bytemuck::cast_slice(data).into(), - _ => { - return Err(format!( - "Tensor datatype {} is not supported for back-projection", - tensor.dtype() - )); - } - }; - - Ok(Texture2DCreationDesc { - label: format!("Depth cloud for {ent_path:?}").into(), - data, - format: wgpu::TextureFormat::R32Float, - width: width as _, - height: height as _, - }) - }, - ) - .map_err(|err| format!("Failed to create depth cloud texture: {err}"))? 
- }; - + let depth_texture = crate::gpu_bridge + ::tensor_to_gpu(ctx.render_ctx, &debug_name, tensor, &tensor_stats, &annotations) + .map_err(|_| format!("Couldn't create depth texture"))?; let depth_from_world_scale = *properties.depth_from_world_scale.get(); let world_depth_from_texture_depth = 1.0 / depth_from_world_scale; let mut colormap = match *properties.color_mapper.get() { - re_data_store::ColorMapper::Colormap(colormap) => match colormap { - re_data_store::Colormap::Grayscale => Colormap::Grayscale, - re_data_store::Colormap::Turbo => Colormap::Turbo, - re_data_store::Colormap::Viridis => Colormap::Viridis, - re_data_store::Colormap::Plasma => Colormap::Plasma, - re_data_store::Colormap::Magma => Colormap::Magma, - re_data_store::Colormap::Inferno => Colormap::Inferno, - }, + re_data_store::ColorMapper::Colormap(colormap) => + match colormap { + re_data_store::Colormap::Grayscale => Colormap::Grayscale, + re_data_store::Colormap::Turbo => Colormap::Turbo, + re_data_store::Colormap::Viridis => Colormap::Viridis, + re_data_store::Colormap::Plasma => Colormap::Plasma, + re_data_store::Colormap::Magma => Colormap::Magma, + re_data_store::Colormap::Inferno => Colormap::Inferno, + } re_data_store::ColorMapper::AlbedoTexture => Colormap::AlbedoTexture, }; - let mut albedo_data = None; - let mut albedo_dimensions = glam::UVec2::ZERO; - + let mut albedo_texture: Option = None; if colormap == Colormap::AlbedoTexture { - let tensor = properties.albedo_texture.as_ref().and_then(|path| { - query_latest_single::(&ctx.log_db.entity_db, path, &ctx.current_query()) - }); + let tensor = properties.albedo_texture + .as_ref() + .and_then(|path| { + query_latest_single::(&ctx.log_db.entity_db, path, &ctx.current_query()) + }); if let Some(tensor) = tensor { - let (h, w) = (tensor.shape()[0].size, tensor.shape()[1].size); - albedo_dimensions = glam::UVec2::new(w as _, h as _); - - // TODO(cmc): How does one know whether the texture is sRGB or not at this point? - // TODO(cmc): We should easily be able to pass almost any datatype here. - - albedo_data = match &tensor.data { - TensorData::U8(data) => { - if let Some([_, _, c]) = tensor.image_height_width_channels() { - match c { - 1 => Some(DepthCloudAlbedoData::Mono8(data.0.to_vec())), - 3 => Some(DepthCloudAlbedoData::Rgb8(data.0.to_vec())), - 4 => Some(DepthCloudAlbedoData::Rgb8Srgb(data.0.to_vec())), - _ => None, - } - } else { - None - } - } - _ => { - re_log::debug_once!( - "Tensor datatype not supported for albedo texture ({:?})", - std::mem::discriminant(&tensor.data), - ); - None - } + albedo_texture = match + crate::gpu_bridge::tensor_to_gpu( + ctx.render_ctx, + &debug_name, + &tensor + .try_into() + .map_err(|_| format!("Couldn't convert albedo texture to RGB"))?, + &tensor_stats, + &annotations + ) + { + anyhow::Result::Ok(texture) => Some(texture), + anyhow::Result::Err(_) => { None } }; } else { re_log::debug_once!( @@ -388,7 +352,7 @@ impl ImagesPart { // is a factor (`backproject_radius_scale`) of the diameter of a pixel projected // at that distance. 
let fov_y = intrinsics.fov_y().unwrap_or(1.0); - let pixel_width_from_depth = (0.5 * fov_y).tan() / (0.5 * height as f32); + let pixel_width_from_depth = (0.5 * fov_y).tan() / (0.5 * (height as f32)); let radius_scale = *properties.backproject_radius_scale.get(); let point_radius_from_world_depth = radius_scale * pixel_width_from_depth; @@ -415,8 +379,7 @@ impl ImagesPart { colormap, outline_mask_id: entity_highlight.overall, picking_object_id: re_renderer::PickingLayerObjectId(ent_path.hash64()), - albedo_data, - albedo_dimensions, + albedo_texture, }); Ok(()) @@ -430,7 +393,7 @@ impl ScenePart for ImagesPart { ctx: &mut ViewerContext<'_>, query: &SceneQuery<'_>, transforms: &TransformCache, - highlights: &SpaceViewHighlights, + highlights: &SpaceViewHighlights ) { crate::profile_scope!("ImagesPart"); @@ -439,29 +402,30 @@ impl ScenePart for ImagesPart { continue; }; - match query_primary_with_history::<Tensor, 3>( - &ctx.log_db.entity_db.data_store, - &query.timeline, - &query.latest_at, - &props.visible_history, - ent_path, - [Tensor::name(), InstanceKey::name(), ColorRGBA::name()], - ) - .and_then(|entities| { - for entity in entities { - Self::process_entity_view( - &entity, - scene, - ctx, - transforms, - &mut props, - ent_path, - world_from_obj, - highlights, - )?; - } - Ok(()) - }) { + match + query_primary_with_history::<Tensor, 3>( + &ctx.log_db.entity_db.data_store, + &query.timeline, + &query.latest_at, + &props.visible_history, + ent_path, + [Tensor::name(), InstanceKey::name(), ColorRGBA::name()] + ).and_then(|entities| { + for entity in entities { + Self::process_entity_view( + &entity, + scene, + ctx, + transforms, + &mut props, + ent_path, + world_from_obj, + highlights + )?; + } + Ok(()) + }) + { Ok(_) | Err(QueryError::PrimaryNotFound) => {} Err(err) => { re_log::error_once!("Unexpected error querying {ent_path:?}: {err}"); diff --git a/crates/re_viewer/src/ui/view_spatial/ui.rs b/crates/re_viewer/src/ui/view_spatial/ui.rs index bd2daf058b1e..11b02345faa9 100644 --- a/crates/re_viewer/src/ui/view_spatial/ui.rs +++ b/crates/re_viewer/src/ui/view_spatial/ui.rs @@ -1,20 +1,29 @@ use eframe::epaint::text::TextWrapping; -use re_data_store::{query_latest_single, EditableAutoValue, EntityPath, EntityPropertyMap}; +use re_data_store::{ + query_latest_single, + EditableAutoValue, + EntityPath, + EntityPathPart, + EntityPropertyMap, +}; use re_format::format_f32; -use egui::{NumExt, WidgetText}; +use egui::{ NumExt, WidgetText }; use macaw::BoundingBox; -use re_log_types::component_types::{Tensor, TensorDataMeaning}; -use re_renderer::{Colormap, OutlineConfig}; +use re_log_types::component_types::{ Tensor, TensorDataMeaning }; +use re_renderer::{ Colormap, OutlineConfig }; use crate::{ misc::{ - space_info::query_view_coordinates, HoveredSpace, SelectionHighlight, SpaceViewHighlights, + space_info::query_view_coordinates, + HoveredSpace, + SelectionHighlight, + SpaceViewHighlights, ViewerContext, }, ui::{ data_blueprint::DataBlueprintTree, - data_ui::{self, DataUi}, + data_ui::{ self, DataUi }, space_view::ScreenshotMode, view_spatial::UiLabelTarget, SpaceViewId, @@ -23,10 +32,11 @@ use crate::{ use super::{ eye::Eye, - scene::{PickingHitType, PickingResult, SceneSpatialUiData}, + scene::{ PickingHitType, PickingResult, SceneSpatialUiData }, ui_2d::View2DState, ui_3d::View3DState, - SceneSpatial, SpaceSpecs, + SceneSpatial, + SpaceSpecs, }; /// Describes how the scene is navigated, determining if it is a 2D or 3D experience.
@@ -105,7 +115,7 @@ impl Default for ViewSpatialState { state_3d: Default::default(), auto_size_config: re_renderer::AutoSizeConfig { point_radius: re_renderer::Size::AUTO, // let re_renderer decide - line_radius: re_renderer::Size::AUTO, // let re_renderer decide + line_radius: re_renderer::Size::AUTO, // let re_renderer decide }, previous_picking_result: None, } @@ -149,7 +159,7 @@ impl ViewSpatialState { pub fn update_object_property_heuristics( &self, ctx: &mut ViewerContext<'_>, - data_blueprint: &mut DataBlueprintTree, + data_blueprint: &mut DataBlueprintTree ) { crate::profile_function!(); @@ -164,7 +174,7 @@ impl ViewSpatialState { data_blueprint, &query, &entity_path, - scene_size, + scene_size ); self.update_depth_cloud_property_heuristics(ctx, data_blueprint, &query, &entity_path); } @@ -175,14 +185,15 @@ impl ViewSpatialState { data_blueprint: &mut DataBlueprintTree, query: &re_arrow_store::LatestAtQuery, entity_path: &EntityPath, - scene_size: f32, + scene_size: f32 ) { - if let Some(re_log_types::Transform::Pinhole(_)) = - query_latest_single::<Transform>( - &ctx.log_db.entity_db, - entity_path, - query, - ) + if + let Some(re_log_types::Transform::Pinhole(_)) = + query_latest_single::<Transform>( + &ctx.log_db.entity_db, + entity_path, + query + ) { let default_image_plane_distance = if scene_size.is_finite() && scene_size > 0.0 { scene_size * 0.05 @@ -192,11 +203,10 @@ impl ViewSpatialState { let mut properties = data_blueprint.data_blueprints_individual().get(entity_path); if properties.pinhole_image_plane_distance.is_auto() { - properties.pinhole_image_plane_distance = - EditableAutoValue::Auto(default_image_plane_distance); - data_blueprint - .data_blueprints_individual() - .set(entity_path.clone(), properties); + properties.pinhole_image_plane_distance = EditableAutoValue::Auto( + default_image_plane_distance + ); + data_blueprint.data_blueprints_individual().set(entity_path.clone(), properties); } } } @@ -206,26 +216,22 @@ impl ViewSpatialState { ctx: &mut ViewerContext<'_>, data_blueprint: &mut DataBlueprintTree, query: &re_arrow_store::LatestAtQuery, - entity_path: &EntityPath, + entity_path: &EntityPath ) -> Option<()> { let tensor = query_latest_single::<Tensor>(&ctx.log_db.entity_db, entity_path, query)?; let mut properties = data_blueprint.data_blueprints_individual().get(entity_path); if properties.backproject_depth.is_auto() { properties.backproject_depth = EditableAutoValue::Auto( - tensor.meaning == TensorDataMeaning::Depth - && *self.nav_mode.get() == SpatialNavigationMode::ThreeD, + tensor.meaning == TensorDataMeaning::Depth && + *self.nav_mode.get() == SpatialNavigationMode::ThreeD ); } if tensor.meaning == TensorDataMeaning::Depth { if properties.depth_from_world_scale.is_auto() { let auto = tensor.meter.unwrap_or_else(|| { - if tensor.dtype().is_integer() { - 1000.0 - } else { - 1.0 - } + if tensor.dtype().is_integer() { 1000.0 } else { 1.0 } }); properties.depth_from_world_scale = EditableAutoValue::Auto(auto); } @@ -235,42 +241,52 @@ impl ViewSpatialState { } let colormap = match *properties.color_mapper.get() { - re_data_store::ColorMapper::Colormap(colormap) => match colormap { - re_data_store::Colormap::Grayscale => Colormap::Grayscale, - re_data_store::Colormap::Turbo => Colormap::Turbo, - re_data_store::Colormap::Viridis => Colormap::Viridis, - re_data_store::Colormap::Plasma => Colormap::Plasma, - re_data_store::Colormap::Magma => Colormap::Magma, - re_data_store::Colormap::Inferno => Colormap::Inferno, - }, + re_data_store::ColorMapper::Colormap(colormap) => + match colormap
{ + re_data_store::Colormap::Grayscale => Colormap::Grayscale, + re_data_store::Colormap::Turbo => Colormap::Turbo, + re_data_store::Colormap::Viridis => Colormap::Viridis, + re_data_store::Colormap::Plasma => Colormap::Plasma, + re_data_store::Colormap::Magma => Colormap::Magma, + re_data_store::Colormap::Inferno => Colormap::Inferno, + } re_data_store::ColorMapper::AlbedoTexture => Colormap::AlbedoTexture, }; // Set albedo texture if it is not set yet if colormap == Colormap::AlbedoTexture && properties.albedo_texture.is_none() { - let mut tex_ep = None; - if let Some(tree) = entity_path - .parent() - .and_then(|path| ctx.log_db.entity_db.tree.subtree(&path)) + let mut tex_ep: Option<EntityPath> = None; + if + let Some(tree) = entity_path + .parent() + .and_then(|path| ctx.log_db.entity_db.tree.subtree(&path)) { - tree.visit_children_recursively(&mut |ent_path| { - if tex_ep.is_some() { - return; - } - let Some(tensor) = - query_latest_single::<Tensor>(&ctx.log_db.entity_db, ent_path, &ctx.current_query()) else { - return; - }; - if tensor.is_shaped_like_an_image() { - tex_ep = Some(ent_path.clone()); - } - }); + tree.visit_children_recursively( + &mut (|ent_path| { + // Prioritize color image over depth images + if let Some(current_tex) = tex_ep.clone() { + if let Some(trailing) = current_tex.iter().last() { + if trailing == &EntityPathPart::from("Color") { + return; + } + } + } + let Some(tensor) = query_latest_single::<Tensor>( + &ctx.log_db.entity_db, + ent_path, + &ctx.current_query() + ) else { + return; + }; + if tensor.is_shaped_like_an_image() { + tex_ep = Some(ent_path.clone()); + } + }) + ); properties.albedo_texture = tex_ep; } } - data_blueprint - .data_blueprints_individual() - .set(entity_path.clone(), properties); + data_blueprint.data_blueprints_individual().set(entity_path.clone(), properties); } Some(()) @@ -282,22 +298,22 @@ impl ViewSpatialState { ui: &mut egui::Ui, data_blueprint: &DataBlueprintTree, space_path: &EntityPath, - space_view_id: SpaceViewId, + space_view_id: SpaceViewId ) { - ctx.re_ui.selection_grid(ui, "spatial_settings_ui") - .show(ui, |ui| { + ctx.re_ui.selection_grid(ui, "spatial_settings_ui").show(ui, |ui| { let auto_size_world = self.auto_size_world_heuristic(); - ctx.re_ui.grid_left_hand_label(ui, "Space root") - .on_hover_text("The origin is at the origin of this Entity. All transforms are relative to it"); + ctx.re_ui + .grid_left_hand_label(ui, "Space root") + .on_hover_text( + "The origin is at the origin of this Entity. All transforms are relative to it" + ); // Specify space view id only if this is actually part of the space view itself. // (otherwise we get a somewhat broken link) ctx.entity_path_button( ui, - data_blueprint - .contains_entity(space_path) - .then_some(space_view_id), - space_path, + data_blueprint.contains_entity(space_path).then_some(space_view_id), + space_path ); ui.end_row(); @@ -305,76 +321,82 @@ impl ViewSpatialState { ui.vertical(|ui| { ui.horizontal(|ui| { ui.push_id("points", |ui| { - size_ui( - ui, - 2.0, - auto_size_world, - &mut self.auto_size_config.point_radius, - ); + size_ui(ui, 2.0, auto_size_world, &mut self.auto_size_config.point_radius); }); - ui.label("Point radius") - .on_hover_text("Point radius used whenever not explicitly specified."); + ui.label("Point radius").on_hover_text( + "Point radius used whenever not explicitly specified."
+ ); }); ui.horizontal(|ui| { ui.push_id("lines", |ui| { - size_ui( - ui, - 1.5, - auto_size_world, - &mut self.auto_size_config.line_radius, + size_ui(ui, 1.5, auto_size_world, &mut self.auto_size_config.line_radius); + ui.label("Line radius").on_hover_text( + "Line radius used whenever not explicitly specified." ); - ui.label("Line radius") - .on_hover_text("Line radius used whenever not explicitly specified."); }); }); }); ui.end_row(); - ctx.re_ui.grid_left_hand_label(ui, "Camera") + ctx.re_ui + .grid_left_hand_label(ui, "Camera") .on_hover_text("The virtual camera which controls what is shown on screen."); ui.vertical(|ui| { let mut nav_mode = *self.nav_mode.get(); let mut changed = false; - egui::ComboBox::from_id_source("nav_mode") + egui::ComboBox + ::from_id_source("nav_mode") .selected_text(nav_mode) .show_ui(ui, |ui| { ui.style_mut().wrap = Some(false); ui.set_min_width(64.0); - changed |= ui.selectable_value( - &mut nav_mode, - SpatialNavigationMode::TwoD, - SpatialNavigationMode::TwoD, - ).changed(); - - changed |= ui.selectable_value( - &mut nav_mode, - SpatialNavigationMode::ThreeD, - SpatialNavigationMode::ThreeD, - ).changed(); + changed |= ui + .selectable_value( + &mut nav_mode, + SpatialNavigationMode::TwoD, + SpatialNavigationMode::TwoD + ) + .changed(); + + changed |= ui + .selectable_value( + &mut nav_mode, + SpatialNavigationMode::ThreeD, + SpatialNavigationMode::ThreeD + ) + .changed(); }); - if changed { - self.nav_mode = EditableAutoValue::UserEdited(nav_mode); - } + if changed { + self.nav_mode = EditableAutoValue::UserEdited(nav_mode); + } if *self.nav_mode.get() == SpatialNavigationMode::ThreeD { - if ui.button("Reset").on_hover_text( - "Resets camera position & orientation.\nYou can also double-click the 3D view.") - .clicked() + if + ui + .button("Reset") + .on_hover_text( + "Resets camera position & orientation.\nYou can also double-click the 3D view." + ) + .clicked() { self.state_3d.reset_camera(&self.scene_bbox_accum); } - ui.checkbox(&mut self.state_3d.spin, "Spin") - .on_hover_text("Spin camera around the orbit center."); + ui.checkbox(&mut self.state_3d.spin, "Spin").on_hover_text( + "Spin camera around the orbit center." 
+ ); } }); ui.end_row(); if *self.nav_mode.get() == SpatialNavigationMode::ThreeD { - ctx.re_ui.grid_left_hand_label(ui, "Coordinates") + ctx.re_ui + .grid_left_hand_label(ui, "Coordinates") .on_hover_text("The world coordinate system used for this view."); - ui.vertical(|ui|{ - ui.label(format!("Up is {}", axis_name(self.state_3d.space_specs.up))).on_hover_ui(|ui| { + ui.vertical(|ui| { + ui.label( + format!("Up is {}", axis_name(self.state_3d.space_specs.up)) + ).on_hover_ui(|ui| { ui.horizontal(|ui| { ui.spacing_mut().item_spacing.x = 0.0; ui.label("Set with "); @@ -382,33 +404,26 @@ impl ViewSpatialState { ui.label("."); }); }); - ui.checkbox(&mut self.state_3d.show_axes, "Show origin axes").on_hover_text("Show X-Y-Z axes"); - ui.checkbox(&mut self.state_3d.show_bbox, "Show bounding box").on_hover_text("Show the current scene bounding box"); + ui.checkbox(&mut self.state_3d.show_axes, "Show origin axes").on_hover_text( + "Show X-Y-Z axes" + ); + ui.checkbox(&mut self.state_3d.show_bbox, "Show bounding box").on_hover_text( + "Show the current scene bounding box" + ); }); ui.end_row(); } - ctx.re_ui.grid_left_hand_label(ui, "Bounding box") + ctx.re_ui + .grid_left_hand_label(ui, "Bounding box") .on_hover_text("The bounding box encompassing all Entities in the view right now."); ui.vertical(|ui| { ui.style_mut().wrap = Some(false); let BoundingBox { min, max } = self.scene_bbox; - ui.label(format!( - "x [{} - {}]", - format_f32(min.x), - format_f32(max.x), - )); - ui.label(format!( - "y [{} - {}]", - format_f32(min.y), - format_f32(max.y), - )); + ui.label(format!("x [{} - {}]", format_f32(min.x), format_f32(max.x))); + ui.label(format!("y [{} - {}]", format_f32(min.y), format_f32(max.y))); if *self.nav_mode.get() == SpatialNavigationMode::ThreeD { - ui.label(format!( - "z [{} - {}]", - format_f32(min.z), - format_f32(max.z), - )); + ui.label(format!("z [{} - {}]", format_f32(min.z), format_f32(max.z))); } }); ui.end_row(); @@ -425,7 +440,7 @@ impl ViewSpatialState { scene: SceneSpatial, space_view_id: SpaceViewId, highlights: &SpaceViewHighlights, - entity_properties: &EntityPropertyMap, + entity_properties: &EntityPropertyMap ) { self.scene_bbox = scene.primitives.bounding_box(); if self.scene_bbox_accum.is_nothing() { @@ -441,8 +456,11 @@ impl ViewSpatialState { match *self.nav_mode.get() { SpatialNavigationMode::ThreeD => { - let coordinates = - query_view_coordinates(&ctx.log_db.entity_db, space, &ctx.current_query()); + let coordinates = query_view_coordinates( + &ctx.log_db.entity_db, + space, + &ctx.current_query() + ); self.state_3d.space_specs = SpaceSpecs::from_view_coordinates(coordinates); super::view_3d( ctx, @@ -452,14 +470,14 @@ impl ViewSpatialState { space_view_id, scene, highlights, - entity_properties, + entity_properties ); } SpatialNavigationMode::TwoD => { self.scene_bbox_accum = self.scene_bbox; let scene_rect_accum = egui::Rect::from_min_max( self.scene_bbox_accum.min.truncate().to_array().into(), - self.scene_bbox_accum.max.truncate().to_array().into(), + self.scene_bbox_accum.max.truncate().to_array().into() ); super::view_2d( ctx, @@ -470,7 +488,7 @@ impl ViewSpatialState { scene_rect_accum, space_view_id, highlights, - entity_properties, + entity_properties ); } } @@ -488,7 +506,7 @@ fn size_ui( ui: &mut egui::Ui, default_size_points: f32, default_size_world: f32, - size: &mut re_renderer::Size, + size: &mut re_renderer::Size ) { use re_renderer::Size; @@ -501,18 +519,24 @@ fn size_ui( }; let mode_before = mode; - 
egui::ComboBox::from_id_source("auto_size_mode") + egui::ComboBox + ::from_id_source("auto_size_mode") .selected_text(mode) .show_ui(ui, |ui| { ui.style_mut().wrap = Some(false); ui.set_min_width(64.0); - ui.selectable_value(&mut mode, AutoSizeUnit::Auto, AutoSizeUnit::Auto) - .on_hover_text("Determine automatically."); - ui.selectable_value(&mut mode, AutoSizeUnit::UiPoints, AutoSizeUnit::UiPoints) - .on_hover_text("Manual in UI points."); - ui.selectable_value(&mut mode, AutoSizeUnit::World, AutoSizeUnit::World) - .on_hover_text("Manual in scene units."); + ui.selectable_value(&mut mode, AutoSizeUnit::Auto, AutoSizeUnit::Auto).on_hover_text( + "Determine automatically." + ); + ui.selectable_value( + &mut mode, + AutoSizeUnit::UiPoints, + AutoSizeUnit::UiPoints + ).on_hover_text("Manual in UI points."); + ui.selectable_value(&mut mode, AutoSizeUnit::World, AutoSizeUnit::World).on_hover_text( + "Manual in scene units." + ); }); if mode != mode_before { *size = match mode { @@ -529,14 +553,16 @@ fn size_ui( } else { (0.01 * displayed_size, 0.0001..=f32::INFINITY) }; - if ui - .add( - egui::DragValue::new(&mut displayed_size) - .speed(drag_speed) - .clamp_range(clamp_range) - .max_decimals(4), - ) - .changed() + if + ui + .add( + egui::DragValue + ::new(&mut displayed_size) + .speed(drag_speed) + .clamp_range(clamp_range) + .max_decimals(4) + ) + .changed() { *size = match mode { AutoSizeUnit::Auto => unreachable!(), @@ -578,7 +604,7 @@ pub fn create_labels( eye3d: &Eye, parent_ui: &mut egui::Ui, highlights: &SpaceViewHighlights, - nav_mode: SpatialNavigationMode, + nav_mode: SpatialNavigationMode ) -> Vec<egui::Shape> { crate::profile_function!(); @@ -620,8 +646,7 @@ pub fn create_labels( let font_id = egui::TextStyle::Body.resolve(parent_ui.style()); let galley = parent_ui.fonts(|fonts| { - fonts.layout_job({ - egui::text::LayoutJob { + fonts.layout_job({ egui::text::LayoutJob { sections: vec![egui::text::LayoutSection { leading_space: 0.0, byte_range: 0..label.text.len(), @@ -635,25 +660,27 @@ pub fn create_labels( break_on_newline: true, halign: egui::Align::Center, ..Default::default() - } - }) + } }) }); - let text_rect = egui::Align2::CENTER_TOP - .anchor_rect(egui::Rect::from_min_size(text_anchor_pos, galley.size())); + let text_rect = egui::Align2::CENTER_TOP.anchor_rect( + egui::Rect::from_min_size(text_anchor_pos, galley.size()) + ); let bg_rect = text_rect.expand2(egui::vec2(4.0, 2.0)); let highlight = highlights .entity_highlight(label.labeled_instance.entity_path_hash) .index_highlight(label.labeled_instance.instance_key); let fill_color = match highlight.hover { - crate::misc::HoverHighlight::None => match highlight.selection { - SelectionHighlight::None => parent_ui.style().visuals.widgets.inactive.bg_fill, - SelectionHighlight::SiblingSelection => { - parent_ui.style().visuals.widgets.active.bg_fill + crate::misc::HoverHighlight::None => + match highlight.selection { + SelectionHighlight::None => parent_ui.style().visuals.widgets.inactive.bg_fill, + SelectionHighlight::SiblingSelection => { + parent_ui.style().visuals.widgets.active.bg_fill + } + SelectionHighlight::Selection => + parent_ui.style().visuals.widgets.active.bg_fill, } - SelectionHighlight::Selection => parent_ui.style().visuals.widgets.active.bg_fill, - }, crate::misc::HoverHighlight::Hovered => { parent_ui.style().visuals.widgets.hovered.bg_fill } @@ -673,10 +700,12 @@ pub fn outline_config(gui_ctx: &egui::Context) -> OutlineConfig { // Take the exact same colors we have in the ui!
- let selection_outline_color = - re_renderer::Rgba::from(gui_ctx.style().visuals.selection.bg_fill); - let hover_outline_color = - re_renderer::Rgba::from(gui_ctx.style().visuals.widgets.hovered.bg_fill); + let selection_outline_color = re_renderer::Rgba::from( + gui_ctx.style().visuals.selection.bg_fill + ); + let hover_outline_color = re_renderer::Rgba::from( + gui_ctx.style().visuals.widgets.hovered.bg_fill + ); OutlineConfig { outline_radius_pixel: (gui_ctx.pixels_per_point() * 1.5).at_least(0.5), @@ -687,7 +716,7 @@ pub fn outline_config(gui_ctx: &egui::Context) -> OutlineConfig { pub fn screenshot_context_menu( _ctx: &ViewerContext<'_>, - response: egui::Response, + response: egui::Response ) -> (egui::Response, Option<ScreenshotMode>) { #[cfg(not(target_arch = "wasm32"))] { @@ -726,7 +755,7 @@ pub fn picking( state: &mut ViewSpatialState, scene: &SceneSpatial, space: &EntityPath, - entity_properties: &EntityPropertyMap, + entity_properties: &EntityPropertyMap ) -> egui::Response { crate::profile_function!(); @@ -740,27 +769,24 @@ pub fn picking( space_from_ui, ui_clip_rect, parent_ui.ctx().pixels_per_point(), - &eye, + &eye ); let picking_rect_size = super::scene::PickingContext::UI_INTERACTION_RADIUS * parent_ui.ctx().pixels_per_point(); // Make the picking rect bigger than necessary so we can use it to counter act delays. // (by the time the picking rectangle read back, the cursor may have moved on). - let picking_rect_size = (picking_rect_size * 2.0) - .ceil() - .at_least(8.0) - .at_most(128.0) as u32; + let picking_rect_size = (picking_rect_size * 2.0).ceil().at_least(8.0).at_most(128.0) as u32; let _ = view_builder.schedule_picking_rect( ctx.render_ctx, re_renderer::IntRect::from_middle_and_extent( picking_context.pointer_in_pixel.as_ivec2(), - glam::uvec2(picking_rect_size, picking_rect_size), + glam::uvec2(picking_rect_size, picking_rect_size) ), space_view_id.gpu_readback_id(), (), - ctx.app_options.show_picking_debug_overlay, + ctx.app_options.show_picking_debug_overlay ); let picking_result = picking_context.pick( @@ -768,7 +794,7 @@ pub fn picking( space_view_id.gpu_readback_id(), &state.previous_picking_result, &scene.primitives, - &scene.ui, + &scene.ui ); state.previous_picking_result = Some(picking_result.clone()); @@ -778,8 +804,9 @@ pub fn picking( // TODO(#1818): Depth at pointer only works for depth images so far. let mut depth_at_pointer = None; for hit in &picking_result.hits { - let Some(mut instance_path) = hit.instance_path_hash.resolve(&ctx.log_db.entity_db) - else { continue; }; + let Some(mut instance_path) = hit.instance_path_hash.resolve(&ctx.log_db.entity_db) else { + continue; + }; let ent_properties = entity_properties.get(&instance_path.entity_path); if !ent_properties.interactive { @@ -787,28 +814,27 @@ pub fn picking( } // Special hover ui for images. - let picked_image_with_coords = if hit.hit_type == PickingHitType::TexturedRect - || *ent_properties.backproject_depth.get() + let picked_image_with_coords = if + hit.hit_type == PickingHitType::TexturedRect || + *ent_properties.backproject_depth.get() { query_latest_single::<Tensor>( &ctx.log_db.entity_db, &instance_path.entity_path, - &ctx.current_query(), - ) - .and_then(|tensor| { + &ctx.current_query() + ).and_then(|tensor| { // If we're here because of back-projection, but this wasn't actually a depth image, drop out. // (the back-projection property may be true despite this not being a depth image!)
- if hit.hit_type != PickingHitType::TexturedRect - && *ent_properties.backproject_depth.get() - && tensor.meaning != TensorDataMeaning::Depth + if + hit.hit_type != PickingHitType::TexturedRect && + *ent_properties.backproject_depth.get() && + tensor.meaning != TensorDataMeaning::Depth { None } else { tensor.image_height_width_channels().map(|[_, w, _]| { - let coordinates = hit - .instance_path_hash - .instance_key - .to_2d_image_coordinate(w); + let coordinates = + hit.instance_path_hash.instance_key.to_2d_image_coordinate(w); (tensor, coordinates) }) } @@ -821,57 +847,58 @@ pub fn picking( instance_path.instance_key = re_log_types::component_types::InstanceKey::SPLAT; } - hovered_items.push(crate::misc::Item::InstancePath( - Some(space_view_id), - instance_path.clone(), - )); + hovered_items.push( + crate::misc::Item::InstancePath(Some(space_view_id), instance_path.clone()) + ); response = if let Some((tensor, coords)) = picked_image_with_coords { if let Some(meter) = tensor.meter { - if let Some(raw_value) = tensor.get(&[ - picking_context.pointer_in_space2d.y.round() as _, - picking_context.pointer_in_space2d.x.round() as _, - ]) { + if + let Some(raw_value) = tensor.get( + &[ + picking_context.pointer_in_space2d.y.round() as _, + picking_context.pointer_in_space2d.x.round() as _, + ] + ) + { let raw_value = raw_value.as_f64(); - let depth_in_meters = raw_value / meter as f64; + let depth_in_meters = raw_value / (meter as f64); depth_at_pointer = Some(depth_in_meters as f32); } } - response - .on_hover_cursor(egui::CursorIcon::Crosshair) - .on_hover_ui_at_pointer(|ui| { - ui.set_max_width(320.0); - - ui.vertical(|ui| { - ui.label(instance_path.to_string()); - instance_path.data_ui( - ctx, - ui, - crate::ui::UiVerbosity::Small, - &ctx.current_query(), - ); + response.on_hover_cursor(egui::CursorIcon::Crosshair).on_hover_ui_at_pointer(|ui| { + ui.set_max_width(320.0); - if let Some([h, w, ..]) = tensor.image_height_width_channels() { - ui.separator(); - ui.horizontal(|ui| { - let (w, h) = (w as f32, h as f32); - if *state.nav_mode.get() == SpatialNavigationMode::TwoD { - let rect = egui::Rect::from_min_size( - egui::Pos2::ZERO, - egui::vec2(w, h), - ); - data_ui::image::show_zoomed_image_region_area_outline( - ui, - &tensor, - [coords[0] as _, coords[1] as _], - space_from_ui.inverse().transform_rect(rect), - ); - } + ui.vertical(|ui| { + ui.label(instance_path.to_string()); + instance_path.data_ui( + ctx, + ui, + crate::ui::UiVerbosity::Small, + &ctx.current_query() + ); - let tensor_name = instance_path.to_string(); - match ctx.cache.decode.try_decode_tensor_if_necessary(tensor) { - Ok(decoded_tensor) => + if let Some([h, w, ..]) = tensor.image_height_width_channels() { + ui.separator(); + ui.horizontal(|ui| { + let (w, h) = (w as f32, h as f32); + if *state.nav_mode.get() == SpatialNavigationMode::TwoD { + let rect = egui::Rect::from_min_size( + egui::Pos2::ZERO, + egui::vec2(w, h) + ); + data_ui::image::show_zoomed_image_region_area_outline( + ui, + &tensor, + [coords[0] as _, coords[1] as _], + space_from_ui.inverse().transform_rect(rect) + ); + } + + let tensor_name = instance_path.to_string(); + match ctx.cache.decode.try_decode_tensor_if_necessary(tensor) { + Ok(decoded_tensor) => data_ui::image::show_zoomed_image_region( ctx.render_ctx, ui, @@ -880,17 +907,17 @@ pub fn picking( &scene.annotation_map.find(&instance_path.entity_path), decoded_tensor.meter, &tensor_name, - [coords[0] as _, coords[1] as _], + [coords[0] as _, coords[1] as _] ), Err(err) => re_log::warn_once!( 
"Encountered problem decoding tensor at path {tensor_name}: {err}" ), - } - }); - } - }); - }) + } + }); + } + }); + }) } else { // Hover ui for everything else response.on_hover_ui_at_pointer(|ui| { @@ -899,7 +926,7 @@ pub fn picking( ctx, ui, crate::ui::UiVerbosity::Reduced, - &ctx.current_query(), + &ctx.current_query() ); }) }; @@ -909,20 +936,20 @@ pub fn picking( ctx.set_hovered(hovered_items.into_iter()); let hovered_space = match state.nav_mode.get() { - SpatialNavigationMode::TwoD => HoveredSpace::TwoD { - space_2d: space.clone(), - pos: picking_context - .pointer_in_space2d - .extend(depth_at_pointer.unwrap_or(f32::INFINITY)), - }, + SpatialNavigationMode::TwoD => + HoveredSpace::TwoD { + space_2d: space.clone(), + pos: picking_context.pointer_in_space2d.extend( + depth_at_pointer.unwrap_or(f32::INFINITY) + ), + }, SpatialNavigationMode::ThreeD => { let hovered_point = picking_result.space_position(); HoveredSpace::ThreeD { space_3d: space.clone(), pos: hovered_point, tracked_space_camera: state.state_3d.tracked_camera.clone(), - point_in_space_cameras: scene - .space_cameras + point_in_space_cameras: scene.space_cameras .iter() .map(|cam| { ( diff --git a/crates/re_viewer/src/ui/view_spatial/ui_3d.rs b/crates/re_viewer/src/ui/view_spatial/ui_3d.rs index dfcbeb6b6d40..313506f9b022 100644 --- a/crates/re_viewer/src/ui/view_spatial/ui_3d.rs +++ b/crates/re_viewer/src/ui/view_spatial/ui_3d.rs @@ -85,7 +85,7 @@ impl Default for View3DState { impl View3DState { pub fn reset_camera(&mut self, scene_bbox_accum: &BoundingBox) { - self.interpolate_to_orbit_eye(default_eye(scene_bbox_accum, &self.space_specs)); + self.interpolate_to_orbit_eye(default_eye(scene_bbox_accum, &self.space_specs, &[])); self.tracked_camera = None; self.camera_before_tracked_camera = None; } @@ -116,7 +116,7 @@ impl View3DState { let orbit_camera = self .orbit_eye - .get_or_insert_with(|| default_eye(scene_bbox_accum, &self.space_specs)); + .get_or_insert_with(|| default_eye(scene_bbox_accum, &self.space_specs, space_cameras)); if self.spin { orbit_camera.rotate(egui::vec2( @@ -209,7 +209,7 @@ impl EyeInterpolation { } } -#[derive(Clone, Default)] +#[derive(Clone, Default, Debug)] pub struct SpaceSpecs { pub up: Option, pub right: Option, @@ -289,6 +289,7 @@ pub fn view_3d( state .state_3d .update_eye(&response, &state.scene_bbox_accum, &scene.space_cameras); + let did_interact_with_eye = orbit_eye.update(&response, orbit_eye_drag_threshold); let orbit_eye = *orbit_eye; @@ -624,7 +625,50 @@ fn add_picking_ray( } } -fn default_eye(scene_bbox: &macaw::BoundingBox, space_specs: &SpaceSpecs) -> OrbitEye { +/// Look down and forward on the space camera. 
+fn focus_on_space_camera( + scene_bbox: &macaw::BoundingBox, + space_specs: &SpaceSpecs, + camera: &SpaceCamera3D, +) -> OrbitEye { + let Some(up) = camera.view_coordinates.up() else { + return default_eye(scene_bbox, space_specs, &[]); + }; + let Some(fwd) = camera.view_coordinates.forward() else { + return default_eye(scene_bbox, space_specs, &[]); + }; + let [x, y, z] = up.as_vec3(); + let up = vec3(x, y, z); + let [x, y, z] = fwd.as_vec3(); + let fwd = vec3(x, y, z); + let down = -up; + let look_dir = camera + .world_from_cam() + .transform_vector3(fwd + 0.5 * down) + .normalize(); + let look_up = camera.world_from_cam().transform_vector3(up); + let mut radius = camera.picture_plane_distance.unwrap_or(1.0); + if !radius.is_finite() || radius == 0.0 { + radius = 1.0; + } + let center = camera.position(); + let eye_pos = center - radius * look_dir; + OrbitEye::new( + center, + radius, + Quat::from_affine3(&Affine3A::look_at_rh(eye_pos, center, look_up).inverse()), + Vec3::ZERO, + ) +} + +fn default_eye( + scene_bbox: &macaw::BoundingBox, + space_specs: &SpaceSpecs, + space_cameras: &[SpaceCamera3D], +) -> OrbitEye { + if let Some(space_camera) = space_cameras.get(0) { + return focus_on_space_camera(scene_bbox, space_specs, space_camera); + } let mut center = scene_bbox.center(); if !center.is_finite() { center = Vec3::ZERO; @@ -660,3 +704,66 @@ fn default_eye(scene_bbox: &macaw::BoundingBox, space_specs: &SpaceSpecs) -> Orb space_specs.up.unwrap_or(Vec3::ZERO), ) } + +// fn default_eye( +// scene_bbox: &macaw::BoundingBox, +// space_specs: &SpaceSpecs, +// space_cameras: &[SpaceCamera3D], +// ) -> OrbitEye { +// println!("Space cameras: {:?}", space_cameras[0].ent_path); + +// if let Some(space_camera) = space_cameras.get(0) { +// return focus_on_space_camera(scene_bbox, space_specs, space_camera); +// } +// let mut center = scene_bbox.center(); +// if !center.is_finite() { +// center = Vec3::ZERO; +// } + +// let mut radius = 2.0 * scene_bbox.half_size().length(); +// if !radius.is_finite() || radius == 0.0 { +// radius = 1.0; +// } + +// let space_camera = space_cameras.get(0); +// let mut look_up = space_specs.up.unwrap_or(Vec3::Z); + +// let look_dir = if let Some(right) = space_specs.right { +// // Make sure right is to the right, and up is up: +// let fwd = look_up.cross(right); +// 0.75 * fwd + 0.25 * right - 0.25 * look_up +// } else if let Some(space_camera) = space_camera { +// // Look down and forward on the first cam in this space +// let down = -match space_camera.view_coordinates.up() { +// Some(up) => { +// let [x, y, z] = up.as_vec3(); +// let as_vec = vec3(x, y, z); +// look_up = space_camera.world_from_cam().transform_vector3(as_vec); +// as_vec +// } +// None => Vec3::X, +// }; +// let forward = match space_camera.view_coordinates.forward() { +// Some(forward) => { +// let [x, y, z] = forward.as_vec3(); +// vec3(x, y, z) +// } +// None => Vec3::Y, +// }; +// space_camera +// .world_from_cam() +// .transform_vector3(forward + 0.5 * down) +// } else { +// // Look along the cardinal directions: +// vec3(1.0, 1.0, 1.0) +// }; +// let look_dir = look_dir.normalize(); +// let eye_pos = center - 1.0 * look_dir; +// let space_camera = space_camera.unwrap(); +// OrbitEye::new( +// center, +// radius, +// Quat::from_affine3(&Affine3A::look_at_rh(eye_pos, center, look_up).inverse()), +// space_specs.up.unwrap_or(Vec3::ZERO), +// ) +// } diff --git a/crates/re_viewer/src/ui/view_tensor/tensor_slice_to_gpu.rs b/crates/re_viewer/src/ui/view_tensor/tensor_slice_to_gpu.rs index 
887471a9e519..109eb090c4e0 100644 --- a/crates/re_viewer/src/ui/view_tensor/tensor_slice_to_gpu.rs +++ b/crates/re_viewer/src/ui/view_tensor/tensor_slice_to_gpu.rs @@ -1,4 +1,7 @@ -use re_log_types::{component_types::TensorCastError, DecodedTensor, TensorDataType}; +use re_log_types::{ + component_types::{TensorCastError}, + DecodedTensor, TensorDataType, +}; use re_renderer::{ renderer::ColormappedTexture, resource_managers::{GpuTexture2D, Texture2DCreationDesc, TextureManager2DError}, @@ -47,6 +50,7 @@ pub fn colormapped_texture( color_mapper: Some(re_renderer::renderer::ColorMapper::Function( color_mapping.map, )), + encoding: (&tensor.data).into(), }) } diff --git a/crates/re_viewer/src/ui/view_tensor/ui.rs b/crates/re_viewer/src/ui/view_tensor/ui.rs index b1c6066df1fa..2d3d9282384d 100644 --- a/crates/re_viewer/src/ui/view_tensor/ui.rs +++ b/crates/re_viewer/src/ui/view_tensor/ui.rs @@ -48,7 +48,7 @@ impl ViewTensorState { pub fn create(tensor: &DecodedTensor) -> ViewTensorState { Self { slice: SliceSelection { - dim_mapping: DimensionMapping::create(tensor.shape()), + dim_mapping: DimensionMapping::create(tensor.real_shape().as_slice()), selector_values: Default::default(), }, color_mapping: ColorMapping::default(), @@ -86,8 +86,8 @@ impl ViewTensorState { ui.separator(); ui.strong("Dimension Mapping"); - dimension_mapping_ui(ctx.re_ui, ui, &mut self.slice.dim_mapping, tensor.shape()); - let default_mapping = DimensionMapping::create(tensor.shape()); + dimension_mapping_ui(ctx.re_ui, ui, &mut self.slice.dim_mapping, tensor.real_shape().as_slice()); + let default_mapping = DimensionMapping::create(tensor.real_shape().as_slice()); if ui .add_enabled( self.slice.dim_mapping != default_mapping, @@ -97,7 +97,7 @@ impl ViewTensorState { .on_hover_text("Reset dimension mapping to the default.") .clicked() { - self.slice.dim_mapping = DimensionMapping::create(tensor.shape()); + self.slice.dim_mapping = DimensionMapping::create(tensor.real_shape().as_slice()); } } } @@ -113,7 +113,7 @@ pub(crate) fn view_tensor( state.tensor = Some(tensor.clone()); if !state.slice.dim_mapping.is_valid(tensor.num_dim()) { - state.slice.dim_mapping = DimensionMapping::create(tensor.shape()); + state.slice.dim_mapping = DimensionMapping::create(&tensor.real_shape().as_slice()); } let default_item_spacing = ui.spacing_mut().item_spacing; @@ -189,7 +189,8 @@ fn paint_tensor_slice( tensor_stats, state, )?; - let [width, height] = colormapped_texture.texture.width_height(); + + let [width, height] = colormapped_texture.width_height(); let img_size = egui::vec2(width as _, height as _); let img_size = Vec2::max(Vec2::splat(1.0), img_size); // better safe than sorry @@ -208,6 +209,7 @@ fn paint_tensor_slice( let (response, painter) = ui.allocate_painter(desired_size, egui::Sense::hover()); let rect = response.rect; + let image_rect = egui::Rect::from_min_max(rect.min, rect.max); let debug_name = "tensor_slice"; @@ -329,6 +331,7 @@ fn paint_colormap_gradient( range: [0.0, 1.0], gamma: 1.0, color_mapper: Some(re_renderer::renderer::ColorMapper::Function(colormap)), + encoding: None, }; let debug_name = format!("colormap_{colormap}"); @@ -639,7 +642,7 @@ fn selectors_ui(ui: &mut egui::Ui, state: &mut ViewTensorState, tensor: &Tensor) continue; } - let dim = &tensor.shape()[selector.dim_idx]; + let dim = tensor.real_shape().get(selector.dim_idx).unwrap().to_owned(); let size = dim.size; let selector_value = state diff --git a/crates/re_viewer/src/ui/viewport.rs b/crates/re_viewer/src/ui/viewport.rs index 
1c2aa7691a94..4dfa37fe1d6a 100644 --- a/crates/re_viewer/src/ui/viewport.rs +++ b/crates/re_viewer/src/ui/viewport.rs @@ -5,9 +5,14 @@ use ahash::HashMap; use itertools::Itertools as _; -use re_data_store::EntityPath; +use re_arrow_store::LatestAtQuery; +use re_data_store::{query_latest_single, EntityPath}; +use re_log_types::{ + component_types::TensorDataMeaning, EntityPathPart, Tensor, Time, TimeInt, Timeline, +}; use crate::{ + depthai::depthai, misc::{space_info::SpaceInfoCollection, Item, SpaceViewHighlights, ViewerContext}, ui::{space_view_heuristics::default_created_space_views, stats_panel::StatsPanel}, }; @@ -16,7 +21,8 @@ use super::{ device_settings_panel::DeviceSettingsPanel, selection_panel::SelectionPanel, space_view_entity_picker::SpaceViewEntityPicker, space_view_heuristics::all_possible_space_views, stats_panel::StatsPanelState, - view_category::ViewCategory, SpaceView, SpaceViewId, SpaceViewKind, + view_category::ViewCategory, view_spatial::SpatialNavigationMode, SpaceView, SpaceViewId, + SpaceViewKind, }; // ---------------------------------------------------------------------------- @@ -64,7 +70,9 @@ impl Viewport { crate::profile_function!(); let mut blueprint = Self::default(); - for space_view in default_created_space_views(ctx, spaces_info) { + for space_view in all_possible_space_views(ctx, spaces_info) + { + println!("All possible: {:?}", space_view.space_path); blueprint.add_space_view(space_view); } blueprint @@ -115,8 +123,6 @@ impl Viewport { ) { crate::profile_function!(); - let entities_to_remove = ctx.depthai_state.get_entities_to_remove(); - egui::ScrollArea::vertical() .auto_shrink([false; 2]) .show(ui, |ui| { @@ -127,9 +133,7 @@ impl Viewport { // as they didn't create the blueprint by logging the data for space_view in all_possible_space_views(ctx, spaces_info) .into_iter() - .filter(|sv| { - sv.is_depthai_spaceview && !entities_to_remove.contains(&sv.space_path) - }) + .filter(|sv| sv.is_depthai_spaceview) { self.available_space_view_row_ui(ctx, ui, space_view); } @@ -229,54 +233,66 @@ impl Viewport { self.space_view_entity_window = Some(SpaceViewEntityPicker { space_view_id }); } + /// Gets the space views that don't actually have an instance associated with them + fn get_space_views_to_delete( + &mut self, + ctx: &mut ViewerContext<'_>, + spaces_info: &SpaceInfoCollection, + ) -> Vec<SpaceViewId> { + let mut space_views_to_delete = Vec::new(); + let binding = all_possible_space_views(ctx, spaces_info); + let possible_space_views = binding + .iter() + .cloned() + .filter(|sv| sv.is_depthai_spaceview) + .collect_vec(); + for space_view in &possible_space_views { + let mut found = false; + for existing_view in self.space_views.values() { + if existing_view.space_path == space_view.space_path { + found = true; + break; + } + } + if !found { + space_views_to_delete.push(space_view.id); + } + } + for space_view in self.space_views.values() { + if !possible_space_views + .iter() + .map(|sv| sv.space_path.clone()) + .contains(&space_view.space_path) + { + self.has_been_user_edited + .insert(space_view.space_path.clone(), false); + space_views_to_delete.push(space_view.id); + } + } + space_views_to_delete + } + pub fn on_frame_start( &mut self, ctx: &mut ViewerContext<'_>, spaces_info: &SpaceInfoCollection, ) { crate::profile_function!(); - let mut space_views_to_remove = Vec::new(); - - // Get all the entity paths that aren't logged anymore - let entities_to_remove = ctx.depthai_state.get_entities_to_remove(); - // First clear the has_been_user_edited entry, so if the
entity path is a space path and it reappears later, - // it will get added back into the viewport - for ep in &entities_to_remove { - self.has_been_user_edited.insert(ep.clone(), false); - } + + // for space_view_id in &self.get_space_views_to_delete(ctx, spaces_info) { + // self.remove(space_view_id); + // } + self.stats_panel_state.update(ctx); - // Remove all entities that are marked for removal from the space view. - // Remove the space view if it has no entities left for space_view in self.space_views.values_mut() { - if let Some(group) = space_view - .data_blueprint - .group(space_view.data_blueprint.root_handle()) - { - for ep in entities_to_remove.iter() { - space_view.data_blueprint.remove_entity(ep); - } - - if space_view.data_blueprint.entity_paths().is_empty() { - space_views_to_remove.push(space_view.id); - self.has_been_user_edited - .insert(space_view.space_path.clone(), false); - continue; - } - } space_view.on_frame_start(ctx, spaces_info); } - for id in &space_views_to_remove { - if self.space_views.get(id).is_some() { - self.remove(id); - } - } for space_view_candidate in default_created_space_views(ctx, spaces_info) { if !self .has_been_user_edited .get(&space_view_candidate.space_path) .unwrap_or(&false) - && !entities_to_remove.contains(&space_view_candidate.space_path) && self.should_auto_add_space_view(&space_view_candidate) { self.add_space_view(space_view_candidate); @@ -337,24 +353,12 @@ impl Viewport { .trees .entry(visible_space_views.clone()) .or_insert_with(|| { - // TODO(filip): Continue working on this smart layout updater - // if let Some(previous_frame_tree) = &self.previous_frame_tree { - // let mut tree = previous_frame_tree.clone(); - // super::auto_layout::update_tree( - // &mut tree, - // &visible_space_views, - // &self.space_views, - // self.maximized.is_some(), - // ); - // tree - // } else { super::auto_layout::default_tree_from_space_views( ui.available_size(), &visible_space_views, &self.space_views, self.maximized.is_some(), ) - // } }) .clone(); self.previous_frame_tree = Some(tree.clone()); @@ -414,6 +418,42 @@ impl Viewport { { match space_view_kind { SpaceViewKind::Data | SpaceViewKind::Stats => { + let mut entities_to_skip = Vec::new(); + if let Some(space_view) = self.space_views.get_mut(&space_view_id) { + let mut is3d = false; + let mut has_depth = false; + let mut image_to_hide = None; + space_view.data_blueprint.visit_group_entities_recursively( + space_view.data_blueprint.root_handle(), + &mut (|entity_path| { + if is3d && has_depth { + if let Some(last_part) = entity_path.iter().last() { + if last_part == &EntityPathPart::from("Image") { + image_to_hide = Some(entity_path.clone()); + } + } + } + is3d |= entity_path.len() == 2 + && entity_path.iter().last().unwrap() + == &EntityPathPart::from("transform"); + if let Some(last_part) = entity_path.iter().last() { + has_depth |= last_part == &EntityPathPart::from("Depth"); + } + }), + ); + if let Some(image_to_hide) = image_to_hide { + entities_to_skip.push(image_to_hide.clone()); + let mut props = space_view + .data_blueprint + .data_blueprints_individual() + .get(&image_to_hide); + props.visible = false; + space_view + .data_blueprint + .data_blueprints_individual() + .set(image_to_hide.clone(), props); + } + } space_view_options_ui( ctx, ui, @@ -421,6 +461,7 @@ impl Viewport { tab_bar_rect, space_view_id, num_space_views, + entities_to_skip.as_slice(), ); } SpaceViewKind::Selection => { @@ -677,6 +718,7 @@ fn space_view_options_ui( ctx: &mut ViewerContext<'_>, ui: &mut egui::Ui, viewport: &mut Viewport, tab_bar_rect: egui::Rect, space_view_id:
SpaceViewId, num_space_views: usize, + entities_to_skip: &[EntityPath], ) { let tab_bar_rect = tab_bar_rect.shrink2(egui::vec2(4.0, 0.0)); // Add some side margin outside the frame @@ -718,17 +760,18 @@ fn space_view_options_ui( ui.style_mut().wrap = Some(false); let entities = space_view.data_blueprint.entity_paths().clone(); let entities = entities.iter().filter(|ep| { - let eps_to_skip = vec![ - EntityPath::from("color/camera/rgb"), - EntityPath::from("color/camera"), - EntityPath::from("mono/camera"), - EntityPath::from("mono/camera/left_mono"), - EntityPath::from("mono/camera/right_mono"), - ]; - !eps_to_skip.contains(ep) + if let Some(last_part) = ep.iter().last() { + last_part != &EntityPathPart::from("transform") + && last_part != &EntityPathPart::from("mono_cam") + && last_part != &EntityPathPart::from("color_cam") + } else { + false + } }); for entity_path in entities { - // if matches!(entity_path, EntityPath::from("color")) + if entities_to_skip.contains(entity_path) { + continue; + } ui.horizontal(|ui| { let mut properties = space_view .data_blueprint diff --git a/crates/re_viewer/src/viewer_analytics.rs b/crates/re_viewer/src/viewer_analytics.rs index 40a6e40cc73b..d97414df5ce0 100644 --- a/crates/re_viewer/src/viewer_analytics.rs +++ b/crates/re_viewer/src/viewer_analytics.rs @@ -108,7 +108,7 @@ impl ViewerAnalytics { } _ => {} } - if let AppEnvironment::PythonSdk(version, _) = app_env { + if let AppEnvironment::PythonSdk(version, ..) = app_env { event = event.with_prop("python_version", version.to_string()); } @@ -151,7 +151,7 @@ impl ViewerAnalytics { let recording_source = match &rec_info.recording_source { RecordingSource::Unknown => "unknown".to_owned(), - RecordingSource::PythonSdk(_version, _) => "python_sdk".to_owned(), + RecordingSource::PythonSdk(_version, ..) => "python_sdk".to_owned(), RecordingSource::RustSdk { .. } => "rust_sdk".to_owned(), RecordingSource::Other(other) => other.clone(), }; @@ -170,7 +170,7 @@ impl ViewerAnalytics { self.register("llvm_version", llvm_version.to_string()); self.deregister("python_version"); // can't be both! } - if let RecordingSource::PythonSdk(version, _) = &rec_info.recording_source { + if let RecordingSource::PythonSdk(version, ..) = &rec_info.recording_source { self.register("python_version", version.to_string()); self.deregister("rust_version"); // can't be both! self.deregister("llvm_version"); // can't be both! diff --git a/crates/rerun/src/main.rs b/crates/rerun/src/main.rs index 0a80e45e384c..6b82e0f2f535 100644 --- a/crates/rerun/src/main.rs +++ b/crates/rerun/src/main.rs @@ -6,7 +6,6 @@ static GLOBAL: AccountingAllocator = #[tokio::main] async fn main() -> anyhow::Result<std::process::ExitCode> { - println!("Running from CLI!!!"); re_log::setup_native_logging(); let build_info = re_build_info::build_info!(); depthai_viewer::run( diff --git a/crates/rerun/src/run.rs b/crates/rerun/src/run.rs index 4ee3ee57be48..6a4da472a5db 100644 --- a/crates/rerun/src/run.rs +++ b/crates/rerun/src/run.rs @@ -156,6 +156,7 @@ enum AnalyticsCommands { } type SysExePath = String; +type VenvSitePackages = String; /// Where are we calling [`run`] from? #[derive(Clone, Debug, PartialEq, Eq)] @@ -164,7 +165,7 @@ pub enum CallSource { Cli, /// Called from the Rerun Python SDK.
- Python(PythonVersion, SysExePath), + Python(PythonVersion, SysExePath, VenvSitePackages), } #[cfg(feature = "native_viewer")] @@ -179,8 +180,8 @@ impl CallSource { rustc_version: env!("RE_BUILD_RUSTC_VERSION").into(), llvm_version: env!("RE_BUILD_LLVM_VERSION").into(), }, - CallSource::Python(python_version, sys_exe) => { - re_viewer::AppEnvironment::PythonSdk(python_version.clone(), sys_exe.clone()) + CallSource::Python(python_version, sys_exe, venv_site) => { + re_viewer::AppEnvironment::PythonSdk(python_version.clone(), sys_exe.clone(), venv_site.clone()) } } } diff --git a/rerun_py/README.md b/rerun_py/README.md index 91cdfc96ff53..0f1a403cda16 100644 --- a/rerun_py/README.md +++ b/rerun_py/README.md @@ -2,11 +2,10 @@ ![Screenshot from 2023-05-20 00-22-36](https://github.com/luxonis/depthai-viewer/assets/59307111/605bdf38-1bb4-416d-9643-0da1a511d58e) - ## Install ```sh -python3 -m pip install depthai-viewer +python3 -m pip install depthai-viewer --extra-index-url https://test.pypi.org/simple -U ``` ## Run diff --git a/rerun_py/depthai_viewer/__init__.py b/rerun_py/depthai_viewer/__init__.py index 363e19e458ad..aaa3020afa9a 100644 --- a/rerun_py/depthai_viewer/__init__.py +++ b/rerun_py/depthai_viewer/__init__.py @@ -6,14 +6,29 @@ import depthai_viewer_bindings as bindings # type: ignore[attr-defined] from depthai_viewer import _backend +from depthai_viewer.components.tensor import ImageEncoding from depthai_viewer.log import log_cleared -from depthai_viewer.log.annotation import AnnotationInfo, ClassDescription, log_annotation_context +from depthai_viewer.log.annotation import ( + AnnotationInfo, + ClassDescription, + log_annotation_context, +) from depthai_viewer.log.arrow import log_arrow from depthai_viewer.log.bounding_box import log_obb from depthai_viewer.log.camera import log_pinhole from depthai_viewer.log.extension_components import log_extension_components -from depthai_viewer.log.file import ImageFormat, MeshFormat, log_image_file, log_mesh_file -from depthai_viewer.log.image import log_depth_image, log_image, log_segmentation_image +from depthai_viewer.log.file import ( + ImageFormat, + MeshFormat, + log_image_file, + log_mesh_file, +) +from depthai_viewer.log.image import ( + log_depth_image, + log_encoded_image, + log_image, + log_segmentation_image, +) from depthai_viewer.log.imu import log_imu from depthai_viewer.log.lines import log_line_segments, log_line_strip, log_path from depthai_viewer.log.mesh import log_mesh, log_meshes @@ -23,7 +38,11 @@ from depthai_viewer.log.scalar import log_scalar from depthai_viewer.log.tensor import log_tensor from depthai_viewer.log.text import LoggingHandler, LogLevel, log_text_entry -from depthai_viewer.log.transform import log_rigid3, log_unknown_transform, log_view_coordinates +from depthai_viewer.log.transform import ( + log_rigid3, + log_unknown_transform, + log_view_coordinates, +) from depthai_viewer.log.xlink_stats import log_xlink_stats from depthai_viewer.recording import MemoryRecording from depthai_viewer.script_helpers import script_add_args, script_setup, script_teardown @@ -43,6 +62,7 @@ "log_extension_components", "log_image_file", "log_image", + "log_encoded_image", "log_pipeline_graph", "log_line_segments", "log_line_strip", @@ -73,6 +93,8 @@ "log_imu", "log_xlink_stats", "_backend", + "rerun_shutdown", + "ImageEncoding", ] @@ -376,6 +398,18 @@ def serve(open_browser: bool = True, web_port: Optional[int] = None, ws_port: Op bindings.serve(open_browser, web_port, ws_port) +def version() -> str: + """ + Get the 
version of the Rerun SDK. + + Returns + ------- + str + The version of the Rerun SDK. + """ + return str(bindings.version()) + + def start_web_viewer_server(port: int = 0) -> None: """ Start an HTTP server that hosts the rerun web viewer. diff --git a/rerun_py/depthai_viewer/__main__.py b/rerun_py/depthai_viewer/__main__.py index c454feb6d507..ebd74f04c7e7 100644 --- a/rerun_py/depthai_viewer/__main__.py +++ b/rerun_py/depthai_viewer/__main__.py @@ -1,14 +1,101 @@ -"""See `python3 -m rerun --help`.""" +"""See `python3 -m depthai-viewer --help`.""" +import os +import shutil +import signal +import subprocess import sys +import traceback -from depthai_viewer import bindings, unregister_shutdown # type: ignore[attr-defined] +from depthai_viewer import bindings, unregister_shutdown +from depthai_viewer import version as depthai_viewer_version # type: ignore[attr-defined] + +script_path = os.path.dirname(os.path.abspath(__file__)) +venv_dir = os.path.join(script_path, "venv-" + depthai_viewer_version()) + + +def delete_partially_created_venv(path: str) -> None: + try: + if os.path.exists(path): + print(f"Deleting partially created virtual environment: {path}") + shutil.rmtree(path) + except Exception as e: + print(f"Error occurred while attempting to delete the virtual environment: {e}") + print(traceback.format_exc()) + + +def sigint_mid_venv_install_handler(signum, frame) -> None: # type: ignore[no-untyped-def] + delete_partially_created_venv(venv_dir) + + +def create_venv_and_install_dependencies() -> str: + py_executable = ( + os.path.join(venv_dir, "Scripts", "python") + if sys.platform == "win32" + else os.path.join(venv_dir, "bin", "python") + ) + try: + original_sigint_handler = signal.getsignal(signal.SIGINT) + # Create venv if it doesn't exist + if not os.path.exists(venv_dir): + # In case of Ctrl+C during the venv creation, delete the partially created venv + signal.signal(signal.SIGINT, sigint_mid_venv_install_handler) + print("Creating virtual environment...") + subprocess.run([sys.executable, "-m", "venv", venv_dir], check=True) + + # Install dependencies + subprocess.run([py_executable, "-m", "pip", "install", "-U", "pip"], check=True) + # Install depthai_sdk first, then override depthai version with the one from requirements.txt + subprocess.run( + [ + py_executable, + "-m", + "pip", + "install", + "depthai-sdk==1.11.0" + # "git+https://github.com/luxonis/depthai@refactor_xout#subdirectory=depthai_sdk", + ], + check=True, + ) + subprocess.run( + [py_executable, "-m", "pip", "install", "-r", f"{script_path}/requirements.txt"], + check=True, + ) + + venv_packages_dir = subprocess.run( + [py_executable, "-c", "import sysconfig; print(sysconfig.get_paths()['purelib'], end='')"], + capture_output=True, + text=True, + check=True, + ).stdout.strip() + + # Delete old venvs + for item in os.listdir(os.path.join(venv_dir, "..")): + if not item.startswith("venv-"): + continue + if item == os.path.basename(venv_dir): + continue + print(f"Removing old venv: {item}") + shutil.rmtree(os.path.join(venv_dir, "..", item)) + + # Restore original SIGINT handler + signal.signal(signal.SIGINT, original_sigint_handler) + # Return the venv's site-packages directory + return os.path.normpath(venv_packages_dir) + + except Exception as e: + print(f"Error occurred during the creation of the virtual environment or installation of dependencies: {e}") + print(traceback.format_exc()) + delete_partially_created_venv(venv_dir) + exit(1) def main() -> None: - # We don't need to call shutdown in this case.
Rust should be handling everything + venv_site_packages = create_venv_and_install_dependencies() + python_exe = sys.executable + # Call the bindings.main using the Python executable in the venv unregister_shutdown() - exit(bindings.main(sys.argv, sys.executable)) + sys.exit(bindings.main(sys.argv, python_exe, venv_site_packages)) if __name__ == "__main__": diff --git a/rerun_py/depthai_viewer/_backend/classification_labels.py b/rerun_py/depthai_viewer/_backend/classification_labels.py index 7d8f6b7e1958..b88ff33fd251 100644 --- a/rerun_py/depthai_viewer/_backend/classification_labels.py +++ b/rerun_py/depthai_viewer/_backend/classification_labels.py @@ -1,4 +1,4 @@ -MOBILENET_LABELS = [ +MOBILENET_SSD = [ "background", "aeroplane", "bicycle", @@ -21,87 +21,3 @@ "train", "tvmonitor", ] - - -YOLO_TINY_LABELS = [ - "person", - "bicycle", - "car", - "motorcycle", - "airplane", - "bus", - "train", - "truck", - "boat", - "trafficlight", - "firehydrant", - "stopsign", - "parkingmeter", - "bench", - "bird", - "cat", - "dog", - "horse", - "sheep", - "cow", - "elephant", - "bear", - "zebra", - "giraffe", - "backpack", - "umbrella", - "handbag", - "tie", - "suitcase", - "frisbee", - "skis", - "snowboard", - "sportsball", - "kite", - "baseballbat", - "baseballglove", - "skateboard", - "surfboard", - "tennisracket", - "bottle", - "wineglass", - "cup", - "fork", - "knife", - "spoon", - "bowl", - "banana", - "apple", - "sandwich", - "orange", - "broccoli", - "carrot", - "hotdog", - "pizza", - "donut", - "cake", - "chair", - "couch", - "pottedplant", - "bed", - "diningtable", - "toilet", - "tv", - "laptop", - "mouse", - "remote", - "keyboard", - "cellphone", - "microwave", - "oven", - "toaster", - "sink", - "refrigerator", - "book", - "clock", - "vase", - "scissors", - "teddybear", - "hairdrier", - "toothbrush", -] diff --git a/rerun_py/depthai_viewer/_backend/config_api.py b/rerun_py/depthai_viewer/_backend/config_api.py index e19b09fc91c7..09630e56a70e 100644 --- a/rerun_py/depthai_viewer/_backend/config_api.py +++ b/rerun_py/depthai_viewer/_backend/config_api.py @@ -1,19 +1,27 @@ import asyncio +import atexit import json from enum import Enum from multiprocessing import Queue from queue import Empty as QueueEmptyException from signal import SIGINT, signal -from typing import Any, Dict, Optional, Tuple +from typing import Any, Dict import depthai as dai import websockets from websockets.server import WebSocketServerProtocol from depthai_viewer._backend.device_configuration import PipelineConfiguration -from depthai_viewer._backend.store import Action +from depthai_viewer._backend.messages import ( + DevicesMessage, + ErrorMessage, + InfoMessage, + Message, + MessageType, +) from depthai_viewer._backend.topic import Topic +atexit.register(lambda: print("Exiting...")) signal(SIGINT, lambda *args, **kwargs: exit(0)) # Definitions for linting @@ -27,38 +35,42 @@ send_message_queue: Queue # type: ignore[type-arg] -def dispatch_action(action: Action, **kwargs) -> Tuple[bool, Dict[str, Any]]: # type: ignore[no-untyped-def] +class Action(Enum): + UPDATE_PIPELINE = 0 + SELECT_DEVICE = 1 + GET_SUBSCRIPTIONS = 2 + SET_SUBSCRIPTIONS = 3 + GET_PIPELINE = 4 + RESET = 5 # When anything bad happens, a reset occurs (like closing ws connection) + GET_AVAILABLE_DEVICES = 6 + + +def dispatch_action(action: Action, **kwargs) -> Message: # type: ignore[no-untyped-def] """ - Dispatches an action that will be executed by store.py. + Dispatches an action that will be executed by main.py. 
- Returns: (success: bool, result: Dict[str, Any]). + Returns: Message that will be sent to the frontend """ dispatch_action_queue.put((action, kwargs)) return result_queue.get() # type: ignore[no-any-return] -class MessageType: - SUBSCRIPTIONS = "Subscriptions" # Get or set subscriptions - PIPELINE = "Pipeline" # Get or Set pipeline - DEVICES = "Devices" # Get device list - DEVICE = "Device" # Get or set device - ERROR = "Error" # Error message - - -class ErrorAction(Enum): - NONE = "None" - FULL_RESET = "FullReset" - - def __str__(self) -> str: - return self.value - - -def error(message: str, action: ErrorAction) -> str: - """Create an error message to send via ws.""" - return json.dumps({"type": MessageType.ERROR, "data": {"action": str(action), "message": message}}) +async def send_message(websocket: WebSocketServerProtocol, message: Message) -> None: + """Sends a message to the frontend without the frontend sending a message first.""" + if isinstance(message, InfoMessage) and not message.message: + return + await websocket.send(message.json()) async def ws_api(websocket: WebSocketServerProtocol) -> None: + """ + Receives messages from the frontend, dispatches them to the backend and sends the result back to the frontend. + + Received Messages include the wanted state of the backend, + e.g.: A DeviceMessage received from the frontend includes the device the user wants to select. + The backend then tries to select the device and sends back a DeviceMessage + with the selected device (selected device can be None if the selection failed). + """ while True: raw_message = None try: @@ -66,10 +78,9 @@ async def ws_api(websocket: WebSocketServerProtocol) -> None: except asyncio.TimeoutError: pass except websockets.exceptions.ConnectionClosed: - success, _ = dispatch_action(Action.RESET) # type: ignore[assignment] - if success: - return - raise Exception("Couldn't reset backend after websocket disconnect!") + if isinstance(dispatch_action(Action.RESET), ErrorMessage): + raise Exception("Couldn't reset backend after websocket disconnect!") + return if raw_message: try: @@ -82,86 +93,51 @@ async def ws_api(websocket: WebSocketServerProtocol) -> None: print("Missing message type") continue print("Got message: ", message) + if message_type == MessageType.SUBSCRIPTIONS: data = message.get("data", {}) subscriptions = [Topic.create(topic_name) for topic_name in data.get(MessageType.SUBSCRIPTIONS, [])] - dispatch_action(Action.SET_SUBSCRIPTIONS, subscriptions=subscriptions) - print("Subscriptions: ", subscriptions) - active_subscriptions = [ - topic.name # type: ignore[attr-defined] - for topic in dispatch_action(Action.GET_SUBSCRIPTIONS) - if topic - ] - await websocket.send(json.dumps({"type": MessageType.SUBSCRIPTIONS, "data": active_subscriptions})) + await send_message(websocket, dispatch_action(Action.SET_SUBSCRIPTIONS, subscriptions=subscriptions)) + elif message_type == MessageType.PIPELINE: data = message.get("data", {}) pipeline_config_json, runtime_only = data.get("Pipeline", ({}, False)) pipeline_config = PipelineConfiguration(**pipeline_config_json) print("Pipeline config: ", pipeline_config) - success, result = dispatch_action( - Action.UPDATE_PIPELINE, pipeline_config=pipeline_config, runtime_only=runtime_only + await send_message( + websocket, + dispatch_action(Action.UPDATE_PIPELINE, pipeline_config=pipeline_config, runtime_only=runtime_only), ) - if runtime_only: - # Send a full reset if setting a runtime config fails. - # Don't send pipeline config to save bandwidth. 
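# The control flow above is a blocking round trip over a pair of queues: ws_api
# enqueues an (Action, kwargs) request and waits until the backend's main loop
# answers with a Message. A minimal, runnable sketch of that handshake with
# assumed names (the real code uses multiprocessing.Queue and richer Message
# types; this is illustration, not code from the diff):
import threading
from enum import Enum
from queue import Queue
from typing import Any, Dict, Tuple


class _Action(Enum):
    RESET = 5


_dispatch_q: "Queue[Tuple[_Action, Dict[str, Any]]]" = Queue()
_result_q: "Queue[str]" = Queue()


def _dispatch(action: _Action, **kwargs: Any) -> str:
    _dispatch_q.put((action, kwargs))  # Hand the request to the backend...
    return _result_q.get()  # ...and block until it responds.


def _backend_loop_once() -> None:
    action, kwargs = _dispatch_q.get()  # Backend picks the request up...
    _result_q.put(f"InfoMessage: handled {action.name}")  # ...and replies.


threading.Thread(target=_backend_loop_once, daemon=True).start()
print(_dispatch(_Action.RESET))  # -> "InfoMessage: handled RESET"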
- if not success: - await websocket.send(error("Failed to set runtime config", ErrorAction.FULL_RESET)) - continue - if success: - active_config: Optional[PipelineConfiguration] = dispatch_action( - Action.GET_PIPELINE - ) # type: ignore[assignment] - print("Active config: ", active_config) - await websocket.send( - json.dumps( - { - "type": MessageType.PIPELINE, - "data": (active_config.to_json(), False) if active_config is not None else None, - } - ) - ) - else: - await websocket.send(error("Unknown error", ErrorAction.FULL_RESET)) + elif message_type == MessageType.DEVICES: - await websocket.send( - json.dumps( - { - "type": MessageType.DEVICES, - "data": [ - d.getMxId() for d in dai.Device.getAllAvailableDevices() # type: ignore[call-arg] - ], - } - ) + await send_message( + websocket, + DevicesMessage( + [d.getMxId() for d in dai.Device.getAllAvailableDevices()] + ), # type: ignore[call-arg] ) elif message_type == MessageType.DEVICE: data = message.get("data", {}) - device_repr = data.get("Device", {}) + device_repr = data.get(message_type, {}) device_id = device_repr.get("id", None) if device_id is None: print("Missing device id") continue - success, result = dispatch_action(Action.SELECT_DEVICE, device_id=device_id) - if success: - print("Selected device properties: ", result.get("device_properties", None)) - await websocket.send( - json.dumps({"type": MessageType.DEVICE, "data": result.get("device_properties", {})}) - ) - else: - await websocket.send(error(result.get("message", "Unknown error"), ErrorAction.FULL_RESET)) - + await send_message(websocket, dispatch_action(Action.SELECT_DEVICE, device_id=device_id)) else: print("Unknown message type: ", message_type) continue - send_message = None + + message_to_send = None try: - send_message = send_message_queue.get(timeout=0.01) + message_to_send = send_message_queue.get(timeout=0.01) except QueueEmptyException: pass - if send_message: - print("Sending message: ", send_message) - await websocket.send(send_message) + if message_to_send: + print("Sending message: ", message_to_send) + await send_message(websocket, message_to_send) async def main() -> None: diff --git a/rerun_py/depthai_viewer/_backend/device.py b/rerun_py/depthai_viewer/_backend/device.py new file mode 100644 index 000000000000..f8c45fad1998 --- /dev/null +++ b/rerun_py/depthai_viewer/_backend/device.py @@ -0,0 +1,386 @@ +import itertools +import time +from typing import Dict, List, Optional, Tuple + +import depthai as dai +import numpy as np +from depthai_sdk import OakCamera +from depthai_sdk.components import CameraComponent, NNComponent, StereoComponent +from numpy.typing import NDArray + +import depthai_viewer as viewer +from depthai_viewer._backend import classification_labels +from depthai_viewer._backend.device_configuration import ( + CameraConfiguration, + CameraFeatures, + DeviceProperties, + ImuKind, + PipelineConfiguration, + calculate_isp_scale, + compare_dai_camera_configs, + resolution_to_enum, +) +from depthai_viewer._backend.messages import ErrorMessage, InfoMessage, Message +from depthai_viewer._backend.packet_handler import ( + AiModelCallbackArgs, + DepthCallbackArgs, + PacketHandler, + SyncedCallbackArgs, +) +from depthai_viewer._backend.store import Store + + +class XlinkStatistics: + _device: dai.Device + _time_of_last_update: float = 0 # s since epoch + + def __init__(self, device: dai.Device): + self._device = device + + def update(self) -> None: + if time.time() - self._time_of_last_update >= 32e-3: + self._time_of_last_update = time.time() 
+ if hasattr(self._device, "getProfilingData"): # Only on latest develop + try: + xlink_stats = self._device.getProfilingData() + viewer.log_xlink_stats( + xlink_stats.numBytesWritten, xlink_stats.numBytesRead, self._time_of_last_update + ) + except Exception: + pass + + +# import cProfile +# import time + + +class Device: + id: str + intrinsic_matrix: Dict[Tuple[dai.CameraBoardSocket, int, int], NDArray[np.float32]] = {} + calibration_data: Optional[dai.CalibrationHandler] = None + use_encoding: bool = False + store: Store + + _packet_handler: PacketHandler + _oak: Optional[OakCamera] = None + _cameras: List[CameraComponent] = [] + _stereo: StereoComponent = None + _nnet: NNComponent = None + _xlink_statistics: Optional[XlinkStatistics] = None + + # _profiler = cProfile.Profile() + + def __init__(self, device_id: str, store: Store): + self.id = device_id + self.set_oak(OakCamera(device_id)) + self.store = store + self._packet_handler = PacketHandler(self.store, self.get_intrinsic_matrix) + print("Oak cam: ", self._oak) + # self.start = time.time() + # self._profiler.enable() + + def set_oak(self, oak_cam: Optional[OakCamera]) -> None: + self._oak = oak_cam + self._xlink_statistics = None + if self._oak is not None: + self._xlink_statistics = XlinkStatistics(self._oak.device) + + def is_closed(self) -> bool: + return self._oak is not None and self._oak.device.isClosed() + + def get_intrinsic_matrix(self, board_socket: dai.CameraBoardSocket, width: int, height: int) -> NDArray[np.float32]: + if self.intrinsic_matrix.get((board_socket, width, height)) is not None: + return self.intrinsic_matrix.get((board_socket, width, height)) # type: ignore[return-value] + if self.calibration_data is None: + raise Exception("Missing calibration data!") + M_right = self.calibration_data.getCameraIntrinsics( # type: ignore[union-attr] + board_socket, dai.Size2f(width, height) + ) + self.intrinsic_matrix[(board_socket, width, height)] = np.array(M_right).reshape(3, 3) + return self.intrinsic_matrix[(board_socket, width, height)] + + def _get_possible_stereo_pairs_for_cam( + self, cam: dai.CameraFeatures, connected_camera_features: List[dai.CameraFeatures] + ) -> List[dai.CameraBoardSocket]: + """Tries to find the possible stereo pairs for a camera.""" + if self._oak is None: + return [] + calib_data = self._oak.device.readCalibration() + try: + calib_data.getCameraIntrinsics(cam.socket) + except IndexError: + return [] + possible_stereo_pairs = [] + if cam.name == "right": + possible_stereo_pairs.extend( + [features.socket for features in filter(lambda c: c.name == "left", connected_camera_features)] + ) + elif cam.name == "left": + possible_stereo_pairs.extend( + [features.socket for features in filter(lambda c: c.name == "right", connected_camera_features)] + ) + else: + possible_stereo_pairs.extend( + [ + camera.socket + for camera in connected_camera_features + if camera != cam + and all( + map( + lambda confs: compare_dai_camera_configs(confs[0], confs[1]), + zip(camera.configs, cam.configs), + ) + ) + ] + ) + stereo_pairs = [] + for pair in possible_stereo_pairs: + try: + calib_data.getCameraIntrinsics(pair) + except IndexError: + continue + stereo_pairs.append(pair) + return stereo_pairs + + def get_device_properties(self) -> DeviceProperties: + if self._oak is None: + raise Exception("No device selected!") + connected_cam_features = self._oak.device.getConnectedCameraFeatures() + imu = self._oak.device.getConnectedIMU() + imu = ImuKind.NINE_AXIS if "BNO" in imu else None if imu == "NONE" else 
ImuKind.SIX_AXIS + device_properties = DeviceProperties(id=self.id, imu=imu) + try: + calib = self._oak.device.readCalibration2() + left_cam = calib.getStereoLeftCameraId() + right_cam = calib.getStereoRightCameraId() + device_properties.default_stereo_pair = (left_cam, right_cam) + except RuntimeError: + pass + for cam in connected_cam_features: + prioritized_type = cam.supportedTypes[0] + device_properties.cameras.append( + CameraFeatures( + board_socket=cam.socket, + max_fps=60, + resolutions=[ + resolution_to_enum[(conf.width, conf.height)] + for conf in cam.configs + if conf.type == prioritized_type # Only support the prioritized type for now + ], + supported_types=cam.supportedTypes, + stereo_pairs=self._get_possible_stereo_pairs_for_cam(cam, connected_cam_features), + name=cam.name.capitalize(), + ) + ) + device_properties.stereo_pairs = list( + itertools.chain.from_iterable( + [(cam.board_socket, pair) for pair in cam.stereo_pairs] for cam in device_properties.cameras + ) + ) + return device_properties + + def close_oak(self) -> None: + if self._oak is None: + return + if self._oak.running(): + self._oak.device.__exit__(0, 0, 0) + + def reconnect_to_oak(self) -> Message: + """ + + Try to reconnect to the device with self.id. + + Timeout after 10 seconds. + """ + if self._oak is None: + return ErrorMessage("No device selected, can't reconnect!") + if self._oak.device.isClosed(): + timeout_start = time.time() + while time.time() - timeout_start < 10: + available_devices = [ + device.getMxId() for device in dai.Device.getAllAvailableDevices() # type: ignore[call-arg] + ] + if self.id in available_devices: + break + try: + self.set_oak(OakCamera(self.id)) + return InfoMessage("Successfully reconnected to device") + except RuntimeError as e: + print("Failed to create oak camera") + print(e) + self.set_oak(None) + return ErrorMessage("Failed to create oak camera") + + def _get_component_by_socket(self, socket: dai.CameraBoardSocket) -> Optional[CameraComponent]: + component = list(filter(lambda c: c.node.getBoardSocket() == socket, self._cameras)) + if not component: + return None + return component[0] + + def _get_camera_config_by_socket( + self, config: PipelineConfiguration, socket: dai.CameraBoardSocket + ) -> Optional[CameraConfiguration]: + print("Getting cam by socket: ", socket, " Cameras: ", config.cameras) + camera = list(filter(lambda c: c.board_socket == socket, config.cameras)) + if not camera: + return None + return camera[0] + + def update_pipeline(self, config: PipelineConfiguration, runtime_only: bool) -> Message: + if self._oak is None: + return ErrorMessage("No device selected, can't update pipeline!") + if self._oak.device.isPipelineRunning(): + if runtime_only: + if config.depth is not None: + self._stereo.control.send_controls(config.depth.to_runtime_controls()) + return InfoMessage("") + return ErrorMessage("Depth is disabled, can't send runtime controls!") + print("Cam running, closing...") + self.close_oak() + message = self.reconnect_to_oak() + if isinstance(message, ErrorMessage): + return message + + self._cameras = [] + self._stereo = None + self._packet_handler.reset() + synced_outputs = [] + synced_callback_args = SyncedCallbackArgs() + + is_poe = self._oak.device.getDeviceInfo().protocol == dai.XLinkProtocol.X_LINK_TCP_IP + print("Usb speed: ", self._oak.device.getUsbSpeed()) + is_usb2 = self._oak.device.getUsbSpeed() == dai.UsbSpeed.HIGH + if is_poe: + print("Connected to a PoE device, camera streams will be JPEG encoded...") + elif is_usb2: + 
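# Why PoE and USB2 force encoding: raw camera streams easily exceed what those
# links can carry. A back-of-the-envelope check of the same trade-off -- a
# standalone sketch; the bandwidth figures are rough assumptions, not values
# read from the SDK:
def raw_stream_mbps(width: int, height: int, fps: int) -> float:
    # NV12 frames are 1.5 bytes per pixel; convert bytes/s to Mbit/s.
    return width * height * 1.5 * 8 * fps / 1e6


# Typical three-stream setup: 1080p30 color plus two 800p30 mono cameras.
total = raw_stream_mbps(1920, 1080, 30) + 2 * raw_stream_mbps(1280, 800, 30)
print(round(total))  # ~1484 Mbps of raw video
print(total > 1000)  # True -> too much for 1 Gbps PoE, so JPEG-encode
print(total > 480)   # True -> far too much for USB2, so JPEG-encode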
print("Device is connected in USB2 mode, camera streams will be JPEG encoded...") + self.use_encoding = is_poe or is_usb2 + for cam in config.cameras: + print("Creating camera: ", cam) + sdk_cam = self._oak.create_camera( + cam.board_socket, + cam.resolution.as_sdk_resolution(), + cam.fps, + encode=self.use_encoding, + name=cam.name.capitalize(), + ) + if cam.stream_enabled: + if config.depth and ( + cam.board_socket == config.depth.align or cam.board_socket in config.depth.stereo_pair + ): + synced_outputs.append(sdk_cam.out.main) + else: + self._oak.callback( + sdk_cam, + self._packet_handler.build_callback(cam.board_socket), + ) + self._cameras.append(sdk_cam) + + if config.depth: + print("Creating depth") + stereo_pair = config.depth.stereo_pair + left_cam = self._get_component_by_socket(stereo_pair[0]) + right_cam = self._get_component_by_socket(stereo_pair[1]) + if not left_cam or not right_cam: + return ErrorMessage(f"{cam} is not configured. Couldn't create stereo pair.") + + if left_cam.node.getResolutionWidth() > 1280: + print("Left cam width > 1280, setting isp scale to get 800") + left_cam.config_color_camera(isp_scale=calculate_isp_scale(left_cam.node.getResolutionWidth())) + if right_cam.node.getResolutionWidth() > 1280: + print("Right cam width > 1280, setting isp scale to get 800") + right_cam.config_color_camera(isp_scale=calculate_isp_scale(right_cam.node.getResolutionWidth())) + self._stereo = self._oak.create_stereo(left=left_cam, right=right_cam, name="depth") + + # We used to be able to pass in the board socket to align to, but this was removed in depthai 1.10.0 + align_component = self._get_component_by_socket(config.depth.align) + if not align_component: + return ErrorMessage(f"{config.depth.align} is not configured. Couldn't create stereo pair.") + self._stereo.config_stereo( + lr_check=config.depth.lr_check, + subpixel=config.depth.subpixel_disparity, + confidence=config.depth.confidence, + align=align_component, + lr_check_threshold=config.depth.lrc_threshold, + median=config.depth.median, + ) + + aligned_camera = self._get_camera_config_by_socket(config, config.depth.align) + if not aligned_camera: + return ErrorMessage(f"{config.depth.align} is not configured. Couldn't create stereo pair.") + synced_callback_args.depth_args = DepthCallbackArgs( + alignment_camera=aligned_camera, stereo_pair=config.depth.stereo_pair + ) + synced_outputs.append(self._stereo.out.main) + + if self._oak.device.getConnectedIMU() != "NONE": + print("Creating IMU") + imu = self._oak.create_imu() + sensors = [ + dai.IMUSensor.ACCELEROMETER_RAW, + dai.IMUSensor.GYROSCOPE_RAW, + ] + if "BNO" in self._oak.device.getConnectedIMU(): + sensors.append(dai.IMUSensor.MAGNETOMETER_CALIBRATED) + imu.config_imu( + sensors, report_rate=config.imu.report_rate, batch_report_threshold=config.imu.batch_report_threshold + ) + self._oak.callback(imu, self._packet_handler.on_imu) + else: + print("Connected cam doesn't have IMU, skipping IMU creation...") + + if config.ai_model and config.ai_model.path: + cam_component = self._get_component_by_socket(config.ai_model.camera) + if not cam_component: + return ErrorMessage(f"{config.ai_model.camera} is not configured. 
Couldn't create NN.") + labels: Optional[List[str]] = None + if config.ai_model.path == "age-gender-recognition-retail-0013": + face_detection = self._oak.create_nn("face-detection-retail-0004", cam_component) + self._nnet = self._oak.create_nn("age-gender-recognition-retail-0013", input=face_detection) + else: + self._nnet = self._oak.create_nn(config.ai_model.path, cam_component) + labels = getattr(classification_labels, config.ai_model.path.upper().replace("-", "_"), None) + + camera = self._get_camera_config_by_socket(config, config.ai_model.camera) + if not camera: + return ErrorMessage(f"{config.ai_model.camera} is not configured. Couldn't create NN.") + + self._oak.callback( + self._nnet, + self._packet_handler.build_callback( + AiModelCallbackArgs(model_name=config.ai_model.path, camera=camera, labels=labels) + ), + ) + if synced_outputs: + self._oak.sync(synced_outputs, self._packet_handler.build_sync_callback(synced_callback_args)) + try: + self._oak.start(blocking=False) + except RuntimeError as e: + print("Couldn't start pipeline: ", e) + return ErrorMessage("Couldn't start pipeline") + + running = self._oak.running() + if running: + try: + self._oak.poll() + except RuntimeError: + return ErrorMessage("Runtime error when polling the device. Check the terminal for more info.") + self.calibration_data = self._oak.device.readCalibration() + self.intrinsic_matrix = {} + return InfoMessage("Pipeline started") if running else ErrorMessage("Couldn't start pipeline") + + def update(self) -> None: + if self._oak is None: + return + if not self._oak.running(): + return + self._oak.poll() + if self._xlink_statistics is not None: + self._xlink_statistics.update() + + # if time.time() - self.start > 10: + # print("Dumping profiling data") + # self._profiler.dump_stats("profile.prof") + # self._profiler.disable() + # self._profiler.enable() + # self.start = time.time() diff --git a/rerun_py/depthai_viewer/_backend/device_configuration.py b/rerun_py/depthai_viewer/_backend/device_configuration.py index 4a757a6a4874..8d5fd04655b6 100644 --- a/rerun_py/depthai_viewer/_backend/device_configuration.py +++ b/rerun_py/depthai_viewer/_backend/device_configuration.py @@ -1,86 +1,11 @@ -from typing import Any, Dict, Optional +from enum import Enum +from fractions import Fraction +from typing import Any, Dict, List, Optional, Tuple import depthai as dai from depthai_sdk import Previews as QueueNames from pydantic import BaseModel - -class ColorCameraConfiguration(BaseModel): # type: ignore[misc] - fps: Optional[int] = 30 - resolution: Optional[ - dai.ColorCameraProperties.SensorResolution - ] = dai.ColorCameraProperties.SensorResolution.THE_1080_P - board_socket: Optional[dai.CameraBoardSocket] = dai.CameraBoardSocket.RGB - out_preview: bool = False - xout_still: bool = False - xout_video: bool = True - input_control: bool = False - - class Config: - arbitrary_types_allowed = True - # Doesnt work atm - json_encoders = { - Optional[dai.MonoCameraProperties.SensorResolution]: lambda v: v.name, - dai.CameraBoardSocket: lambda v: v.name, - } - - def __init__(self, **v) -> None: # type: ignore[no-untyped-def] - if v.get("resolution"): - v["resolution"] = getattr(dai.ColorCameraProperties.SensorResolution, v["resolution"]) - if v.get("board_socket"): - v["board_socket"] = getattr(dai.CameraBoardSocket, v["board_socket"]) - return super().__init__(**v) # type: ignore[no-any-return] - - @property - # Make this select the queue based on ui, also probably not just one queue - def out_queue_name(self) -> 
Optional[str]: - prefix: str = QueueNames.color.name - if self.out_preview: - return prefix + "_preview" - if self.xout_still: - return prefix + "_still" - if self.xout_video: - return prefix + "_video" - return None - - -class MonoCameraConfiguration(BaseModel): # type: ignore[misc] - fps: Optional[int] = 30 - resolution: Optional[ - dai.MonoCameraProperties.SensorResolution - ] = dai.MonoCameraProperties.SensorResolution.THE_400_P - board_socket: Optional[dai.CameraBoardSocket] = dai.CameraBoardSocket.LEFT - xout: bool = False # Depth queue fails if I create this queue! - input_control: bool = False - - class Config: - arbitrary_types_allowed = True - # Doesnt work atm - json_encoders = { - Optional[dai.MonoCameraProperties.SensorResolution]: lambda v: v.name, - dai.CameraBoardSocket: lambda v: v.name, - } - - def __init__(self, **v) -> None: # type: ignore[no-untyped-def] - if v.get("resolution"): - v["resolution"] = getattr(dai.MonoCameraProperties.SensorResolution, v["resolution"]) - if v.get("board_socket"): - v["board_socket"] = getattr(dai.CameraBoardSocket, v["board_socket"]) - return super().__init__(**v) # type: ignore[no-any-return] - - @property - def out_queue_name(self) -> str: - return "left" if self.board_socket == dai.CameraBoardSocket.LEFT else "right" - - @classmethod - def create_left(cls, **kwargs) -> "MonoCameraConfiguration": # type: ignore[no-untyped-def] - return cls(board_socket="LEFT", **kwargs) - - @classmethod - def create_right(cls, **kwargs) -> "MonoCameraConfiguration": # type: ignore[no-untyped-def] - return cls(board_socket="RIGHT", **kwargs) - - # class PointcloudConfiguration(BaseModel): # enabled: bool = True @@ -91,22 +16,40 @@ class DepthConfiguration(BaseModel): # type: ignore[misc] lrc_threshold: int = 5 # 0..10 extended_disparity: Optional[bool] = False subpixel_disparity: Optional[bool] = True - align: Optional[dai.CameraBoardSocket] = dai.CameraBoardSocket.CENTER + align: dai.CameraBoardSocket = dai.CameraBoardSocket.CAM_B sigma: int = 0 # 0..65535 # pointcloud: PointcloudConfiguration | None = None confidence: int = 230 + stereo_pair: Tuple[dai.CameraBoardSocket, dai.CameraBoardSocket] class Config: arbitrary_types_allowed = True def __init__(self, **v) -> None: # type: ignore[no-untyped-def] - if v.get("median"): + if v.get("median", None): v["median"] = getattr(dai.MedianFilter, v["median"]) - if v.get("align"): + if v.get("align", None): v["align"] = getattr(dai.CameraBoardSocket, v["align"]) - + if v.get("stereo_pair", None) and all(isinstance(pair, str) for pair in v["stereo_pair"]): + v["stereo_pair"] = ( + getattr(dai.CameraBoardSocket, v["stereo_pair"][0]), + getattr(dai.CameraBoardSocket, v["stereo_pair"][1]), + ) return super().__init__(**v) # type: ignore[no-any-return] + def dict(self, *args, **kwargs) -> Dict[str, Any]: # type: ignore[no-untyped-def] + return { + "median": self.median.name if self.median else None, + "lr_check": self.lr_check, + "lrc_threshold": self.lrc_threshold, + "extended_disparity": self.extended_disparity, + "subpixel_disparity": self.subpixel_disparity, + "align": self.align.name, + "sigma": self.sigma, + "confidence": self.confidence, + "stereo_pair": [socket.name for socket in self.stereo_pair], + } + def to_runtime_controls(self) -> Dict[str, Any]: return { "algorithm_control": { @@ -144,6 +87,22 @@ def out_queue_name(self) -> str: class AiModelConfiguration(BaseModel): # type: ignore[misc] display_name: str = "Yolo V8" path: str = "yolov8n_coco_640x352" + camera: dai.CameraBoardSocket + + class Config: 
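# The pattern repeated across this module: depthai's C++ enums aren't
# JSON-serializable and pydantic's json_encoders don't fire for them, so each
# model coerces incoming strings with getattr(dai.X, name) in __init__ and
# emits .name from a hand-written dict(). The round trip in isolation -- a
# standalone sketch using a plain Enum stand-in so it runs without depthai:
from enum import Enum
from typing import Union


class BoardSocket(Enum):  # stand-in for dai.CameraBoardSocket
    CAM_A = 0
    CAM_B = 1


def coerce(value: Union[str, BoardSocket]) -> BoardSocket:
    # Accept either the enum itself or its name, like the __init__ methods here
    return getattr(BoardSocket, value) if isinstance(value, str) else value


sock = coerce("CAM_B")
assert sock is BoardSocket.CAM_B
assert sock.name == "CAM_B"  # what dict() would serialize back out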
+ arbitrary_types_allowed = True + + def __init__(self, **v) -> None: # type: ignore[no-untyped-def] + if v.get("camera", None) and isinstance(v["camera"], str): + v["camera"] = getattr(dai.CameraBoardSocket, v["camera"]) + return super().__init__(**v) # type: ignore[no-any-return] + + def dict(self, *args, **kwargs): # type: ignore[no-untyped-def] + return { + "display_name": self.display_name, + "path": self.path, + "camera": self.camera.name, + } class ImuConfiguration(BaseModel): # type: ignore[misc] @@ -151,40 +110,174 @@ class ImuConfiguration(BaseModel): # type: ignore[misc] batch_report_threshold: int = 5 +class CameraSensorResolution(Enum): + THE_400_P: str = "THE_400_P" + THE_480_P: str = "THE_480_P" + THE_720_P: str = "THE_720_P" + THE_800_P: str = "THE_800_P" + THE_1080_P: str = "THE_1080_P" + THE_1200_P: str = "THE_1200_P" + THE_12_MP: str = "THE_12_MP" + THE_13_MP: str = "THE_13_MP" + THE_1440X1080: str = "THE_1440X1080" + THE_4000X3000: str = "THE_4000X3000" + THE_48_MP: str = "THE_48_MP" + THE_4_K: str = "THE_4_K" + THE_5312X6000: str = "THE_5312X6000" + THE_5_MP: str = "THE_5_MP" + + def dict(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def] + return self.value + + def as_sdk_resolution(self) -> str: + return self.value.replace("_", "").replace("THE", "") + + +class ImuKind(Enum): + SIX_AXIS = "SIX_AXIS" + NINE_AXIS = "NINE_AXIS" + + +class CameraConfiguration(BaseModel): # type: ignore[misc] + fps: int = 30 + resolution: CameraSensorResolution + kind: dai.CameraSensorType + board_socket: dai.CameraBoardSocket + stream_enabled: bool = True + name: str = "" + + class Config: + arbitrary_types_allowed = True + + def __init__(self, **v) -> None: # type: ignore[no-untyped-def] + if v.get("board_socket", None): + if isinstance(v["board_socket"], str): + v["board_socket"] = getattr(dai.CameraBoardSocket, v["board_socket"]) + if v.get("kind", None): + if isinstance(v["kind"], str): + v["kind"] = getattr(dai.CameraSensorType, v["kind"]) + return super().__init__(**v) # type: ignore[no-any-return] + + def dict(self, *args, **kwargs) -> Dict[str, Any]: # type: ignore[no-untyped-def] + return { + "fps": self.fps, + "resolution": self.resolution.dict(), + "kind": self.kind.name, + "board_socket": self.board_socket.name, + "name": self.name, + "stream_enabled": self.stream_enabled, + } + + @classmethod + def create_left(cls, **kwargs) -> "CameraConfiguration": # type: ignore[no-untyped-def] + if not kwargs.get("kind", None): + kwargs["kind"] = dai.CameraSensorType.MONO + if not kwargs.get("resolution", None): + kwargs["resolution"] = CameraSensorResolution.THE_400_P + return cls(board_socket="LEFT", **kwargs) + + @classmethod + def create_right(cls, **kwargs) -> "CameraConfiguration": # type: ignore[no-untyped-def] + if not kwargs.get("kind", None): + kwargs["kind"] = dai.CameraSensorType.MONO + if not kwargs.get("resolution", None): + kwargs["resolution"] = CameraSensorResolution.THE_400_P + return cls(board_socket="RIGHT", **kwargs) + + @classmethod + def create_color(cls, **kwargs) -> "CameraConfiguration": # type: ignore[no-untyped-def] + if not kwargs.get("kind", None): + kwargs["kind"] = dai.CameraSensorType.COLOR + if not kwargs.get("resolution", None): + kwargs["resolution"] = CameraSensorResolution.THE_720_P + return cls(board_socket="RGB", **kwargs) + + +class CameraFeatures(BaseModel): # type: ignore[misc] + resolutions: List[CameraSensorResolution] = [] + max_fps: int = 60 + board_socket: dai.CameraBoardSocket + supported_types: List[dai.CameraSensorType] + 
stereo_pairs: List[dai.CameraBoardSocket] = [] + """Which cameras can be paired with this one""" + name: str + + class Config: + arbitrary_types_allowed = True + use_enum_values = True + + def dict(self, *args, **kwargs) -> Dict[str, Any]: # type: ignore[no-untyped-def] + return { + "resolutions": [r for r in self.resolutions], + "max_fps": self.max_fps, + "board_socket": self.board_socket.name, + "supported_types": [sensor_type.name for sensor_type in self.supported_types], + "stereo_pairs": [socket.name for socket in self.stereo_pairs], + "name": self.name, + } + + class PipelineConfiguration(BaseModel): # type: ignore[misc] - color_camera: ColorCameraConfiguration = ColorCameraConfiguration() - left_camera: Optional[MonoCameraConfiguration] = MonoCameraConfiguration.create_left() - right_camera: Optional[MonoCameraConfiguration] = MonoCameraConfiguration.create_right() - depth: Optional[DepthConfiguration] = DepthConfiguration() - ai_model: Optional[AiModelConfiguration] = AiModelConfiguration() + cameras: List[CameraConfiguration] = [] + depth: Optional[DepthConfiguration] + ai_model: Optional[AiModelConfiguration] imu: ImuConfiguration = ImuConfiguration() - def to_json(self) -> Dict[str, Any]: - as_dict = self.dict() - return self._fix_depthai_types(as_dict) - - def _fix_depthai_types(self, as_dict: Dict[str, Any]) -> Dict[str, Any]: - """ATM Config.json_encoders doesn't work, so we manually fix convert the depthai types to strings here.""" - if as_dict.get("color_camera", None): - as_dict["color_camera"] = self._fix_camera(as_dict["color_camera"]) - if as_dict.get("left_camera", None): - as_dict["left_camera"] = self._fix_camera(as_dict["left_camera"]) - if as_dict.get("right_camera", None): - as_dict["right_camera"] = self._fix_camera(as_dict["right_camera"]) - if as_dict.get("depth", None): - as_dict["depth"] = self._fix_depth(as_dict["depth"]) - return as_dict - - def _fix_depth(self, as_dict: Dict[str, Any]) -> Dict[str, Any]: - if as_dict.get("align"): - as_dict["align"] = as_dict["align"].name - if as_dict.get("median"): - as_dict["median"] = as_dict["median"].name - return as_dict - - def _fix_camera(self, as_dict: Dict[str, Any]) -> Dict[str, Any]: - if as_dict.get("resolution"): - as_dict["resolution"] = as_dict["resolution"].name - if as_dict.get("board_socket"): - as_dict["board_socket"] = as_dict["board_socket"].name - return as_dict + +class DeviceProperties(BaseModel): # type: ignore[misc] + id: str + cameras: List[CameraFeatures] = [] + imu: Optional[ImuKind] + stereo_pairs: List[ + Tuple[dai.CameraBoardSocket, dai.CameraBoardSocket] + ] = [] # Which cameras can be paired for stereo + default_stereo_pair: Optional[Tuple[dai.CameraBoardSocket, dai.CameraBoardSocket]] = None + + class Config: + arbitrary_types_allowed = True + use_enum_values = True + + def __init__(self, *args, **kwargs) -> None: # type: ignore[no-untyped-def] + if kwargs.get("stereo_pairs", None) and all(isinstance(pair[0], str) for pair in kwargs["stereo_pairs"]): + kwargs["stereo_pairs"] = [ + (getattr(dai.CameraBoardSocket, pair[0]), getattr(dai.CameraBoardSocket, pair[1])) + for pair in kwargs["stereo_pairs"] + ] + return super().__init__(*args, **kwargs) # type: ignore[no-any-return] + + def dict(self, *args, **kwargs) -> Dict[str, Any]: # type: ignore[no-untyped-def] + return { + "id": self.id, + "cameras": [cam.dict() for cam in self.cameras], + "imu": self.imu, + "stereo_pairs": [(left.name, right.name) for left, right in self.stereo_pairs], + } + + +resolution_to_enum = { + (640, 400): 
CameraSensorResolution.THE_400_P, + (1280, 720): CameraSensorResolution.THE_720_P, + (1280, 800): CameraSensorResolution.THE_800_P, + (1920, 1080): CameraSensorResolution.THE_1080_P, + (1920, 1200): CameraSensorResolution.THE_1200_P, + (3840, 2160): CameraSensorResolution.THE_4_K, + (4056, 3040): CameraSensorResolution.THE_12_MP, + (1440, 1080): CameraSensorResolution.THE_1440X1080, + (5312, 6000): CameraSensorResolution.THE_5312X6000, +} + + +def compare_dai_camera_configs(cam1: dai.CameraSensorConfig, cam2: dai.CameraSensorConfig) -> bool: + return ( # type: ignore[no-any-return] + cam1.height == cam2.height + and cam1.width == cam2.width + and cam1.type == cam2.type + and cam1.maxFps == cam2.maxFps + and cam1.minFps == cam2.minFps + ) + + +def calculate_isp_scale(resolution_width: int) -> Tuple[int, int]: + """Based on width, get ISP scale to target THE_800_P, aka 1280x800.""" + x = 1280 / resolution_width + return Fraction.from_float(x).limit_denominator().as_integer_ratio() diff --git a/rerun_py/depthai_viewer/_backend/main.py b/rerun_py/depthai_viewer/_backend/main.py index ee4b14639b01..75d254be9aec 100644 --- a/rerun_py/depthai_viewer/_backend/main.py +++ b/rerun_py/depthai_viewer/_backend/main.py @@ -1,351 +1,127 @@ -import json import threading -import time +from multiprocessing import Queue from queue import Empty as QueueEmptyException -from queue import Queue -from typing import Any, Dict, Optional, Tuple, Union - -import depthai as dai -import depthai_sdk -import numpy as np -import pkg_resources -from depthai_sdk import OakCamera -from depthai_sdk.components import CameraComponent, NNComponent, StereoComponent -from numpy.typing import NDArray +from typing import Optional import depthai_viewer as viewer -from depthai_viewer._backend.config_api import start_api -from depthai_viewer._backend.device_configuration import PipelineConfiguration -from depthai_viewer._backend.sdk_callbacks import SdkCallbacks +from depthai_viewer._backend.config_api import Action, start_api +from depthai_viewer._backend.device import Device +from depthai_viewer._backend.device_configuration import DeviceProperties +from depthai_viewer._backend.messages import ( + DeviceMessage, + ErrorMessage, + InfoMessage, + Message, + PipelineMessage, + SubscriptionsMessage, +) from depthai_viewer._backend.store import Store viewer.init("Depthai Viewer") viewer.connect() -color_wh_to_enum = { - (1280, 720): dai.ColorCameraProperties.SensorResolution.THE_720_P, - (1280, 800): dai.ColorCameraProperties.SensorResolution.THE_800_P, - (1920, 1080): dai.ColorCameraProperties.SensorResolution.THE_1080_P, - (3840, 2160): dai.ColorCameraProperties.SensorResolution.THE_4_K, - (4056, 3040): dai.ColorCameraProperties.SensorResolution.THE_12_MP, - (1440, 1080): dai.ColorCameraProperties.SensorResolution.THE_1440X1080, - (5312, 6000): dai.ColorCameraProperties.SensorResolution.THE_5312X6000, - # TODO(filip): Add other resolutions -} - -mono_wh_to_enum = { - (640, 400): dai.MonoCameraProperties.SensorResolution.THE_400_P, - (1280, 720): dai.MonoCameraProperties.SensorResolution.THE_720_P, - (1280, 800): dai.MonoCameraProperties.SensorResolution.THE_800_P, - (1920, 1200): dai.MonoCameraProperties.SensorResolution.THE_1200_P, -} - - -class SelectedDevice: - id: str - intrinsic_matrix: Dict[Tuple[dai.CameraBoardSocket, int, int], NDArray[np.float32]] = {} - calibration_data: Optional[dai.CalibrationHandler] = None - use_encoding: bool = False - _time_of_last_xlink_update: int = 0 - - _color: CameraComponent = None - _left: 
CameraComponent = None - _right: CameraComponent = None - _stereo: StereoComponent = None - _nnet: NNComponent = None - # _pc: PointcloudComponent = None - - oak_cam: OakCamera = None - - def __init__(self, device_id: str): - self.id = device_id - self.oak_cam = OakCamera(self.id) - print("Oak cam: ", self.oak_cam) - - def get_intrinsic_matrix(self, board_socket: dai.CameraBoardSocket, width: int, height: int) -> NDArray[np.float32]: - if self.intrinsic_matrix.get((board_socket, width, height)) is not None: - return self.intrinsic_matrix.get((board_socket, width, height)) # type: ignore[return-value] - if self.calibration_data is None: - raise Exception("Missing calibration data!") - M_right = self.calibration_data.getCameraIntrinsics( # type: ignore[union-attr] - board_socket, dai.Size2f(width, height) - ) - self.intrinsic_matrix[(board_socket, width, height)] = np.array(M_right).reshape(3, 3) - return self.intrinsic_matrix[(board_socket, width, height)] - - def get_device_properties(self) -> Dict[str, Any]: - dai_props = self.oak_cam.device.getConnectedCameraFeatures() - device_properties = { - "id": self.id, - "supported_color_resolutions": [], - "supported_left_mono_resolutions": [], - "supported_right_mono_resolutions": [], - } - for cam in dai_props: - resolutions_key = "supported_left_mono_resolutions" - if cam.socket == dai.CameraBoardSocket.RGB: - resolutions_key = "supported_color_resolutions" - elif cam.socket == dai.CameraBoardSocket.RIGHT: - resolutions_key = "supported_right_mono_resolutions" - for config in cam.configs: - wh = (config.width, config.height) - if wh not in device_properties[resolutions_key]: # type: ignore[comparison-overlap] - device_properties[resolutions_key].append( # type: ignore[attr-defined] - (config.width, config.height) - ) - device_properties["supported_color_resolutions"] = list( - map( - lambda x: color_wh_to_enum[x].name, # type: ignore[index, no-any-return] - sorted(device_properties["supported_color_resolutions"], key=lambda x: int(x[0]) * int(x[1])), - ) - ) - device_properties["supported_left_mono_resolutions"] = list( - map( - lambda x: color_wh_to_enum[x].name, # type: ignore[index, no-any-return] - sorted(device_properties["supported_left_mono_resolutions"], key=lambda x: int(x[0]) * int(x[1])), - ) - ) - device_properties["supported_right_mono_resolutions"] = list( - map( - lambda x: color_wh_to_enum[x].name, # type: ignore[index, no-any-return] - sorted(device_properties["supported_right_mono_resolutions"], key=lambda x: int(x[0]) * int(x[1])), - ) - ) - return device_properties - - def close_oak_cam(self) -> None: - if self.oak_cam.running(): - self.oak_cam.device.__exit__(0, 0, 0) - - def reconnect_to_oak_cam(self) -> Tuple[bool, Dict[str, str]]: - """ - - Try to reconnect to the device with self.id. - - Timeout after 10 seconds. 
- """ - if self.oak_cam.device.isClosed(): - timeout_start = time.time() - while time.time() - timeout_start < 10: - available_devices = [ - device.getMxId() for device in dai.Device.getAllAvailableDevices() # type: ignore[call-arg] - ] - if self.id in available_devices: - break - try: - self.oak_cam = OakCamera(self.id) - return True, {"message": "Successfully reconnected to device"} - except RuntimeError as e: - print("Failed to create oak camera") - print(e) - self.oak_cam = None - return False, {"message": "Failed to create oak camera"} - - def update_pipeline( - self, config: PipelineConfiguration, runtime_only: bool, callbacks: "SdkCallbacks" - ) -> Tuple[bool, Dict[str, str]]: - if self.oak_cam.running(): - if runtime_only: - if config.depth is not None: - return True, self._stereo.control.send_controls(config.depth.to_runtime_controls()) - return False, {"message": "Depth is not enabled, can't send runtime controls!"} - print("Cam running, closing...") - self.close_oak_cam() - # Check if the device is available, timeout after 10 seconds - success, message = self.reconnect_to_oak_cam() - if not success: - return success, message - - self.use_encoding = self.oak_cam.device.getDeviceInfo().protocol == dai.XLinkProtocol.X_LINK_TCP_IP - if self.use_encoding: - print("Connected device is PoE: Using encoding...") - else: - print("Connected device is USB: Not using encoding...") - if config.color_camera is not None: - print("Creating color camera") - self._color = self.oak_cam.create_camera( - "color", config.color_camera.resolution, config.color_camera.fps, name="color", encode=self.use_encoding - ) - if config.color_camera.xout_video: - self.oak_cam.callback(self._color, callbacks.on_color_frame, enable_visualizer=self.use_encoding) - if config.left_camera is not None: - print("Creating left camera") - self._left = self.oak_cam.create_camera( - "left", config.left_camera.resolution, config.left_camera.fps, name="left", encode=self.use_encoding - ) - if config.left_camera.xout: - self.oak_cam.callback(self._left, callbacks.on_left_frame, enable_visualizer=self.use_encoding) - if config.right_camera is not None: - print("Creating right camera") - self._right = self.oak_cam.create_camera( - "right", config.right_camera.resolution, config.right_camera.fps, name="right", encode=self.use_encoding - ) - if config.right_camera.xout: - self.oak_cam.callback(self._right, callbacks.on_right_frame, enable_visualizer=self.use_encoding) - if config.depth: - print("Creating depth") - self._stereo = self.oak_cam.create_stereo(left=self._left, right=self._right, name="depth") - - # We used to be able to pass in the board socket to align to, but this was removed in depthai 1.10.0 - align = config.depth.align - if pkg_resources.parse_version(depthai_sdk.__version__) >= pkg_resources.parse_version("1.10.0"): - align = ( - self._left - if config.depth.align == dai.CameraBoardSocket.LEFT - else self._right - if config.depth.align == dai.CameraBoardSocket.RIGHT - else self._color - ) - self._stereo.config_stereo( - lr_check=config.depth.lr_check, - subpixel=config.depth.subpixel_disparity, - confidence=config.depth.confidence, - align=align, - lr_check_threshold=config.depth.lrc_threshold, - median=config.depth.median, - ) - self.oak_cam.callback(self._stereo, callbacks.on_stereo_frame) - - if self.oak_cam.device.getConnectedIMU() != "NONE": - print("Creating IMU") - imu = self.oak_cam.create_imu() - sensors = [ - dai.IMUSensor.ACCELEROMETER_RAW, - dai.IMUSensor.GYROSCOPE_RAW, - ] - if "BNO" in 
self.oak_cam.device.getConnectedIMU(): - sensors.append(dai.IMUSensor.MAGNETOMETER_CALIBRATED) - imu.config_imu( - sensors, report_rate=config.imu.report_rate, batch_report_threshold=config.imu.batch_report_threshold - ) - self.oak_cam.callback(imu, callbacks.on_imu) - else: - print("Connected cam doesn't have IMU, skipping IMU creation...") - - if config.ai_model and config.ai_model.path: - if config.ai_model.path == "age-gender-recognition-retail-0013": - face_detection = self.oak_cam.create_nn("face-detection-retail-0004", self._color) - self._nnet = self.oak_cam.create_nn("age-gender-recognition-retail-0013", input=face_detection) - self.oak_cam.callback(self._nnet, callbacks.on_age_gender_packet) - elif config.ai_model.path == "mobilenet-ssd": - self._nnet = self.oak_cam.create_nn( - config.ai_model.path, - self._color, - ) - self.oak_cam.callback(self._nnet, callbacks.on_mobilenet_ssd_packet) - else: - self._nnet = self.oak_cam.create_nn(config.ai_model.path, self._color) - callback = callbacks.on_detections - if config.ai_model.path == "yolov8n_coco_640x352": - callback = callbacks.on_yolo_packet - self.oak_cam.callback( - self._nnet, callback, True - ) # in depthai-sdk=1.10.0 nnet callbacks don't work without visualizer enabled - try: - self.oak_cam.start(blocking=False) - except RuntimeError as e: - print("Couldn't start pipeline: ", e) - return False, {"message": "Couldn't start pipeline"} - running = self.oak_cam.running() - if running: - self.oak_cam.poll() - self.calibration_data = self.oak_cam.device.readCalibration() - self.intrinsic_matrix = {} - return running, {"message": "Pipeline started" if running else "Couldn't start pipeline"} - - def update(self) -> None: - self.oak_cam.poll() - if time.time_ns() - self._time_of_last_xlink_update >= 16e6: - self._time_of_last_xlink_update = time.time_ns() - if hasattr(self.oak_cam.device, "getProfilingData"): # Only on latest develop - xlink_stats = self.oak_cam.device.getProfilingData() - viewer.log_xlink_stats(xlink_stats.numBytesWritten, xlink_stats.numBytesRead) - class DepthaiViewerBack: - _device: Optional[SelectedDevice] = None + _device: Optional[Device] = None # Queues for communicating with the API process action_queue: Queue # type: ignore[type-arg] result_queue: Queue # type: ignore[type-arg] send_message_queue: Queue # type: ignore[type-arg] - # Sdk callbacks for handling data from the device and sending it to the frontend - sdk_callbacks: SdkCallbacks - - def __init__(self, compression: bool = False) -> None: + def __init__(self) -> None: self.action_queue = Queue() self.result_queue = Queue() self.send_message_queue = Queue() self.store = Store() - self.store.on_update_pipeline = self.update_pipeline - self.store.on_select_device = self.select_device - self.store.on_reset = self.on_reset - self.api_process = threading.Thread( target=start_api, args=(self.action_queue, self.result_queue, self.send_message_queue) ) self.api_process.start() - - self.sdk_callbacks = SdkCallbacks(self.store) self.run() - def set_device(self, device: Optional[SelectedDevice] = None) -> None: + def set_device(self, device: Optional[Device] = None) -> None: self._device = device - if device: - self.sdk_callbacks.set_camera_intrinsics_getter(device.get_intrinsic_matrix) - def on_reset(self) -> Tuple[bool, Dict[str, str]]: + def on_reset(self) -> Message: print("Resetting...") if self._device: print("Closing device...") - self._device.close_oak_cam() + self._device.close_oak() self.set_device(None) + self.store.reset() print("Done") - return 
True, {"message": "Reset successful"} + return InfoMessage("Reset successful") - def select_device(self, device_id: str) -> Tuple[bool, Dict[str, Union[str, Any]]]: + def on_select_device(self, device_id: str) -> Message: print("Selecting device: ", device_id) if self._device: self.on_reset() if device_id == "": - return True, {"message": "Successfully unselected device", "device_properties": {}} + return DeviceMessage(DeviceProperties(id=""), "Device successfully unselected") try: - self.set_device(SelectedDevice(device_id)) + self.set_device(Device(device_id, self.store)) except RuntimeError as e: print("Failed to select device:", e) - return False, { - "message": str(e) + ", Try plugging in the device on a different port.", - "device_properties": {}, - } + return ErrorMessage(f"{str(e)}, Try to connect the device to a different port.") try: if self._device is not None: device_properties = self._device.get_device_properties() - return True, {"message:": "Device selected successfully", "device_properties": device_properties} - return False, {"message": "CCouldn't select device", "device_properties": {}} + return DeviceMessage(device_properties, "Device selected successfully") + return ErrorMessage("Couldn't select device") except RuntimeError as e: print("Failed to get device properties:", e) self.on_reset() + self.send_message_queue.put(ErrorMessage("Device disconnected!")) print("Restarting backend...") # For now exit the backend, the frontend will restart it # (TODO(filip): Why does "Device already closed or disconnected: Input/output error happen") exit(-1) - # return False, {"message": "Failed to get device properties", "device_properties": {}} - def update_pipeline(self, runtime_only: bool) -> Tuple[bool, Dict[str, str]]: + def on_update_pipeline(self, runtime_only: bool) -> Message: if not self._device: print("No device selected, can't update pipeline!") - return False, {"message": "No device selected, can't update pipeline!"} + return ErrorMessage("No device selected, can't update pipeline!") print("Updating pipeline...") - started, message = False, {"message": "Couldn't start pipeline"} + message: Message = ErrorMessage("Couldn't update pipeline") if self.store.pipeline_config is not None: - started, message = self._device.update_pipeline( - self.store.pipeline_config, runtime_only, callbacks=self.sdk_callbacks - ) - if not started: - self.set_device(None) - return started, message + message = self._device.update_pipeline(self.store.pipeline_config, runtime_only) + if isinstance(message, InfoMessage): + return PipelineMessage(self.store.pipeline_config) + return message + + def handle_action(self, action: Action, **kwargs) -> Message: # type: ignore[no-untyped-def] + if action == Action.UPDATE_PIPELINE: + pipeline_config = kwargs.get("pipeline_config", None) + if pipeline_config is not None: + old_pipeline_config = self.store.pipeline_config + self.store.set_pipeline_config(pipeline_config) # type: ignore[arg-type] + message = self.on_update_pipeline(kwargs.get("runtime_only")) # type: ignore[arg-type] + if isinstance(message, ErrorMessage): + self.store.set_pipeline_config(old_pipeline_config) # type: ignore[arg-type] + return message + else: + return ErrorMessage("Pipeline config not provided") + elif action == Action.SELECT_DEVICE: + device_id = kwargs.get("device_id", None) + if device_id is not None: + self.device_id = device_id + return self.on_select_device(device_id) + else: + return ErrorMessage("Device id not provided") + + # TODO(filip): Fully deprecate subscriptions 
(Only IMU uses subscriptions) + elif action == Action.GET_SUBSCRIPTIONS: + return self.store.subscriptions # type: ignore[return-value] + elif action == Action.SET_SUBSCRIPTIONS: + self.store.set_subscriptions(kwargs.get("subscriptions", [])) + return SubscriptionsMessage([topic.name for topic in self.store.subscriptions]) + elif action == Action.GET_PIPELINE: + return self.store.pipeline_config # type: ignore[return-value] + elif action == Action.RESET: + return self.on_reset() + return ErrorMessage(f"Action: {action} not implemented") def run(self) -> None: """Handles ws messages and polls OakCam.""" @@ -353,20 +129,16 @@ def run(self) -> None: try: action, kwargs = self.action_queue.get(timeout=0.0001) print("Handling action: ", action) - self.result_queue.put(self.store.handle_action(action, **kwargs)) + self.result_queue.put(self.handle_action(action, **kwargs)) except QueueEmptyException: pass if self._device: self._device.update() - if self._device.oak_cam.device.isClosed(): - # TODO(filip): Typehint the messages properly + if self._device.is_closed(): self.on_reset() - self.send_message_queue.put( - json.dumps({"type": "Error", "data": {"action": "FullReset", "message": "Device disconnected"}}) - ) + self.send_message_queue.put(ErrorMessage("Device disconnected")) if __name__ == "__main__": - viewer.spawn(connect=True) DepthaiViewerBack() diff --git a/rerun_py/depthai_viewer/_backend/messages.py b/rerun_py/depthai_viewer/_backend/messages.py new file mode 100644 index 000000000000..25da4acffd28 --- /dev/null +++ b/rerun_py/depthai_viewer/_backend/messages.py @@ -0,0 +1,102 @@ +import json +from enum import Enum +from typing import List, Optional + +from depthai_viewer._backend.device_configuration import DeviceProperties, PipelineConfiguration + + +class MessageType: + SUBSCRIPTIONS = "Subscriptions" # Get or set subscriptions + PIPELINE = "Pipeline" # Get or Set pipeline + DEVICES = "Devices" # Get device list + DEVICE = "DeviceProperties" # Get or set device + ERROR = "Error" # Error message + INFO = "Info" # Info message + + +class ErrorAction(Enum): + NONE = "None" + FULL_RESET = "FullReset" + + def __str__(self) -> str: + return self.value + + +class Message: + message: Optional[str] = None + + def __init__(self) -> None: + raise NotImplementedError + + def json(self) -> str: + raise NotImplementedError + + +class ErrorMessage(Message): + def __init__(self, message: str, action: ErrorAction = ErrorAction.FULL_RESET): + self.action = action + self.message = message + + def json(self) -> str: + return json.dumps({"type": MessageType.ERROR, "data": {"action": str(self.action), "message": self.message}}) + + +class DevicesMessage(Message): + def __init__(self, devices: List[str], message: Optional[str] = None): + self.devices = devices + self.message = message + + def json(self) -> str: + return json.dumps({"type": MessageType.DEVICES, "data": self.devices}) + + +class DeviceMessage(Message): + def __init__(self, device_props: Optional[DeviceProperties], message: Optional[str] = None): + self.device_props = device_props + self.message = message + + def json(self) -> str: + return json.dumps( + { + "type": MessageType.DEVICE, + "data": self.device_props.dict() if self.device_props else DeviceProperties(id="").dict(), + } + ) + + +class SubscriptionsMessage(Message): + def __init__(self, subscriptions: List[str], message: Optional[str] = None): + self.subscriptions = subscriptions + self.message = message + + def json(self) -> str: + return json.dumps({"type": 
MessageType.SUBSCRIPTIONS, "data": self.subscriptions}) + + +class PipelineMessage(Message): + def __init__( + self, + pipeline_config: Optional[PipelineConfiguration], + runtime_only: bool = False, + message: Optional[str] = None, + ): + self.pipeline_config = pipeline_config + self.runtime_only = runtime_only + self.message = message + + def json(self) -> str: + return json.dumps( + { + "type": MessageType.PIPELINE, + "data": (self.pipeline_config.dict(), self.runtime_only) if self.pipeline_config else None, + "message": self.message, + } + ) + + +class InfoMessage(Message): + def __init__(self, message: str): + self.message = message + + def json(self) -> str: + return json.dumps({"type": MessageType.INFO, "data": self.message}) diff --git a/rerun_py/depthai_viewer/_backend/packet_handler.py b/rerun_py/depthai_viewer/_backend/packet_handler.py new file mode 100644 index 000000000000..6dd1c3f02a39 --- /dev/null +++ b/rerun_py/depthai_viewer/_backend/packet_handler.py @@ -0,0 +1,225 @@ +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import cv2 +import depthai as dai +import numpy as np +from ahrs.filters import Mahony +from depthai_sdk.classes.packets import ( # PointcloudPacket, + DepthPacket, + DetectionPacket, + FramePacket, + IMUPacket, + TwoStagePacket, + _Detection, +) +from numpy.typing import NDArray +from pydantic import BaseModel +from turbojpeg import TJFLAG_FASTDCT, TJFLAG_FASTUPSAMPLE, TurboJPEG + +import depthai_viewer as viewer +from depthai_viewer._backend.device_configuration import CameraConfiguration +from depthai_viewer._backend.store import Store +from depthai_viewer._backend.topic import Topic +from depthai_viewer.components.rect2d import RectFormat + + +class CallbackArgs(BaseModel): # type: ignore[misc] + pass + + +class DepthCallbackArgs(CallbackArgs): # type: ignore[misc] + alignment_camera: CameraConfiguration + stereo_pair: Tuple[dai.CameraBoardSocket, dai.CameraBoardSocket] + + class Config: + arbitrary_types_allowed = True + + +class AiModelCallbackArgs(CallbackArgs): # type: ignore[misc] + model_name: str + camera: CameraConfiguration + labels: Optional[List[str]] = None + + class Config: + arbitrary_types_allowed = True + + +class SyncedCallbackArgs(BaseModel): # type: ignore[misc] + depth_args: Optional[DepthCallbackArgs] = None + + +class PacketHandler: + store: Store + _ahrs: Mahony + _get_camera_intrinsics: Callable[[dai.CameraBoardSocket, int, int], NDArray[np.float32]] + _jpeg_decoder: TurboJPEG = TurboJPEG() + + def __init__( + self, store: Store, intrinsics_getter: Callable[[dai.CameraBoardSocket, int, int], NDArray[np.float32]] + ): + viewer.init("Depthai Viewer") + viewer.connect() + self.store = store + self._ahrs = Mahony(frequency=100) + self._ahrs.Q = np.array([1, 0, 0, 0], dtype=np.float64) + self.set_camera_intrinsics_getter(intrinsics_getter) + + def reset(self) -> None: + self._ahrs = Mahony(frequency=100) + self._ahrs.Q = np.array([1, 0, 0, 0], dtype=np.float64) + + def set_camera_intrinsics_getter( + self, camera_intrinsics_getter: Callable[[dai.CameraBoardSocket, int, int], NDArray[np.float32]] + ) -> None: + self._get_camera_intrinsics = camera_intrinsics_getter # type: ignore[assignment, misc] + + def build_sync_callback(self, args: SyncedCallbackArgs) -> Callable[[Any], None]: + return lambda packets: self._on_synced_packets(args, packets) + + def _on_synced_packets(self, args: SyncedCallbackArgs, packets: Dict[str, Any]) -> None: + for descriptor, packet in packets.items(): + if type(packet) is FramePacket: 
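# The descriptor key produced by depthai_sdk's sync callback is a dotted,
# repr-style path such as "CameraBoardSocket.CAM_A"; the loop below resolves it
# by walking attributes starting at the dai module. The same walk in standalone
# form (enum stand-ins so the sketch runs without depthai installed):
import functools
from enum import Enum


class CameraBoardSocket(Enum):  # stand-in for dai.CameraBoardSocket
    CAM_A = 0


class dai_stub:  # plays the role of the `dai` module as the getattr root
    CameraBoardSocket = CameraBoardSocket


def resolve(root: object, dotted: str) -> object:
    # "CameraBoardSocket.CAM_A" -> root.CameraBoardSocket.CAM_A
    return functools.reduce(getattr, dotted.split("."), root)


assert resolve(dai_stub, "CameraBoardSocket.CAM_A") is CameraBoardSocket.CAM_A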
+ # Create dai.CameraBoardSocket from descriptor + split_descriptor = descriptor.split(".") + sock = getattr(dai, split_descriptor[0]) + for split in split_descriptor[1:]: + sock = getattr(sock, split) + self._on_camera_frame(packet, sock) + elif type(packet) is DepthPacket: + if args.depth_args is None: + continue + self._on_stereo_frame(packet, args.depth_args) + + def build_callback( + self, args: Union[dai.CameraBoardSocket, DepthCallbackArgs, AiModelCallbackArgs] + ) -> Callable[[Any], None]: + if isinstance(args, dai.CameraBoardSocket): + return lambda packet: self._on_camera_frame(packet, args) # type: ignore[arg-type] + elif isinstance(args, DepthCallbackArgs): + return lambda packet: self._on_stereo_frame(packet, args) # type: ignore[arg-type] + elif isinstance(args, AiModelCallbackArgs): + callback: Callable[[Any, AiModelCallbackArgs], None] = self._on_detections + if args.model_name == "age-gender-recognition-retail-0013": + callback = self._on_age_gender_packet + return lambda packet: callback(packet, args) # type: ignore[arg-type] + raise ValueError(f"Unknown callback args type: {type(args)}") + + def _on_camera_frame(self, packet: FramePacket, board_socket: dai.CameraBoardSocket) -> None: + viewer.log_rigid3(f"{board_socket.name}/transform", child_from_parent=([0, 0, 0], self._ahrs.Q), xyz="RDF") + h, w = packet.msg.getHeight(), packet.msg.getWidth() + child_from_parent: NDArray[np.float32] + try: + child_from_parent = self._get_camera_intrinsics( # type: ignore[call-arg, misc, arg-type] + board_socket, w, h # type: ignore[call-arg, misc, arg-type] + ) + except Exception: + f_len = (w * h) ** 0.5 + child_from_parent = np.array([[f_len, 0, w / 2], [0, f_len, h / 2], [0, 0, 1]]) + cam = cam_kind_from_frame_type(packet.msg.getType()) + viewer.log_pinhole( + f"{board_socket.name}/transform/{cam}/", + child_from_parent=child_from_parent, + width=w, + height=h, + ) + img_frame = packet.frame if packet.msg.getType() == dai.RawImgFrame.Type.RAW8 else packet.msg.getData() + entity_path = f"{board_socket.name}/transform/{cam}/Image" + if packet.msg.getType() == dai.ImgFrame.Type.BITSTREAM: + img_frame = cv2.cvtColor( + self._jpeg_decoder.decode(img_frame, flags=TJFLAG_FASTUPSAMPLE | TJFLAG_FASTDCT), cv2.COLOR_BGR2RGB + ) + + if packet.msg.getType() == dai.RawImgFrame.Type.NV12: + viewer.log_encoded_image( + entity_path, + img_frame, + width=w, + height=h, + encoding=viewer.ImageEncoding.NV12, + ) + else: + viewer.log_image(entity_path, img_frame) + + def on_imu(self, packet: IMUPacket) -> None: + for data in packet.data: + gyro: dai.IMUReportGyroscope = data.gyroscope + accel: dai.IMUReportAccelerometer = data.acceleroMeter + mag: dai.IMUReportMagneticField = data.magneticField + # TODO(filip): Move coordinate mapping to sdk + self._ahrs.Q = self._ahrs.updateIMU( + self._ahrs.Q, np.array([gyro.z, gyro.x, gyro.y]), np.array([accel.z, accel.x, accel.y]) + ) + if Topic.ImuData not in self.store.subscriptions: + return + viewer.log_imu([accel.z, accel.x, accel.y], [gyro.z, gyro.x, gyro.y], self._ahrs.Q, [mag.x, mag.y, mag.z]) + + def _on_stereo_frame(self, packet: DepthPacket, args: DepthCallbackArgs) -> None: + depth_frame = packet.frame + cam = cam_kind_from_sensor_kind(args.alignment_camera.kind) + path = f"{args.alignment_camera.board_socket.name}/transform/{cam}" + "/Depth" + if not self.store.pipeline_config or not self.store.pipeline_config.depth: + # Essentially impossible to get here + return + viewer.log_depth_image(path, depth_frame, meter=1e3) + + def _on_detections(self, 
packet: DetectionPacket, args: AiModelCallbackArgs) -> None: + rects, colors, labels = self._detections_to_rects_colors_labels(packet, args.labels) + cam = cam_kind_from_sensor_kind(args.camera.kind) + viewer.log_rects( + f"{args.camera.board_socket.name}/transform/{cam}/Detections", + rects, + rect_format=RectFormat.XYXY, + colors=colors, + labels=labels, + ) + + def _detections_to_rects_colors_labels( + self, packet: DetectionPacket, omz_labels: Optional[List[str]] = None + ) -> Tuple[List[List[int]], List[List[int]], List[str]]: + rects = [] + colors = [] + labels = [] + for detection in packet.detections: + rects.append(self._rect_from_detection(detection)) + colors.append([0, 255, 0]) + label = detection.label + # Open Model Zoo models output a label index; map it to the label string. + if omz_labels is not None and isinstance(label, int): + label = omz_labels[label] + label = f"{label}, {int(detection.img_detection.confidence * 100)}%" + labels.append(label) + return rects, colors, labels + + def _on_age_gender_packet(self, packet: TwoStagePacket, args: AiModelCallbackArgs) -> None: + for det, rec in zip(packet.detections, packet.nnData): + age = int(float(np.squeeze(np.array(rec.getLayerFp16("age_conv3")))) * 100) + gender = np.squeeze(np.array(rec.getLayerFp16("prob"))) + gender_str = "Woman" if gender[0] > gender[1] else "Man" + label = f"{gender_str}, {age}" + color = [255, 0, 0] if gender[0] > gender[1] else [0, 0, 255] + # TODO(filip): maybe use viewer.log_annotation_context to log class colors for detections + + cam = cam_kind_from_sensor_kind(args.camera.kind) + viewer.log_rect( + f"{args.camera.board_socket.name}/transform/{cam}/Detection", + self._rect_from_detection(det), + rect_format=RectFormat.XYXY, + color=color, + label=label, + ) + + def _rect_from_detection(self, detection: _Detection) -> List[int]: + return [ + *detection.bottom_right, + *detection.top_left, + ] + + +def cam_kind_from_frame_type(dtype: dai.RawImgFrame.Type) -> str: + """Returns camera kind string for given dai.RawImgFrame.Type.""" + return "mono_cam" if dtype == dai.RawImgFrame.Type.RAW8 else "color_cam" + + +def cam_kind_from_sensor_kind(kind: dai.CameraSensorType) -> str: + """Returns camera kind string for given sensor type.""" + return "mono_cam" if kind == dai.CameraSensorType.MONO else "color_cam" diff --git a/rerun_py/depthai_viewer/_backend/sdk_callbacks.py b/rerun_py/depthai_viewer/_backend/sdk_callbacks.py deleted file mode 100644 index 812de04af44e..000000000000 --- a/rerun_py/depthai_viewer/_backend/sdk_callbacks.py +++ /dev/null @@ -1,174 +0,0 @@ -from typing import Callable, List, Optional, Tuple - -import cv2 -import depthai as dai -import numpy as np -from ahrs.filters import Mahony -from depthai_sdk.classes.packets import ( - DepthPacket, - DetectionPacket, - FramePacket, - IMUPacket, - # PointcloudPacket, - TwoStagePacket, - _Detection, -) -from numpy.typing import NDArray - -import depthai_viewer as viewer -from depthai_viewer._backend import classification_labels -from depthai_viewer._backend.store import Store -from depthai_viewer._backend.topic import Topic -from depthai_viewer.components.rect2d import RectFormat - - -class EntityPath: - LEFT_PINHOLE_CAMERA = "mono/camera/left_mono" - LEFT_CAMERA_IMAGE = "mono/camera/left_mono/Left mono" - RIGHT_PINHOLE_CAMERA = "mono/camera/right_mono" - RIGHT_CAMERA_IMAGE = "mono/camera/right_mono/Right mono" - RGB_PINHOLE_CAMERA = "color/camera/rgb" - RGB_CAMERA_IMAGE = "color/camera/rgb/Color camera" - - DETECTIONS = "color/camera/rgb/Detections" - DETECTION =
"color/camera/rgb/Detection" - - RGB_CAMERA_TRANSFORM = "color/camera" - MONO_CAMERA_TRANSFORM = "mono/camera" - - -class SdkCallbacks: - store: Store - ahrs: Mahony - _get_camera_intrinsics: Callable[[dai.CameraBoardSocket, int, int], NDArray[np.float32]] - - def __init__(self, store: Store): - viewer.init("Depthai Viewer") - viewer.connect() - self.store = store - self.ahrs = Mahony(frequency=100) - self.ahrs.Q = np.array([1, 0, 0, 0], dtype=np.float64) - - def set_camera_intrinsics_getter( - self, camera_intrinsics_getter: Callable[[dai.CameraBoardSocket, int, int], NDArray[np.float32]] - ) -> None: - self._get_camera_intrinsics = camera_intrinsics_getter - - def on_imu(self, packet: IMUPacket) -> None: - for data in packet.data: - gyro: dai.IMUReportGyroscope = data.gyroscope - accel: dai.IMUReportAccelerometer = data.acceleroMeter - mag: dai.IMUReportMagneticField = data.magneticField - # TODO(filip): Move coordinate mapping to sdk - self.ahrs.Q = self.ahrs.updateIMU( - self.ahrs.Q, np.array([gyro.z, gyro.x, gyro.y]), np.array([accel.z, accel.x, accel.y]) - ) - if Topic.ImuData not in self.store.subscriptions: - return - viewer.log_imu([accel.z, accel.x, accel.y], [gyro.z, gyro.x, gyro.y], self.ahrs.Q, [mag.x, mag.y, mag.z]) - - def on_color_frame(self, frame: FramePacket) -> None: - # Always log pinhole cam and pose (TODO(filip): move somewhere else or not) - if Topic.ColorImage not in self.store.subscriptions: - return - viewer.log_rigid3(EntityPath.RGB_CAMERA_TRANSFORM, child_from_parent=([0, 0, 0], self.ahrs.Q), xyz="RDF") - h, w, _ = frame.frame.shape - viewer.log_pinhole( - EntityPath.RGB_PINHOLE_CAMERA, - child_from_parent=self._get_camera_intrinsics(dai.CameraBoardSocket.RGB, w, h), - width=w, - height=h, - ) - viewer.log_image(EntityPath.RGB_CAMERA_IMAGE, cv2.cvtColor(frame.frame, cv2.COLOR_BGR2RGB)) - - def on_left_frame(self, frame: FramePacket) -> None: - if Topic.LeftMono not in self.store.subscriptions: - return - h, w = frame.frame.shape - viewer.log_rigid3(EntityPath.MONO_CAMERA_TRANSFORM, child_from_parent=([0, 0, 0], self.ahrs.Q), xyz="RDF") - viewer.log_pinhole( - EntityPath.LEFT_PINHOLE_CAMERA, - child_from_parent=self._get_camera_intrinsics(dai.CameraBoardSocket.LEFT, w, h), - width=w, - height=h, - ) - viewer.log_image(EntityPath.LEFT_CAMERA_IMAGE, frame.frame) - - def on_right_frame(self, frame: FramePacket) -> None: - if Topic.RightMono not in self.store.subscriptions: - return - h, w = frame.frame.shape - viewer.log_rigid3(EntityPath.MONO_CAMERA_TRANSFORM, child_from_parent=([0, 0, 0], self.ahrs.Q), xyz="RDF") - viewer.log_pinhole( - EntityPath.RIGHT_PINHOLE_CAMERA, - child_from_parent=self._get_camera_intrinsics(dai.CameraBoardSocket.RIGHT, w, h), - width=w, - height=h, - ) - viewer.log_image(EntityPath.RIGHT_CAMERA_IMAGE, frame.frame) - - def on_stereo_frame(self, frame: DepthPacket) -> None: - if Topic.DepthImage not in self.store.subscriptions: - return - depth_frame = frame.frame - path = EntityPath.RGB_PINHOLE_CAMERA + "/Depth" - if not self.store.pipeline_config or not self.store.pipeline_config.depth: - # Essentially impossible to get here - return - depth = self.store.pipeline_config.depth - if depth.align == dai.CameraBoardSocket.LEFT: - path = EntityPath.LEFT_PINHOLE_CAMERA + "/Depth" - elif depth.align == dai.CameraBoardSocket.RIGHT: - path = EntityPath.RIGHT_PINHOLE_CAMERA + "/Depth" - viewer.log_depth_image(path, depth_frame, meter=1e3) - - def on_detections(self, packet: DetectionPacket) -> None: - rects, colors, labels = 
self._detections_to_rects_colors_labels(packet) - viewer.log_rects(EntityPath.DETECTIONS, rects, rect_format=RectFormat.XYXY, colors=colors, labels=labels) - - def _detections_to_rects_colors_labels( - self, packet: DetectionPacket, omz_labels: Optional[List[str]] = None - ) -> Tuple[List[List[int]], List[List[int]], List[str]]: - rects = [] - colors = [] - labels = [] - for detection in packet.detections: - rects.append(self._rect_from_detection(detection)) - colors.append([0, 255, 0]) - label: str = detection.label - # Open model zoo models output label index - if omz_labels is not None and isinstance(label, int): - label += omz_labels[label] - label += ", " + str(int(detection.img_detection.confidence * 100)) + "%" - labels.append(label) - return rects, colors, labels - - def on_yolo_packet(self, packet: DetectionPacket) -> None: - rects, colors, labels = self._detections_to_rects_colors_labels(packet) - viewer.log_rects(EntityPath.DETECTIONS, rects=rects, colors=colors, labels=labels, rect_format=RectFormat.XYXY) - - def on_age_gender_packet(self, packet: TwoStagePacket) -> None: - for det, rec in zip(packet.detections, packet.nnData): - age = int(float(np.squeeze(np.array(rec.getLayerFp16("age_conv3")))) * 100) - gender = np.squeeze(np.array(rec.getLayerFp16("prob"))) - gender_str = "Woman" if gender[0] > gender[1] else "Man" - label = f"{gender_str}, {age}" - color = [255, 0, 0] if gender[0] > gender[1] else [0, 0, 255] - # TODO(filip): maybe use viewer.log_annotation_context to log class colors for detections - viewer.log_rect( - EntityPath.DETECTION, - self._rect_from_detection(det), - rect_format=RectFormat.XYXY, - color=color, - label=label, - ) - - def _rect_from_detection(self, detection: _Detection) -> List[int]: - return [ - *detection.bottom_right, - *detection.top_left, - ] - - def on_mobilenet_ssd_packet(self, packet: DetectionPacket) -> None: - rects, colors, labels = self._detections_to_rects_colors_labels(packet, classification_labels.MOBILENET_LABELS) - viewer.log_rects(EntityPath.DETECTIONS, rects=rects, colors=colors, labels=labels, rect_format=RectFormat.XYXY) diff --git a/rerun_py/depthai_viewer/_backend/store.py b/rerun_py/depthai_viewer/_backend/store.py index c810a2eb8d78..c5756b56c089 100644 --- a/rerun_py/depthai_viewer/_backend/store.py +++ b/rerun_py/depthai_viewer/_backend/store.py @@ -1,53 +1,27 @@ -from enum import Enum -from typing import Callable, Dict, List, Optional, Tuple +from typing import List, Optional from depthai_viewer._backend.device_configuration import PipelineConfiguration from depthai_viewer._backend.topic import Topic -class Action(Enum): - UPDATE_PIPELINE = 0 - SELECT_DEVICE = 1 - GET_SUBSCRIPTIONS = 2 - SET_SUBSCRIPTIONS = 3 - GET_PIPELINE = 4 - RESET = 5 # When anything bad happens, a reset occurs (like closing ws connection) - GET_AVAILABLE_DEVICES = 6 +class Store: + _pipeline_config: Optional[PipelineConfiguration] = None + _subscriptions: List[Topic] = [] + def set_pipeline_config(self, pipeline_config: PipelineConfiguration) -> None: + self._pipeline_config = pipeline_config -class Store: - pipeline_config: Optional[PipelineConfiguration] = PipelineConfiguration() - subscriptions: List[Topic] = [] - on_update_pipeline: Optional[Callable[[bool], Tuple[bool, Dict[str, str]]]] = None - on_select_device: Optional[Callable[[str], Tuple[bool, Dict[str, str]]]] = None - on_reset: Optional[Callable[[], Tuple[bool, Dict[str, str]]]] = None - - def handle_action(self, action: Action, **kwargs) -> Tuple[bool, Dict[str, str]]: # type: 
ignore[no-untyped-def] - if action == Action.UPDATE_PIPELINE: - if kwargs.get("pipeline_config", None): - if self.on_update_pipeline: - old_pipeline_config = self.pipeline_config - self.pipeline_config = kwargs.get("pipeline_config") - success, message = self.on_update_pipeline(kwargs.get("runtime_only")) # type: ignore[arg-type] - if success: - return success, message - self.pipeline_config = old_pipeline_config - return success, message - elif action == Action.SELECT_DEVICE: - device_id = kwargs.get("device_id", None) - if device_id is not None: - self.device_id = device_id - if self.on_select_device: - return self.on_select_device(device_id) - elif action == Action.GET_SUBSCRIPTIONS: - return self.subscriptions # type: ignore[return-value] - elif action == Action.SET_SUBSCRIPTIONS: - self.subscriptions = kwargs.get("subscriptions", []) - elif action == Action.GET_PIPELINE: - return self.pipeline_config # type: ignore[return-value] - elif action == Action.RESET: - if self.on_reset: - self.pipeline_config = None - self.subscriptions = [] - return self.on_reset() - return False, {"message": f"Action: {action} didn't succeed!"} + def set_subscriptions(self, subscriptions: List[Topic]) -> None: + self._subscriptions = subscriptions + + def reset(self) -> None: + self._pipeline_config = None + self._subscriptions = [] + + @property + def pipeline_config(self) -> Optional[PipelineConfiguration]: + return self._pipeline_config + + @property + def subscriptions(self) -> List[Topic]: + return self._subscriptions diff --git a/rerun_py/depthai_viewer/components/tensor.py b/rerun_py/depthai_viewer/components/tensor.py index 284bc7eb6b78..4d054120b60b 100644 --- a/rerun_py/depthai_viewer/components/tensor.py +++ b/rerun_py/depthai_viewer/components/tensor.py @@ -1,6 +1,7 @@ from __future__ import annotations import uuid +from enum import Enum from typing import Final, Iterable, Union, cast import numpy as np @@ -18,8 +19,14 @@ "TensorArray", "TensorType", "TensorDType", + "ImageEncoding", ] + +class ImageEncoding(Enum): + NV12 = "NV12" + + TensorDType = Union[ np.uint8, np.uint16, @@ -53,6 +60,7 @@ class TensorArray(pa.ExtensionArray): # type: ignore[misc] def from_numpy( array: npt.NDArray[TensorDType], + encoding: ImageEncoding | None = None, names: Iterable[str | None] | None = None, meaning: bindings.TensorDataMeaning = None, meter: float | None = None, @@ -67,14 +75,14 @@ def from_numpy( shape = pa.array(shape_data, type=TensorType.storage_type["shape"].type) if array.dtype == np.uint8: - data_inner = pa.array([array.flatten().tobytes()], type=pa.binary()) + data_inner = pa.array([memoryview(array).tobytes()], type=pa.binary()) # type: ignore[arg-type] else: data_storage = pa.array(array.flatten()) data_inner = pa.ListArray.from_arrays(pa.array([0, len(data_storage)]), data_storage) data = build_dense_union( TensorType.storage_type["data"].type, - discriminant=DTYPE_MAP[cast(TensorDType, array.dtype.type)], + discriminant=DTYPE_MAP[cast(TensorDType, array.dtype.type)] if encoding is None else encoding.name, child=data_inner, ) @@ -99,13 +107,7 @@ def from_numpy( meter = pa.array([meter], mask=[False], type=pa.float32()) storage = pa.StructArray.from_arrays( - [ - tensor_id, - shape, - data, - meaning, - meter, - ], + [tensor_id, shape, data, meaning, meter], fields=list(TensorType.storage_type), ).cast(TensorType.storage_type) storage.validate(full=True) diff --git a/rerun_py/depthai_viewer/components/xlink_stats.py b/rerun_py/depthai_viewer/components/xlink_stats.py index 
65070f9cfe02..19d4c92bc097 100644 --- a/rerun_py/depthai_viewer/components/xlink_stats.py +++ b/rerun_py/depthai_viewer/components/xlink_stats.py @@ -13,11 +13,12 @@ class XLinkStats(pa.ExtensionArray): # type: ignore[misc] def create( total_bytes_written: int, total_bytes_read: int, + timestamp: float, ) -> "XLinkStats": - """Build XLinkStats data from total bytes written and read.""" + """Build XLinkStats data from total bytes written, total bytes read, and a timestamp.""" return pa.StructArray.from_arrays( # type: ignore[no-any-return] fields=XLinkStatsType.storage_type, - arrays=[[total_bytes_written], [total_bytes_read]], + arrays=[[total_bytes_written], [total_bytes_read], [timestamp]], mask=pa.array([False, False], type=pa.bool_()), ) diff --git a/rerun_py/depthai_viewer/log/image.py b/rerun_py/depthai_viewer/log/image.py index 9e435678cd42..a243f8a6dbef 100644 --- a/rerun_py/depthai_viewer/log/image.py +++ b/rerun_py/depthai_viewer/log/image.py @@ -4,6 +4,7 @@ import numpy.typing as npt from depthai_viewer import bindings +from depthai_viewer.components.tensor import ImageEncoding from depthai_viewer.log.error_utils import _send_warning from depthai_viewer.log.log_decorator import log_decorator from depthai_viewer.log.tensor import Tensor, _log_tensor, _to_numpy @@ -12,6 +13,7 @@ "log_image", "log_depth_image", "log_segmentation_image", + "log_encoded_image", ] @@ -201,3 +203,46 @@ def log_segmentation_image( ext=ext, timeless=timeless, ) + + +@log_decorator +def log_encoded_image( + entity_path: str, + image: npt.ArrayLike, + width: int, + height: int, + encoding: ImageEncoding, + *, + ext: Optional[Dict[str, Any]] = None, + timeless: bool = False, +) -> None: + """ + Log an image stored in an encoded pixel format such as NV12. + + The image is passed as a flat buffer of encoded bytes rather than as decoded RGB values. + + Parameters + ---------- + entity_path: + Path to the image in the space hierarchy. + image: + A buffer of encoded image data to log. + width: + The width of the decoded (RGB) image. + height: + The height of the decoded (RGB) image. + encoding: + The pixel encoding of the image data. + ext: + Optional dictionary of extension components. See [rerun.log_extension_components][] + timeless: + If true, the image will be timeless (default: False).
+ """ + image = np.array(image, copy=False) + tensor_height = height + if encoding == ImageEncoding.NV12: + tmp_height = height * 1.5 + if tmp_height % 2 != 0: + _send_warning(f"Invalid height {height} for NV12 encoded image: height * 1.5 must be divisible by 2.", 1) + tensor_height = int(tmp_height) + _log_tensor(entity_path, image.reshape(tensor_height, width), ext=ext, timeless=timeless, encoding=encoding) diff --git a/rerun_py/depthai_viewer/log/tensor.py b/rerun_py/depthai_viewer/log/tensor.py index e79351f517d5..bac2da9e41b5 100644 --- a/rerun_py/depthai_viewer/log/tensor.py +++ b/rerun_py/depthai_viewer/log/tensor.py @@ -5,7 +5,7 @@ from depthai_viewer import bindings from depthai_viewer.components.instance import InstanceArray -from depthai_viewer.components.tensor import TensorArray +from depthai_viewer.components.tensor import ImageEncoding, TensorArray from depthai_viewer.log.error_utils import _send_warning from depthai_viewer.log.extension_components import _add_extension_components from depthai_viewer.log.log_decorator import log_decorator @@ -85,6 +85,7 @@ def _log_tensor( meaning: bindings.TensorDataMeaning = None, ext: Optional[Dict[str, Any]] = None, timeless: bool = False, + encoding: Optional[ImageEncoding] = None, ) -> None: """Log a general tensor, perhaps with named dimensions.""" @@ -127,7 +128,7 @@ def _log_tensor( instanced: Dict[str, Any] = {} splats: Dict[str, Any] = {} - instanced["rerun.tensor"] = TensorArray.from_numpy(tensor, names, meaning, meter) + instanced["rerun.tensor"] = TensorArray.from_numpy(tensor, encoding, names, meaning, meter) if ext: _add_extension_components(instanced, splats, ext, None) diff --git a/rerun_py/depthai_viewer/log/xlink_stats.py b/rerun_py/depthai_viewer/log/xlink_stats.py index 7b6f2cae04fb..7529f595edfa 100644 --- a/rerun_py/depthai_viewer/log/xlink_stats.py +++ b/rerun_py/depthai_viewer/log/xlink_stats.py @@ -6,7 +6,7 @@ @log_decorator -def log_xlink_stats(total_bytes_written: int, total_bytes_read: int) -> None: +def log_xlink_stats(total_bytes_written: int, total_bytes_read: int, timestamp: float) -> None: """ Log an XLink throughput statistic. @@ -16,7 +16,11 @@ def log_xlink_stats(total_bytes_written: int, total_bytes_read: int) -> None: Total bytes written to the XLink by the host. total_bytes_read: Total bytes read from the XLink by the host. + timestamp: + Timestamp of the XLink throughput statistic in s since epoch. 
""" instanced: Dict[str, Any] = {} - instanced["rerun.xlink_stats"] = XLinkStats.create(total_bytes_written, total_bytes_read) # type: ignore[arg-type] + instanced["rerun.xlink_stats"] = XLinkStats.create( + total_bytes_written, total_bytes_read, timestamp + ) # type: ignore[arg-type] bindings.log_arrow_msg("xlink_stats", components=instanced, timeless=False) diff --git a/rerun_py/depthai_viewer/requirements.txt b/rerun_py/depthai_viewer/requirements.txt new file mode 100644 index 000000000000..6951baf051db --- /dev/null +++ b/rerun_py/depthai_viewer/requirements.txt @@ -0,0 +1,10 @@ +numpy>=1.23 +pyarrow==10.0.1 +setuptools +ahrs +# depthai_sdk conflicts with depthai, so it's installed seperatelly in __main__.py +depthai==2.22.0.0 +websockets +pydantic +deprecated +pyturbojpeg==1.7.1 diff --git a/rerun_py/pyproject.toml b/rerun_py/pyproject.toml index 11fcfcfe33ea..c679bf18914e 100644 --- a/rerun_py/pyproject.toml +++ b/rerun_py/pyproject.toml @@ -3,17 +3,7 @@ build-backend = "maturin" requires = ["maturin>=0.14.0,<0.15"] [project] -dependencies = [ - "deprecated", - "numpy>=1.23", - "pyarrow==10.0.1", - "setuptools", - "ahrs", - "depthai", # Atm python3 -m pip install --extra-index-url https://artifacts.luxonis.com/artifactory/luxonis-python-snapshot-local/ depthai==2.21.2.0.dev0+5004cc71950e6786feb36147b7919e146f4ef8da --force-reinstall # is required - "depthai-sdk>=1.10.1", - "websockets", - "pydantic", -] +dependencies = ["deprecated", "numpy>=1.23", "pyarrow==10.0.1", "setuptools"] classifiers = [ "Programming Language :: Rust", "Programming Language :: Python :: Implementation :: CPython", diff --git a/rerun_py/src/python_bridge.rs b/rerun_py/src/python_bridge.rs index 1152e5e5dbbd..b61271edec13 100644 --- a/rerun_py/src/python_bridge.rs +++ b/rerun_py/src/python_bridge.rs @@ -180,6 +180,7 @@ fn depthai_viewer_bindings(py: Python<'_>, m: &PyModule) -> PyResult<()> { m.add_function(wrap_pyfunction!(log_image_file, m)?)?; m.add_function(wrap_pyfunction!(log_cleared, m)?)?; m.add_function(wrap_pyfunction!(log_arrow_msg, m)?)?; + m.add_function(wrap_pyfunction!(version, m)?)?; Ok(()) } @@ -249,9 +250,9 @@ fn time(timeless: bool) -> TimePoint { // ---------------------------------------------------------------------------- #[pyfunction] -fn main(py: Python<'_>, argv: Vec, sys_exe: String) -> PyResult { +fn main(py: Python<'_>, argv: Vec, sys_exe: String, venv_site: String) -> PyResult { let build_info = re_build_info::build_info!(); - let call_src = depthai_viewer::CallSource::Python(python_version(py), sys_exe); + let call_src = depthai_viewer::CallSource::Python(python_version(py), sys_exe, venv_site); tokio::runtime::Builder::new_multi_thread() .enable_all() .build() @@ -315,6 +316,11 @@ fn connect(addr: Option) -> PyResult<()> { Ok(()) } +#[pyfunction] +fn version() -> PyResult { + Ok(python_session().version()) +} + #[must_use = "the tokio_runtime guard must be kept alive while using tokio"] #[cfg(feature = "web_viewer")] fn enter_tokio_runtime() -> tokio::runtime::EnterGuard<'static> { diff --git a/rerun_py/src/python_session.rs b/rerun_py/src/python_session.rs index 2e15b0de6304..21b62498a5fd 100644 --- a/rerun_py/src/python_session.rs +++ b/rerun_py/src/python_session.rs @@ -113,7 +113,7 @@ impl PythonSession { sys_exe: SysExePath, ) { self.recording_meta_data.recording_source = - re_log_types::RecordingSource::PythonSdk(python_version, sys_exe); + re_log_types::RecordingSource::PythonSdk(python_version, sys_exe, String::new()); } /// Check if logging is enabled on this 
`Session`. @@ -121,6 +121,11 @@ impl PythonSession { self.enabled } + pub fn version(&self) -> String { + let build_info = re_build_info::build_info!(); + build_info.version.to_string() + } + /// Enable or disable logging on this `Session`. pub fn set_enabled(&mut self, enabled: bool) { self.enabled = enabled; } diff --git a/scripts/check_shader.py b/scripts/check_shader.py new file mode 100644 index 000000000000..19a130424c20 --- /dev/null +++ b/scripts/check_shader.py @@ -0,0 +1,61 @@ +import argparse +import os +import re +import subprocess + +resolved_paths = set() + + +def resolve_import(includes, path): + shader_text = "" + + for include in includes: + include_shader_path = include.replace("#import", "").replace("<", "").replace(">", "").strip() + # Get absolute path to the included file + include_file_path = os.path.abspath(os.path.join(os.path.dirname(path), include_shader_path)) + + # Check if the file has already been included + if include_file_path in resolved_paths: + continue + + # Mark this file as included + resolved_paths.add(include_file_path) + + with open(include_file_path, "r") as include_file: + include_file_text = include_file.read() + + # Recursively resolve imports in the included file + nested_includes = re.findall(r"#import <.*>", include_file_text) + resolved_nested_includes = resolve_import(nested_includes, include_file_path) + + # Concatenate the resolved includes and the content of the included file + shader_text += resolved_nested_includes + include_file_text + + return shader_text + + +# Parse which shader to check +parser = argparse.ArgumentParser(description="Check shader for errors.") +parser.add_argument("shader", metavar="shader", type=str, nargs=1, help="shader to check") + +args = parser.parse_args() + +shader = args.shader[0] + +with open(shader, "r") as shader_file: + shader_text = shader_file.read() + + +# Get all the import statements (#import <...>) from the shader +includes = re.findall(r"#import <.*>", shader_text) +resolved_includes = resolve_import(includes, shader) + +# Add the resolved includes to the original shader text +shader_text = resolved_includes + shader_text +includes = re.findall(r"#import <.*>", shader_text) +# Remove original import statements +for include in includes: + shader_text = shader_text.replace(include, "") + + +subprocess.run(["naga", "--stdin-file-path", shader], input=shader_text.encode()) diff --git a/scripts/version_util.py b/scripts/version_util.py index 0a57bcc071ff..dfb8a63fa531 100755 --- a/scripts/version_util.py +++ b/scripts/version_util.py @@ -22,7 +22,7 @@ # A regex to match the version number in Cargo.toml as SemVer, e.g., 1.2.3-alpha.0 CARGO_VERSION_REGEX: Final = r"^version\s*=\s*\"(.+)\"$" -VERSION_TAG_REGEX: Final = r"^v(?P<version>([0-9]+)\.([0-9]+)\.([0-9]+))$" +VERSION_TAG_REGEX: Final = r"^v(?P<version>([0-9]+)\.([0-9]+)\.([0-9]+)(?:-(alpha|beta)(?:\.([0-9]+))?)?)$" def get_cargo_version(cargo_toml: str) -> semver.VersionInfo:
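A note on the synced-packet routing in _on_synced_packets above: depthai_sdk keys each synced packet by a dotted descriptor string, and the handler walks that string back into the dai.CameraBoardSocket enum with repeated getattr. A minimal standalone sketch of that resolution (the descriptor value is only an illustrative example):

    import depthai as dai


    def socket_from_descriptor(descriptor: str) -> dai.CameraBoardSocket:
        # Walk the dotted path attribute by attribute, starting at the dai module,
        # e.g. "CameraBoardSocket.CAM_A" -> dai.CameraBoardSocket.CAM_A.
        obj = dai
        for part in descriptor.split("."):
            obj = getattr(obj, part)
        return obj


    assert socket_from_descriptor("CameraBoardSocket.CAM_A") == dai.CameraBoardSocket.CAM_A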
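For log_encoded_image, the NV12 case expects the raw frame bytes laid out as a full-resolution luma plane followed by an interleaved half-height chroma plane, i.e. a (height * 3 // 2, width) buffer. A rough usage sketch with a synthetic frame; the entity path follows the "{socket}/transform/{cam_kind}/Image" pattern used by the packet handler, and CAM_A is just an example socket name:

    import numpy as np

    import depthai_viewer as viewer

    viewer.init("Depthai Viewer")
    viewer.connect()

    w, h = 640, 400  # NV12 requires an even width and height (4:2:0 chroma subsampling)
    y_plane = np.zeros((h, w), dtype=np.uint8)  # full-resolution luma
    uv_plane = np.full((h // 2, w), 128, dtype=np.uint8)  # interleaved U/V, half vertical resolution (128 = neutral chroma)
    nv12 = np.concatenate([y_plane, uv_plane])  # shape (h * 3 // 2, w) == (600, 640)

    viewer.log_encoded_image(
        "CAM_A/transform/color_cam/Image",
        nv12,
        width=w,
        height=h,
        encoding=viewer.ImageEncoding.NV12,
    )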
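The IMU path runs every report through the ahrs package's Mahony filter; on_imu remaps depthai's (x, y, z) readings to (z, x, y) so the resulting quaternion lines up with the viewer's RDF coordinates. A reduced sketch of that update loop, with made-up readings:

    import numpy as np
    from ahrs.filters import Mahony

    mahony = Mahony(frequency=100)  # matches the configured IMU report rate
    q = np.array([1.0, 0.0, 0.0, 0.0])  # identity quaternion, (w, x, y, z)

    # One synthetic report, already remapped to (z, x, y) order as in on_imu.
    gyro = np.array([0.0, 0.02, 0.0])  # rad/s
    accel = np.array([0.0, 0.0, 9.81])  # m/s^2

    q = mahony.updateIMU(q, gyro, accel)  # returns the updated orientation estimate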
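The rewritten Store drops the old Action dispatch in favor of explicit setters and read-only properties, so backend callers mutate state directly instead of going through handle_action. A short usage sketch against that API:

    from depthai_viewer._backend.store import Store
    from depthai_viewer._backend.topic import Topic

    store = Store()
    store.set_subscriptions([Topic.ImuData])
    assert Topic.ImuData in store.subscriptions

    store.reset()  # e.g. after a dropped websocket connection
    assert store.pipeline_config is None
    assert store.subscriptions == []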
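Finally, log_xlink_stats now takes the sample time explicitly; a plain time.time() matches the documented seconds-since-epoch convention. Importing straight from the module avoids assuming a top-level re-export:

    import time

    from depthai_viewer.log.xlink_stats import log_xlink_stats

    log_xlink_stats(total_bytes_written=1_048_576, total_bytes_read=4_194_304, timestamp=time.time())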