Skip to content

Commit cb4141e

Browse files
irudoySomethingNew71 authored and committed
fix(ui): stop chart jitter on play with anchored bucket downsampling
LTTB partitions a slice into N equal buckets *by index* and picks the "best" point per bucket. During cursor-tracked playback the slice itself shifts a few samples each frame, so each bucket gets a slightly different sample set and the chosen peak flickers — visibly so when zoomed far out, where peaks are flattened to single buckets. Replace LTTB in compute_viewport_points with min/max-per-bucket, where bucket boundaries are anchored to absolute time (k × bucket_size from t = 0) instead of the viewport. As the cursor advances, samples slide through a fixed grid and each bucket's contents are invariant to the viewport offset, so peaks stay put. Two points per bucket (min, max) preserve the envelope; the bucket count is halved so total output stays under MAX_CHART_POINTS.
1 parent 660050d commit cb4141e

3 files changed

Lines changed: 108 additions & 56 deletions

File tree

src/app.rs

Lines changed: 32 additions & 38 deletions
Original file line number · Diff line number · Diff line change
@@ -51,8 +51,6 @@ pub struct UltraLogApp {
5151
load_receiver: Option<Receiver<LoadResult>>,
5252
/// Current loading state
5353
pub(crate) loading_state: LoadingState,
54-
/// Cache for downsampled chart data
55-
pub(crate) downsample_cache: HashMap<CacheKey, Vec<[f64; 2]>>,
5654
/// Cache for channel min/max values (avoids O(n) scans)
5755
pub(crate) minmax_cache: HashMap<CacheKey, (f64, f64)>,
5856
/// Last X-axis bounds shown by each plot area. Used to slice raw data to
@@ -178,7 +176,6 @@ impl Default for UltraLogApp {
178176
last_drop_time: None,
179177
load_receiver: None,
180178
loading_state: LoadingState::Idle,
181-
downsample_cache: HashMap::new(),
182179
minmax_cache: HashMap::new(),
183180
chart_last_x_bounds: HashMap::new(),
184181
cursor_time: None,
@@ -812,6 +809,26 @@ impl UltraLogApp {
812809
}
813810
}
814811

812+
/// Borrow channel data without copying. Returns an empty slice for invalid
813+
/// indices or computed channels that haven't been evaluated yet. Used in
814+
/// per-frame chart paths where cloning the full channel would be wasteful.
815+
pub fn get_channel_data_ref(&self, file_index: usize, channel_index: usize) -> &[f64] {
816+
let Some(file) = self.files.get(file_index) else {
817+
return &[];
818+
};
819+
let regular_count = file.log.channels.len();
820+
if channel_index < regular_count {
821+
file.get_channel_column(channel_index).unwrap_or(&[])
822+
} else {
823+
let computed_idx = channel_index - regular_count;
824+
self.file_computed_channels
825+
.get(&file_index)
826+
.and_then(|c| c.get(computed_idx))
827+
.and_then(|c| c.cached_data.as_deref())
828+
.unwrap_or(&[])
829+
}
830+
}
831+
815832
/// Get the display name of a channel by index (handles both regular and computed channels)
816833
pub fn get_channel_name(&self, file_index: usize, channel_index: usize) -> String {
817834
if file_index >= self.files.len() {
@@ -853,20 +870,19 @@ impl UltraLogApp {
853870
return Some(cached);
854871
}
855872

856-
// Compute min/max (handles both regular and computed channels)
857-
let data = self.get_channel_data(file_index, channel_index);
858-
859-
if data.is_empty() {
860-
return None;
861-
}
862-
863-
let (min_val, max_val) = data
864-
.iter()
865-
.fold((f64::INFINITY, f64::NEG_INFINITY), |(min, max), &v| {
866-
(min.min(v), max.max(v))
867-
});
873+
// Compute min/max (handles both regular and computed channels).
874+
// Scoped to release the borrow before mutating the cache below.
875+
let (min_val, max_val) = {
876+
let data = self.get_channel_data_ref(file_index, channel_index);
877+
if data.is_empty() {
878+
return None;
879+
}
880+
data.iter()
881+
.fold((f64::INFINITY, f64::NEG_INFINITY), |(min, max), &v| {
882+
(min.min(v), max.max(v))
883+
})
884+
};
868885

869-
// Cache the result
870886
self.minmax_cache.insert(cache_key, (min_val, max_val));
871887
Some((min_val, max_val))
872888
}
@@ -883,28 +899,6 @@ impl UltraLogApp {
883899
self.close_tab(tab_idx);
884900
}
885901

886-
// Clear downsample cache entries for this file and update indices
887-
let mut new_cache = HashMap::new();
888-
for (key, value) in self.downsample_cache.drain() {
889-
if key.file_index == index {
890-
// Skip entries for removed file
891-
continue;
892-
} else if key.file_index > index {
893-
// Update indices for files after the removed one
894-
new_cache.insert(
895-
CacheKey {
896-
file_index: key.file_index - 1,
897-
channel_index: key.channel_index,
898-
plot_area_id: key.plot_area_id,
899-
},
900-
value,
901-
);
902-
} else {
903-
new_cache.insert(key, value);
904-
}
905-
}
906-
self.downsample_cache = new_cache;
907-
908902
// Clear minmax cache entries for this file and update indices
909903
let mut new_minmax_cache = HashMap::new();
910904
for (key, value) in self.minmax_cache.drain() {

src/state.rs

Lines changed: 18 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -4,6 +4,7 @@
44
//! the application, including loaded files, selected channels, and color palettes.
55
66
use std::path::PathBuf;
7+
use std::sync::OnceLock;
78

89
use crate::parsers::{Channel, EcuType, Log};
910

@@ -84,6 +85,11 @@ pub struct LoadedFile {
8485
/// Cached flag for each channel: true if channel has non-zero data
8586
/// Computed once on load for UI performance
8687
pub channels_with_data: Vec<bool>,
88+
/// Lazy column-major view of `log.data` as `Vec<Vec<f64>>`. Built on first
89+
/// access so the chart hot path can borrow `&[f64]` for a channel instead
90+
/// of re-collecting an owned `Vec<f64>` from the row-major store on every
91+
/// frame.
92+
channel_columns: OnceLock<Vec<Vec<f64>>>,
8793
}
8894

8995
impl LoadedFile {
@@ -103,6 +109,7 @@ impl LoadedFile {
103109
ecu_type,
104110
log,
105111
channels_with_data,
112+
channel_columns: OnceLock::new(),
106113
}
107114
}
108115

@@ -114,6 +121,17 @@ impl LoadedFile {
114121
.copied()
115122
.unwrap_or(false)
116123
}
124+
125+
/// Borrow a regular channel's f64 data without copying. Lazily transposes
126+
/// `log.data` into column-major form on first call.
127+
pub fn get_channel_column(&self, channel_index: usize) -> Option<&[f64]> {
128+
let cols = self.channel_columns.get_or_init(|| {
129+
(0..self.log.channels.len())
130+
.map(|i| self.log.get_channel_data(i))
131+
.collect()
132+
});
133+
cols.get(channel_index).map(Vec::as_slice)
134+
}
117135
}
118136

119137
/// A channel selected for visualization on the chart

src/ui/chart.rs

Lines changed: 58 additions & 18 deletions
Original file line number · Diff line number · Diff line change
@@ -851,35 +851,75 @@ impl UltraLogApp {
851851
channel_index: usize,
852852
viewport: Option<(f64, f64)>,
853853
) -> Option<Vec<[f64; 2]>> {
854+
// Resolve min/max first so the mutable borrow on the cache ends before
855+
// we take immutable borrows on the channel data below.
856+
let (min_y, max_y) = self
857+
.get_channel_min_max(file_index, channel_index)
858+
.unwrap_or((0.0, 1.0));
859+
854860
let file = self.files.get(file_index)?;
855861
let times = file.log.get_times_as_f64();
856-
let data = self.get_channel_data(file_index, channel_index);
862+
let data = self.get_channel_data_ref(file_index, channel_index);
857863
if times.is_empty() || times.len() != data.len() {
858864
return None;
859865
}
860866

861-
let (lo, hi) = match viewport {
867+
let full_lttb = || Self::downsample_lttb(times, data, MAX_CHART_POINTS);
868+
let downsampled = match viewport {
862869
Some((vmin, vmax)) if vmax > vmin => {
870+
// Anchored min/max-per-bucket downsampling. Bucket
871+
// boundaries are at multiples of `bucket_size` from t=0,
872+
// so during cursor-tracked playback samples slide through
873+
// a fixed grid instead of being re-bucketed every frame.
874+
// Without this anchoring, LTTB-by-index re-selects a
875+
// different "best peak" per frame and the curve jitters
876+
// at far zoom-out.
863877
let pad = (vmax - vmin) * 0.1;
864-
let lo_t = vmin - pad;
865-
let hi_t = vmax + pad;
866-
let lo_i = times.partition_point(|&t| t < lo_t).saturating_sub(1);
867-
let hi_i = times
868-
.partition_point(|&t| t <= hi_t)
869-
.saturating_add(1)
870-
.min(times.len());
871-
(lo_i, hi_i.max(lo_i + 1))
878+
let padded_span = (vmax - vmin) + 2.0 * pad;
879+
let n_buckets = (MAX_CHART_POINTS / 2).max(1);
880+
let bucket_size = padded_span / n_buckets as f64;
881+
if bucket_size <= 0.0 {
882+
full_lttb()
883+
} else {
884+
let raw_lo = vmin - pad;
885+
let k_lo = (raw_lo / bucket_size).floor() as i64;
886+
let mut points: Vec<[f64; 2]> = Vec::with_capacity(MAX_CHART_POINTS);
887+
let mut idx = times.partition_point(|&t| t < k_lo as f64 * bucket_size);
888+
for k in 0..n_buckets as i64 {
889+
let bucket_end = (k_lo + k + 1) as f64 * bucket_size;
890+
let mut end_idx = idx;
891+
while end_idx < times.len() && times[end_idx] < bucket_end {
892+
end_idx += 1;
893+
}
894+
if end_idx > idx {
895+
let mut min_i = idx;
896+
let mut max_i = idx;
897+
for i in idx..end_idx {
898+
if data[i] < data[min_i] {
899+
min_i = i;
900+
}
901+
if data[i] > data[max_i] {
902+
max_i = i;
903+
}
904+
}
905+
if min_i == max_i {
906+
points.push([times[min_i], data[min_i]]);
907+
} else if min_i < max_i {
908+
points.push([times[min_i], data[min_i]]);
909+
points.push([times[max_i], data[max_i]]);
910+
} else {
911+
points.push([times[max_i], data[max_i]]);
912+
points.push([times[min_i], data[min_i]]);
913+
}
914+
}
915+
idx = end_idx;
916+
}
917+
points
918+
}
872919
}
873-
_ => (0, times.len()),
920+
_ => full_lttb(),
874921
};
875922

876-
let times_slice = &times[lo..hi];
877-
let data_slice = &data[lo..hi];
878-
let downsampled = Self::downsample_lttb(times_slice, data_slice, MAX_CHART_POINTS);
879-
880-
let (min_y, max_y) = self
881-
.get_channel_min_max(file_index, channel_index)
882-
.unwrap_or((0.0, 1.0));
883923
let range = (max_y - min_y).abs();
884924
// Constant channels (range ≈ 0) get parked at the middle of the
885925
// overlay strip so they remain visible instead of pinning to the

0 commit comments

Comments (0)