diff --git a/AGENTS.md b/AGENTS.md
index 2a7295118d..7a1dd5542b 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -44,3 +44,7 @@
 - Convert the effectful API to a Next.js handler with `apiToHandler(ApiLive)` from `@/lib/server` and export the returned `handler`—avoid calling `runPromise` inside route files.
 - On the server, run effects through `EffectRuntime.runPromise` from `@/lib/server`, typically after `provideOptionalAuth`, so cookies and per-request context are attached automatically.
 - On the client, use `useEffectQuery`/`useEffectMutation` from `@/lib/EffectRuntime`; they already bind the managed runtime and tracing so you shouldn't call `EffectRuntime.run*` directly in components.
+
+## Code Formatting
+- Always format code before completing work: run `pnpm format` for TypeScript/JavaScript and `cargo fmt` for Rust.
+- Run these commands regularly during development and always at the end of a coding session to ensure consistent formatting.
diff --git a/CLAUDE.md b/CLAUDE.md
index 7617ae5098..d13e14a888 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -414,3 +414,11 @@ Transcription/AI Enhancement → Database Storage
 - **Monorepo Guide**: Turborepo documentation
 - **Effect System**: Used in web-backend packages
 - **Media Processing**: FFmpeg documentation for Rust bindings
+
+## Code Formatting
+
+Always format code before completing work:
+- **TypeScript/JavaScript**: Run `pnpm format` to format all code with Biome
+- **Rust**: Run `cargo fmt` to format all Rust code with rustfmt
+
+These commands should be run regularly during development and always at the end of a coding session to ensure consistent formatting across the codebase.
diff --git a/apps/desktop/src-tauri/src/camera.rs b/apps/desktop/src-tauri/src/camera.rs
index 4160fa2f57..7263105ff6 100644
--- a/apps/desktop/src-tauri/src/camera.rs
+++ b/apps/desktop/src-tauri/src/camera.rs
@@ -28,13 +28,9 @@ static TOOLBAR_HEIGHT: f32 = 56.0; // also defined in Typescript
 // Basically poor man's MSAA
 static GPU_SURFACE_SCALE: u32 = 4;
 
-#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize, Type)]
-#[serde(rename_all = "lowercase")]
-pub enum CameraPreviewSize {
-    #[default]
-    Sm,
-    Lg,
-}
+pub const MIN_CAMERA_SIZE: f32 = 150.0;
+pub const MAX_CAMERA_SIZE: f32 = 600.0;
+pub const DEFAULT_CAMERA_SIZE: f32 = 230.0;
 
 #[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize, Type)]
 #[serde(rename_all = "lowercase")]
@@ -45,13 +41,27 @@ pub enum CameraPreviewShape {
     Full,
 }
 
-#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize, Type)]
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Type)]
 pub struct CameraPreviewState {
-    size: CameraPreviewSize,
+    size: f32,
     shape: CameraPreviewShape,
     mirrored: bool,
 }
 
+impl Default for CameraPreviewState {
+    fn default() -> Self {
+        Self {
+            size: DEFAULT_CAMERA_SIZE,
+            shape: CameraPreviewShape::default(),
+            mirrored: false,
+        }
+    }
+}
+
+fn clamp_size(size: f32) -> f32 {
+    size.max(MIN_CAMERA_SIZE).min(MAX_CAMERA_SIZE)
+}
+
 pub struct CameraPreviewManager {
     store: Result<Arc<Store<Wry>>, String>,
     preview: Option,
 }
@@ -70,17 +80,22 @@ impl CameraPreviewManager {
     /// Get the current state of the camera window.
     pub fn get_state(&self) -> anyhow::Result<CameraPreviewState> {
-        Ok(self
+        let mut state: CameraPreviewState = self
             .store
             .as_ref()
             .map_err(|err| anyhow!("{err}"))?
             .get("state")
-            .and_then(|v| serde_json::from_value(v).ok().unwrap_or_default())
-            .unwrap_or_default())
+            .and_then(|v| serde_json::from_value(v).ok())
+            .unwrap_or_default();
+
+        state.size = clamp_size(state.size);
+        Ok(state)
     }
 
     /// Save the current state of the camera window.
-    pub fn set_state(&self, state: CameraPreviewState) -> anyhow::Result<()> {
+    pub fn set_state(&self, mut state: CameraPreviewState) -> anyhow::Result<()> {
+        state.size = clamp_size(state.size);
+
         let store = self.store.as_ref().map_err(|err| anyhow!("{err}"))?;
         store.set("state", serde_json::to_value(&state)?);
         store.save()?;
@@ -607,16 +622,17 @@ impl Renderer {
     /// Update the uniforms which hold the camera preview state
     fn update_state_uniforms(&self, state: &CameraPreviewState) {
+        let clamped_size = clamp_size(state.size);
+        let normalized_size =
+            (clamped_size - MIN_CAMERA_SIZE) / (MAX_CAMERA_SIZE - MIN_CAMERA_SIZE);
+
         let state_uniforms = StateUniforms {
             shape: match state.shape {
                 CameraPreviewShape::Round => 0.0,
                 CameraPreviewShape::Square => 1.0,
                 CameraPreviewShape::Full => 2.0,
             },
-            size: match state.size {
-                CameraPreviewSize::Sm => 0.0,
-                CameraPreviewSize::Lg => 1.0,
-            },
+            size: normalized_size,
             mirrored: if state.mirrored { 1.0 } else { 0.0 },
             _padding: 0.0,
         };
@@ -664,11 +680,7 @@ fn resize_window(
 ) -> tauri::Result<(u32, u32)> {
     trace!("CameraPreview/resize_window");
 
-    let base: f32 = if state.size == CameraPreviewSize::Sm {
-        230.0
-    } else {
-        400.0
-    };
+    let base = clamp_size(state.size);
     let window_width = if state.shape == CameraPreviewShape::Full {
         if aspect >= 1.0 { base * aspect } else { base }
     } else {
diff --git a/apps/desktop/src-tauri/src/camera.wgsl b/apps/desktop/src-tauri/src/camera.wgsl
index a55893f3fa..5caa8f2f18 100644
--- a/apps/desktop/src-tauri/src/camera.wgsl
+++ b/apps/desktop/src-tauri/src/camera.wgsl
@@ -124,7 +124,8 @@ fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
     } else if (shape == 1.0) {
         // Square shape with enhanced corner anti-aliasing
-        let corner_radius = select(0.1, 0.12, size == 1.0);
+        // Interpolate corner radius based on normalized size (0-1)
+        let corner_radius = mix(0.10, 0.14, size);
         let abs_uv = abs(center_uv);
         let corner_pos = abs_uv - (1.0 - corner_radius);
         let corner_dist = length(max(corner_pos, vec2(0.0, 0.0)));
@@ -138,7 +139,8 @@ fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
     } else if (shape == 2.0) {
         // Full shape with aspect ratio-corrected rounded corners
         let window_aspect = window_uniforms.window_width / window_uniforms.window_height;
-        let corner_radius = select(0.08, 0.1, size == 1.0); // radius based on size (8% for small, 10% for large)
+        // Interpolate corner radius based on normalized size (0-1)
+        let corner_radius = mix(0.08, 0.12, size);
         let abs_uv = abs(center_uv);
         let corner_pos = abs_uv - (1.0 - corner_radius);
diff --git a/apps/desktop/src-tauri/src/deeplink_actions.rs b/apps/desktop/src-tauri/src/deeplink_actions.rs
index 86b6245cf8..dbd90f667f 100644
--- a/apps/desktop/src-tauri/src/deeplink_actions.rs
+++ b/apps/desktop/src-tauri/src/deeplink_actions.rs
@@ -6,10 +6,7 @@ use std::path::{Path, PathBuf};
 use tauri::{AppHandle, Manager, Url};
 use tracing::trace;
 
-use crate::{
-    App, ArcLock, apply_camera_input, apply_mic_input, recording::StartRecordingInputs,
-    windows::ShowCapWindow,
-};
+use crate::{App, ArcLock, recording::StartRecordingInputs, windows::ShowCapWindow};
 
 #[derive(Debug, Serialize, Deserialize)]
 #[serde(rename_all = "snake_case")]
@@ -119,8 +116,8 @@ impl DeepLinkAction {
             } => {
                 let state =
                    app.state::<ArcLock<App>>();
-                apply_camera_input(app.clone(), state.clone(), camera).await?;
-                apply_mic_input(state.clone(), mic_label).await?;
+                crate::set_camera_input(app.clone(), state.clone(), camera).await?;
+                crate::set_mic_input(state.clone(), mic_label).await?;
 
                 let capture_target: ScreenCaptureTarget = match capture_mode {
                     CaptureMode::Screen(name) => cap_recording::screen_capture::list_displays()
diff --git a/apps/desktop/src-tauri/src/lib.rs b/apps/desktop/src-tauri/src/lib.rs
index ec2a2ae79b..284f8b3222 100644
--- a/apps/desktop/src-tauri/src/lib.rs
+++ b/apps/desktop/src-tauri/src/lib.rs
@@ -331,13 +331,6 @@ impl App {
 #[specta::specta]
 #[instrument(skip(state))]
 async fn set_mic_input(state: MutableState<'_, App>, label: Option<String>) -> Result<(), String> {
-    apply_mic_input(state, label).await
-}
-
-pub(crate) async fn apply_mic_input(
-    state: MutableState<'_, App>,
-    label: Option<String>,
-) -> Result<(), String> {
     let (mic_feed, studio_handle, current_label) = {
         let app = state.read().await;
         let handle = match app.current_recording() {
@@ -421,14 +414,6 @@ async fn set_camera_input(
     app_handle: AppHandle,
     state: MutableState<'_, App>,
     id: Option<DeviceOrModelID>,
-) -> Result<(), String> {
-    apply_camera_input(app_handle, state, id).await
-}
-
-pub(crate) async fn apply_camera_input(
-    app_handle: AppHandle,
-    state: MutableState<'_, App>,
-    id: Option<DeviceOrModelID>,
 ) -> Result<(), String> {
     let app = state.read().await;
     let camera_feed = app.camera_feed.clone();
@@ -459,12 +444,39 @@ pub(crate) async fn apply_camera_input(
             .map_err(|e| e.to_string())?;
         }
         Some(id) => {
-            camera_feed
-                .ask(feeds::camera::SetInput { id: id.clone() })
-                .await
-                .map_err(|e| e.to_string())?
-                .await
-                .map_err(|e| e.to_string())?;
+            let mut attempts = 0;
+            loop {
+                attempts += 1;
+
+                // We first ask the actor to set the input.
+                // This returns a future that resolves when the camera is actually ready.
+                let request = camera_feed
+                    .ask(feeds::camera::SetInput { id: id.clone() })
+                    .await
+                    .map_err(|e| e.to_string());
+
+                let result = match request {
+                    Ok(future) => future.await.map_err(|e| e.to_string()),
+                    Err(e) => Err(e),
+                };
+
+                match result {
+                    Ok(_) => break,
+                    Err(e) => {
+                        if attempts >= 3 {
+                            return Err(format!(
+                                "Failed to initialize camera after {} attempts: {}",
+                                attempts, e
+                            ));
+                        }
+                        warn!(
+                            "Failed to set camera input (attempt {}): {}. Retrying...",
+                            attempts, e
+                        );
+                        tokio::time::sleep(Duration::from_millis(500)).await;
+                    }
+                }
+            }
 
             ShowCapWindow::Camera
                 .show(&app_handle)
@@ -741,11 +753,19 @@ enum CurrentRecordingTarget {
     },
 }
 
+#[derive(Serialize, Type)]
+#[serde(rename_all = "camelCase")]
+pub enum RecordingStatus {
+    Pending,
+    Recording,
+}
+
 #[derive(Serialize, Type)]
 #[serde(rename_all = "camelCase")]
 struct CurrentRecording {
     target: CurrentRecordingTarget,
     mode: RecordingMode,
+    status: RecordingStatus,
 }
 
 #[tauri::command]
@@ -756,10 +776,14 @@ async fn get_current_recording(
 ) -> Result<JsonValue<Option<CurrentRecording>>, ()> {
     let state = state.read().await;
 
-    let (mode, capture_target) = match &state.recording_state {
+    let (mode, capture_target, status) = match &state.recording_state {
         RecordingState::None => return Ok(JsonValue::new(&None)),
-        RecordingState::Pending { mode, target } => (*mode, target),
-        RecordingState::Active(inner) => (inner.mode(), inner.capture_target()),
+        RecordingState::Pending { mode, target } => (*mode, target, RecordingStatus::Pending),
+        RecordingState::Active(inner) => (
+            inner.mode(),
+            inner.capture_target(),
+            RecordingStatus::Recording,
+        ),
     };
 
     let target = match capture_target {
@@ -777,7 +801,11 @@ async fn get_current_recording(
         },
     };
 
-    Ok(JsonValue::new(&Some(CurrentRecording { target, mode })))
+    Ok(JsonValue::new(&Some(CurrentRecording {
+        target,
+        mode,
+        status,
+    })))
 }
 
 #[derive(Serialize, Type, tauri_specta::Event, Clone)]
@@ -2558,8 +2586,8 @@ pub async fn run(recording_logging_handle: LoggingHandle, logs_dir: PathBuf) {
                 .flatten()
                 .unwrap_or_default();
 
-            let _ = apply_mic_input(app.state(), settings.mic_name).await;
-            let _ = apply_camera_input(app.clone(), app.state(), settings.camera_id).await;
+            let _ = set_mic_input(app.state(), settings.mic_name).await;
+            let _ = set_camera_input(app.clone(), app.state(), settings.camera_id).await;
 
             let _ = start_recording(app.clone(), app.state(), {
                 recording::StartRecordingInputs {
@@ -2635,6 +2663,7 @@ pub async fn run(recording_logging_handle: LoggingHandle, logs_dir: PathBuf) {
                     .camera_feed
                     .ask(feeds::camera::RemoveInput)
                     .await;
+
                 app_state.selected_mic_label = None;
                 app_state.selected_camera_id = None;
                 app_state.camera_in_use = false;
@@ -2695,11 +2724,18 @@ pub async fn run(recording_logging_handle: LoggingHandle, logs_dir: PathBuf) {
                 CapWindowId::Camera => {
                     let app = app.clone();
                     tokio::spawn(async move {
-                        app.state::<ArcLock<App>>()
-                            .write()
-                            .await
-                            .camera_preview
-                            .on_window_close();
+                        let state = app.state::<ArcLock<App>>();
+                        let mut app_state = state.write().await;
+
+                        app_state.camera_preview.on_window_close();
+
+                        if !app_state.is_recording_active_or_pending() {
+                            let _ = app_state
+                                .camera_feed
+                                .ask(feeds::camera::RemoveInput)
+                                .await;
+                            app_state.camera_in_use = false;
+                        }
                     });
                 }
                 _ => {}
diff --git a/apps/desktop/src-tauri/src/recording.rs b/apps/desktop/src-tauri/src/recording.rs
index 3a2ea626d8..1c821f87b2 100644
--- a/apps/desktop/src-tauri/src/recording.rs
+++ b/apps/desktop/src-tauri/src/recording.rs
@@ -47,7 +46,6 @@ use crate::{
     App, CurrentRecordingChanged, MutableState, NewStudioRecordingAdded, RecordingState,
     RecordingStopped, VideoUploadInfo,
     api::PresignedS3PutRequestMethod,
-    apply_camera_input, apply_mic_input,
     audio::AppSounds,
     auth::AuthStore,
     create_screenshot,
    general_settings::{
    },
     open_external_link,
     presets::PresetsStore,
-    recording_settings::RecordingSettingsStore,
     thumbnails::*,
     upload::{
         InstantMultipartUpload, build_video_meta, compress_image, create_or_get_video, upload_video,
@@ -351,41 +349,6 @@ pub enum RecordingAction {
     UpgradeRequired,
 }
 
-async fn restore_inputs_from_store_if_missing(app: &AppHandle, state: &MutableState<'_, App>) {
-    let guard = state.read().await;
-    let recording_active = !matches!(guard.recording_state, RecordingState::None);
-    let needs_mic = guard.selected_mic_label.is_none();
-    let needs_camera = guard.selected_camera_id.is_none();
-    drop(guard);
-
-    if recording_active || (!needs_mic && !needs_camera) {
-        return;
-    }
-
-    let settings = match RecordingSettingsStore::get(app) {
-        Ok(Some(settings)) => settings,
-        Ok(None) => return,
-        Err(err) => {
-            warn!(%err, "Failed to load recording settings while restoring inputs");
-            return;
-        }
-    };
-
-    if let Some(mic) = settings.mic_name.clone().filter(|_| needs_mic) {
-        match apply_mic_input(app.state(), Some(mic)).await {
-            Err(err) => warn!(%err, "Failed to restore microphone input"),
-            Ok(_) => {}
-        }
-    }
-
-    if let Some(camera) = settings.camera_id.clone().filter(|_| needs_camera) {
-        match apply_camera_input(app.clone(), app.state(), Some(camera)).await {
-            Err(err) => warn!(%err, "Failed to restore camera input"),
-            Ok(_) => {}
-        }
-    }
-}
-
 #[tauri::command]
 #[specta::specta]
 #[tracing::instrument(name = "recording", skip_all)]
@@ -394,28 +357,10 @@ pub async fn start_recording(
     state_mtx: MutableState<'_, App>,
     inputs: StartRecordingInputs,
 ) -> Result {
-    restore_inputs_from_store_if_missing(&app, &state_mtx).await;
-
     if !matches!(state_mtx.read().await.recording_state, RecordingState::None) {
         return Err("Recording already in progress".to_string());
     }
 
-    let has_camera_selected = {
-        let guard = state_mtx.read().await;
-        guard.selected_camera_id.is_some()
-    };
-    let camera_window_open = CapWindowId::Camera.get(&app).is_some();
-    let should_open_camera_preview =
-        matches!(inputs.mode, RecordingMode::Instant) && has_camera_selected && !camera_window_open;
-
-    if should_open_camera_preview {
-        ShowCapWindow::Camera
-            .show(&app)
-            .await
-            .map_err(|err| error!("Failed to show camera preview window: {err}"))
-            .ok();
-    }
-
     let id = uuid::Uuid::new_v4().to_string();
     let general_settings = GeneralSettingsStore::get(&app).ok().flatten();
     let general_settings = general_settings.as_ref();
@@ -600,16 +545,57 @@ pub async fn start_recording(
         let inputs = inputs.clone();
         async move {
             fail!("recording::spawn_actor");
-            let mut state = state_mtx.write().await;
 
             use kameo::error::SendError;
-            let camera_feed = match state.camera_feed.ask(camera::Lock).await {
-                Ok(lock) => Some(Arc::new(lock)),
-                Err(SendError::HandlerError(camera::LockFeedError::NoInput)) => None,
+
+            // Initialize camera if selected but not active
+            let (camera_feed_actor, selected_camera_id) = {
+                let state = state_mtx.read().await;
+                (state.camera_feed.clone(), state.selected_camera_id.clone())
+            };
+
+            let camera_lock_result = camera_feed_actor.ask(camera::Lock).await;
+
+            let camera_feed_lock = match camera_lock_result {
+                Ok(lock) => Some(lock),
+                Err(SendError::HandlerError(camera::LockFeedError::NoInput)) => {
+                    if let Some(id) = selected_camera_id {
+                        info!(
+                            "Camera selected but not initialized, initializing: {:?}",
+                            id
+                        );
+                        match camera_feed_actor
+                            .ask(camera::SetInput { id: id.clone() })
+                            .await
+                        {
+                            Ok(fut) => match fut.await {
+                                Ok(_) => match camera_feed_actor.ask(camera::Lock).await {
+                                    Ok(lock) => Some(lock),
+                                    Err(e) => {
+                                        warn!("Failed to lock camera after initialization: {}", e);
+                                        None
+                                    }
+                                },
+                                Err(e) => {
+                                    warn!("Failed to initialize camera: {}", e);
+                                    None
+                                }
+                            },
+                            Err(e) => {
+                                warn!("Failed to ask SetInput: {}", e);
+                                None
+                            }
+                        }
+                    } else {
+                        None
+                    }
+                }
                 Err(e) => return Err(anyhow!(e.to_string())),
             };
 
+            let mut state = state_mtx.write().await;
+
+            let camera_feed = camera_feed_lock.map(Arc::new);
+
             state.camera_in_use = camera_feed.is_some();
 
             #[cfg(target_os = "macos")]
@@ -1087,6 +1073,9 @@ async fn handle_recording_end(
     }
     let _ = app.mic_feed.ask(microphone::RemoveInput).await;
     let _ = app.camera_feed.ask(camera::RemoveInput).await;
+    app.selected_mic_label = None;
+    app.selected_camera_id = None;
+    app.camera_in_use = false;
     if let Some(win) = CapWindowId::Camera.get(&handle) {
         win.close().ok();
     }
diff --git a/apps/desktop/src-tauri/src/windows.rs b/apps/desktop/src-tauri/src/windows.rs
index 3148ad0b85..3d567e5797 100644
--- a/apps/desktop/src-tauri/src/windows.rs
+++ b/apps/desktop/src-tauri/src/windows.rs
@@ -21,13 +21,14 @@ use tokio::sync::RwLock;
 use tracing::{debug, error, instrument, warn};
 
 use crate::{
-    App, ArcLock, RequestScreenCapturePrewarm, apply_camera_input, apply_mic_input, fake_window,
+    App, ArcLock, RequestScreenCapturePrewarm, fake_window,
     general_settings::{self, AppTheme, GeneralSettingsStore},
     permissions,
-    recording_settings::{RecordingSettingsStore, RecordingTargetMode},
+    recording_settings::RecordingTargetMode,
     target_select_overlay::WindowFocusManager,
     window_exclusion::WindowExclusion,
 };
+use cap_recording::feeds;
 
 #[cfg(target_os = "macos")]
 const DEFAULT_TRAFFIC_LIGHTS_INSET: LogicalPosition<f64> = LogicalPosition::new(12.0, 12.0);
@@ -282,8 +283,6 @@ impl ShowCapWindow {
             crate::platform::set_window_level(window.as_ref().window(), 50);
         }
 
-        restore_recording_inputs_if_idle(app);
-
         #[cfg(target_os = "macos")]
         {
             let app_handle = app.clone();
@@ -502,6 +501,13 @@ impl ShowCapWindow {
         let window = window_builder.build()?;
 
         if enable_native_camera_preview {
+            if let Some(id) = state.selected_camera_id.clone()
+                && !state.camera_in_use
+            {
+                let _ = state.camera_feed.ask(feeds::camera::SetInput { id }).await;
+                state.camera_in_use = true;
+            }
+
             let camera_feed = state.camera_feed.clone();
             if let Err(err) = state
                 .camera_preview
@@ -799,48 +805,6 @@ impl ShowCapWindow {
     }
 }
 
-fn restore_recording_inputs_if_idle(app: &AppHandle) {
-    let settings = match RecordingSettingsStore::get(app) {
-        Ok(Some(settings)) => settings,
-        Ok(None) => return,
-        Err(err) => {
-            warn!(%err, "Failed to load recording settings while restoring inputs");
-            return;
-        }
-    };
-
-    let mic_name = settings.mic_name.clone();
-    let camera_id = settings.camera_id.clone();
-
-    if mic_name.is_none() && camera_id.is_none() {
-        return;
-    }
-
-    let app_handle = app.clone();
-    let state = app_handle.state::<ArcLock<App>>();
-    let app_state = state.inner().clone();
-
-    tauri::async_runtime::spawn(async move {
-        if app_state.read().await.is_recording_active_or_pending() {
-            return;
-        }
-
-        if let Some(mic) = mic_name {
-            match apply_mic_input(app_handle.state(), Some(mic)).await {
-                Err(err) => warn!(%err, "Failed to restore microphone input"),
-                Ok(_) => {}
-            }
-        }
-
-        if let Some(camera) = camera_id {
-            match apply_camera_input(app_handle.clone(), app_handle.state(), Some(camera)).await {
-                Err(err) => warn!(%err, "Failed to restore camera input"),
-                Ok(_) => {}
-            }
-        }
-    });
-}
-
 #[cfg(target_os = "macos")]
 fn add_traffic_lights(window: &WebviewWindow, controls_inset: Option<LogicalPosition<f64>>) {
     use crate::platform::delegates;
diff --git a/apps/desktop/src/routes/(window-chrome)/settings/experimental.tsx b/apps/desktop/src/routes/(window-chrome)/settings/experimental.tsx
index 5c8e47264d..1f9f8371fe 100644
--- a/apps/desktop/src/routes/(window-chrome)/settings/experimental.tsx
+++ b/apps/desktop/src/routes/(window-chrome)/settings/experimental.tsx
@@ -1,3 +1,4 @@
+import { type } from "@tauri-apps/plugin-os";
 import { createResource, Show } from "solid-js";
 import { createStore } from "solid-js/store";
 
@@ -62,14 +63,16 @@ function Inner(props: { initialStore: GeneralSettingsStore | null }) {
						handleChange("custom_cursor_capture2", value)
					}
				/>
-
-					handleChange("enableNativeCameraPreview", value)
-				}
-				/>
+				{type() !== "windows" && (
+
+						handleChange("enableNativeCameraPreview", value)
+					}
+					/>
+				)}

 }) {
	const [state, setState] = makePersisted(
-		createStore<CameraWindow.State>({
-			size: "sm",
+		createStore<CameraWindowState>({
+			size: CAMERA_DEFAULT_SIZE,
			shape: "round",
			mirrored: false,
		}),
		{ name: "cameraWindowState" },
	);

-	createEffect(() => commands.setCameraPreviewState(state));
+	const [isResizing, setIsResizing] = createSignal(false);
+	const [resizeStart, setResizeStart] = createSignal({
+		size: 0,
+		x: 0,
+		y: 0,
+		corner: "",
+	});
+
+	createEffect(() => {
+		const clampedSize = Math.max(
+			CAMERA_MIN_SIZE,
+			Math.min(CAMERA_MAX_SIZE, state.size),
+		);
+		if (clampedSize !== state.size) {
+			setState("size", clampedSize);
+		}
+		commands.setCameraPreviewState(state);
+	});

	const [cameraPreviewReady] = createResource(() =>
		commands.awaitCameraPreviewReady(),
	);

+	const setCamera = createCameraMutation();
+
+	const scale = () => {
+		const normalized =
+			(state.size - CAMERA_MIN_SIZE) / (CAMERA_MAX_SIZE - CAMERA_MIN_SIZE);
+		return 0.7 + normalized * 0.3;
+	};
+
+	const handleResizeStart = (corner: string) => (e: MouseEvent) => {
+		e.preventDefault();
+		e.stopPropagation();
+		setIsResizing(true);
+		setResizeStart({ size: state.size, x: e.clientX, y: e.clientY, corner });
+	};
+
+	const handleResizeMove = (e: MouseEvent) => {
+		if (!isResizing()) return;
+		const start = resizeStart();
+		const deltaX = e.clientX - start.x;
+		const deltaY = e.clientY - start.y;
+
+		let delta = 0;
+		if (start.corner.includes("e") && start.corner.includes("s")) {
+			delta = Math.max(deltaX, deltaY);
+		} else if (start.corner.includes("e") && start.corner.includes("n")) {
+			delta = Math.max(deltaX, -deltaY);
+		} else if (start.corner.includes("w") && start.corner.includes("s")) {
+			delta = Math.max(-deltaX, deltaY);
+		} else if (start.corner.includes("w") && start.corner.includes("n")) {
+			delta = Math.max(-deltaX, -deltaY);
+		} else if (start.corner.includes("e")) {
+			delta = deltaX;
+		} else if (start.corner.includes("w")) {
+			delta = -deltaX;
+		} else if (start.corner.includes("s")) {
+			delta = deltaY;
+		} else if (start.corner.includes("n")) {
+			delta = -deltaY;
+		}
+
+		const newSize = Math.max(
+			CAMERA_MIN_SIZE,
+			Math.min(CAMERA_MAX_SIZE, start.size + delta),
+		);
+		setState("size", newSize);
+	};
+
+	const handleResizeEnd = () => {
+		setIsResizing(false);
+	};
+
+	createEffect(() => {
+		if (isResizing()) {
+			window.addEventListener("mousemove", handleResizeMove);
+			window.addEventListener("mouseup", handleResizeEnd);
+			onCleanup(() => {
+				window.removeEventListener("mousemove", handleResizeMove);
+				window.removeEventListener("mouseup", handleResizeEnd);
+			});
+		}
+	});
+
	return (
 }) {
-
-				void getCurrentWindow().close()}>
+
+				getCurrentWindow().close()}>
 = CAMERA_PRESET_LARGE}
					onClick={() => {
-						setState("size", (s) => (s === "sm" ? "lg" : "sm"));
+						setState(
+							"size",
+							state.size < CAMERA_PRESET_LARGE
+								? CAMERA_PRESET_LARGE
+								: CAMERA_PRESET_SMALL,
+						);
					}}
				>
@@ -133,6 +226,27 @@ function NativeCameraPreviewPage(props: { disconnected: Accessor<boolean> }) {
+
+
+
+
+				{/* The camera preview is rendered in Rust by wgpu */}
@@ -163,14 +277,22 @@ function LegacyCameraPreviewPage(props: { disconnected: Accessor<boolean> }) {
	const { rawOptions } = useRecordingOptions();
	const [state, setState] = makePersisted(
-		createStore<CameraWindow.State>({
-			size: "sm",
+		createStore<CameraWindowState>({
+			size: CAMERA_DEFAULT_SIZE,
			shape: "round",
			mirrored: false,
		}),
		{ name: "cameraWindowState" },
	);

+	const [isResizing, setIsResizing] = createSignal(false);
+	const [resizeStart, setResizeStart] = createSignal({
+		size: 0,
+		x: 0,
+		y: 0,
+		corner: "",
+	});
+
	const [latestFrame, setLatestFrame] = createLazySignal<{
		width: number;
		data: ImageData;
@@ -224,6 +346,66 @@ function LegacyCameraPreviewPage(props: { disconnected: Accessor<boolean> }) {
		ws.close();
	});

+	const scale = () => {
+		const normalized =
+			(state.size - CAMERA_MIN_SIZE) / (CAMERA_MAX_SIZE - CAMERA_MIN_SIZE);
+		return 0.7 + normalized * 0.3;
+	};
+
+	const handleResizeStart = (corner: string) => (e: MouseEvent) => {
+		e.preventDefault();
+		e.stopPropagation();
+		setIsResizing(true);
+		setResizeStart({ size: state.size, x: e.clientX, y: e.clientY, corner });
+	};
+
+	const handleResizeMove = (e: MouseEvent) => {
+		if (!isResizing()) return;
+		const start = resizeStart();
+		const deltaX = e.clientX - start.x;
+		const deltaY = e.clientY - start.y;
+
+		let delta = 0;
+		if (start.corner.includes("e") && start.corner.includes("s")) {
+			delta = Math.max(deltaX, deltaY);
+		} else if (start.corner.includes("e") && start.corner.includes("n")) {
+			delta = Math.max(deltaX, -deltaY);
+		} else if (start.corner.includes("w") && start.corner.includes("s")) {
+			delta = Math.max(-deltaX, deltaY);
+		} else if (start.corner.includes("w") && start.corner.includes("n")) {
+			delta = Math.max(-deltaX, -deltaY);
+		} else if (start.corner.includes("e")) {
+			delta = deltaX;
+		} else if (start.corner.includes("w")) {
+			delta = -deltaX;
+		} else if (start.corner.includes("s")) {
+			delta = deltaY;
+		} else if (start.corner.includes("n")) {
+			delta = -deltaY;
+		}
+
+		const newSize = Math.max(
+			CAMERA_MIN_SIZE,
+			Math.min(CAMERA_MAX_SIZE, start.size + delta),
+		);
+		setState("size", newSize);
+	};
+
+	const handleResizeEnd = () => {
+		setIsResizing(false);
+	};
+
+	createEffect(() => {
+		if (isResizing()) {
+			window.addEventListener("mousemove", handleResizeMove);
+			window.addEventListener("mouseup", handleResizeEnd);
+			onCleanup(() => {
+				window.removeEventListener("mousemove", handleResizeMove);
+				window.removeEventListener("mouseup", handleResizeEnd);
+			});
+		}
+	});
+
	const [windowSize] = createResource(
		() =>
			[
			const monitor = await currentMonitor();

			const BAR_HEIGHT = 56;
-			const base = size === "sm" ? 230 : 400;
+			const base = Math.max(CAMERA_MIN_SIZE, Math.min(CAMERA_MAX_SIZE, size));
			const aspect = frameWidth && frameHeight ? frameWidth / frameHeight : 1;
			const windowWidth =
				shape === "full" ? (aspect >= 1 ? base * aspect : base) : base;
@@ -265,6 +447,8 @@ function LegacyCameraPreviewPage(props: { disconnected: Accessor<boolean> }) {

	let cameraCanvasRef: HTMLCanvasElement | undefined;

+	const setCamera = createCameraMutation();
+
	createEffect(
		on(
			() => rawOptions.cameraLabel,
@@ -288,14 +472,22 @@ function LegacyCameraPreviewPage(props: { disconnected: Accessor<boolean> }) {
-
-				void getCurrentWindow().close()}>
+
+				getCurrentWindow().close()}>
 = CAMERA_PRESET_LARGE}
					onClick={() => {
-						setState("size", (s) => (s === "sm" ? "lg" : "sm"));
+						setState(
+							"size",
+							state.size < CAMERA_PRESET_LARGE
+								? CAMERA_PRESET_LARGE
+								: CAMERA_PRESET_SMALL,
+						);
					}}
				>
@@ -323,6 +515,26 @@ function LegacyCameraPreviewPage(props: { disconnected: Accessor<boolean> }) {
+
+
+
+
 }) {
		const aspectRatio = latestFrame().data.width / latestFrame().data.height;

-		const base = windowSize.latest?.size ?? 0;
-		const winWidth = windowSize.latest?.windowWidth ?? base;
-		const winHeight = windowSize.latest?.windowHeight ?? base;
+		// Use state.size directly for immediate feedback
+		const base = state.size;
+
+		// Replicate window size logic synchronously for the canvas
+		const winWidth =
+			state.shape === "full"
+				? aspectRatio >= 1
+					? base * aspectRatio
+					: base
+				: base;
+		const winHeight =
+			state.shape === "full"
+				? aspectRatio >= 1
+					? base
+					: base / aspectRatio
+				: base;

		if (state.shape === "full") {
			return {
@@ -400,10 +625,12 @@ function CameraLoadingState() {
	);
 }

-function cameraBorderRadius(state: CameraWindow.State) {
+function cameraBorderRadius(state: CameraWindowState) {
	if (state.shape === "round") return "9999px";
-	if (state.size === "sm") return "3rem";
-	return "4rem";
+	const normalized =
+		(state.size - CAMERA_MIN_SIZE) / (CAMERA_MAX_SIZE - CAMERA_MIN_SIZE);
+	const radius = 3 + normalized * 1.5;
+	return `${radius}rem`;
 }

 function CameraDisconnectedOverlay() {
diff --git a/apps/desktop/src/routes/editor/Timeline/ClipTrack.tsx b/apps/desktop/src/routes/editor/Timeline/ClipTrack.tsx
index af712758a1..0d1fcc13fd 100644
--- a/apps/desktop/src/routes/editor/Timeline/ClipTrack.tsx
+++ b/apps/desktop/src/routes/editor/Timeline/ClipTrack.tsx
@@ -30,6 +30,69 @@ import {
	useSegmentWidth,
 } from "./Track";

+const CANVAS_HEIGHT = 52;
+const WAVEFORM_MIN_DB = -60;
+const WAVEFORM_SAMPLE_STEP = 0.1;
+const WAVEFORM_CONTROL_STEP = 0.05;
+const WAVEFORM_PADDING_SECONDS = 0.3;
+
+function gainToScale(gain?: number) {
+	if (!Number.isFinite(gain)) return 1;
+	const value = gain as number;
+	if (value <= WAVEFORM_MIN_DB) return 0;
+	return Math.max(0, 1 + value / -WAVEFORM_MIN_DB);
+}
+
+function createWaveformPath(
+	segment: { start: number; end: number },
+	waveform?: number[],
+) {
+	if (typeof Path2D === "undefined") return;
+	if (!waveform || waveform.length === 0) return;
+
+	const duration = Math.max(segment.end - segment.start, WAVEFORM_SAMPLE_STEP);
+	if (!Number.isFinite(duration) || duration <= 0) return;
+
+	const path = new Path2D();
+	path.moveTo(0, 1);
+
+	const amplitudeAt = (index: number) => {
+		const sample = waveform[index];
+		const db =
+			typeof sample === "number" && Number.isFinite(sample)
+				? sample
+				: WAVEFORM_MIN_DB;
+		const clamped = Math.max(db, WAVEFORM_MIN_DB);
+		const amplitude = 1 + clamped / -WAVEFORM_MIN_DB;
+		return Math.min(Math.max(amplitude, 0), 1);
+	};
+
+	const controlStep = Math.min(WAVEFORM_CONTROL_STEP / duration, 0.25);
+
+	for (
+		let time = segment.start;
+		time <= segment.end + WAVEFORM_SAMPLE_STEP;
+		time += WAVEFORM_SAMPLE_STEP
+	) {
+		const index = Math.floor(time * 10);
+		const normalizedX = (index / 10 - segment.start) / duration;
+		const prevX =
+			(index / 10 - WAVEFORM_SAMPLE_STEP - segment.start) / duration;
+		const y = 1 - amplitudeAt(index);
+		const prevY = 1 - amplitudeAt(index - 1);
+		const cpX1 = prevX + controlStep / 2;
+		const cpX2 = normalizedX - controlStep / 2;
+		path.bezierCurveTo(cpX1, prevY, cpX2, y, normalizedX, y);
+	}
+
+	const closingX =
+		(segment.end + WAVEFORM_PADDING_SECONDS - segment.start) / duration;
+	path.lineTo(closingX, 1);
+	path.closePath();
+
+	return path;
+}
+
 function formatTime(totalSeconds: number): string {
	const hours = Math.floor(totalSeconds / 3600);
	const minutes = Math.floor((totalSeconds % 3600) / 60);
@@ -48,115 +111,61 @@ function WaveformCanvas(props: {
	systemWaveform?: number[];
	micWaveform?: number[];
	segment: { start: number; end: number };
-	secsPerPixel: number;
 }) {
	const { project } = useEditorContext();
-
-	let canvas: HTMLCanvasElement | undefined;
	const { width } = useSegmentContext();
-	const { secsPerPixel } = useTimelineContext();
-
-	const render = (
-		ctx: CanvasRenderingContext2D,
-		h: number,
-		waveform: number[],
-		color: string,
-		gain = 0,
-	) => {
-		const maxAmplitude = h;
-
-		// yellow please
-		ctx.fillStyle = color;
-		ctx.beginPath();
-
-		const step = 0.05 / secsPerPixel();
-
-		ctx.moveTo(0, h);
-
-		const norm = (w: number) => {
-			const ww = Number.isFinite(w) ? w : -60;
-			return 1.0 - Math.max(ww + gain, -60) / -60;
-		};
-
-		for (
-			let segmentTime = props.segment.start;
-			segmentTime <= props.segment.end + 0.1;
-			segmentTime += 0.1
-		) {
-			const index = Math.floor(segmentTime * 10);
-			const xTime = index / 10;
-
-			const currentDb =
-				typeof waveform[index] === "number" ? waveform[index] : -60;
-			const amplitude = norm(currentDb) * maxAmplitude;
-
-			const x = (xTime - props.segment.start) / secsPerPixel();
-			const y = h - amplitude;
-
-			const prevX = (xTime - 0.1 - props.segment.start) / secsPerPixel();
-			const prevDb =
-				typeof waveform[index - 1] === "number" ? waveform[index - 1] : -60;
-			const prevAmplitude = norm(prevDb) * maxAmplitude;
-			const prevY = h - prevAmplitude;
-
-			const cpX1 = prevX + step / 2;
-			const cpX2 = x - step / 2;
-
-			ctx.bezierCurveTo(cpX1, prevY, cpX2, y, x, y);
-		}
-
-		ctx.lineTo(
-			(props.segment.end + 0.3 - props.segment.start) / secsPerPixel(),
-			h,
-		);
+	const segmentRange = createMemo(() => ({
+		start: props.segment.start,
+		end: props.segment.end,
+	}));
+	const micPath = createMemo(() =>
+		createWaveformPath(segmentRange(), props.micWaveform),
+	);
+	const systemPath = createMemo(() =>
+		createWaveformPath(segmentRange(), props.systemWaveform),
+	);

-		ctx.closePath();
-		ctx.fill();
-	};
+	let canvas: HTMLCanvasElement | undefined;

-	function renderWaveforms() {
+	createEffect(() => {
		if (!canvas) return;
		const ctx = canvas.getContext("2d");
		if (!ctx) return;

-		const w = width();
-		if (w <= 0) return;
-
-		const h = canvas.height;
-		canvas.width = w;
-		ctx.clearRect(0, 0, w, h);
-
-		if (props.micWaveform)
-			render(
-				ctx,
-				h,
-				props.micWaveform,
-				"rgba(255,255,255,0.4)",
-				project.audio.micVolumeDb,
-			);
-
-		if (props.systemWaveform)
-			render(
-				ctx,
-				h,
-				props.systemWaveform,
-				"rgba(255,150,0,0.5)",
-				project.audio.systemVolumeDb,
-			);
-	}
+		const canvasWidth = Math.max(width(), 1);
+		canvas.width = canvasWidth;
+		const canvasHeight = canvas.height;
+		ctx.clearRect(0, 0, canvasWidth, canvasHeight);
+
+		const drawPath = (
+			path: Path2D | undefined,
+			color: string,
+			gain?: number,
+		) => {
+			if (!path) return;
+			const scale = gainToScale(gain);
+			if (scale <= 0) return;
+			ctx.save();
+			ctx.translate(0, -1);
+			ctx.scale(1, scale);
+			ctx.translate(0, 1);
+			ctx.scale(canvasWidth, canvasHeight);
+			ctx.fillStyle = color;
+			ctx.fill(path);
+			ctx.restore();
+		};

-	createEffect(() => {
-		renderWaveforms();
+		drawPath(micPath(), "rgba(255,255,255,0.4)", project.audio.micVolumeDb);
+		drawPath(systemPath(), "rgba(255,150,0,0.5)", project.audio.systemVolumeDb);
	});

	return (
			{
				canvas = el;
-				renderWaveforms();
			}}
			class="absolute inset-0 w-full h-full pointer-events-none"
-			height={52}
+			height={CANVAS_HEIGHT}
		/>
	);
 }
@@ -184,6 +193,17 @@ export function ClipTrack(
	const segments = (): Array =>
		project.timeline?.segments ?? [{ start: 0, end: duration(), timescale: 1 }];

+	const segmentOffsets = createMemo(() => {
+		const segs = segments();
+		const offsets: number[] = new Array(segs.length);
+		let sum = 0;
+		for (let idx = 0; idx < segs.length; idx++) {
+			offsets[idx] = sum;
+			sum += (segs[idx].end - segs[idx].start) / segs[idx].timescale;
+		}
+		return offsets;
+	});
+
	function onHandleReleased() {
		const { transform } = editorState.timeline;

@@ -210,17 +230,7 @@ export function ClipTrack(
		initialStart: number;
	}>(null);

-	const prefixOffsets = createMemo(() => {
-		const segs = segments();
-		const out: number[] = new Array(segs.length);
-		let sum = 0;
-		for (let k = 0; k < segs.length; k++) {
-			out[k] = sum;
-			sum += (segs[k].end - segs[k].start) / segs[k].timescale;
-		}
-		return out;
-	});
-	const prevDuration = createMemo(() => prefixOffsets()[i()] ?? 0);
+	const prevDuration = createMemo(() => segmentOffsets()[i()] ?? 0);

	const relativeSegment = createMemo(() => {
		const ds = startHandleDrag();
@@ -481,7 +491,6 @@ export function ClipTrack(
							micWaveform={micWaveform()}
							systemWaveform={systemAudioWaveform()}
							segment={segment}
-							secsPerPixel={secsPerPixel()}
						/>
					)}
diff --git a/apps/desktop/src/routes/editor/context.ts b/apps/desktop/src/routes/editor/context.ts
index f9e3d1142a..05c880a9e8 100644
--- a/apps/desktop/src/routes/editor/context.ts
+++ b/apps/desktop/src/routes/editor/context.ts
@@ -6,7 +6,6 @@ import { createContextProvider } from "@solid-primitives/context";
 import { trackStore } from "@solid-primitives/deep";
 import { createEventListener } from "@solid-primitives/event-listener";
 import { createUndoHistory } from "@solid-primitives/history";
-import { debounce } from "@solid-primitives/scheduled";
 import { createQuery, skipToken } from "@tanstack/solid-query";
 import {
	type Accessor,
@@ -15,6 +14,7 @@ import {
	createResource,
	createSignal,
	on,
+	onCleanup,
 } from "solid-js";
 import { createStore, produce, reconcile, unwrap } from "solid-js/store";

@@ -51,6 +51,7 @@ export const OUTPUT_SIZE = {
 };

 export const MAX_ZOOM_IN = 3;
+const PROJECT_SAVE_DEBOUNCE_MS = 250;

 export type RenderState =
	| { type: "starting" }
@@ -295,14 +296,62 @@ export const [EditorContextProvider, useEditorContext] = createContextProvider(
			},
		};

+		let projectSaveTimeout: number | undefined;
+		let saveInFlight = false;
+		let shouldResave = false;
+		let hasPendingProjectSave = false;
+
+		const flushProjectConfig = async () => {
+			if (!hasPendingProjectSave && !saveInFlight) return;
+			if (saveInFlight) {
+				if (hasPendingProjectSave) {
+					shouldResave = true;
+				}
+				return;
+			}
+			saveInFlight = true;
+			shouldResave = false;
+			hasPendingProjectSave = false;
+			try {
+				await commands.setProjectConfig(serializeProjectConfiguration(project));
+			} catch (error) {
+				console.error("Failed to persist project config", error);
+			} finally {
+				saveInFlight = false;
+				if (shouldResave) {
+					shouldResave = false;
+					void flushProjectConfig();
+				}
+			}
+		};
+
+		const scheduleProjectConfigSave = () => {
+			hasPendingProjectSave = true;
+			if (projectSaveTimeout) {
+				clearTimeout(projectSaveTimeout);
+			}
+			projectSaveTimeout = window.setTimeout(() => {
+				projectSaveTimeout = undefined;
+				void flushProjectConfig();
+			}, PROJECT_SAVE_DEBOUNCE_MS);
+		};
+
+		onCleanup(() => {
+			if (projectSaveTimeout) {
+				clearTimeout(projectSaveTimeout);
+				projectSaveTimeout = undefined;
+			}
+			void flushProjectConfig();
+		});
+
		createEffect(
			on(
				() => {
					trackStore(project);
				},
-				debounce(() => {
-					commands.setProjectConfig(serializeProjectConfiguration(project));
-				}),
+				() => {
+					scheduleProjectConfigSave();
+				},
				{ defer: true },
			),
		);
diff --git a/apps/desktop/src/routes/in-progress-recording.tsx b/apps/desktop/src/routes/in-progress-recording.tsx
index 351cc6604d..b4cbd007ad 100644
--- a/apps/desktop/src/routes/in-progress-recording.tsx
+++ b/apps/desktop/src/routes/in-progress-recording.tsx
@@ -22,7 +22,6 @@ import {
	Show,
 } from "solid-js";
 import { createStore, produce } from "solid-js/store";
-import createPresence from "solid-presence";
 import { authStore } from "~/store";
 import { createTauriEventListener } from "~/utils/createEventListener";
 import {
@@ -32,12 +31,14 @@ import {
 import { handleRecordingResult } from "~/utils/recording";
 import type {
	CameraInfo,
+	CurrentRecording,
	DeviceOrModelID,
	RecordingInputKind,
 } from "~/utils/tauri";
 import { commands, events } from "~/utils/tauri";

 type State =
+	| { variant: "initializing" }
	| { variant: "countdown"; from: number; current: number }
"countdown"; from: number; current: number } | { variant: "recording" } | { variant: "paused" } @@ -59,7 +60,7 @@ const FAKE_WINDOW_BOUNDS_NAME = "recording-controls-interactive-area"; export default function () { const [state, setState] = createSignal( window.COUNTDOWN === 0 - ? { variant: "recording" } + ? { variant: "initializing" } : { variant: "countdown", from: window.COUNTDOWN, @@ -189,6 +190,18 @@ export default function () { } }); + createEffect(() => { + if (state().variant === "initializing") { + const recording = currentRecording.data as CurrentRecording; + if (recording?.status === "recording") { + setDisconnectedInputs({ microphone: false, camera: false }); + setRecordingFailure(null); + setState({ variant: "recording" }); + setStart(Date.now()); + } + } + }); + createTimer( () => { if (state().variant !== "recording") return; @@ -467,7 +480,8 @@ export default function () { }; const adjustedTime = () => { - if (state().variant === "countdown") return 0; + if (state().variant === "countdown" || state().variant === "initializing") + return 0; let t = time() - start(); for (const { pause, resume } of pauseResumes) { if (pause && resume) t -= resume - pause; @@ -502,21 +516,12 @@ export default function () { return MAX_RECORDING_FOR_FREE - adjustedTime(); }; - const [countdownRef, setCountdownRef] = createSignal( - null, - ); - const showCountdown = () => state().variant === "countdown"; - const countdownPresence = createPresence({ - show: showCountdown, - element: countdownRef, - }); - const countdownState = createMemo< - Extract | undefined - >((prev) => { + const isInitializing = () => state().variant === "initializing"; + const isCountdown = () => state().variant === "countdown"; + const countdownCurrent = () => { const s = state(); - if (s.variant === "countdown") return s; - if (prev && countdownPresence.present()) return prev; - }); + return s.variant === "countdown" ? s.current : 0; + }; return (
@@ -540,24 +545,13 @@ export default function () {
-
-					{(state) => (
-
-					)}
-
+
@@ -622,7 +623,11 @@
				{canPauseRecording() && (
					togglePause.mutate()}
						title={
							state().variant === "paused"
								? "Resume recording"
								: "Pause recording"
						}
					>
				)}
				restartRecording.mutate()}
					title="Restart recording"
					aria-label="Restart recording"
				deleteRecording.mutate()}
					title="Delete recording"
					aria-label="Delete recording"
@@ -728,49 +733,6 @@ function createAudioInputLevel() {
	return level;
 }

-function Countdown(props: { from: number; current: number }) {
-	const [animation, setAnimation] = createSignal(1);
-	setTimeout(() => setAnimation(0), 10);
-
-	return (
-
-
-			Recording starting...
-
-
-
-
-
-			{props.current}
-
-
- ); -} - function cameraMatchesSelection( camera: CameraInfo, selected?: DeviceOrModelID | null, diff --git a/apps/desktop/src/utils/tauri.ts b/apps/desktop/src/utils/tauri.ts index 57ce4676f9..05762f6987 100644 --- a/apps/desktop/src/utils/tauri.ts +++ b/apps/desktop/src/utils/tauri.ts @@ -365,8 +365,7 @@ export type Camera = { hide: boolean; mirror: boolean; position: CameraPosition; export type CameraInfo = { device_id: string; model_id: ModelIDType | null; display_name: string } export type CameraPosition = { x: CameraXPosition; y: CameraYPosition } export type CameraPreviewShape = "round" | "square" | "full" -export type CameraPreviewSize = "sm" | "lg" -export type CameraPreviewState = { size: CameraPreviewSize; shape: CameraPreviewShape; mirrored: boolean } +export type CameraPreviewState = { size: number; shape: CameraPreviewShape; mirrored: boolean } export type CameraShape = "square" | "source" export type CameraXPosition = "left" | "center" | "right" export type CameraYPosition = "top" | "bottom" @@ -383,7 +382,7 @@ export type ClipOffsets = { camera?: number; mic?: number; system_audio?: number export type CommercialLicense = { licenseKey: string; expiryDate: number | null; refresh: number; activatedOn: number } export type CornerStyle = "squircle" | "rounded" export type Crop = { position: XY; size: XY } -export type CurrentRecording = { target: CurrentRecordingTarget; mode: RecordingMode } +export type CurrentRecording = { target: CurrentRecordingTarget; mode: RecordingMode; status: RecordingStatus } export type CurrentRecordingChanged = null export type CurrentRecordingTarget = { window: { id: WindowId; bounds: LogicalBounds } } | { screen: { id: DisplayId } } | { area: { screen: DisplayId; bounds: LogicalBounds } } export type CursorAnimationStyle = "slow" | "mellow" | "custom" @@ -456,6 +455,7 @@ export type RecordingMode = "studio" | "instant" export type RecordingOptionsChanged = null export type RecordingSettingsStore = { target: ScreenCaptureTarget | null; micName: string | null; cameraId: DeviceOrModelID | null; mode: RecordingMode | null; systemAudio: boolean; organizationId: string | null } export type RecordingStarted = null +export type RecordingStatus = "pending" | "recording" export type RecordingStopped = null export type RecordingTargetMode = "display" | "window" | "area" export type RenderFrameEvent = { frame_number: number; fps: number; resolution_base: XY } diff --git a/crates/project/src/configuration.rs b/crates/project/src/configuration.rs index c95f318a00..cd3bf369b0 100644 --- a/crates/project/src/configuration.rs +++ b/crates/project/src/configuration.rs @@ -401,8 +401,8 @@ pub enum CursorType { #[derive(Type, Serialize, Deserialize, Clone, Copy, Debug, Default, PartialEq, Eq)] #[serde(rename_all = "camelCase")] pub enum CursorAnimationStyle { - #[default] Slow, + #[default] #[serde(alias = "regular", alias = "quick", alias = "rapid", alias = "fast")] Mellow, Custom, diff --git a/crates/recording/examples/camera_stream.rs b/crates/recording/examples/camera_stream.rs deleted file mode 100644 index cfe2765711..0000000000 --- a/crates/recording/examples/camera_stream.rs +++ /dev/null @@ -1,56 +0,0 @@ -use cap_recording::feeds::camera::{self, CameraFeed, DeviceOrModelID}; -use kameo::Actor as _; -use std::time::Duration; -use tokio::{task::JoinHandle, time::Instant}; - -#[tokio::main] -async fn main() { - tracing_subscriber::fmt::init(); - - let camera_info = cap_camera::list_cameras() - .next() - .expect("no cameras detected"); - - println!("Using 
camera: {}", camera_info.display_name()); - - let camera_feed = CameraFeed::spawn(CameraFeed::default()); - - camera_feed - .ask(camera::SetInput { - id: DeviceOrModelID::from_info(&camera_info), - }) - .await - .expect("failed to request camera") - .await - .expect("failed to initialize camera"); - - let lock = camera_feed.ask(camera::Lock).await.expect("lock failed"); - - let (tx, rx) = flume::bounded(8); - lock.ask(camera::AddSender(tx)) - .await - .expect("add sender failed"); - - let reader: JoinHandle<()> = tokio::spawn(async move { - let start = Instant::now(); - let mut frames = 0usize; - - while start.elapsed() < Duration::from_secs(5) { - match rx.recv_async().await { - Ok(_frame) => { - frames += 1; - } - Err(err) => { - eprintln!("Channel closed: {err}"); - break; - } - } - } - - println!("Captured {frames} frames in 5 seconds"); - }); - - reader.await.expect("reader crashed"); - - drop(lock); -} diff --git a/crates/recording/src/feeds/camera.rs b/crates/recording/src/feeds/camera.rs index a7300ad734..e8baae2948 100644 --- a/crates/recording/src/feeds/camera.rs +++ b/crates/recording/src/feeds/camera.rs @@ -577,6 +577,7 @@ impl Message for CameraFeed { type Reply = (); async fn handle(&mut self, msg: AddSender, _: &mut Context) -> Self::Reply { + debug!("CameraFeed: Adding new sender"); self.senders.push(msg.0); } } @@ -592,8 +593,7 @@ impl Message for CameraFeed { match self.state { State::Locked { .. } | State::Open(OpenState { - connecting: None, - attached: Some(..), + connecting: None, .. }) => { msg.0.send(()).ok(); } @@ -625,6 +625,10 @@ impl Message for CameraFeed { for (i, sender) in self.senders.iter().enumerate() { if let Err(flume::TrySendError::Disconnected(_)) = sender.try_send(msg.0.clone()) { warn!("Camera sender {} disconnected, will be removed", i); + info!( + "Camera sender {} disconnected (rx dropped), removing from list", + i + ); to_remove.push(i); }; } @@ -743,6 +747,10 @@ impl Message for CameraFeed { && connecting.id == msg.id { state.connecting = None; + + for tx in &mut self.on_ready.drain(..) { + tx.send(()).ok(); + } } Ok(()) diff --git a/crates/recording/src/output_pipeline/core.rs b/crates/recording/src/output_pipeline/core.rs index c91af5e677..1c5ff7b8bc 100644 --- a/crates/recording/src/output_pipeline/core.rs +++ b/crates/recording/src/output_pipeline/core.rs @@ -429,7 +429,7 @@ async fn setup_muxer( fn spawn_video_encoder, TVideo: VideoSource>( setup_ctx: &mut SetupCtx, mut video_source: TVideo, - video_rx: mpsc::Receiver, + mut video_rx: mpsc::Receiver, first_tx: oneshot::Sender, stop_token: CancellationToken, muxer: Arc>, @@ -450,53 +450,39 @@ fn spawn_video_encoder, TVideo: V } }); - setup_ctx.tasks().spawn("mux-video", { - let stop_token_on_close = stop_token.clone(); - async move { - use futures::StreamExt; + setup_ctx.tasks().spawn("mux-video", async move { + use futures::StreamExt; - let mut first_tx = Some(first_tx); - let cancelled = stop_token.cancelled_owned(); - tokio::pin!(cancelled); - let mut video_rx = video_rx.fuse(); + let mut first_tx = Some(first_tx); - loop { - tokio::select! 
{ - _ = &mut cancelled => { - break; - } - maybe_frame = video_rx.next() => { - match maybe_frame { - Some(frame) => { - let timestamp = frame.timestamp(); - - if let Some(first_tx) = first_tx.take() { - let _ = first_tx.send(timestamp); - } - - muxer - .lock() - .await - .send_video_frame(frame, timestamp.duration_since(timestamps)) - .map_err(|e| anyhow!("Error queueing video frame: {e}"))?; - } - None => { - warn!( - video_source = %std::any::type_name::(), - "Video mux channel closed before cancellation; cancelling pipeline" - ); - stop_token_on_close.cancel(); - break; - } - } + let res = stop_token + .run_until_cancelled(async { + while let Some(frame) = video_rx.next().await { + let timestamp = frame.timestamp(); + + if let Some(first_tx) = first_tx.take() { + let _ = first_tx.send(timestamp); } + + muxer + .lock() + .await + .send_video_frame(frame, timestamp.duration_since(timestamps)) + .map_err(|e| anyhow!("Error queueing video frame: {e}"))?; } - } - muxer.lock().await.stop(); + info!("mux-video stream ended (rx closed)"); + Ok::<(), anyhow::Error>(()) + }) + .await; - Ok(()) + if res.is_none() { + info!("mux-video cancelled"); } + + muxer.lock().await.stop(); + + Ok(()) }); } diff --git a/crates/recording/src/output_pipeline/ffmpeg.rs b/crates/recording/src/output_pipeline/ffmpeg.rs index 37ec3b8269..8453479de1 100644 --- a/crates/recording/src/output_pipeline/ffmpeg.rs +++ b/crates/recording/src/output_pipeline/ffmpeg.rs @@ -28,8 +28,6 @@ pub struct Mp4Muxer { output: ffmpeg::format::context::Output, video_encoder: Option, audio_encoder: Option, - video_frame_duration: Option, - last_video_ts: Option, } impl Muxer for Mp4Muxer { @@ -48,16 +46,10 @@ impl Muxer for Mp4Muxer { { let mut output = ffmpeg::format::output(&output_path)?; - let (video_encoder, video_frame_duration) = match video_config { - Some(config) => { - let duration = Self::frame_duration(&config); - let encoder = H264Encoder::builder(config) - .build(&mut output) - .context("video encoder")?; - (Some(encoder), Some(duration)) - } - None => (None, None), - }; + let video_encoder = video_config + .map(|video_config| H264Encoder::builder(video_config).build(&mut output)) + .transpose() + .context("video encoder")?; let audio_encoder = audio_config .map(|config| AACEncoder::init(config, &mut output)) @@ -70,8 +62,6 @@ impl Muxer for Mp4Muxer { output, video_encoder, audio_encoder, - video_frame_duration, - last_video_ts: None, }) } @@ -106,19 +96,9 @@ impl VideoMuxer for Mp4Muxer { fn send_video_frame( &mut self, frame: Self::VideoFrame, - mut timestamp: Duration, + timestamp: Duration, ) -> anyhow::Result<()> { if let Some(video_encoder) = self.video_encoder.as_mut() { - if let Some(frame_duration) = self.video_frame_duration { - if let Some(last_ts) = self.last_video_ts - && timestamp <= last_ts - { - timestamp = last_ts + frame_duration; - } - - self.last_video_ts = Some(timestamp); - } - video_encoder.queue_frame(frame.inner, timestamp, &mut self.output)?; } @@ -126,17 +106,6 @@ impl VideoMuxer for Mp4Muxer { } } -impl Mp4Muxer { - fn frame_duration(info: &VideoInfo) -> Duration { - let num = info.frame_rate.numerator().max(1); - let den = info.frame_rate.denominator().max(1); - - let nanos = ((den as u128 * 1_000_000_000u128) / num as u128).max(1); - - Duration::from_nanos(nanos as u64) - } -} - impl AudioMuxer for Mp4Muxer { fn send_audio_frame(&mut self, frame: AudioFrame, timestamp: Duration) -> anyhow::Result<()> { if let Some(audio_encoder) = self.audio_encoder.as_mut() { diff --git 
a/crates/recording/src/sources/camera.rs b/crates/recording/src/sources/camera.rs index f26a8c5f5c..30ba67392d 100644 --- a/crates/recording/src/sources/camera.rs +++ b/crates/recording/src/sources/camera.rs @@ -7,10 +7,27 @@ use anyhow::anyhow; use cap_media_info::VideoInfo; use futures::{SinkExt, channel::mpsc}; use std::sync::Arc; -use tracing::{error, warn}; pub struct Camera(Arc); +struct LogDrop(T, &'static str); +impl Drop for LogDrop { + fn drop(&mut self) { + tracing::debug!("Dropping {}", self.1); + } +} +impl std::ops::Deref for LogDrop { + type Target = T; + fn deref(&self) -> &Self::Target { + &self.0 + } +} +impl std::ops::DerefMut for LogDrop { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + impl VideoSource for Camera { type Config = Arc; type Frame = FFmpegVideoFrame; @@ -23,45 +40,38 @@ impl VideoSource for Camera { where Self: Sized, { - let (tx, rx) = flume::bounded(8); + let (tx, rx) = flume::bounded(32); feed_lock .ask(camera::AddSender(tx)) .await .map_err(|e| anyhow!("Failed to add camera sender: {e}"))?; - tokio::spawn({ - let feed_lock = feed_lock.clone(); - async move { - let mut receiver = rx; + let mut video_tx = LogDrop(video_tx, "camera_video_tx"); - loop { - match receiver.recv_async().await { - Ok(frame) => { - if let Err(err) = video_tx.send(frame).await { - error!( - ?err, - "Camera pipeline receiver dropped; stopping camera forwarding" - ); - break; - } - } - Err(_) => { - let (tx, new_rx) = flume::bounded(8); - - if let Err(err) = feed_lock.ask(camera::AddSender(tx)).await { - warn!( - ?err, - "Camera sender disconnected and could not be reattached" - ); - break; - } - - receiver = new_rx; + tokio::spawn(async move { + tracing::debug!("Camera source task started"); + loop { + match rx.recv_async().await { + Ok(frame) => { + // tracing::trace!("Sending camera frame"); + if let Err(e) = video_tx.send(frame).await { + tracing::warn!("Failed to send to video pipeline: {e}"); + // If pipeline is closed, we should stop? + // But lets continue to keep rx alive for now to see if it helps, + // or maybe break? + // If we break, we disconnect from CameraFeed. + // If pipeline is closed, we SHOULD disconnect. + break; } } + Err(e) => { + tracing::debug!("Camera feed disconnected (rx closed): {e}"); + break; + } } } + tracing::debug!("Camera source task finished"); }); Ok(Self(feed_lock)) diff --git a/crates/recording/src/studio_recording.rs b/crates/recording/src/studio_recording.rs index 75ef41659c..79a7b19c27 100644 --- a/crates/recording/src/studio_recording.rs +++ b/crates/recording/src/studio_recording.rs @@ -855,7 +855,7 @@ async fn create_segment_pipeline( })) .await .transpose() - .context("microphone pipeline setup")?; + .context("system audio pipeline setup")?; let cursor = custom_cursor_capture .then(move || {
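
Note on the sizing model this patch introduces: `camera.rs` clamps the persisted `size` into `[MIN_CAMERA_SIZE, MAX_CAMERA_SIZE]`, normalizes it to 0–1 before handing it to the shader uniforms, and `camera.wgsl` interpolates its corner radii from that normalized value with `mix`. The following is a minimal standalone sketch of that mapping, not part of the patch itself; the constants mirror `camera.rs`, and `mix` here is a plain Rust stand-in for the WGSL builtin used by the "full" shape (`mix(0.08, 0.12, size)`):

```rust
const MIN_CAMERA_SIZE: f32 = 150.0;
const MAX_CAMERA_SIZE: f32 = 600.0;

/// Clamp a persisted size into the allowed pixel range (same as `clamp_size`).
fn clamp_size(size: f32) -> f32 {
    size.max(MIN_CAMERA_SIZE).min(MAX_CAMERA_SIZE)
}

/// Map a clamped pixel size onto 0..=1, as `update_state_uniforms` does.
fn normalized_size(size: f32) -> f32 {
    (clamp_size(size) - MIN_CAMERA_SIZE) / (MAX_CAMERA_SIZE - MIN_CAMERA_SIZE)
}

/// Linear interpolation, mirroring WGSL's `mix(a, b, t)`.
fn mix(a: f32, b: f32, t: f32) -> f32 {
    a + (b - a) * t
}

fn main() {
    // The default size of 230 px lands at t = (230 - 150) / 450 ≈ 0.178,
    // so the "full" shape corner radius is mix(0.08, 0.12, 0.178) ≈ 0.087.
    let t = normalized_size(230.0);
    println!("t = {t}, full-shape corner radius = {}", mix(0.08, 0.12, t));

    // Out-of-range stored values are clamped first: 1000 px behaves like 600 px.
    assert_eq!(normalized_size(1000.0), 1.0);
}
```

The same normalized value drives the TypeScript side (`scale()` and `cameraBorderRadius` recompute `(size - CAMERA_MIN_SIZE) / (CAMERA_MAX_SIZE - CAMERA_MIN_SIZE)`), so the window, canvas, and shader all agree on one 0–1 parameter instead of the old two-value `sm`/`lg` enum.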