From 8ae819422946826a1d5ac56a6cb0722fce13bc73 Mon Sep 17 00:00:00 2001
From: Ammar
Date: Tue, 2 Dec 2025 21:25:19 -0600
Subject: =?UTF-8?q?=F0=9F=A4=96=20fix:=20match=20waveform=20?=
 =?UTF-8?q?recording=20color=20to=20current=20mode?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Recording overlay border/background uses mode color (plan=blue, exec=purple)
- WaveformBars use bg-plan-mode or bg-exec-mode based on mode
- Recording text uses mode-light variant for better contrast
- VoiceInputButton receives mode prop for consistent styling
- Transcribing state remains amber (processing indicator)
---
 .../components/ChatInput/VoiceInputButton.tsx | 17 +++++++++++++++--
 src/browser/components/ChatInput/index.tsx    | 27 +++++++++++++++++++++++----
 2 files changed, 38 insertions(+), 6 deletions(-)

diff --git a/src/browser/components/ChatInput/VoiceInputButton.tsx b/src/browser/components/ChatInput/VoiceInputButton.tsx
index 288cfeaa6..0256e3de4 100644
--- a/src/browser/components/ChatInput/VoiceInputButton.tsx
+++ b/src/browser/components/ChatInput/VoiceInputButton.tsx
@@ -9,6 +9,7 @@ import { TooltipWrapper, Tooltip } from "../Tooltip";
 import { formatKeybind, KEYBINDS } from "@/browser/utils/ui/keybinds";
 import { cn } from "@/common/lib/utils";
 import type { VoiceInputState } from "@/browser/hooks/useVoiceInput";
+import type { UIMode } from "@/common/types/mode";
 
 interface VoiceInputButtonProps {
   state: VoiceInputState;
@@ -17,11 +18,18 @@ interface VoiceInputButtonProps {
   requiresSecureContext: boolean;
   onToggle: () => void;
   disabled?: boolean;
+  mode: UIMode;
+}
+
+function getRecordingColorClass(mode: UIMode): string {
+  return mode === "plan"
+    ? "text-plan-mode-light animate-pulse"
+    : "text-exec-mode-light animate-pulse";
 }
 
 const STATE_CONFIG: Record<VoiceInputState, { label: string; colorClass: string }> = {
   idle: { label: "Voice input", colorClass: "text-muted/50 hover:text-muted" },
-  recording: { label: "Stop recording", colorClass: "text-blue-500 animate-pulse" },
+  recording: { label: "Stop recording", colorClass: "" }, // handled dynamically
   transcribing: { label: "Transcribing...", colorClass: "text-amber-500" },
 };
 
@@ -32,6 +40,7 @@ export const VoiceInputButton: React.FC<VoiceInputButtonProps> = (props) => {
   const needsApiKey = !needsHttps && !props.isApiKeySet;
   const isDisabledReason = needsHttps || needsApiKey;
 
+  const stateConfig = STATE_CONFIG[props.state];
   const { label, colorClass } = isDisabledReason
     ? {
         label: needsHttps
@@ -39,7 +48,11 @@ export const VoiceInputButton: React.FC<VoiceInputButtonProps> = (props) => {
           : "Voice input (requires OpenAI API key)",
         colorClass: "text-muted/50",
       }
-    : STATE_CONFIG[props.state];
+    : {
+        label: stateConfig.label,
+        colorClass:
+          props.state === "recording" ? getRecordingColorClass(props.mode) : stateConfig.colorClass,
+      };
 
   const Icon = props.state === "transcribing" ? Loader2 : Mic;
   const isTranscribing = props.state === "transcribing";
diff --git a/src/browser/components/ChatInput/index.tsx b/src/browser/components/ChatInput/index.tsx
index 2f9005236..570c0cc95 100644
--- a/src/browser/components/ChatInput/index.tsx
+++ b/src/browser/components/ChatInput/index.tsx
@@ -1226,18 +1226,30 @@ export const ChatInput: React.FC<ChatInputProps> = (props) => {
             className={cn(
               "mb-1 flex min-h-[60px] w-full items-center justify-center gap-3 rounded-md border px-4 py-4 transition-all focus:outline-none",
               voiceInput.state === "recording"
-                ? "cursor-pointer border-blue-500 bg-blue-500/10"
+                ? mode === "plan"
+                  ?
"cursor-pointer border-plan-mode bg-plan-mode/10" + : "cursor-pointer border-exec-mode bg-exec-mode/10" : "cursor-wait border-amber-500 bg-amber-500/10" )} aria-label={voiceInput.state === "recording" ? "Stop recording" : "Transcribing..."} > {voiceInput.state === "recording" @@ -1245,7 +1257,13 @@ export const ChatInput: React.FC = (props) => { : "Transcribing..."} @@ -1283,6 +1301,7 @@ export const ChatInput: React.FC = (props) => { requiresSecureContext={voiceInput.requiresSecureContext} onToggle={voiceInput.toggle} disabled={disabled || isSending} + mode={mode} /> From ef76e7ca24d2ae13c1379a1eb06284cba03e0ffd Mon Sep 17 00:00:00 2001 From: Ammar Date: Tue, 2 Dec 2025 21:29:38 -0600 Subject: [PATCH 02/11] feat: integrate live audio waveform visualizer - Add react-audio-visualize for real-time audio visualization - Expose MediaRecorder from useVoiceInput hook for visualization - Create dedicated RecordingOverlay component with cleaner design - LiveAudioVisualizer shows dynamic bars responding to actual audio - Mode-colored visualization (plan=blue, exec=purple) - Simplified keyboard hint display with better formatting - Remove old static WaveformBars component --- bun.lock | 3 + package.json | 1 + .../components/ChatInput/RecordingOverlay.tsx | 103 ++++++++++++++++++ .../components/ChatInput/WaveformBars.tsx | 32 ------ src/browser/components/ChatInput/index.tsx | 56 ++-------- src/browser/hooks/useVoiceInput.ts | 7 ++ 6 files changed, 121 insertions(+), 81 deletions(-) create mode 100644 src/browser/components/ChatInput/RecordingOverlay.tsx delete mode 100644 src/browser/components/ChatInput/WaveformBars.tsx diff --git a/bun.lock b/bun.lock index a7826169d..3b61c7c0f 100644 --- a/bun.lock +++ b/bun.lock @@ -47,6 +47,7 @@ "motion": "^12.23.24", "ollama-ai-provider-v2": "^1.5.4", "openai": "^6.9.1", + "react-audio-visualize": "^1.2.0", "rehype-harden": "^1.1.5", "shescape": "^2.1.6", "source-map-support": "^0.5.21", @@ -3055,6 +3056,8 @@ "react": ["react@18.3.1", "", { "dependencies": { "loose-envify": "^1.1.0" } }, "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ=="], + "react-audio-visualize": ["react-audio-visualize@1.2.0", "", { "peerDependencies": { "react": ">=16.2.0", "react-dom": ">=16.2.0" } }, "sha512-rfO5nmT0fp23gjU0y2WQT6+ZOq2ZsuPTMphchwX1PCz1Di4oaIr6x7JZII8MLrbHdG7UB0OHfGONTIsWdh67kQ=="], + "react-compiler-runtime": ["react-compiler-runtime@1.0.0", "", { "peerDependencies": { "react": "^17.0.0 || ^18.0.0 || ^19.0.0 || ^0.0.0-experimental" } }, "sha512-rRfjYv66HlG8896yPUDONgKzG5BxZD1nV9U6rkm+7VCuvQc903C4MjcoZR4zPw53IKSOX9wMQVpA1IAbRtzQ7w=="], "react-dnd": ["react-dnd@16.0.1", "", { "dependencies": { "@react-dnd/invariant": "^4.0.1", "@react-dnd/shallowequal": "^4.0.1", "dnd-core": "^16.0.1", "fast-deep-equal": "^3.1.3", "hoist-non-react-statics": "^3.3.2" }, "peerDependencies": { "@types/hoist-non-react-statics": ">= 3.3.1", "@types/node": ">= 12", "@types/react": ">= 16", "react": ">= 16.14" }, "optionalPeers": ["@types/hoist-non-react-statics", "@types/node", "@types/react"] }, "sha512-QeoM/i73HHu2XF9aKksIUuamHPDvRglEwdHL4jsp784BgUuWcg6mzfxT0QDdQz8Wj0qyRKx2eMg8iZtWvU4E2Q=="], diff --git a/package.json b/package.json index 543b23dc6..3640d0b7b 100644 --- a/package.json +++ b/package.json @@ -88,6 +88,7 @@ "motion": "^12.23.24", "ollama-ai-provider-v2": "^1.5.4", "openai": "^6.9.1", + "react-audio-visualize": "^1.2.0", "rehype-harden": "^1.1.5", "shescape": "^2.1.6", "source-map-support": "^0.5.21", diff --git 
a/src/browser/components/ChatInput/RecordingOverlay.tsx b/src/browser/components/ChatInput/RecordingOverlay.tsx new file mode 100644 index 000000000..cfe60eedc --- /dev/null +++ b/src/browser/components/ChatInput/RecordingOverlay.tsx @@ -0,0 +1,103 @@ +/** + * Recording overlay - shows live audio visualization during voice recording. + * Replaces the chat textarea when voice input is active. + */ + +import React from "react"; +import { LiveAudioVisualizer } from "react-audio-visualize"; +import { Loader2 } from "lucide-react"; +import { cn } from "@/common/lib/utils"; +import { formatKeybind, KEYBINDS } from "@/browser/utils/ui/keybinds"; +import type { UIMode } from "@/common/types/mode"; +import type { VoiceInputState } from "@/browser/hooks/useVoiceInput"; + +// Mode color values for the visualizer (CSS var values from globals.css) +const MODE_COLORS = { + plan: "hsl(210, 70%, 55%)", // Slightly lighter than --color-plan-mode for visibility + exec: "hsl(268, 94%, 65%)", // Slightly lighter than --color-exec-mode for visibility +} as const; + +interface RecordingOverlayProps { + state: VoiceInputState; + mode: UIMode; + mediaRecorder: MediaRecorder | null; + onStop: () => void; +} + +export const RecordingOverlay: React.FC = (props) => { + const isRecording = props.state === "recording"; + const isTranscribing = props.state === "transcribing"; + + const modeColor = MODE_COLORS[props.mode]; + + // Border and background classes based on state + const containerClasses = cn( + "mb-1 flex min-h-[72px] w-full flex-col items-center justify-center gap-2 rounded-md border px-4 py-3 transition-all focus:outline-none", + isRecording + ? props.mode === "plan" + ? "cursor-pointer border-plan-mode bg-plan-mode/10" + : "cursor-pointer border-exec-mode bg-exec-mode/10" + : "cursor-wait border-amber-500 bg-amber-500/10" + ); + + return ( + + ); +}; + +/** + * Simple pulsing animation for transcribing state + */ +const TranscribingAnimation: React.FC = () => ( +
+  <div className="flex items-center justify-center">
+    <Loader2 className="h-6 w-6 animate-spin text-amber-500" />
+  </div>
+); diff --git a/src/browser/components/ChatInput/WaveformBars.tsx b/src/browser/components/ChatInput/WaveformBars.tsx deleted file mode 100644 index e8c834016..000000000 --- a/src/browser/components/ChatInput/WaveformBars.tsx +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Animated waveform bars for voice recording UI. - * Shows 5 bars with staggered pulse animation. - */ - -import { cn } from "@/common/lib/utils"; - -interface WaveformBarsProps { - /** Color class for the bars (e.g., "bg-blue-500") */ - colorClass: string; - /** Whether to mirror the animation (for right-side waveform) */ - mirrored?: boolean; -} - -export const WaveformBars: React.FC = (props) => { - const indices = props.mirrored ? [4, 3, 2, 1, 0] : [0, 1, 2, 3, 4]; - - return ( -
- {indices.map((i, displayIndex) => ( -
- ))} -
- ); -}; diff --git a/src/browser/components/ChatInput/index.tsx b/src/browser/components/ChatInput/index.tsx index 570c0cc95..7588eb53f 100644 --- a/src/browser/components/ChatInput/index.tsx +++ b/src/browser/components/ChatInput/index.tsx @@ -72,7 +72,7 @@ import { useCreationWorkspace } from "./useCreationWorkspace"; import { useTutorial } from "@/browser/contexts/TutorialContext"; import { useVoiceInput } from "@/browser/hooks/useVoiceInput"; import { VoiceInputButton } from "./VoiceInputButton"; -import { WaveformBars } from "./WaveformBars"; +import { RecordingOverlay } from "./RecordingOverlay"; type TokenCountReader = () => number; @@ -1219,54 +1219,12 @@ export const ChatInput: React.FC = (props) => {
         {/* Recording/transcribing overlay - replaces textarea when active */}
         {voiceInput.state !== "idle" ? (
-
+          <RecordingOverlay
+            state={voiceInput.state}
+            mode={mode}
+            mediaRecorder={voiceInput.mediaRecorder}
+            onStop={voiceInput.stop}
+          />
         ) : (
           <>
diff --git a/src/browser/hooks/useVoiceInput.ts b/src/browser/hooks/useVoiceInput.ts
--- a/src/browser/hooks/useVoiceInput.ts
+++ b/src/browser/hooks/useVoiceInput.ts
@@ -39,6 +39,8 @@ export interface UseVoiceInputResult {
   requiresSecureContext: boolean;
+  /** Active MediaRecorder while recording; exposed for the live visualizer */
+  mediaRecorder: MediaRecorder | null;
   start: () => void;
   stop: (options?: { send?: boolean }) => void;
   cancel: () => void;
@@ -73,7 +75,9 @@ export function useVoiceInput(options: UseVoiceInputOptions): UseVoiceInputResul
   const [state, setState] = useState<VoiceInputState>("idle");
 
   // Refs for MediaRecorder lifecycle
+  // We use both ref (for callbacks) and state (to trigger re-render for visualizer)
   const recorderRef = useRef<MediaRecorder | null>(null);
+  const [mediaRecorder, setMediaRecorder] = useState<MediaRecorder | null>(null);
   const streamRef = useRef<MediaStream | null>(null);
   const chunksRef = useRef<Blob[]>([]);
 
@@ -197,6 +201,7 @@ export function useVoiceInput(options: UseVoiceInputOptions): UseVoiceInputResul
       };
 
       recorderRef.current = recorder;
+      setMediaRecorder(recorder);
       recorder.start();
       setState("recording");
     } catch (err) {
@@ -221,6 +226,7 @@ export function useVoiceInput(options: UseVoiceInputOptions): UseVoiceInputResul
     if (recorderRef.current?.state !== "inactive") {
       recorderRef.current?.stop();
       recorderRef.current = null;
+      setMediaRecorder(null);
     }
   }, []);
 
@@ -305,6 +311,7 @@ export function useVoiceInput(options: UseVoiceInputOptions): UseVoiceInputResul
     isApiKeySet: callbacksRef.current.openAIKeySet,
     shouldShowUI: HAS_MEDIA_RECORDER && !HAS_TOUCH_DICTATION,
     requiresSecureContext: HAS_MEDIA_RECORDER && !HAS_GET_USER_MEDIA,
+    mediaRecorder,
     start: () => void start(),
     stop,
     cancel,

From 29fa552a1b062e7c90926f28802c11ed0a50c839 Mon Sep 17 00:00:00 2001
From: Ammar
Date: Tue, 2 Dec 2025 21:33:57 -0600
Subject: [PATCH 03/11] polish: expand waveform to full width, clarify 'review' action

---
 src/browser/components/ChatInput/RecordingOverlay.tsx | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/browser/components/ChatInput/RecordingOverlay.tsx b/src/browser/components/ChatInput/RecordingOverlay.tsx
index cfe60eedc..90e7c631b 100644
--- a/src/browser/components/ChatInput/RecordingOverlay.tsx
+++ b/src/browser/components/ChatInput/RecordingOverlay.tsx
@@ -53,13 +53,13 @@ export const RecordingOverlay: React.FC<RecordingOverlayProps> = (props) => {
       {isRecording && props.mediaRecorder ? (
@@ -82,7 +82,7 @@ export const RecordingOverlay: React.FC<RecordingOverlayProps> = (props) => {
         {isRecording ? (
           <>
             space send ·{" "}
-            {formatKeybind(KEYBINDS.TOGGLE_VOICE_INPUT)} stop ·{" "}
+            {formatKeybind(KEYBINDS.TOGGLE_VOICE_INPUT)} review ·{" "}
             esc cancel
           </>
         ) : (

From b1848f86e3751b913509850945491519dcbedd80 Mon Sep 17 00:00:00 2001
From: Ammar
Date: Tue, 2 Dec 2025 21:35:25 -0600
Subject: [PATCH 04/11] polish: full-width waveform with measured container,
 smaller vertical height

---
 .../components/ChatInput/RecordingOverlay.tsx | 28 +++++++++++++++++++-------
 1 file changed, 21 insertions(+), 7 deletions(-)

diff --git a/src/browser/components/ChatInput/RecordingOverlay.tsx b/src/browser/components/ChatInput/RecordingOverlay.tsx
index 90e7c631b..1836e0b1b 100644
--- a/src/browser/components/ChatInput/RecordingOverlay.tsx
+++ b/src/browser/components/ChatInput/RecordingOverlay.tsx
@@ -3,7 +3,7 @@
 /**
  * Recording overlay - shows live audio visualization during voice recording.
*/ -import React from "react"; +import React, { useRef, useState, useLayoutEffect } from "react"; import { LiveAudioVisualizer } from "react-audio-visualize"; import { Loader2 } from "lucide-react"; import { cn } from "@/common/lib/utils"; @@ -27,12 +27,26 @@ interface RecordingOverlayProps { export const RecordingOverlay: React.FC = (props) => { const isRecording = props.state === "recording"; const isTranscribing = props.state === "transcribing"; + const containerRef = useRef(null); + const [containerWidth, setContainerWidth] = useState(400); + + // Measure container width for the canvas + useLayoutEffect(() => { + const measure = () => { + if (containerRef.current) { + setContainerWidth(containerRef.current.offsetWidth); + } + }; + measure(); + window.addEventListener("resize", measure); + return () => window.removeEventListener("resize", measure); + }, []); const modeColor = MODE_COLORS[props.mode]; // Border and background classes based on state const containerClasses = cn( - "mb-1 flex min-h-[72px] w-full flex-col items-center justify-center gap-2 rounded-md border px-4 py-3 transition-all focus:outline-none", + "mb-1 flex w-full flex-col items-center justify-center gap-1 rounded-md border px-3 py-2 transition-all focus:outline-none", isRecording ? props.mode === "plan" ? "cursor-pointer border-plan-mode bg-plan-mode/10" @@ -49,14 +63,14 @@ export const RecordingOverlay: React.FC = (props) => { aria-label={isRecording ? "Stop recording" : "Transcribing..."} > {/* Visualizer / Animation Area */} -
+      <div ref={containerRef} className="flex h-8 w-full items-center justify-center">
{isRecording && props.mediaRecorder ? ( Date: Tue, 2 Dec 2025 21:37:38 -0600 Subject: [PATCH 05/11] fix: calculate bar dimensions to fill container, use ResizeObserver, smoother animation - Use ResizeObserver for reliable container width measurement - Calculate barWidth and gap dynamically based on container width - Increase smoothingTimeConstant to 0.8 for less jittery animation - Adjust decibel range for better sensitivity --- .../components/ChatInput/RecordingOverlay.tsx | 50 +++++++++++++------ 1 file changed, 35 insertions(+), 15 deletions(-) diff --git a/src/browser/components/ChatInput/RecordingOverlay.tsx b/src/browser/components/ChatInput/RecordingOverlay.tsx index 1836e0b1b..616e53aef 100644 --- a/src/browser/components/ChatInput/RecordingOverlay.tsx +++ b/src/browser/components/ChatInput/RecordingOverlay.tsx @@ -17,6 +17,11 @@ const MODE_COLORS = { exec: "hsl(268, 94%, 65%)", // Slightly lighter than --color-exec-mode for visibility } as const; +// FFT size determines number of frequency bins (fftSize / 2) +// Higher = more bars but less responsive, lower = fewer bars but more responsive +const FFT_SIZE = 128; // 64 bars +const NUM_BARS = FFT_SIZE / 2; + interface RecordingOverlayProps { state: VoiceInputState; mode: UIMode; @@ -28,22 +33,37 @@ export const RecordingOverlay: React.FC = (props) => { const isRecording = props.state === "recording"; const isTranscribing = props.state === "transcribing"; const containerRef = useRef(null); - const [containerWidth, setContainerWidth] = useState(400); + const [containerWidth, setContainerWidth] = useState(600); - // Measure container width for the canvas + // Measure container width for the canvas using ResizeObserver useLayoutEffect(() => { - const measure = () => { - if (containerRef.current) { - setContainerWidth(containerRef.current.offsetWidth); + const container = containerRef.current; + if (!container) return; + + const observer = new ResizeObserver((entries) => { + for (const entry of entries) { + setContainerWidth(entry.contentRect.width); } - }; - measure(); - window.addEventListener("resize", measure); - return () => window.removeEventListener("resize", measure); + }); + + observer.observe(container); + // Initial measurement + setContainerWidth(container.offsetWidth); + + return () => observer.disconnect(); }, []); const modeColor = MODE_COLORS[props.mode]; + // Calculate bar dimensions to fill the container width + // Total width = numBars * barWidth + (numBars - 1) * gap + // We want gap = barWidth / 2 for nice spacing + // So: width = numBars * barWidth + (numBars - 1) * barWidth/2 + // = barWidth * (numBars + (numBars - 1) / 2) + // = barWidth * (1.5 * numBars - 0.5) + const barWidth = Math.max(2, Math.floor(containerWidth / (1.5 * NUM_BARS - 0.5))); + const gap = Math.max(1, Math.floor(barWidth / 2)); + // Border and background classes based on state const containerClasses = cn( "mb-1 flex w-full flex-col items-center justify-center gap-1 rounded-md border px-3 py-2 transition-all focus:outline-none", @@ -69,13 +89,13 @@ export const RecordingOverlay: React.FC = (props) => { mediaRecorder={props.mediaRecorder} width={containerWidth} height={32} - barWidth={2} - gap={1} + barWidth={barWidth} + gap={gap} barColor={modeColor} - smoothingTimeConstant={0.5} - fftSize={256} - minDecibels={-80} - maxDecibels={-20} + smoothingTimeConstant={0.8} + fftSize={FFT_SIZE} + minDecibels={-70} + maxDecibels={-30} /> ) : ( From 0334482db8567e64f95ad44ee1e87fdd66b455e5 Mon Sep 17 00:00:00 2001 From: Ammar Date: Tue, 2 Dec 2025 21:40:17 
-0600 Subject: [PATCH 06/11] feat: custom sliding window waveform showing last 10 seconds Replace react-audio-visualize with custom SlidingWaveform component: - Shows amplitude history over the last 10 seconds - Samples audio every 50ms (200 samples total) - New samples appear on right, slide left over time - Uses RMS amplitude calculation for smooth visualization - Bars scale dynamically to fill container width - Remove react-audio-visualize dependency --- bun.lock | 3 - package.json | 1 - .../components/ChatInput/RecordingOverlay.tsx | 203 +++++++++++++----- 3 files changed, 155 insertions(+), 52 deletions(-) diff --git a/bun.lock b/bun.lock index 3b61c7c0f..a7826169d 100644 --- a/bun.lock +++ b/bun.lock @@ -47,7 +47,6 @@ "motion": "^12.23.24", "ollama-ai-provider-v2": "^1.5.4", "openai": "^6.9.1", - "react-audio-visualize": "^1.2.0", "rehype-harden": "^1.1.5", "shescape": "^2.1.6", "source-map-support": "^0.5.21", @@ -3056,8 +3055,6 @@ "react": ["react@18.3.1", "", { "dependencies": { "loose-envify": "^1.1.0" } }, "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ=="], - "react-audio-visualize": ["react-audio-visualize@1.2.0", "", { "peerDependencies": { "react": ">=16.2.0", "react-dom": ">=16.2.0" } }, "sha512-rfO5nmT0fp23gjU0y2WQT6+ZOq2ZsuPTMphchwX1PCz1Di4oaIr6x7JZII8MLrbHdG7UB0OHfGONTIsWdh67kQ=="], - "react-compiler-runtime": ["react-compiler-runtime@1.0.0", "", { "peerDependencies": { "react": "^17.0.0 || ^18.0.0 || ^19.0.0 || ^0.0.0-experimental" } }, "sha512-rRfjYv66HlG8896yPUDONgKzG5BxZD1nV9U6rkm+7VCuvQc903C4MjcoZR4zPw53IKSOX9wMQVpA1IAbRtzQ7w=="], "react-dnd": ["react-dnd@16.0.1", "", { "dependencies": { "@react-dnd/invariant": "^4.0.1", "@react-dnd/shallowequal": "^4.0.1", "dnd-core": "^16.0.1", "fast-deep-equal": "^3.1.3", "hoist-non-react-statics": "^3.3.2" }, "peerDependencies": { "@types/hoist-non-react-statics": ">= 3.3.1", "@types/node": ">= 12", "@types/react": ">= 16", "react": ">= 16.14" }, "optionalPeers": ["@types/hoist-non-react-statics", "@types/node", "@types/react"] }, "sha512-QeoM/i73HHu2XF9aKksIUuamHPDvRglEwdHL4jsp784BgUuWcg6mzfxT0QDdQz8Wj0qyRKx2eMg8iZtWvU4E2Q=="], diff --git a/package.json b/package.json index 3640d0b7b..543b23dc6 100644 --- a/package.json +++ b/package.json @@ -88,7 +88,6 @@ "motion": "^12.23.24", "ollama-ai-provider-v2": "^1.5.4", "openai": "^6.9.1", - "react-audio-visualize": "^1.2.0", "rehype-harden": "^1.1.5", "shescape": "^2.1.6", "source-map-support": "^0.5.21", diff --git a/src/browser/components/ChatInput/RecordingOverlay.tsx b/src/browser/components/ChatInput/RecordingOverlay.tsx index 616e53aef..5de2f378c 100644 --- a/src/browser/components/ChatInput/RecordingOverlay.tsx +++ b/src/browser/components/ChatInput/RecordingOverlay.tsx @@ -3,8 +3,7 @@ * Replaces the chat textarea when voice input is active. 
*/ -import React, { useRef, useState, useLayoutEffect } from "react"; -import { LiveAudioVisualizer } from "react-audio-visualize"; +import React, { useRef, useState, useLayoutEffect, useEffect, useCallback } from "react"; import { Loader2 } from "lucide-react"; import { cn } from "@/common/lib/utils"; import { formatKeybind, KEYBINDS } from "@/browser/utils/ui/keybinds"; @@ -17,10 +16,10 @@ const MODE_COLORS = { exec: "hsl(268, 94%, 65%)", // Slightly lighter than --color-exec-mode for visibility } as const; -// FFT size determines number of frequency bins (fftSize / 2) -// Higher = more bars but less responsive, lower = fewer bars but more responsive -const FFT_SIZE = 128; // 64 bars -const NUM_BARS = FFT_SIZE / 2; +// Sliding window config +const WINDOW_DURATION_MS = 10000; // 10 seconds of history +const SAMPLE_INTERVAL_MS = 50; // Sample every 50ms +const NUM_SAMPLES = Math.floor(WINDOW_DURATION_MS / SAMPLE_INTERVAL_MS); // 200 samples interface RecordingOverlayProps { state: VoiceInputState; @@ -32,38 +31,9 @@ interface RecordingOverlayProps { export const RecordingOverlay: React.FC = (props) => { const isRecording = props.state === "recording"; const isTranscribing = props.state === "transcribing"; - const containerRef = useRef(null); - const [containerWidth, setContainerWidth] = useState(600); - - // Measure container width for the canvas using ResizeObserver - useLayoutEffect(() => { - const container = containerRef.current; - if (!container) return; - - const observer = new ResizeObserver((entries) => { - for (const entry of entries) { - setContainerWidth(entry.contentRect.width); - } - }); - - observer.observe(container); - // Initial measurement - setContainerWidth(container.offsetWidth); - - return () => observer.disconnect(); - }, []); const modeColor = MODE_COLORS[props.mode]; - // Calculate bar dimensions to fill the container width - // Total width = numBars * barWidth + (numBars - 1) * gap - // We want gap = barWidth / 2 for nice spacing - // So: width = numBars * barWidth + (numBars - 1) * barWidth/2 - // = barWidth * (numBars + (numBars - 1) / 2) - // = barWidth * (1.5 * numBars - 0.5) - const barWidth = Math.max(2, Math.floor(containerWidth / (1.5 * NUM_BARS - 0.5))); - const gap = Math.max(1, Math.floor(barWidth / 2)); - // Border and background classes based on state const containerClasses = cn( "mb-1 flex w-full flex-col items-center justify-center gap-1 rounded-md border px-3 py-2 transition-all focus:outline-none", @@ -83,20 +53,9 @@ export const RecordingOverlay: React.FC = (props) => { aria-label={isRecording ? "Stop recording" : "Transcribing..."} > {/* Visualizer / Animation Area */} -
+      <div className="flex h-8 w-full items-center justify-center">
{isRecording && props.mediaRecorder ? ( - + ) : ( )} @@ -127,6 +86,154 @@ export const RecordingOverlay: React.FC = (props) => { ); }; +/** + * Sliding window waveform - shows amplitude over the last ~10 seconds. + * New samples appear on the right and slide left over time. + */ +interface SlidingWaveformProps { + mediaRecorder: MediaRecorder; + color: string; + height: number; +} + +const SlidingWaveform: React.FC = (props) => { + const canvasRef = useRef(null); + const containerRef = useRef(null); + const [containerWidth, setContainerWidth] = useState(600); + + // Audio analysis refs (persist across renders) + const audioContextRef = useRef(null); + const analyserRef = useRef(null); + const samplesRef = useRef(new Array(NUM_SAMPLES).fill(0)); + const animationFrameRef = useRef(0); + const lastSampleTimeRef = useRef(0); + + // Measure container width + useLayoutEffect(() => { + const container = containerRef.current; + if (!container) return; + + const observer = new ResizeObserver((entries) => { + for (const entry of entries) { + setContainerWidth(entry.contentRect.width); + } + }); + + observer.observe(container); + setContainerWidth(container.offsetWidth); + + return () => observer.disconnect(); + }, []); + + // Set up audio analysis + useEffect(() => { + const stream = props.mediaRecorder.stream; + if (!stream) return; + + const audioContext = new AudioContext(); + const analyser = audioContext.createAnalyser(); + analyser.fftSize = 256; + analyser.smoothingTimeConstant = 0.3; + + const source = audioContext.createMediaStreamSource(stream); + source.connect(analyser); + + audioContextRef.current = audioContext; + analyserRef.current = analyser; + + // Reset samples when starting + samplesRef.current = new Array(NUM_SAMPLES).fill(0); + lastSampleTimeRef.current = performance.now(); + + return () => { + audioContext.close(); + audioContextRef.current = null; + analyserRef.current = null; + }; + }, [props.mediaRecorder]); + + // Animation loop - sample audio and render + const draw = useCallback(() => { + const canvas = canvasRef.current; + const analyser = analyserRef.current; + if (!canvas || !analyser) return; + + const ctx = canvas.getContext("2d"); + if (!ctx) return; + + const now = performance.now(); + const timeSinceLastSample = now - lastSampleTimeRef.current; + + // Take a new sample if enough time has passed + if (timeSinceLastSample >= SAMPLE_INTERVAL_MS) { + const dataArray = new Uint8Array(analyser.frequencyBinCount); + analyser.getByteTimeDomainData(dataArray); + + // Calculate RMS amplitude (0-1 range) + let sum = 0; + for (let i = 0; i < dataArray.length; i++) { + const normalized = (dataArray[i] - 128) / 128; // -1 to 1 + sum += normalized * normalized; + } + const rms = Math.sqrt(sum / dataArray.length); + + // Shift samples left and add new one + samplesRef.current.shift(); + samplesRef.current.push(rms); + lastSampleTimeRef.current = now; + } + + // Clear canvas + ctx.clearRect(0, 0, canvas.width, canvas.height); + + // Draw waveform bars + const samples = samplesRef.current; + const barWidth = Math.max(1, Math.floor(canvas.width / samples.length)); + const gap = Math.max(1, Math.floor(barWidth * 0.3)); + const effectiveBarWidth = barWidth - gap; + const centerY = canvas.height / 2; + + ctx.fillStyle = props.color; + + for (let i = 0; i < samples.length; i++) { + const amplitude = samples[i]; + // Scale amplitude for visibility (boost quiet sounds) + const scaledAmplitude = Math.min(1, amplitude * 3); + const barHeight = Math.max(2, scaledAmplitude * canvas.height * 
0.9); + + const x = i * barWidth; + const y = centerY - barHeight / 2; + + ctx.beginPath(); + ctx.roundRect(x, y, effectiveBarWidth, barHeight, 1); + ctx.fill(); + } + + animationFrameRef.current = requestAnimationFrame(draw); + }, [props.color]); + + // Start/stop animation loop + useEffect(() => { + animationFrameRef.current = requestAnimationFrame(draw); + return () => { + if (animationFrameRef.current) { + cancelAnimationFrame(animationFrameRef.current); + } + }; + }, [draw]); + + return ( +
+    <div ref={containerRef} className="w-full">
+      <canvas ref={canvasRef} width={containerWidth} height={props.height} />
+ ); +}; + /** * Simple pulsing animation for transcribing state */ From c27ce9ed7083e119046cf91e053130bc059c08ba Mon Sep 17 00:00:00 2001 From: Ammar Date: Tue, 2 Dec 2025 21:55:52 -0600 Subject: [PATCH 07/11] fix: full-width centered waveform with proper bar spacing calculation --- .../components/ChatInput/RecordingOverlay.tsx | 26 ++++++++++++------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/src/browser/components/ChatInput/RecordingOverlay.tsx b/src/browser/components/ChatInput/RecordingOverlay.tsx index 5de2f378c..f33b9e9c0 100644 --- a/src/browser/components/ChatInput/RecordingOverlay.tsx +++ b/src/browser/components/ChatInput/RecordingOverlay.tsx @@ -186,26 +186,34 @@ const SlidingWaveform: React.FC = (props) => { // Clear canvas ctx.clearRect(0, 0, canvas.width, canvas.height); - // Draw waveform bars + // Draw waveform bars - calculate to fill full width const samples = samplesRef.current; - const barWidth = Math.max(1, Math.floor(canvas.width / samples.length)); - const gap = Math.max(1, Math.floor(barWidth * 0.3)); - const effectiveBarWidth = barWidth - gap; + const numBars = samples.length; + + // Calculate bar and gap sizes to fill exactly the canvas width + // We want: numBars * barWidth + (numBars - 1) * gap = canvasWidth + // With gap = barWidth * 0.4, we get: + // numBars * barWidth + (numBars - 1) * 0.4 * barWidth = canvasWidth + // barWidth * (numBars + 0.4 * numBars - 0.4) = canvasWidth + // barWidth = canvasWidth / (1.4 * numBars - 0.4) + const totalWidth = canvas.width; + const barWidth = totalWidth / (1.4 * numBars - 0.4); + const gap = barWidth * 0.4; const centerY = canvas.height / 2; ctx.fillStyle = props.color; - for (let i = 0; i < samples.length; i++) { + for (let i = 0; i < numBars; i++) { const amplitude = samples[i]; // Scale amplitude for visibility (boost quiet sounds) const scaledAmplitude = Math.min(1, amplitude * 3); const barHeight = Math.max(2, scaledAmplitude * canvas.height * 0.9); - const x = i * barWidth; + const x = i * (barWidth + gap); const y = centerY - barHeight / 2; ctx.beginPath(); - ctx.roundRect(x, y, effectiveBarWidth, barHeight, 1); + ctx.roundRect(x, y, barWidth, barHeight, 1); ctx.fill(); } @@ -223,12 +231,12 @@ const SlidingWaveform: React.FC = (props) => { }, [draw]); return ( -
+
   );
 };

From e277d251d4949bbab8f569aae4d7cef1a8cf7a02 Mon Sep 17 00:00:00 2001
From: Ammar
Date: Tue, 2 Dec 2025 22:54:57 -0600
Subject: [PATCH 08/11] fix: lint errors in RecordingOverlay

---
 .../components/ChatInput/RecordingOverlay.tsx | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/src/browser/components/ChatInput/RecordingOverlay.tsx b/src/browser/components/ChatInput/RecordingOverlay.tsx
index f33b9e9c0..ae83851ac 100644
--- a/src/browser/components/ChatInput/RecordingOverlay.tsx
+++ b/src/browser/components/ChatInput/RecordingOverlay.tsx
@@ -75,8 +75,8 @@ export const RecordingOverlay: React.FC<RecordingOverlayProps> = (props) => {
         {isRecording ? (
           <>
             space send ·{" "}
-            {formatKeybind(KEYBINDS.TOGGLE_VOICE_INPUT)} review ·{" "}
-            esc cancel
+            {formatKeybind(KEYBINDS.TOGGLE_VOICE_INPUT)} review
+            · esc cancel
           </>
         ) : (
           "Transcribing..."
@@ -104,7 +104,7 @@ const SlidingWaveform: React.FC<SlidingWaveformProps> = (props) => {
   // Audio analysis refs (persist across renders)
   const audioContextRef = useRef<AudioContext | null>(null);
   const analyserRef = useRef<AnalyserNode | null>(null);
-  const samplesRef = useRef(new Array(NUM_SAMPLES).fill(0));
+  const samplesRef = useRef<number[]>(new Array<number>(NUM_SAMPLES).fill(0));
   const animationFrameRef = useRef(0);
   const lastSampleTimeRef = useRef(0);
 
@@ -142,11 +142,11 @@ const SlidingWaveform: React.FC<SlidingWaveformProps> = (props) => {
     analyserRef.current = analyser;
 
     // Reset samples when starting
-    samplesRef.current = new Array(NUM_SAMPLES).fill(0);
+    samplesRef.current = new Array<number>(NUM_SAMPLES).fill(0);
     lastSampleTimeRef.current = performance.now();
 
     return () => {
-      audioContext.close();
+      void audioContext.close();
       audioContextRef.current = null;
       analyserRef.current = null;
     };
   }, [props.mediaRecorder]);
 
@@ -171,8 +171,8 @@ const SlidingWaveform: React.FC<SlidingWaveformProps> = (props) => {
       // Calculate RMS amplitude (0-1 range)
       let sum = 0;
-      for (let i = 0; i < dataArray.length; i++) {
-        const normalized = (dataArray[i] - 128) / 128; // -1 to 1
+      for (const sample of dataArray) {
+        const normalized = (sample - 128) / 128; // -1 to 1
         sum += normalized * normalized;
       }
       const rms = Math.sqrt(sum / dataArray.length);

From a549122956cf5d4f8a165f5247be6f4b9214dc12 Mon Sep 17 00:00:00 2001
From: Ammar
Date: Tue, 2 Dec 2025 22:59:46 -0600
Subject: [PATCH 09/11] refactor: clean up recording overlay code, add
 roundRect fallback

- Deduplicate mode-based styling with lookup tables
- Extract RecordingHints component for keyboard shortcuts
- Simplify VoiceInputButton color logic
- Add roundRect fallback for older browser compatibility
- Improve code organization and comments
---
 .../components/ChatInput/RecordingOverlay.tsx | 150 ++++++++----------
 .../components/ChatInput/VoiceInputButton.tsx |  50 +++---
 2 files changed, 92 insertions(+), 108 deletions(-)

diff --git a/src/browser/components/ChatInput/RecordingOverlay.tsx b/src/browser/components/ChatInput/RecordingOverlay.tsx
index ae83851ac..b520b4a06 100644
--- a/src/browser/components/ChatInput/RecordingOverlay.tsx
+++ b/src/browser/components/ChatInput/RecordingOverlay.tsx
@@ -10,16 +10,27 @@ import { formatKeybind, KEYBINDS } from "@/browser/utils/ui/keybinds";
 import type { UIMode } from "@/common/types/mode";
 import type { VoiceInputState } from "@/browser/hooks/useVoiceInput";
 
-// Mode color values for the visualizer (CSS var values from globals.css)
-const MODE_COLORS = {
-  plan: "hsl(210, 70%, 55%)", // Slightly lighter than --color-plan-mode for visibility
-  exec: "hsl(268, 94%, 65%)", // Slightly lighter than --color-exec-mode for visibility
-} as const;
+/** Canvas fill colors for the waveform (slightly lighter than CSS vars for
visibility) */ +const MODE_COLORS: Record = { + plan: "hsl(210, 70%, 55%)", + exec: "hsl(268, 94%, 65%)", +}; + +/** Tailwind classes for recording state, keyed by mode */ +const RECORDING_CLASSES: Record = { + plan: "cursor-pointer border-plan-mode bg-plan-mode/10", + exec: "cursor-pointer border-exec-mode bg-exec-mode/10", +}; -// Sliding window config -const WINDOW_DURATION_MS = 10000; // 10 seconds of history -const SAMPLE_INTERVAL_MS = 50; // Sample every 50ms -const NUM_SAMPLES = Math.floor(WINDOW_DURATION_MS / SAMPLE_INTERVAL_MS); // 200 samples +const TEXT_CLASSES: Record = { + plan: "text-plan-mode-light", + exec: "text-exec-mode-light", +}; + +// Waveform shows last 10 seconds of audio, sampled every 50ms (200 samples) +const WINDOW_DURATION_MS = 10_000; +const SAMPLE_INTERVAL_MS = 50; +const NUM_SAMPLES = WINDOW_DURATION_MS / SAMPLE_INTERVAL_MS; interface RecordingOverlayProps { state: VoiceInputState; @@ -32,16 +43,9 @@ export const RecordingOverlay: React.FC = (props) => { const isRecording = props.state === "recording"; const isTranscribing = props.state === "transcribing"; - const modeColor = MODE_COLORS[props.mode]; - - // Border and background classes based on state const containerClasses = cn( "mb-1 flex w-full flex-col items-center justify-center gap-1 rounded-md border px-3 py-2 transition-all focus:outline-none", - isRecording - ? props.mode === "plan" - ? "cursor-pointer border-plan-mode bg-plan-mode/10" - : "cursor-pointer border-exec-mode bg-exec-mode/10" - : "cursor-wait border-amber-500 bg-amber-500/10" + isRecording ? RECORDING_CLASSES[props.mode] : "cursor-wait border-amber-500 bg-amber-500/10" ); return ( @@ -52,63 +56,66 @@ export const RecordingOverlay: React.FC = (props) => { className={containerClasses} aria-label={isRecording ? "Stop recording" : "Transcribing..."} > - {/* Visualizer / Animation Area */}
{isRecording && props.mediaRecorder ? ( - + ) : ( - + )}
- {/* Status Text */} - {isRecording ? ( - <> - space send ·{" "} - {formatKeybind(KEYBINDS.TOGGLE_VOICE_INPUT)} review - · esc cancel - - ) : ( - "Transcribing..." - )} + {isRecording ? : "Transcribing..."} ); }; -/** - * Sliding window waveform - shows amplitude over the last ~10 seconds. - * New samples appear on the right and slide left over time. - */ +/** Keyboard hint display for recording state */ +const RecordingHints: React.FC = () => ( + <> + space send ·{" "} + {formatKeybind(KEYBINDS.TOGGLE_VOICE_INPUT)} review ·{" "} + esc cancel + +); + +// ============================================================================= +// SlidingWaveform - Canvas-based amplitude visualization +// ============================================================================= + interface SlidingWaveformProps { mediaRecorder: MediaRecorder; color: string; height: number; } +/** + * Renders a sliding window of audio amplitude over time. + * New samples appear on the right and scroll left as time passes. + */ const SlidingWaveform: React.FC = (props) => { const canvasRef = useRef(null); const containerRef = useRef(null); const [containerWidth, setContainerWidth] = useState(600); - // Audio analysis refs (persist across renders) + // Audio analysis state (refs to avoid re-renders) const audioContextRef = useRef(null); const analyserRef = useRef(null); const samplesRef = useRef(new Array(NUM_SAMPLES).fill(0)); const animationFrameRef = useRef(0); const lastSampleTimeRef = useRef(0); - // Measure container width + // Track container width for responsive canvas useLayoutEffect(() => { const container = containerRef.current; if (!container) return; @@ -118,14 +125,13 @@ const SlidingWaveform: React.FC = (props) => { setContainerWidth(entry.contentRect.width); } }); - observer.observe(container); setContainerWidth(container.offsetWidth); return () => observer.disconnect(); }, []); - // Set up audio analysis + // Initialize Web Audio API analyser useEffect(() => { const stream = props.mediaRecorder.stream; if (!stream) return; @@ -140,8 +146,6 @@ const SlidingWaveform: React.FC = (props) => { audioContextRef.current = audioContext; analyserRef.current = analyser; - - // Reset samples when starting samplesRef.current = new Array(NUM_SAMPLES).fill(0); lastSampleTimeRef.current = performance.now(); @@ -152,7 +156,7 @@ const SlidingWaveform: React.FC = (props) => { }; }, [props.mediaRecorder]); - // Animation loop - sample audio and render + // Animation loop: sample audio amplitude and render bars const draw = useCallback(() => { const canvas = canvasRef.current; const analyser = analyserRef.current; @@ -161,73 +165,60 @@ const SlidingWaveform: React.FC = (props) => { const ctx = canvas.getContext("2d"); if (!ctx) return; + // Sample audio at fixed intervals const now = performance.now(); - const timeSinceLastSample = now - lastSampleTimeRef.current; - - // Take a new sample if enough time has passed - if (timeSinceLastSample >= SAMPLE_INTERVAL_MS) { + if (now - lastSampleTimeRef.current >= SAMPLE_INTERVAL_MS) { const dataArray = new Uint8Array(analyser.frequencyBinCount); analyser.getByteTimeDomainData(dataArray); - // Calculate RMS amplitude (0-1 range) + // Calculate RMS (root mean square) amplitude let sum = 0; for (const sample of dataArray) { - const normalized = (sample - 128) / 128; // -1 to 1 + const normalized = (sample - 128) / 128; sum += normalized * normalized; } const rms = Math.sqrt(sum / dataArray.length); - // Shift samples left and add new one samplesRef.current.shift(); 
samplesRef.current.push(rms); lastSampleTimeRef.current = now; } - // Clear canvas + // Render bars ctx.clearRect(0, 0, canvas.width, canvas.height); - // Draw waveform bars - calculate to fill full width const samples = samplesRef.current; const numBars = samples.length; - - // Calculate bar and gap sizes to fill exactly the canvas width - // We want: numBars * barWidth + (numBars - 1) * gap = canvasWidth - // With gap = barWidth * 0.4, we get: - // numBars * barWidth + (numBars - 1) * 0.4 * barWidth = canvasWidth - // barWidth * (numBars + 0.4 * numBars - 0.4) = canvasWidth - // barWidth = canvasWidth / (1.4 * numBars - 0.4) - const totalWidth = canvas.width; - const barWidth = totalWidth / (1.4 * numBars - 0.4); + // Bar sizing: bars fill full width with 40% gap ratio + const barWidth = canvas.width / (1.4 * numBars - 0.4); const gap = barWidth * 0.4; const centerY = canvas.height / 2; ctx.fillStyle = props.color; for (let i = 0; i < numBars; i++) { - const amplitude = samples[i]; - // Scale amplitude for visibility (boost quiet sounds) - const scaledAmplitude = Math.min(1, amplitude * 3); + const scaledAmplitude = Math.min(1, samples[i] * 3); // Boost for visibility const barHeight = Math.max(2, scaledAmplitude * canvas.height * 0.9); - const x = i * (barWidth + gap); const y = centerY - barHeight / 2; ctx.beginPath(); - ctx.roundRect(x, y, barWidth, barHeight, 1); + // roundRect fallback for older browsers (though Electron 38+ supports it) + if (ctx.roundRect) { + ctx.roundRect(x, y, barWidth, barHeight, 1); + } else { + ctx.rect(x, y, barWidth, barHeight); + } ctx.fill(); } animationFrameRef.current = requestAnimationFrame(draw); }, [props.color]); - // Start/stop animation loop + // Run animation loop useEffect(() => { animationFrameRef.current = requestAnimationFrame(draw); - return () => { - if (animationFrameRef.current) { - cancelAnimationFrame(animationFrameRef.current); - } - }; + return () => cancelAnimationFrame(animationFrameRef.current); }, [draw]); return ( @@ -241,12 +232,3 @@ const SlidingWaveform: React.FC = (props) => {
); }; - -/** - * Simple pulsing animation for transcribing state - */ -const TranscribingAnimation: React.FC = () => ( -
-    <Loader2 className="h-6 w-6 animate-spin text-amber-500" />
-  </div>
-);
diff --git a/src/browser/components/ChatInput/VoiceInputButton.tsx b/src/browser/components/ChatInput/VoiceInputButton.tsx
index 0256e3de4..6d4eb817b 100644
--- a/src/browser/components/ChatInput/VoiceInputButton.tsx
+++ b/src/browser/components/ChatInput/VoiceInputButton.tsx
@@ -21,38 +21,40 @@ interface VoiceInputButtonProps {
   mode: UIMode;
 }
 
-function getRecordingColorClass(mode: UIMode): string {
-  return mode === "plan"
-    ? "text-plan-mode-light animate-pulse"
-    : "text-exec-mode-light animate-pulse";
-}
+/** Color classes for each voice input state */
+const STATE_COLORS: Record<VoiceInputState, string> = {
+  idle: "text-muted/50 hover:text-muted",
+  recording: "", // Set dynamically based on mode
+  transcribing: "text-amber-500",
+};
 
-const STATE_CONFIG: Record<VoiceInputState, { label: string; colorClass: string }> = {
-  idle: { label: "Voice input", colorClass: "text-muted/50 hover:text-muted" },
-  recording: { label: "Stop recording", colorClass: "" }, // handled dynamically
-  transcribing: { label: "Transcribing...", colorClass: "text-amber-500" },
+const RECORDING_COLORS: Record<UIMode, string> = {
+  plan: "text-plan-mode-light animate-pulse",
+  exec: "text-exec-mode-light animate-pulse",
 };
 
+function getColorClass(state: VoiceInputState, mode: UIMode): string {
+  return state === "recording" ? RECORDING_COLORS[mode] : STATE_COLORS[state];
+}
+
 export const VoiceInputButton: React.FC<VoiceInputButtonProps> = (props) => {
   if (!props.shouldShowUI) return null;
 
   const needsHttps = props.requiresSecureContext;
   const needsApiKey = !needsHttps && !props.isApiKeySet;
-  const isDisabledReason = needsHttps || needsApiKey;
+  const isDisabled = needsHttps || needsApiKey;
+
+  const label = isDisabled
+    ? needsHttps
+      ? "Voice input (requires HTTPS)"
+      : "Voice input (requires OpenAI API key)"
+    : props.state === "recording"
+      ? "Stop recording"
+      : props.state === "transcribing"
+        ? "Transcribing..."
+        : "Voice input";
 
-  const stateConfig = STATE_CONFIG[props.state];
-  const { label, colorClass } = isDisabledReason
-    ? {
-        label: needsHttps
-          ? "Voice input (requires HTTPS)"
-          : "Voice input (requires OpenAI API key)",
-        colorClass: "text-muted/50",
-      }
-    : {
-        label: stateConfig.label,
-        colorClass:
-          props.state === "recording" ? getRecordingColorClass(props.mode) : stateConfig.colorClass,
-      };
+  const colorClass = isDisabled ? "text-muted/50" : getColorClass(props.state, props.mode);
 
   const Icon = props.state === "transcribing" ? Loader2 : Mic;
   const isTranscribing = props.state === "transcribing";
@@ -62,7 +64,7 @@ export const VoiceInputButton: React.FC<VoiceInputButtonProps> = (props) => {
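
Note on the PATCH 02 hook change: the hook deliberately tracks the recorder twice. A minimal sketch of the pattern, assuming only React; the hook wrapper and its name here are illustrative, not the actual exports:

import { useRef, useState } from "react";

// Keep the MediaRecorder in BOTH a ref and state:
// - the ref gives recorder callbacks (onstop/ondataavailable) a handle that
//   never goes stale between renders;
// - the state change re-renders consumers, so the waveform visualizer mounts
//   the moment recording starts.
function useRecorderHandle() {
  const recorderRef = useRef<MediaRecorder | null>(null);
  const [mediaRecorder, setMediaRecorder] = useState<MediaRecorder | null>(null);

  const attach = (recorder: MediaRecorder | null): void => {
    recorderRef.current = recorder; // read synchronously inside callbacks
    setMediaRecorder(recorder); // drives the visualizer's mount/unmount
  };

  return { recorderRef, mediaRecorder, attach };
}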
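The core of the PATCH 06 visualizer is the analyser-plus-RMS sampling; stripped of canvas and React concerns it reduces to the following sketch (Web Audio API only; createAmplitudeSampler is an illustrative name):

// Connect an AnalyserNode to the recorder's stream and read RMS amplitude
// from time-domain data, as SlidingWaveform's setup and draw loop do.
function createAmplitudeSampler(recorder: MediaRecorder) {
  const audioContext = new AudioContext();
  const analyser = audioContext.createAnalyser();
  analyser.fftSize = 256; // frequencyBinCount = 128 samples per read
  analyser.smoothingTimeConstant = 0.3;
  audioContext.createMediaStreamSource(recorder.stream).connect(analyser);

  return {
    /** RMS amplitude in 0..1; near 0 for silence, rising with loudness */
    read(): number {
      const data = new Uint8Array(analyser.frequencyBinCount);
      analyser.getByteTimeDomainData(data); // bytes centered at 128 = silence
      let sum = 0;
      for (const sample of data) {
        const normalized = (sample - 128) / 128; // map to -1..1
        sum += normalized * normalized;
      }
      return Math.sqrt(sum / data.length);
    },
    dispose(): void {
      void audioContext.close();
    },
  };
}

Pushing read()'s result into a fixed-length queue every 50 ms yields the 10-second sliding window (200 samples) that the component renders.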
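The bar-sizing arithmetic from PATCH 05/07 is worth spelling out once: with N bars, a gap of 40% of a bar's width, and the requirement that bars plus gaps exactly fill the canvas, W = N*b + (N-1)*0.4b = b*(1.4N - 0.4). A sketch (barGeometry is an illustrative name):

function barGeometry(canvasWidth: number, numBars: number): { barWidth: number; gap: number } {
  // W = N*b + (N-1)*0.4b  =>  b = W / (1.4N - 0.4)
  const barWidth = canvasWidth / (1.4 * numBars - 0.4);
  return { barWidth, gap: 0.4 * barWidth };
}

// Example: a 600px canvas with 200 samples gives barWidth of about 2.15px and
// gap of about 0.86px; 200 * 2.15 + 199 * 0.86 is roughly 600px, as required.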