{attachment.duration ? (
- displayDuration(secondsElapsed)
+ displayDuration(displayedDuration)
) : (
)}
diff --git a/src/components/Attachment/__tests__/Audio.test.js b/src/components/Attachment/__tests__/Audio.test.js
index 63b72bbf3..26d5eb8c5 100644
--- a/src/components/Attachment/__tests__/Audio.test.js
+++ b/src/components/Attachment/__tests__/Audio.test.js
@@ -5,21 +5,35 @@ import '@testing-library/jest-dom';
import { Audio } from '../Audio';
import { ChannelActionProvider, ChatContext } from '../../../context';
-
-import { generateAudioAttachment } from 'mock-builders';
+import { generateAudioAttachment } from '../../../mock-builders';
import { prettifyFileSize } from '../../MessageInput/hooks/utils';
const AUDIO = generateAudioAttachment();
+const delay = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
+
+const originalConsoleError = console.error;
+jest.spyOn(console, 'error').mockImplementationOnce((...errorOrTextorArg) => {
+ const msg = Array.isArray(errorOrTextorArg)
+ ? errorOrTextorArg[0]
+ : errorOrTextorArg.message ?? errorOrTextorArg;
+ if (msg.match('Not implemented')) return;
+ originalConsoleError(...errorOrTextorArg);
+});
+const addNotificationSpy = jest.fn();
+const defaultChannelActionContext = { addNotification: addNotificationSpy };
const renderComponent = (
props = {
+ channelActionContext: defaultChannelActionContext,
chatContext: { themeVersion: '1' },
og: AUDIO,
},
) =>
render(
-
+
,
@@ -27,6 +41,8 @@ const renderComponent = (
const playButtonTestId = 'play-audio';
const pauseButtonTestId = 'pause-audio';
+const playButton = () => screen.queryByTestId(playButtonTestId);
+const pauseButton = () => screen.queryByTestId(pauseButtonTestId);
describe('Audio', () => {
beforeAll(() => {
@@ -35,7 +51,10 @@ describe('Audio', () => {
jest.spyOn(HTMLMediaElement.prototype, 'play').mockImplementation(() => {});
jest.spyOn(HTMLMediaElement.prototype, 'pause').mockImplementation(() => {});
});
- afterEach(cleanup);
+ afterEach(() => {
+ cleanup();
+ jest.resetAllMocks();
+ });
it('in v1 should render title and render the image with description as alt tag', () => {
const { container } = renderComponent({
@@ -90,30 +109,143 @@ describe('Audio', () => {
});
it('should show the correct button if the song is paused/playing', async () => {
- const { container, queryByTestId } = renderComponent({
+ const { container } = renderComponent({
chatContext: { themeVersion },
- og: AUDIO,
+ og: { ...AUDIO, mime_type: undefined },
});
const audioPausedMock = jest.spyOn(container.querySelector('audio'), 'paused', 'get');
- const playButton = () => queryByTestId(playButtonTestId);
- const pauseButton = () => queryByTestId(pauseButtonTestId);
expect(await playButton()).toBeInTheDocument();
expect(await pauseButton()).not.toBeInTheDocument();
audioPausedMock.mockReturnValueOnce(true);
- fireEvent.click(playButton());
+ await act(async () => {
+ await fireEvent.click(playButton());
+ });
expect(await playButton()).not.toBeInTheDocument();
expect(await pauseButton()).toBeInTheDocument();
audioPausedMock.mockReturnValueOnce(false);
- fireEvent.click(pauseButton());
+ await act(async () => {
+ await fireEvent.click(pauseButton());
+ });
expect(await playButton()).toBeInTheDocument();
expect(await pauseButton()).not.toBeInTheDocument();
+ expect(addNotificationSpy).not.toHaveBeenCalled();
audioPausedMock.mockRestore();
});
+ it('should pause the audio if the playback has not started in 2000ms', async () => {
+ jest.useFakeTimers('modern');
+ const { container } = renderComponent({
+ chatContext: { themeVersion },
+ og: { ...AUDIO, mime_type: undefined },
+ });
+
+ const audio = container.querySelector('audio');
+ const audioPlayMock = jest.spyOn(audio, 'play').mockImplementation(() => delay(3000));
+ const audioPauseMock = jest.spyOn(audio, 'pause');
+
+ expect(await playButton()).toBeInTheDocument();
+ expect(await pauseButton()).not.toBeInTheDocument();
+
+ await act(async () => {
+ await fireEvent.click(playButton());
+ });
+ expect(await playButton()).toBeInTheDocument();
+ expect(await pauseButton()).not.toBeInTheDocument();
+
+ jest.advanceTimersByTime(2000);
+
+ await waitFor(async () => {
+ expect(audioPauseMock).toHaveBeenCalledWith();
+ expect(await playButton()).toBeInTheDocument();
+ expect(await pauseButton()).not.toBeInTheDocument();
+ expect(addNotificationSpy).not.toHaveBeenCalled();
+ });
+
+ jest.useRealTimers();
+ audioPlayMock.mockRestore();
+ audioPauseMock.mockRestore();
+ });
+
+ it('should register error if pausing the audio after 2000ms of inactivity failed', async () => {
+ jest.useFakeTimers('modern');
+ const { container } = renderComponent({
+ chatContext: { themeVersion },
+ og: { ...AUDIO, mime_type: undefined },
+ });
+ const audio = container.querySelector('audio');
+ const audioPlayMock = jest.spyOn(audio, 'play').mockImplementation(() => delay(3000));
+ const audioPauseMock = jest.spyOn(audio, 'pause').mockImplementationOnce(() => {
+ throw new Error('');
+ });
+
+ await act(() => {
+ fireEvent.click(playButton());
+ });
+ jest.advanceTimersByTime(2000);
+ await waitFor(() => {
+ expect(audioPauseMock).toHaveBeenCalledWith();
+ expect(addNotificationSpy).toHaveBeenCalledWith('Failed to play the recording', 'error');
+ });
+
+ jest.useRealTimers();
+ audioPlayMock.mockRestore();
+ audioPauseMock.mockRestore();
+ });
+
+ it('should register error if playing the audio failed', async () => {
+ const errorText = 'Test error';
+ const { container } = renderComponent({
+ chatContext: { themeVersion },
+ og: AUDIO,
+ });
+ const audio = container.querySelector('audio');
+ const audioPlayMock = jest.spyOn(audio, 'play').mockRejectedValueOnce(new Error(errorText));
+ const audioCanPlayTypeMock = jest.spyOn(audio, 'canPlayType').mockReturnValue('maybe');
+
+ expect(await playButton()).toBeInTheDocument();
+ expect(await pauseButton()).not.toBeInTheDocument();
+
+ await act(async () => {
+ await fireEvent.click(playButton());
+ });
+ expect(await playButton()).toBeInTheDocument();
+ expect(await pauseButton()).not.toBeInTheDocument();
+ expect(addNotificationSpy).toHaveBeenCalledWith(errorText, 'error');
+ audioPlayMock.mockRestore();
+ audioCanPlayTypeMock.mockRestore();
+ });
+
+ it('should register error if the audio MIME type is not playable', async () => {
+ const { container } = renderComponent({
+ chatContext: { themeVersion },
+ og: AUDIO,
+ });
+ const audio = container.querySelector('audio');
+ const audioPlayMock = jest.spyOn(audio, 'play');
+ const audioCanPlayTypeMock = jest.spyOn(audio, 'canPlayType').mockReturnValue('');
+
+ expect(await playButton()).toBeInTheDocument();
+ expect(await pauseButton()).not.toBeInTheDocument();
+
+ await act(async () => {
+ await fireEvent.click(playButton());
+ });
+ expect(audioPlayMock).not.toHaveBeenCalled();
+ expect(addNotificationSpy).toHaveBeenCalledWith(
+ 'Recording format is not supported and cannot be reproduced',
+ 'error',
+ );
+ expect(await playButton()).toBeInTheDocument();
+ expect(await pauseButton()).not.toBeInTheDocument();
+
+ audioPlayMock.mockRestore();
+ audioCanPlayTypeMock.mockRestore();
+ });
+
it('should show the correct progress', async () => {
const { container } = renderComponent({ chatContext: { themeVersion }, og: AUDIO });
diff --git a/src/components/Attachment/__tests__/VoiceRecording.test.js b/src/components/Attachment/__tests__/VoiceRecording.test.js
index 7acd25630..ca209c632 100644
--- a/src/components/Attachment/__tests__/VoiceRecording.test.js
+++ b/src/components/Attachment/__tests__/VoiceRecording.test.js
@@ -13,6 +13,12 @@ const FALLBACK_TITLE = 'Voice message';
const attachment = generateVoiceRecordingAttachment();
+const clickPlay = async () => {
+ await act(async () => {
+ await fireEvent.click(screen.queryByTestId('play-audio'));
+ });
+};
+
jest.spyOn(window.HTMLMediaElement.prototype, 'play').mockImplementation(() => {});
jest.spyOn(window.HTMLMediaElement.prototype, 'pause').mockImplementation(() => {});
@@ -41,6 +47,7 @@ describe('VoiceRecordingPlayer', () => {
beforeAll(() => {
jest.spyOn(window.HTMLMediaElement.prototype, 'pause').mockImplementation(() => {});
jest.spyOn(window.HTMLMediaElement.prototype, 'play').mockImplementation(() => {});
+ jest.spyOn(window.HTMLMediaElement.prototype, 'canPlayType').mockReturnValue('maybe');
});
afterAll(jest.restoreAllMocks);
@@ -68,19 +75,19 @@ describe('VoiceRecordingPlayer', () => {
expect(queryByTestId('play-audio')).toBeInTheDocument();
expect(queryByTestId('pause-audio')).not.toBeInTheDocument();
});
- it('should render pause button when playing', () => {
+ it('should render pause button when playing', async () => {
const { queryByTestId } = renderComponent({ attachment });
- fireEvent.click(queryByTestId('play-audio'));
+ await clickPlay();
expect(queryByTestId('play-audio')).not.toBeInTheDocument();
expect(queryByTestId('pause-audio')).toBeInTheDocument();
});
- it('should render playback rate button only when playing', () => {
+ it('should render playback rate button only when playing', async () => {
const { queryByTestId } = renderComponent({ attachment });
expect(queryByTestId('playback-rate-button')).not.toBeInTheDocument();
- fireEvent.click(queryByTestId('play-audio'));
+ await clickPlay();
expect(queryByTestId('playback-rate-button')).toHaveTextContent('1.0x');
});
- it('should use custom playback rates', () => {
+ it('should use custom playback rates', async () => {
const { queryByTestId } = renderComponent(
{
attachment: { ...attachment },
@@ -89,10 +96,10 @@ describe('VoiceRecordingPlayer', () => {
VoiceRecordingPlayer,
);
expect(queryByTestId('playback-rate-button')).not.toBeInTheDocument();
- fireEvent.click(queryByTestId('play-audio'));
+ await clickPlay();
expect(queryByTestId('playback-rate-button')).toHaveTextContent('2.5x');
});
- it('should switch playback rates in round robin', () => {
+ it('should switch playback rates in round robin', async () => {
const { queryByTestId } = renderComponent(
{
attachment: { ...attachment },
@@ -101,7 +108,7 @@ describe('VoiceRecordingPlayer', () => {
VoiceRecordingPlayer,
);
expect(queryByTestId('playback-rate-button')).not.toBeInTheDocument();
- fireEvent.click(queryByTestId('play-audio'));
+ await clickPlay();
const playbackRateButton = queryByTestId('playback-rate-button');
expect(playbackRateButton).toHaveTextContent('2.5x');
act(() => {
diff --git a/src/components/Attachment/__tests__/WaveProgressBar.test.js b/src/components/Attachment/__tests__/WaveProgressBar.test.js
index 4f739ac65..ee6b47eb4 100644
--- a/src/components/Attachment/__tests__/WaveProgressBar.test.js
+++ b/src/components/Attachment/__tests__/WaveProgressBar.test.js
@@ -1,100 +1,38 @@
import React from 'react';
import { render, screen } from '@testing-library/react';
import '@testing-library/jest-dom';
-import { downSample, upSample, WaveProgressBar } from '../components';
+import { WaveProgressBar } from '../components';
jest.spyOn(console, 'warn').mockImplementation();
const originalSample = Array.from({ length: 10 }, (_, i) => i);
const PROGRESS_INDICATOR_TEST_ID = 'wave-progress-bar-progress-indicator';
+
describe('WaveProgressBar', () => {
- describe('component', () => {
- it('is not rendered if waveform data is missing', () => {
- render(
);
- expect(screen.queryByTestId('wave-progress-bar-track')).not.toBeInTheDocument();
- });
- it('is rendered with zero progress by default if waveform data is available', () => {
- const { container } = render(
-
,
- );
- expect(container).toMatchSnapshot();
- expect(screen.queryByTestId(PROGRESS_INDICATOR_TEST_ID)).toBeInTheDocument();
- });
- it('is rendered with highlighted bars with non-zero progress', () => {
- const { container } = render(
-
,
- );
- expect(
- container.querySelectorAll('.str-chat__wave-progress-bar__amplitude-bar--active'),
- ).toHaveLength(1);
- expect(screen.queryByTestId(PROGRESS_INDICATOR_TEST_ID)).toBeInTheDocument();
- expect(screen.queryByTestId(PROGRESS_INDICATOR_TEST_ID)).toHaveStyle('left: 20%');
- });
+ it('is not rendered if waveform data is missing', () => {
+ render(
);
+ expect(screen.queryByTestId('wave-progress-bar-track')).not.toBeInTheDocument();
});
-
- describe('amplitude sampling', () => {
- describe('upSample', () => {
- afterEach(jest.restoreAllMocks);
- it('should return original values if target size is smaller than the original sample size', () => {
- expect(upSample(originalSample, 5)).toHaveLength(originalSample.length);
- });
-
- it('should return original values if the original sample size is empty', () => {
- expect(upSample([], 5)).toHaveLength(0);
- });
-
- it('should return original values if the original sample size equals the target', () => {
- expect(upSample(originalSample, originalSample.length)).toHaveLength(originalSample.length);
- });
-
- it('should fill each bucket to reach the target sample size', () => {
- expect(JSON.stringify(upSample(originalSample, 17))).toBe(
- JSON.stringify([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 9]),
- );
- });
- });
-
- describe('downSample', () => {
- it('should return original values if target size is greater than the original sample size', () => {
- expect(downSample(originalSample, 20)).toHaveLength(originalSample.length);
- });
-
- it('should return original values if the original sample size is empty', () => {
- expect(downSample([], 5)).toHaveLength(0);
- });
-
- it('should return original values if the original sample size equals the target', () => {
- expect(downSample(originalSample, originalSample.length)).toHaveLength(
- originalSample.length,
- );
- });
-
- it('should return a mean of original values if the target output size is 1', () => {
- expect(JSON.stringify(downSample([10, 2, 6, 10, 3, 4, 8, 0], 1))).toBe(
- JSON.stringify([5.375]),
- );
- });
-
- it('should fill each bucket to reach the target sample size', () => {
- expect(
- JSON.stringify(
- downSample(
- [10, 2, 6, 10, 3, 4, 8, 1, 10, 0, 6, 10, 3, 4, 8, 1, 2, 6, 10, 3, 8, 10, 0],
- 7,
- ),
- ),
- ).toBe(JSON.stringify([10, 2, 10, 0, 8, 10, 0]));
- expect(JSON.stringify(downSample([10, 2, 6, 10, 3, 4, 8, 0], 7))).toBe(
- JSON.stringify([10, 2, 6, 10, 3, 8, 0]),
- );
- expect(JSON.stringify(downSample([10, 2], 2))).toBe(JSON.stringify([10, 2]));
- expect(JSON.stringify(downSample([10, 2, 10], 2))).toBe(JSON.stringify([10, 10]));
- });
- });
+ it('is rendered with zero progress by default if waveform data is available', () => {
+ const { container } = render(
+
,
+ );
+ expect(container).toMatchSnapshot();
+ expect(screen.queryByTestId(PROGRESS_INDICATOR_TEST_ID)).toBeInTheDocument();
+ });
+ it('is rendered with highlighted bars with non-zero progress', () => {
+ const { container } = render(
+
,
+ );
+ expect(
+ container.querySelectorAll('.str-chat__wave-progress-bar__amplitude-bar--active'),
+ ).toHaveLength(1);
+ expect(screen.queryByTestId(PROGRESS_INDICATOR_TEST_ID)).toBeInTheDocument();
+ expect(screen.queryByTestId(PROGRESS_INDICATOR_TEST_ID)).toHaveStyle('left: 20%');
});
});
diff --git a/src/components/Attachment/__tests__/__snapshots__/WaveProgressBar.test.js.snap b/src/components/Attachment/__tests__/__snapshots__/WaveProgressBar.test.js.snap
index 3e9069a99..a9bcd2ce5 100644
--- a/src/components/Attachment/__tests__/__snapshots__/WaveProgressBar.test.js.snap
+++ b/src/components/Attachment/__tests__/__snapshots__/WaveProgressBar.test.js.snap
@@ -1,6 +1,6 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
-exports[`WaveProgressBar component is rendered with zero progress by default if waveform data is available 1`] = `
+exports[`WaveProgressBar is rendered with zero progress by default if waveform data is available 1`] = `
i);
+
+describe('amplitude sampling', () => {
+ describe('upSample', () => {
+ afterEach(jest.restoreAllMocks);
+ it('should return original values if target size is smaller than the original sample size', () => {
+ expect(upSample(originalSample, 5)).toHaveLength(originalSample.length);
+ });
+
+ it('should return original values if the original sample size is empty', () => {
+ expect(upSample([], 5)).toHaveLength(0);
+ });
+
+ it('should return original values if the original sample size equals the target', () => {
+ expect(upSample(originalSample, originalSample.length)).toHaveLength(originalSample.length);
+ });
+
+ it('should fill each bucket to reach the target sample size', () => {
+ expect(JSON.stringify(upSample(originalSample, 17))).toBe(
+ JSON.stringify([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 9]),
+ );
+ });
+ });
+
+ describe('downSample', () => {
+ it('should return original values if target size is greater than the original sample size', () => {
+ expect(downSample(originalSample, 20)).toHaveLength(originalSample.length);
+ });
+
+ it('should return original values if the original sample size is empty', () => {
+ expect(downSample([], 5)).toHaveLength(0);
+ });
+
+ it('should return original values if the original sample size equals the target', () => {
+ expect(downSample(originalSample, originalSample.length)).toHaveLength(originalSample.length);
+ });
+
+ it('should return a mean of original values if the target output size is 1', () => {
+ expect(JSON.stringify(downSample([10, 2, 6, 10, 3, 4, 8, 0], 1))).toBe(
+ JSON.stringify([5.375]),
+ );
+ });
+
+ it('should fill each bucket to reach the target sample size', () => {
+ expect(
+ JSON.stringify(
+ downSample(
+ [10, 2, 6, 10, 3, 4, 8, 1, 10, 0, 6, 10, 3, 4, 8, 1, 2, 6, 10, 3, 8, 10, 0],
+ 7,
+ ),
+ ),
+ ).toBe(JSON.stringify([10, 2, 10, 0, 8, 10, 0]));
+ expect(JSON.stringify(downSample([10, 2, 6, 10, 3, 4, 8, 0], 7))).toBe(
+ JSON.stringify([10, 2, 6, 10, 3, 8, 0]),
+ );
+ expect(JSON.stringify(downSample([10, 2], 2))).toBe(JSON.stringify([10, 2]));
+ expect(JSON.stringify(downSample([10, 2, 10], 2))).toBe(JSON.stringify([10, 10]));
+ });
+ });
+});
diff --git a/src/components/Attachment/audioSampling.ts b/src/components/Attachment/audioSampling.ts
new file mode 100644
index 000000000..ff8b7638d
--- /dev/null
+++ b/src/components/Attachment/audioSampling.ts
@@ -0,0 +1,106 @@
+import { divMod } from './utils';
+
+export const resampleWaveformData = (waveformData: number[], amplitudesCount: number) =>
+ waveformData.length === amplitudesCount
+ ? waveformData
+ : waveformData.length > amplitudesCount
+ ? downSample(waveformData, amplitudesCount)
+ : upSample(waveformData, amplitudesCount);
+
+/**
+ * The downSample function uses the Largest-Triangle-Three-Buckets (LTTB) algorithm.
+ * See the thesis Downsampling Time Series for Visual Representation by Sveinn Steinarsson for more (https://skemman.is/bitstream/1946/15343/3/SS_MSthesis.pdf)
+ * @param data
+ * @param targetOutputSize
+ */
+export function downSample(data: number[], targetOutputSize: number): number[] {
+ if (data.length <= targetOutputSize || targetOutputSize === 0) {
+ return data;
+ }
+
+ if (targetOutputSize === 1) return [mean(data)];
+
+ const result: number[] = [];
+ // bucket size adjusted due to the fact that the first and the last item in the original data array is kept in target output
+ const bucketSize = (data.length - 2) / (targetOutputSize - 2);
+ let lastSelectedPointIndex = 0;
+ result.push(data[lastSelectedPointIndex]); // Always add the first point
+ let maxAreaPoint, maxArea, triangleArea;
+
+ for (let bucketIndex = 1; bucketIndex < targetOutputSize - 1; bucketIndex++) {
+ const previousBucketRefPoint = data[lastSelectedPointIndex];
+ const nextBucketMean = getNextBucketMean(data, bucketIndex, bucketSize);
+
+ const currentBucketStartIndex = Math.floor((bucketIndex - 1) * bucketSize) + 1;
+ const nextBucketStartIndex = Math.floor(bucketIndex * bucketSize) + 1;
+ const countUnitsBetweenAtoC = 1 + nextBucketStartIndex - currentBucketStartIndex;
+
+ maxArea = triangleArea = -1;
+
+ for (
+ let currentPointIndex = currentBucketStartIndex;
+ currentPointIndex < nextBucketStartIndex;
+ currentPointIndex++
+ ) {
+ const countUnitsBetweenAtoB = Math.abs(currentPointIndex - currentBucketStartIndex) + 1;
+ const countUnitsBetweenBtoC = countUnitsBetweenAtoC - countUnitsBetweenAtoB;
+ const currentPointValue = data[currentPointIndex];
+
+ triangleArea = triangleAreaHeron(
+ triangleBase(Math.abs(previousBucketRefPoint - currentPointValue), countUnitsBetweenAtoB),
+ triangleBase(Math.abs(currentPointValue - nextBucketMean), countUnitsBetweenBtoC),
+ triangleBase(Math.abs(previousBucketRefPoint - nextBucketMean), countUnitsBetweenAtoC),
+ );
+
+ if (triangleArea > maxArea) {
+ maxArea = triangleArea;
+ maxAreaPoint = data[currentPointIndex];
+ lastSelectedPointIndex = currentPointIndex;
+ }
+ }
+
+ if (typeof maxAreaPoint !== 'undefined') result.push(maxAreaPoint);
+ }
+
+ result.push(data[data.length - 1]); // Always add the last point
+
+ return result;
+}
+
+const triangleAreaHeron = (a: number, b: number, c: number) => {
+ const s = (a + b + c) / 2;
+ return Math.sqrt(s * (s - a) * (s - b) * (s - c));
+};
+const triangleBase = (a: number, b: number) => Math.sqrt(Math.pow(a, 2) + Math.pow(b, 2));
+const mean = (values: number[]) => values.reduce((acc, value) => acc + value, 0) / values.length;
+const getNextBucketMean = (data: number[], currentBucketIndex: number, bucketSize: number) => {
+ const nextBucketStartIndex = Math.floor(currentBucketIndex * bucketSize) + 1;
+ let nextNextBucketStartIndex = Math.floor((currentBucketIndex + 1) * bucketSize) + 1;
+ nextNextBucketStartIndex =
+ nextNextBucketStartIndex < data.length ? nextNextBucketStartIndex : data.length;
+
+ return mean(data.slice(nextBucketStartIndex, nextNextBucketStartIndex));
+};
+export const upSample = (values: number[], targetSize: number) => {
+ if (!values.length) {
+ console.warn('Cannot extend empty array of amplitudes.');
+ return values;
+ }
+
+ if (values.length > targetSize) {
+ console.warn('Requested to extend the waveformData that is longer than the target list size');
+ return values;
+ }
+
+ if (targetSize === values.length) return values;
+
+ // eslint-disable-next-line prefer-const
+ let [bucketSize, remainder] = divMod(targetSize, values.length);
+ const result: number[] = [];
+
+ for (let i = 0; i < values.length; i++) {
+ const extra = remainder && remainder-- ? 1 : 0;
+ result.push(...Array(bucketSize + extra).fill(values[i]));
+ }
+ return result;
+};
diff --git a/src/components/Attachment/components/WaveProgressBar.tsx b/src/components/Attachment/components/WaveProgressBar.tsx
index 9d01905da..b34af75a9 100644
--- a/src/components/Attachment/components/WaveProgressBar.tsx
+++ b/src/components/Attachment/components/WaveProgressBar.tsx
@@ -1,10 +1,18 @@
-import React, { MouseEventHandler, useMemo, useRef, useState } from 'react';
+import React, {
+ PointerEventHandler,
+ useCallback,
+ useEffect,
+ useMemo,
+ useRef,
+ useState,
+} from 'react';
import clsx from 'clsx';
-import { divMod } from '../utils';
+import { resampleWaveformData } from '../audioSampling';
+import type { SeekFn } from '../hooks/useAudioController';
type WaveProgressBarProps = {
/** Function that allows to change the track progress */
- seek: MouseEventHandler;
+ seek: SeekFn;
/** The array of fractional number values between 0 and 1 representing the height of amplitudes */
waveformData: number[];
/** Allows to specify the number of bars into which the original waveformData array should be resampled */
@@ -12,6 +20,7 @@ type WaveProgressBarProps = {
/** Progress expressed in fractional number value btw 0 and 100. */
progress?: number;
};
+
export const WaveProgressBar = ({
amplitudesCount = 40,
progress = 0,
@@ -20,33 +29,39 @@ export const WaveProgressBar = ({
}: WaveProgressBarProps) => {
const [progressIndicator, setProgressIndicator] = useState(null);
const isDragging = useRef(false);
+ const rootRef = useRef(null);
- const handleMouseDown = () => {
+ const handleDragStart: PointerEventHandler = (e) => {
+ e.preventDefault();
if (!progressIndicator) return;
isDragging.current = true;
progressIndicator.style.cursor = 'grabbing';
};
- const handleMouseMove: MouseEventHandler = (event) => {
+ const handleDrag: PointerEventHandler = (e) => {
if (!isDragging.current) return;
- seek(event);
+ // Due to throttling of seek, it is necessary to create a copy (snapshot) of the event.
+ // Otherwise, the event would be nullified at the point when the throttled function is executed.
+ seek({ ...e });
};
- const handleMouseUp = () => {
+ const handleDragStop = useCallback(() => {
if (!progressIndicator) return;
isDragging.current = false;
progressIndicator.style.removeProperty('cursor');
- };
+ }, [progressIndicator]);
- const resampledWaveformData = useMemo(
- () =>
- waveformData.length === amplitudesCount
- ? waveformData
- : waveformData.length > amplitudesCount
- ? downSample(waveformData, amplitudesCount)
- : upSample(waveformData, amplitudesCount),
- [amplitudesCount, waveformData],
- );
+ const resampledWaveformData = useMemo(() => resampleWaveformData(waveformData, amplitudesCount), [
+ amplitudesCount,
+ waveformData,
+ ]);
+
+ useEffect(() => {
+ document.addEventListener('pointerup', handleDragStop);
+ return () => {
+ document.removeEventListener('pointerup', handleDragStop);
+ };
+ }, [handleDragStop]);
if (!waveformData.length) return null;
@@ -55,10 +70,10 @@ export const WaveProgressBar = ({
className='str-chat__wave-progress-bar__track'
data-testid='wave-progress-bar-track'
onClick={seek}
- onMouseDown={handleMouseDown}
- onMouseLeave={handleMouseUp}
- onMouseMove={handleMouseMove}
- onMouseUp={handleMouseUp}
+ onPointerDown={handleDragStart}
+ onPointerMove={handleDrag}
+ onPointerUp={handleDragStop}
+ ref={rootRef}
role='progressbar'
>
{resampledWaveformData.map((amplitude, i) => (
@@ -87,105 +102,3 @@ export const WaveProgressBar = ({
);
};
-
-/**
- * The downSample function uses the Largest-Triangle-Three-Buckets (LTTB) algorithm.
- * See the thesis Downsampling Time Series for Visual Representation by Sveinn Steinarsson for more (https://skemman.is/bitstream/1946/15343/3/SS_MSthesis.pdf)
- * @param data
- * @param targetOutputSize
- */
-export function downSample(data: number[], targetOutputSize: number): number[] {
- if (data.length <= targetOutputSize || targetOutputSize === 0) {
- return data;
- }
-
- if (targetOutputSize === 1) return [mean(data)];
-
- const result: number[] = [];
- // bucket size adjusted due to the fact that the first and the last item in the original data array is kept in target output
- const bucketSize = (data.length - 2) / (targetOutputSize - 2);
- let lastSelectedPointIndex = 0;
- result.push(data[lastSelectedPointIndex]); // Always add the first point
- let maxAreaPoint, maxArea, triangleArea;
-
- for (let bucketIndex = 1; bucketIndex < targetOutputSize - 1; bucketIndex++) {
- const previousBucketRefPoint = data[lastSelectedPointIndex];
- const nextBucketMean = getNextBucketMean(data, bucketIndex, bucketSize);
-
- const currentBucketStartIndex = Math.floor((bucketIndex - 1) * bucketSize) + 1;
- const nextBucketStartIndex = Math.floor(bucketIndex * bucketSize) + 1;
- const countUnitsBetweenAtoC = 1 + nextBucketStartIndex - currentBucketStartIndex;
-
- maxArea = triangleArea = -1;
-
- for (
- let currentPointIndex = currentBucketStartIndex;
- currentPointIndex < nextBucketStartIndex;
- currentPointIndex++
- ) {
- const countUnitsBetweenAtoB = Math.abs(currentPointIndex - currentBucketStartIndex) + 1;
- const countUnitsBetweenBtoC = countUnitsBetweenAtoC - countUnitsBetweenAtoB;
- const currentPointValue = data[currentPointIndex];
-
- triangleArea = triangleAreaHeron(
- triangleBase(Math.abs(previousBucketRefPoint - currentPointValue), countUnitsBetweenAtoB),
- triangleBase(Math.abs(currentPointValue - nextBucketMean), countUnitsBetweenBtoC),
- triangleBase(Math.abs(previousBucketRefPoint - nextBucketMean), countUnitsBetweenAtoC),
- );
-
- if (triangleArea > maxArea) {
- maxArea = triangleArea;
- maxAreaPoint = data[currentPointIndex];
- lastSelectedPointIndex = currentPointIndex;
- }
- }
-
- if (typeof maxAreaPoint !== 'undefined') result.push(maxAreaPoint);
- }
-
- result.push(data[data.length - 1]); // Always add the last point
-
- return result;
-}
-
-const triangleAreaHeron = (a: number, b: number, c: number) => {
- const s = (a + b + c) / 2;
- return Math.sqrt(s * (s - a) * (s - b) * (s - c));
-};
-
-const triangleBase = (a: number, b: number) => Math.sqrt(Math.pow(a, 2) + Math.pow(b, 2));
-
-const mean = (values: number[]) => values.reduce((acc, value) => acc + value, 0) / values.length;
-
-const getNextBucketMean = (data: number[], currentBucketIndex: number, bucketSize: number) => {
- const nextBucketStartIndex = Math.floor(currentBucketIndex * bucketSize) + 1;
- let nextNextBucketStartIndex = Math.floor((currentBucketIndex + 1) * bucketSize) + 1;
- nextNextBucketStartIndex =
- nextNextBucketStartIndex < data.length ? nextNextBucketStartIndex : data.length;
-
- return mean(data.slice(nextBucketStartIndex, nextNextBucketStartIndex));
-};
-
-export const upSample = (values: number[], targetSize: number) => {
- if (!values.length) {
- console.warn('Cannot extend empty array of amplitudes.');
- return values;
- }
-
- if (values.length > targetSize) {
- console.warn('Requested to extend the waveformData that is longer than the target list size');
- return values;
- }
-
- if (targetSize === values.length) return values;
-
- // eslint-disable-next-line prefer-const
- let [bucketSize, remainder] = divMod(targetSize, values.length);
- const result: number[] = [];
-
- for (let i = 0; i < values.length; i++) {
- const extra = remainder && remainder-- ? 1 : 0;
- result.push(...Array(bucketSize + extra).fill(values[i]));
- }
- return result;
-};
diff --git a/src/components/Attachment/hooks/useAudioController.ts b/src/components/Attachment/hooks/useAudioController.ts
index 7bb6d702d..206c28295 100644
--- a/src/components/Attachment/hooks/useAudioController.ts
+++ b/src/components/Attachment/hooks/useAudioController.ts
@@ -1,40 +1,86 @@
-import { useCallback, useEffect, useRef, useState } from 'react';
+import throttle from 'lodash.throttle';
+import { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { useChannelActionContext, useTranslationContext } from '../../../context';
+const isSeekable = (audioElement: HTMLAudioElement) =>
+ !(audioElement.duration === Infinity || isNaN(audioElement.duration));
+
export const elementIsPlaying = (audioElement: HTMLAudioElement | null) =>
audioElement && !(audioElement.paused || audioElement.ended);
+const logError = (e: Error) => console.error('[AUDIO PLAYER]', e);
+
const DEFAULT_PLAYBACK_RATES = [1.0, 1.5, 2.0];
+export type SeekFn = (params: { clientX: number; currentTarget: HTMLDivElement }) => void;
+
type AudioControllerParams = {
/** Audio duration in seconds. */
durationSeconds?: number;
+ /** The audio MIME type that is checked before the audio is played. If the type is not supported the controller registers error in playbackError. */
+ mimeType?: string;
/** An array of fractional numeric values of playback speed to override the defaults (1.0, 1.5, 2.0) */
playbackRates?: number[];
};
export const useAudioController = ({
durationSeconds,
+ mimeType,
playbackRates = DEFAULT_PLAYBACK_RATES,
}: AudioControllerParams = {}) => {
const { addNotification } = useChannelActionContext('useAudioController');
const { t } = useTranslationContext('useAudioController');
const [isPlaying, setIsPlaying] = useState(false);
+ const [playbackError, setPlaybackError] = useState
();
+ const [canPlayRecord, setCanPlayRecord] = useState(true);
const [secondsElapsed, setSecondsElapsed] = useState(0);
const [playbackRateIndex, setPlaybackRateIndex] = useState(0);
-
+ const playTimeout = useRef>();
const audioRef = useRef<HTMLAudioElement | null>(null);
- const togglePlay = useCallback(() => {
+ const registerError = useCallback(
+ (e: Error) => {
+ logError(e as Error);
+ setPlaybackError(e);
+ addNotification(e.message, 'error');
+ },
+ [addNotification],
+ );
+
+ const togglePlay = useCallback(async () => {
if (!audioRef.current) return;
- if (!elementIsPlaying(audioRef.current)) {
- audioRef.current.play();
- setIsPlaying(true);
- } else {
+ clearTimeout(playTimeout.current);
+ playTimeout.current = undefined;
+ if (mimeType && !audioRef.current.canPlayType(mimeType)) {
+ registerError(new Error(t('Recording format is not supported and cannot be reproduced')));
+ setCanPlayRecord(false);
+ return;
+ }
+ if (elementIsPlaying(audioRef.current)) {
audioRef.current.pause();
setIsPlaying(false);
+ } else {
+ playTimeout.current = setTimeout(() => {
+ if (!audioRef.current) return;
+ try {
+ audioRef.current.pause();
+ } catch (e) {
+ registerError(new Error(t('Failed to play the recording')));
+ }
+ }, 2000);
+
+ try {
+ await audioRef.current.play();
+ setIsPlaying(true);
+ } catch (e) {
+ registerError(e as Error);
+ setIsPlaying(false);
+ } finally {
+ clearTimeout(playTimeout.current);
+ playTimeout.current = undefined;
+ }
}
- }, []);
+ }, [mimeType, registerError, t]);
const increasePlaybackRate = () => {
setPlaybackRateIndex((prev) => {
@@ -45,19 +91,24 @@ export const useAudioController = ({
});
};
- const seek = useCallback<React.MouseEventHandler<HTMLDivElement>>(
- ({ clientX, currentTarget }) => {
- if (!audioRef.current) return;
-
- const { width, x } = currentTarget.getBoundingClientRect();
-
- const ratio = (clientX - x) / width;
-
- const currentTime = ratio * audioRef.current.duration;
- setSecondsElapsed(currentTime);
- audioRef.current.currentTime = currentTime;
- },
- [],
+ const seek = useMemo(
+ () =>
+ throttle(({ clientX, currentTarget }) => {
+ if (!(currentTarget && audioRef.current)) return;
+ if (!isSeekable(audioRef.current)) {
+ registerError(new Error(t('Cannot seek in the recording')));
+ return;
+ }
+
+ const { width, x } = currentTarget.getBoundingClientRect();
+
+ const ratio = (clientX - x) / width;
+ if (ratio > 1 || ratio < 0) return;
+ const currentTime = ratio * audioRef.current.duration;
+ setSecondsElapsed(currentTime);
+ audioRef.current.currentTime = currentTime;
+ }, 16),
+ [registerError, t],
);
useEffect(() => {
@@ -72,6 +123,7 @@ export const useAudioController = ({
const handleError = () => {
addNotification(t('Error reproducing the recording'), 'error');
+ setIsPlaying(false);
};
audioElement.addEventListener('error', handleError);
@@ -90,8 +142,10 @@ export const useAudioController = ({
return {
audioRef,
+ canPlayRecord,
increasePlaybackRate,
isPlaying,
+ playbackError,
playbackRate: playbackRates[playbackRateIndex],
progress:
audioRef.current && secondsElapsed ? (secondsElapsed / audioRef.current.duration) * 100 : 0,
diff --git a/src/components/Attachment/index.ts b/src/components/Attachment/index.ts
index 56f4b671b..f1385fe7d 100644
--- a/src/components/Attachment/index.ts
+++ b/src/components/Attachment/index.ts
@@ -2,6 +2,7 @@ export * from './Attachment';
export * from './AttachmentActions';
export * from './AttachmentContainer';
export * from './Audio';
+export * from './audioSampling';
export * from './Card';
export * from './components';
export * from './UnsupportedAttachment';
diff --git a/src/components/Attachment/utils.tsx b/src/components/Attachment/utils.tsx
index f3cb6b283..af6b0a46e 100644
--- a/src/components/Attachment/utils.tsx
+++ b/src/components/Attachment/utils.tsx
@@ -11,6 +11,7 @@ import { Gallery as DefaultGallery, ImageComponent as DefaultImage } from '../Ga
import type { Attachment } from 'stream-chat';
import type { ATTACHMENT_GROUPS_ORDER, AttachmentProps } from './Attachment';
import type { DefaultStreamChatGenerics } from '../../types/types';
+import type { LocalAttachment, VoiceRecordingAttachment } from '../MessageInput';
export const SUPPORTED_VIDEO_FORMATS = ['video/mp4', 'video/ogg', 'video/webm', 'video/quicktime'];
@@ -65,14 +66,20 @@ export const isGalleryAttachmentType = <
export const isAudioAttachment = <
StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics
>(
- attachment: Attachment,
+ attachment: Attachment | LocalAttachment,
) => attachment.type === 'audio';
export const isVoiceRecordingAttachment = <
StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics
>(
- attachment: Attachment,
-) => attachment.type === 'voiceRecording';
+ attachment: Attachment | LocalAttachment,
+): attachment is VoiceRecordingAttachment => attachment.type === 'voiceRecording';
+
+export const isLocalAttachment = <
+ StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics
+>(
+ attachment: LocalAttachment,
+): attachment is LocalAttachment => !!attachment.$internal;
export const isFileAttachment = <
StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics
@@ -87,7 +94,7 @@ export const isFileAttachment = <
export const isMediaAttachment = <
StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics
>(
- attachment: Attachment,
+ attachment: Attachment | LocalAttachment,
) =>
(attachment.mime_type && SUPPORTED_VIDEO_FORMATS.indexOf(attachment.mime_type) !== -1) ||
attachment.type === 'video';
diff --git a/src/components/Channel/Channel.tsx b/src/components/Channel/Channel.tsx
index b93d1ca8b..713d7e468 100644
--- a/src/components/Channel/Channel.tsx
+++ b/src/components/Channel/Channel.tsx
@@ -104,8 +104,10 @@ type ChannelPropsForwardedToComponentContext<
> = {
/** Custom UI component to display a message attachment, defaults to and accepts same props as: [Attachment](https://github.com/GetStream/stream-chat-react/blob/master/src/components/Attachment/Attachment.tsx) */
Attachment?: ComponentContextValue['Attachment'];
- /** Custom UI component to display a attachment previews in MessageInput, defaults to and accepts same props as: [Attachment](https://github.com/GetStream/stream-chat-react/blob/master/src/components/MessageInput/AttachmentPreviewList.tsx) */
+ /** Custom UI component to display an attachment previews in MessageInput, defaults to and accepts same props as: [Attachment](https://github.com/GetStream/stream-chat-react/blob/master/src/components/MessageInput/AttachmentPreviewList.tsx) */
AttachmentPreviewList?: ComponentContextValue['AttachmentPreviewList'];
+ /** Custom UI component to display AudioRecorder in MessageInput, defaults to and accepts same props as: [AudioRecorder](https://github.com/GetStream/stream-chat-react/blob/master/src/components/MessageInput/AudioRecorder.tsx) */
+ AudioRecorder?: ComponentContextValue['AudioRecorder'];
/** Optional UI component to override the default suggestion Header component, defaults to and accepts same props as: [Header](https://github.com/GetStream/stream-chat-react/blob/master/src/components/AutoCompleteTextarea/Header.tsx) */
AutocompleteSuggestionHeader?: ComponentContextValue['AutocompleteSuggestionHeader'];
/** Optional UI component to override the default suggestion Item component, defaults to and accepts same props as: [Item](https://github.com/GetStream/stream-chat-react/blob/master/src/components/AutoCompleteTextarea/Item.js) */
@@ -180,6 +182,8 @@ type ChannelPropsForwardedToComponentContext<
ReactionsList?: ComponentContextValue['ReactionsList'];
/** Custom UI component for send button, defaults to and accepts same props as: [SendButton](https://github.com/GetStream/stream-chat-react/blob/master/src/components/MessageInput/icons.tsx) */
SendButton?: ComponentContextValue['SendButton'];
+ /** Custom UI component button for initiating audio recording, defaults to and accepts same props as: [StartRecordingAudioButton](https://github.com/GetStream/stream-chat-react/blob/master/src/components/MediaRecorder/AudioRecorder/AudioRecordingButtons.tsx) */
+ StartRecordingAudioButton?: ComponentContextValue['StartRecordingAudioButton'];
/** Custom UI component that displays thread's parent or other message at the top of the `MessageList`, defaults to and accepts same props as [MessageSimple](https://github.com/GetStream/stream-chat-react/blob/master/src/components/Message/MessageSimple.tsx) */
ThreadHead?: React.ComponentType<MessageProps<StreamChatGenerics>>;
/** Custom UI component to display the header of a `Thread`, defaults to and accepts same props as: [DefaultThreadHeader](https://github.com/GetStream/stream-chat-react/blob/master/src/components/Thread/Thread.tsx) */
@@ -1244,6 +1248,7 @@ const ChannelInner = <
() => ({
Attachment: props.Attachment || DefaultAttachment,
AttachmentPreviewList: props.AttachmentPreviewList,
+ AudioRecorder: props.AudioRecorder,
AutocompleteSuggestionHeader: props.AutocompleteSuggestionHeader,
AutocompleteSuggestionItem: props.AutocompleteSuggestionItem,
AutocompleteSuggestionList: props.AutocompleteSuggestionList,
@@ -1280,6 +1285,7 @@ const ChannelInner = <
ReactionSelector: props.ReactionSelector,
ReactionsList: props.ReactionsList,
SendButton: props.SendButton,
+ StartRecordingAudioButton: props.StartRecordingAudioButton,
ThreadHead: props.ThreadHead,
ThreadHeader: props.ThreadHeader,
ThreadStart: props.ThreadStart,
diff --git a/src/components/MediaRecorder/AudioRecorder/AudioRecorder.tsx b/src/components/MediaRecorder/AudioRecorder/AudioRecorder.tsx
new file mode 100644
index 000000000..18b337c35
--- /dev/null
+++ b/src/components/MediaRecorder/AudioRecorder/AudioRecorder.tsx
@@ -0,0 +1,83 @@
+import React from 'react';
+import { AudioRecordingPreview } from './AudioRecordingPreview';
+import { AudioRecordingInProgress } from './AudioRecordingInProgress';
+import { MediaRecordingState } from '../classes';
+import {
+ BinIcon,
+ CheckSignIcon,
+ LoadingIndicatorIcon,
+ MicIcon,
+ PauseIcon,
+ SendIconV2,
+} from '../../MessageInput';
+import { useMessageInputContext } from '../../../context';
+
+export const AudioRecorder = () => {
+ const {
+ recordingController: { completeRecording, recorder, recording, recordingState },
+ } = useMessageInputContext();
+
+ const isUploadingFile = recording?.$internal?.uploadState === 'uploading';
+
+ if (!recorder) return null;
+
+ return (
+
+
+
+
+
+
+ {recording?.asset_url ? (
+
+ ) : (
+
+ )}
+
+ {recordingState === MediaRecordingState.PAUSED && (
+
+
+
+ )}
+ {recordingState === MediaRecordingState.RECORDING && (
+
+
+
+ )}
+ {recordingState === MediaRecordingState.STOPPED ? (
+
+ {isUploadingFile ? : }
+
+ ) : (
+
+
+
+ )}
+
+
+ );
+};
diff --git a/src/components/MediaRecorder/AudioRecorder/AudioRecordingButtons.tsx b/src/components/MediaRecorder/AudioRecorder/AudioRecordingButtons.tsx
new file mode 100644
index 000000000..45d35aa99
--- /dev/null
+++ b/src/components/MediaRecorder/AudioRecorder/AudioRecordingButtons.tsx
@@ -0,0 +1,15 @@
+import React from 'react';
+import { MicIcon } from '../../MessageInput/icons';
+
+export type StartRecordingAudioButtonProps = React.ComponentProps<'button'>;
+
+export const StartRecordingAudioButton = (props: StartRecordingAudioButtonProps) => (
+
+
+
+);
diff --git a/src/components/MediaRecorder/AudioRecorder/AudioRecordingInProgress.tsx b/src/components/MediaRecorder/AudioRecorder/AudioRecordingInProgress.tsx
new file mode 100644
index 000000000..3ec3ce769
--- /dev/null
+++ b/src/components/MediaRecorder/AudioRecorder/AudioRecordingInProgress.tsx
@@ -0,0 +1,75 @@
+import React, { useEffect, useState } from 'react';
+import { useTimeElapsed } from '../../MessageInput/hooks/useTimeElapsed';
+import { useMessageInputContext } from '../../../context';
+import { RecordingTimer } from './RecordingTimer';
+
+type WaveformProps = {
+ maxDataPointsDrawn?: number;
+};
+
+const AudioRecordingWaveform = ({ maxDataPointsDrawn = 100 }: WaveformProps) => {
+ const {
+ recordingController: { recorder },
+ } = useMessageInputContext();
+
+ const [amplitudes, setAmplitudes] = useState<number[]>([]);
+
+ useEffect(() => {
+ if (!recorder?.amplitudeRecorder) return;
+ const amplitudesSubscription = recorder.amplitudeRecorder.amplitudes.subscribe(setAmplitudes);
+ return () => {
+ amplitudesSubscription.unsubscribe();
+ };
+ }, [recorder]);
+
+ if (!recorder) return null;
+
+ return (
+
+ {amplitudes.slice(-maxDataPointsDrawn).map((amplitude, i) => (
+
+ ))}
+
+ );
+};
+export const AudioRecordingInProgress = () => {
+ const { secondsElapsed, startCounter, stopCounter } = useTimeElapsed();
+ const {
+ recordingController: { recorder },
+ } = useMessageInputContext();
+
+ useEffect(() => {
+ if (!recorder?.mediaRecorder) return;
+
+ const { mediaRecorder } = recorder;
+ mediaRecorder.addEventListener('start', startCounter);
+ mediaRecorder.addEventListener('resume', startCounter);
+ mediaRecorder.addEventListener('stop', stopCounter);
+ mediaRecorder.addEventListener('pause', stopCounter);
+
+ return () => {
+ mediaRecorder.removeEventListener('start', startCounter);
+ mediaRecorder.removeEventListener('resume', startCounter);
+ mediaRecorder.removeEventListener('stop', stopCounter);
+ mediaRecorder.removeEventListener('pause', stopCounter);
+ };
+ }, [recorder, startCounter, stopCounter]);
+ return (
+
+
+
+
+ );
+};
diff --git a/src/components/MediaRecorder/AudioRecorder/AudioRecordingPreview.tsx b/src/components/MediaRecorder/AudioRecorder/AudioRecordingPreview.tsx
new file mode 100644
index 000000000..099dfaf3c
--- /dev/null
+++ b/src/components/MediaRecorder/AudioRecorder/AudioRecordingPreview.tsx
@@ -0,0 +1,40 @@
+import React from 'react';
+import { PauseIcon, PlayIcon } from '../../MessageInput/icons';
+import { RecordingTimer } from './RecordingTimer';
+import { useAudioController } from '../../Attachment/hooks/useAudioController';
+import { WaveProgressBar } from '../../Attachment';
+
+export type AudioRecordingPlayerProps = React.ComponentProps<'audio'> & {
+ durationSeconds: number;
+ mimeType?: string;
+ waveformData?: number[];
+};
+
+export const AudioRecordingPreview = ({
+ durationSeconds,
+ mimeType,
+ waveformData,
+ ...props
+}: AudioRecordingPlayerProps) => {
+ const { audioRef, isPlaying, progress, secondsElapsed, seek, togglePlay } = useAudioController({
+ durationSeconds,
+ mimeType,
+ });
+
+ const displayedDuration = secondsElapsed || durationSeconds;
+
+ return (
+
+
+
+
+
+ {isPlaying ? : }
+
+
+
+
+
+
+ );
+};
diff --git a/src/components/MediaRecorder/AudioRecorder/RecordingTimer.tsx b/src/components/MediaRecorder/AudioRecorder/RecordingTimer.tsx
new file mode 100644
index 000000000..b3d47b98a
--- /dev/null
+++ b/src/components/MediaRecorder/AudioRecorder/RecordingTimer.tsx
@@ -0,0 +1,17 @@
+import clsx from 'clsx';
+import { displayDuration } from '../../Attachment';
+import React from 'react';
+
+export type RecordingTimerProps = {
+ durationSeconds: number;
+};
+
+export const RecordingTimer = ({ durationSeconds }: RecordingTimerProps) => (
+  <div
+    className={clsx('str-chat__recording-timer', {
+      'str-chat__recording-timer--hours': durationSeconds >= 3600,
+    })}
+  >
+    {displayDuration(durationSeconds)}
+  </div>
+);
diff --git a/src/components/MediaRecorder/AudioRecorder/__tests__/AudioRecorder.test.js b/src/components/MediaRecorder/AudioRecorder/__tests__/AudioRecorder.test.js
new file mode 100644
index 000000000..b96edec21
--- /dev/null
+++ b/src/components/MediaRecorder/AudioRecorder/__tests__/AudioRecorder.test.js
@@ -0,0 +1,322 @@
+import React, { useEffect } from 'react';
+import { act, fireEvent, render, screen } from '@testing-library/react';
+import '@testing-library/jest-dom';
+import * as transcoder from '../../transcode';
+
+import { MessageInput, MessageInputFlat } from '../../../MessageInput';
+import {
+ ChannelActionProvider,
+ ChannelStateProvider,
+ ChatProvider,
+ ComponentProvider,
+ useMessageInputContext,
+} from '../../../../context';
+import {
+ generateAudioAttachment,
+ generateFileAttachment,
+ generateImageAttachment,
+ generateVideoAttachment,
+ generateVoiceRecordingAttachment,
+ initClientWithChannels,
+} from '../../../../mock-builders';
+
+import '../../../../mock-builders/browser/HTMLMediaElement';
+import {
+ AnalyserNodeMock,
+ AudioContextMock,
+ EventEmitterMock,
+ MediaRecorderMock,
+} from '../../../../mock-builders/browser';
+import { generateDataavailableEvent } from '../../../../mock-builders/browser/events/dataavailable';
+
+const PERM_DENIED_NOTIFICATION_TEXT =
+ 'To start recording, allow the microphone access in your browser';
+
+const START_RECORDING_AUDIO_BUTTON_TEST_ID = 'start-recording-audio-button';
+const AUDIO_RECORDER_TEST_ID = 'audio-recorder';
+const AUDIO_RECORDER_STOP_BTN_TEST_ID = 'audio-recorder-stop-button';
+const AUDIO_RECORDER_COMPLETE_BTN_TEST_ID = 'audio-recorder-complete-button';
+
+const CSS_THEME_VERSION = '2';
+const DEFAULT_RENDER_PARAMS = {
+ channelActionCtx: {
+ addNotification: jest.fn(),
+ },
+ channelStateCtx: {},
+ chatCtx: {
+ getAppSettings: jest.fn().mockReturnValue({}),
+ latestMessageDatesByChannels: {},
+ },
+ componentCtx: {},
+};
+
+const renderComponent = async ({
+ channelActionCtx,
+ channelStateCtx,
+ chatCtx,
+ componentCtx,
+ props,
+} = {}) => {
+ const {
+ channels: [channel],
+ client,
+ } = await initClientWithChannels();
+ let result;
+ await act(() => {
+ result = render(
+
+
+
+
+
+
+
+
+ ,
+ );
+ });
+ return result;
+};
+
+const nanoidMockValue = 'randomNanoId';
+jest.mock('nanoid', () => ({
+ nanoid: () => nanoidMockValue,
+}));
+
+jest.mock('fix-webm-duration', () => jest.fn((blob) => blob));
+
+jest
+ .spyOn(transcoder, 'transcode')
+ .mockImplementation((opts) =>
+ Promise.resolve(new Blob([opts.blob], { type: opts.targetMimeType })),
+ );
+
+window.navigator.permissions = {
+ query: jest.fn(),
+};
+
+// eslint-disable-next-line
+window.MediaRecorder = MediaRecorderMock;
+
+// eslint-disable-next-line
+window.AudioContext = AudioContextMock;
+
+// eslint-disable-next-line
+window.AnalyserNode = AnalyserNodeMock;
+
+const fileObjectURL = 'fileObjectURL';
+// eslint-disable-next-line
+window.URL.createObjectURL = jest.fn(() => fileObjectURL);
+// eslint-disable-next-line
+window.URL.revokeObjectURL = jest.fn();
+
+describe('MessageInput', () => {
+ beforeEach(() => {
+ window.navigator.mediaDevices = {
+ getUserMedia: jest.fn().mockResolvedValue({}),
+ };
+ });
+ afterEach(jest.clearAllMocks);
+
+ it('does not render start recording button if disabled', async () => {
+ await renderComponent({ props: { audioRecordingEnabled: false } });
+ expect(screen.queryByTestId(START_RECORDING_AUDIO_BUTTON_TEST_ID)).not.toBeInTheDocument();
+ });
+
+ it('does not render start recording button if navigator.mediaDevices is undefined', async () => {
+ window.navigator.mediaDevices = undefined;
+ await renderComponent();
+ expect(screen.queryByTestId(START_RECORDING_AUDIO_BUTTON_TEST_ID)).not.toBeInTheDocument();
+ });
+
+ it('renders start recording button when enabled and message input is empty', async () => {
+ await renderComponent();
+ const btn = screen.queryByTestId(START_RECORDING_AUDIO_BUTTON_TEST_ID);
+ expect(btn).toBeInTheDocument();
+ expect(btn).toBeEnabled();
+ });
+
+ it('renders start recording button when message input contains text', async () => {
+ await renderComponent({ props: { message: { text: 'X' } } });
+ const btn = screen.queryByTestId(START_RECORDING_AUDIO_BUTTON_TEST_ID);
+ expect(btn).toBeInTheDocument();
+ expect(btn).toBeEnabled();
+ });
+
+ it('renders start recording button when message input contains attachments', async () => {
+ await renderComponent({
+ props: {
+ attachments: [
+ generateFileAttachment(),
+ generateImageAttachment(),
+ generateAudioAttachment(),
+ generateVideoAttachment(),
+ ],
+ message: { text: 'X' },
+ },
+ });
+ const btn = screen.queryByTestId(START_RECORDING_AUDIO_BUTTON_TEST_ID);
+ expect(btn).toBeInTheDocument();
+ expect(btn).toBeEnabled();
+ });
+
+ it('disables start recording button if is asyncMessagesMultiSendEnabled is false and voiceRecording attachment already present', async () => {
+ await renderComponent({
+ props: {
+ attachments: [generateVoiceRecordingAttachment()],
+ message: { text: 'X' },
+ },
+ });
+ const btn = screen.queryByTestId(START_RECORDING_AUDIO_BUTTON_TEST_ID);
+ expect(btn).toBeInTheDocument();
+ expect(btn).toBeDisabled();
+ });
+
+ it('renders AudioRecorder on start recording button click', async () => {
+ await renderComponent();
+ await act(() => {
+ fireEvent.click(screen.queryByTestId(START_RECORDING_AUDIO_BUTTON_TEST_ID));
+ });
+ expect(screen.queryByTestId(AUDIO_RECORDER_TEST_ID)).toBeInTheDocument();
+ });
+
+ it('does not show RecordingPermissionDeniedNotification until start recording button clicked if microphone permission is denied', async () => {
+ expect(screen.queryByText(PERM_DENIED_NOTIFICATION_TEXT)).not.toBeInTheDocument();
+ const status = new EventEmitterMock();
+ status.state = 'denied';
+ window.navigator.permissions.query.mockResolvedValueOnce(status);
+ await renderComponent();
+ expect(screen.queryByText(PERM_DENIED_NOTIFICATION_TEXT)).not.toBeInTheDocument();
+ await act(() => {
+ fireEvent.click(screen.queryByTestId(START_RECORDING_AUDIO_BUTTON_TEST_ID));
+ });
+ expect(screen.queryByText(PERM_DENIED_NOTIFICATION_TEXT)).toBeInTheDocument();
+ });
+
+ it('renders custom RecordingPermissionDeniedNotification', async () => {
+ const RecordingPermissionDeniedNotification = () => custom notification
;
+ const status = new EventEmitterMock();
+ status.state = 'denied';
+ window.navigator.permissions.query.mockResolvedValueOnce(status);
+ await renderComponent({ componentCtx: { RecordingPermissionDeniedNotification } });
+ await act(() => {
+ fireEvent.click(screen.queryByTestId(START_RECORDING_AUDIO_BUTTON_TEST_ID));
+ });
+ expect(screen.queryByText('custom notification')).toBeInTheDocument();
+ });
+
+ it('uploads and submits the whole message with all the attachments on recording completion and multiple async messages disabled', async () => {
+ const sendMessage = jest.fn();
+ const doFileUploadRequest = jest.fn().mockResolvedValue({ file: fileObjectURL });
+ let recorder;
+ let recording;
+ const MessageInputFlatWithContextCatcher = () => {
+ const ctx = useMessageInputContext();
+
+ useEffect(() => {
+ if (ctx.recordingController.recorder) {
+ recorder = ctx.recordingController.recorder;
+ }
+ if (ctx.recordingController.recording) {
+ recording = ctx.recordingController.recording;
+ }
+ }, [ctx.recordingController.recorder, ctx.recordingController.recording]);
+
+ return ;
+ };
+ await renderComponent({
+ channelActionCtx: { sendMessage },
+ componentCtx: { Input: MessageInputFlatWithContextCatcher },
+ props: { doFileUploadRequest },
+ });
+
+ await act(() => {
+ fireEvent.click(screen.queryByTestId(START_RECORDING_AUDIO_BUTTON_TEST_ID));
+ });
+ recorder.mediaRecorder.state = 'recording';
+
+ await act(() => {
+ fireEvent.click(screen.queryByTestId(AUDIO_RECORDER_STOP_BTN_TEST_ID));
+ });
+ recorder.mediaRecorder.state = 'paused';
+
+ await act(async () => {
+ await recorder.handleDataavailableEvent(generateDataavailableEvent());
+ });
+ await act(() => {
+ fireEvent.click(screen.queryByTestId(AUDIO_RECORDER_COMPLETE_BTN_TEST_ID));
+ });
+
+ expect(doFileUploadRequest).toHaveBeenCalledTimes(1);
+ const { $internal, ...uploadedRecordingAtt } = recording;
+ expect(sendMessage.mock.calls[0][0]).toStrictEqual({
+ attachments: [uploadedRecordingAtt],
+ mentioned_users: [],
+ parent: undefined,
+ text: '',
+ });
+ });
+
+ it('uploads but does not submit message on recording completion and multiple async messages enabled', async () => {
+ const sendMessage = jest.fn();
+ const doFileUploadRequest = jest.fn().mockResolvedValue({ file: fileObjectURL });
+ let recorder;
+ const MessageInputFlatWithContextCatcher = () => {
+ const ctx = useMessageInputContext();
+
+ useEffect(() => {
+ if (ctx.recordingController.recorder) {
+ recorder = ctx.recordingController.recorder;
+ }
+ }, [ctx.recordingController.recorder]);
+
+ return ;
+ };
+ await renderComponent({
+ channelActionCtx: { sendMessage },
+ componentCtx: { Input: MessageInputFlatWithContextCatcher },
+ props: { asyncMessagesMultiSendEnabled: true, doFileUploadRequest },
+ });
+
+ await act(() => {
+ fireEvent.click(screen.queryByTestId(START_RECORDING_AUDIO_BUTTON_TEST_ID));
+ });
+ recorder.mediaRecorder.state = 'recording';
+
+ await act(() => {
+ fireEvent.click(screen.queryByTestId(AUDIO_RECORDER_STOP_BTN_TEST_ID));
+ });
+ recorder.mediaRecorder.state = 'paused';
+
+ await act(async () => {
+ recorder.amplitudeRecorder.amplitudes.next([1]);
+ await recorder.handleDataavailableEvent(generateDataavailableEvent());
+ });
+ await act(() => {
+ fireEvent.click(screen.queryByTestId(AUDIO_RECORDER_COMPLETE_BTN_TEST_ID));
+ });
+
+ expect(doFileUploadRequest).toHaveBeenCalledTimes(1);
+ expect(sendMessage).not.toHaveBeenCalled();
+ });
+});
+describe('AudioRecorder', () => {
+ it.todo('does not render anything if recorder is not available');
+ it.todo('renders audio recording in progress UI');
+ it.todo('renders audio recording paused UI when paused');
+ it.todo('renders audio recording in progress UI when recording resumed');
+ it.todo('renders audio recording stopped UI when stopped');
+ it.todo('renders message composer when recording cancelled while recording');
+ it.todo('renders message composer when recording cancelled while paused');
+ it.todo('renders message composer when recording cancelled while stopped');
+ it.todo('renders loading indicators while recording being uploaded');
+});
diff --git a/src/components/MediaRecorder/AudioRecorder/index.ts b/src/components/MediaRecorder/AudioRecorder/index.ts
new file mode 100644
index 000000000..4aafc62ab
--- /dev/null
+++ b/src/components/MediaRecorder/AudioRecorder/index.ts
@@ -0,0 +1,3 @@
+export * from './AudioRecorder';
+export * from './AudioRecordingButtons';
+export * from './RecordingTimer';
diff --git a/src/components/MediaRecorder/RecordingPermissionDeniedNotification.tsx b/src/components/MediaRecorder/RecordingPermissionDeniedNotification.tsx
new file mode 100644
index 000000000..935535649
--- /dev/null
+++ b/src/components/MediaRecorder/RecordingPermissionDeniedNotification.tsx
@@ -0,0 +1,45 @@
+import React from 'react';
+import { useTranslationContext } from '../../context';
+
+import { RecordingPermission } from './classes/BrowserPermission';
+
+export type RecordingPermissionDeniedNotificationProps = {
+ onClose: () => void;
+ permissionName: RecordingPermission;
+};
+
+export const RecordingPermissionDeniedNotification = ({
+ onClose,
+ permissionName,
+}: RecordingPermissionDeniedNotificationProps) => {
+ const { t } = useTranslationContext();
+ const permissionTranslations = {
+ body: {
+ camera: t('To start recording, allow the camera access in your browser'),
+ microphone: t('To start recording, allow the microphone access in your browser'),
+ },
+ heading: {
+ camera: t('Allow access to camera'),
+ microphone: t('Allow access to microphone'),
+ },
+ };
+
+ return (
+
+
+ {permissionTranslations.heading[permissionName]}
+
+
+ {permissionTranslations.body[permissionName]}
+
+
+
+ {t('Ok')}
+
+
+
+ );
+};
diff --git a/src/components/MediaRecorder/classes/AmplitudeRecorder.ts b/src/components/MediaRecorder/classes/AmplitudeRecorder.ts
new file mode 100644
index 000000000..181cacddf
--- /dev/null
+++ b/src/components/MediaRecorder/classes/AmplitudeRecorder.ts
@@ -0,0 +1,134 @@
+import { BehaviorSubject } from '../observable/BehaviorSubject';
+import { Subject } from '../observable/Subject';
+import { mergeDeepUndefined } from '../../../utils/mergeDeep';
+
+const MAX_FREQUENCY_AMPLITUDE = 255 as const;
+
+const logError = (e?: Error) => e && console.error('[AMPLITUDE RECORDER ERROR]', e);
+
+const rootMeanSquare = (values: Uint8Array) =>
+ Math.sqrt(values.reduce((acc, val) => acc + Math.pow(val, 2), 0) / values.length);
+
+/**
+ * fftSize
+ * An unsigned integer, representing the window size of the FFT, given in number of samples.
+ * A higher value will result in more details in the frequency domain but fewer details
+ * in the amplitude domain.
+ *
+ * Must be a power of 2 between 2^5 and 2^15, so one of: 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, and 32768.
+ * Defaults to 32.
+ *
+ * maxDecibels
+ * A double, representing the maximum decibel value for scaling the FFT analysis data,
+ * where 0 dB is the loudest possible sound, -10 dB is a 10th of that, etc.
+ * The default value is -30 dB.
+ *
+ * minDecibels
+ * A double, representing the minimum decibel value for scaling the FFT analysis data,
+ * where 0 dB is the loudest possible sound, -10 dB is a 10th of that, etc.
+ * The default value is -100 dB.
+ */
+export type AmplitudeAnalyserConfig = Pick<AnalyserNode, 'fftSize' | 'maxDecibels' | 'minDecibels'>;
+export type AmplitudeRecorderConfig = {
+ analyserConfig: AmplitudeAnalyserConfig;
+ sampleCount: number;
+ samplingFrequencyMs: number;
+};
+
+export const DEFAULT_AMPLITUDE_RECORDER_CONFIG: AmplitudeRecorderConfig = {
+ analyserConfig: {
+ fftSize: 32,
+ maxDecibels: 0,
+ minDecibels: -100,
+ } as AmplitudeAnalyserConfig,
+ sampleCount: 100,
+ samplingFrequencyMs: 60,
+};
+
+type AmplitudeAnalyserOptions = {
+ stream: MediaStream;
+ config?: AmplitudeRecorderConfig;
+};
+
+export enum AmplitudeRecorderState {
+ CLOSED = 'closed',
+ RECORDING = 'recording',
+ STOPPED = 'stopped',
+}
+
+export class AmplitudeRecorder {
+ audioContext: AudioContext | undefined;
+ analyserNode: AnalyserNode | undefined;
+ microphone: MediaStreamAudioSourceNode | undefined;
+ stream: MediaStream;
+
+ config: AmplitudeRecorderConfig;
+
+ amplitudeSamplingInterval: ReturnType<typeof setInterval> | undefined;
+
+ amplitudes = new BehaviorSubject<number[]>([]);
+ state = new BehaviorSubject<AmplitudeRecorderState | undefined>(undefined);
+ error = new Subject<Error>();
+
+ constructor({ config, stream }: AmplitudeAnalyserOptions) {
+ this.config = mergeDeepUndefined({ ...config }, DEFAULT_AMPLITUDE_RECORDER_CONFIG);
+ this.stream = stream;
+ }
+
+ init() {
+ this.audioContext = new AudioContext();
+ this.analyserNode = this.audioContext.createAnalyser();
+ const { analyserConfig } = this.config;
+ this.analyserNode.fftSize = analyserConfig.fftSize;
+ this.analyserNode.maxDecibels = analyserConfig.maxDecibels;
+ this.analyserNode.minDecibels = analyserConfig.minDecibels;
+
+ this.microphone = this.audioContext.createMediaStreamSource(this.stream);
+ this.microphone.connect(this.analyserNode);
+ }
+
+ stop() {
+ clearInterval(this.amplitudeSamplingInterval);
+ this.amplitudeSamplingInterval = undefined;
+ this.state.next(AmplitudeRecorderState.STOPPED);
+ }
+
+ start = () => {
+ if (this.state.value === AmplitudeRecorderState.CLOSED) return;
+ if (!this.stream) {
+ throw new Error('Missing MediaStream instance. Cannot start amplitude recording');
+ }
+
+ if (this.state.value === AmplitudeRecorderState.RECORDING) this.stop();
+
+ if (!this.analyserNode) {
+ if (!this.stream) return;
+ this.init();
+ }
+
+ this.state.next(AmplitudeRecorderState.RECORDING);
+
+ this.amplitudeSamplingInterval = setInterval(() => {
+ if (!(this.analyserNode && this.state.value === AmplitudeRecorderState.RECORDING)) return;
+ const frequencyBins = new Uint8Array(this.analyserNode.frequencyBinCount);
+ try {
+ this.analyserNode.getByteFrequencyData(frequencyBins);
+ } catch (e) {
+ logError(e as Error);
+ this.error.next(e as Error);
+ return;
+ }
+ const normalizedSignalStrength = rootMeanSquare(frequencyBins) / MAX_FREQUENCY_AMPLITUDE;
+ this.amplitudes.next([...this.amplitudes.value, normalizedSignalStrength]);
+ }, this.config.samplingFrequencyMs);
+ };
+
+ close() {
+ if (this.state.value !== AmplitudeRecorderState.STOPPED) this.stop();
+ this.state.next(AmplitudeRecorderState.CLOSED);
+ this.amplitudes.next([]);
+ this.microphone?.disconnect();
+ this.analyserNode?.disconnect();
+ if (this.audioContext?.state !== 'closed') this.audioContext?.close();
+ }
+}
diff --git a/src/components/MediaRecorder/classes/BrowserPermission.ts b/src/components/MediaRecorder/classes/BrowserPermission.ts
new file mode 100644
index 000000000..749c2c117
--- /dev/null
+++ b/src/components/MediaRecorder/classes/BrowserPermission.ts
@@ -0,0 +1,81 @@
+import type { RecordedMediaType } from '../../ReactFileUtilities';
+import { ChangeEvent } from 'react';
+import { Subscription } from '../observable/Subscription';
+import { Subject } from '../observable/Subject';
+import { BehaviorSubject } from '../observable/BehaviorSubject';
+
+export enum RecordingPermission {
+ CAM = 'camera',
+ MIC = 'microphone',
+}
+
+const MEDIA_TO_PERMISSION: Record<RecordedMediaType, RecordingPermission> = {
+ audio: RecordingPermission.MIC,
+ video: RecordingPermission.CAM,
+};
+
+export type BrowserPermissionOptions = {
+ mediaType: RecordedMediaType;
+};
+
+export class BrowserPermission {
+ name: string;
+ state = new BehaviorSubject<PermissionState | undefined>(undefined);
+ status = new BehaviorSubject<PermissionStatus | undefined>(undefined);
+ error = new Subject<Error | undefined>();
+
+ private changeSubscriptions: Subscription[] = [];
+
+ constructor({ mediaType }: BrowserPermissionOptions) {
+ this.name = MEDIA_TO_PERMISSION[mediaType];
+ }
+
+ get isWatching() {
+ return this.changeSubscriptions.some((subscription) => !subscription.closed);
+ }
+
+ async watch() {
+ if (!this.status.value) {
+ await this.check();
+ if (!this.status.value) return;
+ }
+
+ const status = this.status.value;
+ const handlePermissionChange = (e: Event) => {
+ const { state } = ((e as unknown) as ChangeEvent<PermissionStatus>).target;
+ this.state.next(state);
+ };
+ status.addEventListener('change', handlePermissionChange);
+
+ this.changeSubscriptions.push(
+ new Subscription(() => {
+ status.removeEventListener('change', handlePermissionChange);
+ }),
+ );
+ }
+
+ unwatch() {
+ this.changeSubscriptions.forEach((subscription) => subscription.unsubscribe());
+ }
+
+ async check() {
+ if (!this.name) {
+ this.error.next(new Error('Unknown media recording permission'));
+ return;
+ }
+
+ let permissionState: PermissionState;
+ try {
+ const permissionStatus = await navigator.permissions.query({
+ name: (this.name as unknown) as PermissionName,
+ });
+ permissionState = permissionStatus.state;
+ this.status.next(permissionStatus);
+ } catch (e) {
+ // permission does not exist - cannot be queried
+ // an example would be Firefox - camera, neither microphone perms can be queried
+ permissionState = 'granted' as PermissionState;
+ }
+ this.state.next(permissionState);
+ }
+}
diff --git a/src/components/MediaRecorder/classes/MediaRecorderController.ts b/src/components/MediaRecorder/classes/MediaRecorderController.ts
new file mode 100644
index 000000000..bd8f52b3c
--- /dev/null
+++ b/src/components/MediaRecorder/classes/MediaRecorderController.ts
@@ -0,0 +1,369 @@
+import fixWebmDuration from 'fix-webm-duration';
+import { nanoid } from 'nanoid';
+import {
+ AmplitudeRecorder,
+ AmplitudeRecorderConfig,
+ DEFAULT_AMPLITUDE_RECORDER_CONFIG,
+} from './AmplitudeRecorder';
+import { BrowserPermission } from './BrowserPermission';
+import { BehaviorSubject, Subject } from '../observable';
+import { transcode } from '../transcode';
+import { resampleWaveformData } from '../../Attachment';
+import {
+ createFileFromBlobs,
+ getExtensionFromMimeType,
+ getRecordedMediaTypeFromMimeType,
+ RecordedMediaType,
+} from '../../ReactFileUtilities';
+import { TranslationContextValue } from '../../../context';
+import { defaultTranslatorFunction } from '../../../i18n';
+import { isSafari } from '../../../utils/browsers';
+import { mergeDeepUndefined } from '../../../utils/mergeDeep';
+
+import type { LocalVoiceRecordingAttachment } from '../../MessageInput';
+
+const RECORDED_MIME_TYPE_BY_BROWSER = {
+ audio: {
+ others: 'audio/webm',
+ safari: 'audio/mp4;codecs=mp4a.40.2',
+ },
+} as const;
+
+export const POSSIBLE_TRANSCODING_MIME_TYPES = ['audio/wav', 'audio/mp3'] as const;
+
+export const DEFAULT_MEDIA_RECORDER_CONFIG: MediaRecorderConfig = {
+ mimeType: isSafari()
+ ? RECORDED_MIME_TYPE_BY_BROWSER.audio.safari
+ : RECORDED_MIME_TYPE_BY_BROWSER.audio.others,
+} as const;
+
+export const DEFAULT_AUDIO_TRANSCODER_CONFIG: TranscoderConfig = {
+ sampleRate: 16000,
+ targetMimeType: 'audio/mp3',
+} as const;
+
+const disposeOfMediaStream = (stream?: MediaStream) => {
+ if (!stream?.active) return;
+ stream.getTracks().forEach((track) => {
+ track.stop();
+ stream.removeTrack(track);
+ });
+};
+
+const logError = (e?: Error) => e && console.error('[MEDIA RECORDER ERROR]', e);
+
+type SupportedTranscodeMimeTypes = typeof POSSIBLE_TRANSCODING_MIME_TYPES[number];
+
+export type TranscoderConfig = {
+ // defaults to 16000Hz
+ sampleRate: number;
+ // Defaults to audio/mp3;
+ targetMimeType: SupportedTranscodeMimeTypes;
+};
+
+type MediaRecorderConfig = Omit<MediaRecorderOptions, 'mimeType'> &
+ Required<Pick<MediaRecorderOptions, 'mimeType'>>;
+
+export type AudioRecorderConfig = {
+ amplitudeRecorderConfig: AmplitudeRecorderConfig;
+ mediaRecorderConfig: MediaRecorderOptions;
+ transcoderConfig: TranscoderConfig;
+};
+
+export type AudioRecorderOptions = {
+ config?: Partial;
+ generateRecordingTitle?: (mimeType: string) => string;
+ t?: TranslationContextValue['t'];
+};
+
+export enum MediaRecordingState {
+ PAUSED = 'paused',
+ RECORDING = 'recording',
+ STOPPED = 'stopped',
+}
+
+export enum RecordingAttachmentType {
+ VOICE_RECORDING = 'voiceRecording',
+}
+
+export class MediaRecorderController {
+ permission: BrowserPermission;
+ mediaRecorder: MediaRecorder | undefined;
+ amplitudeRecorder: AmplitudeRecorder | undefined;
+
+ amplitudeRecorderConfig: AmplitudeRecorderConfig;
+ mediaRecorderConfig: MediaRecorderConfig;
+ transcoderConfig: TranscoderConfig;
+
+ startTime: number | undefined;
+ recordedChunkDurations: number[] = [];
+ recordedData: Blob[] = [];
+ recordingUri: string | undefined;
+ mediaType: RecordedMediaType;
+
+ signalRecordingReady: ((r: LocalVoiceRecordingAttachment) => void) | undefined;
+
+ recordingState = new BehaviorSubject<MediaRecordingState | undefined>(undefined);
+ recording = new BehaviorSubject<LocalVoiceRecordingAttachment | undefined>(undefined);
+ error = new Subject<Error | undefined>();
+ notification = new Subject<{ text: string; type: 'success' | 'error' } | undefined>();
+
+ customGenerateRecordingTitle: ((mimeType: string) => string) | undefined;
+ t: TranslationContextValue['t'];
+
+ constructor({ config, generateRecordingTitle, t }: AudioRecorderOptions = {}) {
+ this.t = t || defaultTranslatorFunction;
+
+ this.amplitudeRecorderConfig = mergeDeepUndefined(
+ { ...config?.amplitudeRecorderConfig },
+ DEFAULT_AMPLITUDE_RECORDER_CONFIG,
+ );
+
+ this.mediaRecorderConfig = mergeDeepUndefined(
+ { ...config?.mediaRecorderConfig },
+ DEFAULT_MEDIA_RECORDER_CONFIG,
+ );
+
+ this.transcoderConfig = mergeDeepUndefined(
+ { ...config?.transcoderConfig },
+ DEFAULT_AUDIO_TRANSCODER_CONFIG,
+ );
+ if (!POSSIBLE_TRANSCODING_MIME_TYPES.includes(this.transcoderConfig.targetMimeType)) {
+ this.transcoderConfig.targetMimeType = DEFAULT_AUDIO_TRANSCODER_CONFIG.targetMimeType;
+ }
+
+ const mediaType = getRecordedMediaTypeFromMimeType(this.mediaRecorderConfig.mimeType);
+ if (!mediaType) {
+ throw new Error(
+ `Unsupported media type (supported audio or video only). Provided mimeType: ${this.mediaRecorderConfig.mimeType}`,
+ );
+ }
+ this.mediaType = mediaType;
+
+ this.permission = new BrowserPermission({ mediaType });
+
+ this.customGenerateRecordingTitle = generateRecordingTitle;
+ }
+
+ get durationMs() {
+ return this.recordedChunkDurations.reduce((acc, val) => acc + val, 0);
+ }
+
+ generateRecordingTitle = (mimeType: string) => {
+ if (this.customGenerateRecordingTitle) {
+ return this.customGenerateRecordingTitle(mimeType);
+ }
+ return `${this.mediaType}_recording_${new Date().toISOString()}.${getExtensionFromMimeType(
+ mimeType,
+ )}`; // extension needed so that desktop Safari can play the asset
+ };
+
+ makeVoiceRecording = async () => {
+ if (this.recordingUri) URL.revokeObjectURL(this.recordingUri);
+
+ if (!this.recordedData.length) return;
+ const { mimeType } = this.mediaRecorderConfig;
+ let blob = new Blob(this.recordedData, { type: mimeType });
+ if (mimeType.match('audio/webm')) {
+ // The browser does not include duration metadata with the recorded blob
+ blob = await fixWebmDuration(blob, this.durationMs, {
+ logger: () => null, // prevents polluting the browser console
+ });
+ }
+ if (!mimeType.match('audio/mp4')) {
+ blob = await transcode({
+ blob,
+ ...this.transcoderConfig,
+ });
+ }
+
+ if (!blob) return;
+
+ this.recordingUri = URL.createObjectURL(blob);
+ const file = createFileFromBlobs({
+ blobsArray: [blob],
+ fileName: this.generateRecordingTitle(blob.type),
+ mimeType: blob.type,
+ });
+
+ return {
+ $internal: {
+ file,
+ id: nanoid(),
+ },
+ asset_url: this.recordingUri,
+ duration: this.durationMs / 1000,
+ file_size: blob.size,
+ mime_type: blob.type,
+ title: file.name,
+ type: RecordingAttachmentType.VOICE_RECORDING,
+ waveform_data: resampleWaveformData(
+ this.amplitudeRecorder?.amplitudes.value ?? [],
+ this.amplitudeRecorderConfig.sampleCount,
+ ),
+ };
+ };
+
+ handleErrorEvent = (e: Event) => {
+ const { error } = e as ErrorEvent;
+ logError(error);
+ this.error.next(error);
+ this.notification.next({
+ text: this.t('An error has occurred during recording'),
+ type: 'error',
+ });
+ };
+
+ handleDataavailableEvent = async (e: BlobEvent) => {
+ if (!e.data.size) return;
+ if (this.mediaType !== 'audio') return;
+ try {
+ this.recordedData.push(e.data);
+ const recording = await this.makeVoiceRecording();
+ if (!recording) return;
+ this.signalRecordingReady?.(recording);
+ this.recording.next(recording);
+ } catch (e) {
+ logError(e as Error);
+ this.error.next(e as Error);
+ this.notification.next({
+ text: this.t('An error has occurred during the recording processing'),
+ type: 'error',
+ });
+ }
+ };
+
+ resetRecordingState = () => {
+ this.recordedData = [];
+ this.recording.next(undefined);
+ this.recordingState.next(undefined);
+ this.recordedChunkDurations = [];
+ this.startTime = undefined;
+ };
+
+ cleanUp = () => {
+ this.resetRecordingState();
+ if (this.recordingUri) URL.revokeObjectURL(this.recordingUri);
+ this.amplitudeRecorder?.close();
+ if (this.mediaRecorder) {
+ disposeOfMediaStream(this.mediaRecorder.stream);
+ this.mediaRecorder.removeEventListener('dataavailable', this.handleDataavailableEvent);
+ this.mediaRecorder.removeEventListener('error', this.handleErrorEvent);
+ }
+ };
+
+ start = async () => {
+ if (
+ [MediaRecordingState.RECORDING, MediaRecordingState.PAUSED].includes(
+ this.recordingState.value as MediaRecordingState,
+ )
+ ) {
+ const error = new Error('Cannot start recording. Recording already in progress');
+ logError(error);
+ this.error.next(error);
+ return;
+ }
+
+ // account for requirement on iOS as per this bug report: https://bugs.webkit.org/show_bug.cgi?id=252303
+ if (!navigator.mediaDevices) {
+ const error = new Error('Media recording is not supported');
+ logError(error);
+ this.error.next(error);
+ this.notification.next({ text: this.t('Error starting recording'), type: 'error' });
+ return;
+ }
+
+ if (this.mediaType === 'video') {
+ const error = new Error(
+ `Video recording is not supported. Provided MIME type: ${this.mediaRecorderConfig.mimeType}`,
+ );
+ logError(error);
+ this.error.next(error);
+ this.notification.next({ text: this.t('Error starting recording'), type: 'error' });
+ return;
+ }
+
+ if (!this.permission.state.value) {
+ await this.permission.check();
+ }
+
+ if (this.permission.state.value === 'denied') {
+ logError(new Error('Permission denied'));
+ return;
+ }
+
+ try {
+ const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
+ this.mediaRecorder = new MediaRecorder(stream, this.mediaRecorderConfig);
+
+ this.mediaRecorder.addEventListener('dataavailable', this.handleDataavailableEvent);
+ this.mediaRecorder.addEventListener('error', this.handleErrorEvent);
+
+ this.startTime = new Date().getTime();
+ this.mediaRecorder.start();
+
+ if (this.mediaType === 'audio' && stream) {
+ this.amplitudeRecorder = new AmplitudeRecorder({
+ config: this.amplitudeRecorderConfig,
+ stream,
+ });
+ this.amplitudeRecorder.start();
+ }
+
+ this.recordingState.next(MediaRecordingState.RECORDING);
+ } catch (error) {
+ logError(error as Error);
+ this.cancel();
+ this.error.next(error as Error);
+ this.notification.next({ text: this.t('Error starting recording'), type: 'error' });
+ }
+ };
+
+ pause = () => {
+ if (this.recordingState.value !== MediaRecordingState.RECORDING) return;
+ if (this.startTime) {
+ this.recordedChunkDurations.push(new Date().getTime() - this.startTime);
+ this.startTime = undefined;
+ }
+ this.mediaRecorder?.pause();
+ this.amplitudeRecorder?.stop();
+ this.recordingState.next(MediaRecordingState.PAUSED);
+ };
+
+ resume = () => {
+ if (this.recordingState.value !== MediaRecordingState.PAUSED) return;
+ this.startTime = new Date().getTime();
+ this.mediaRecorder?.resume();
+ this.amplitudeRecorder?.start();
+ this.recordingState.next(MediaRecordingState.RECORDING);
+ };
+
+ stop = () => {
+ const recording = this.recording.value;
+ if (recording) return Promise.resolve(recording);
+
+ if (
+ ![MediaRecordingState.PAUSED, MediaRecordingState.RECORDING].includes(
+ (this.mediaRecorder?.state || '') as MediaRecordingState,
+ )
+ )
+ return Promise.resolve(undefined);
+
+ if (this.startTime) {
+ this.recordedChunkDurations.push(new Date().getTime() - this.startTime);
+ this.startTime = undefined;
+ }
+ const result = new Promise<LocalVoiceRecordingAttachment | undefined>((res) => {
+ this.signalRecordingReady = res;
+ });
+ this.mediaRecorder?.stop();
+ this.amplitudeRecorder?.stop();
+ this.recordingState.next(MediaRecordingState.STOPPED);
+ return result;
+ };
+
+ cancel = () => {
+ this.stop();
+ this.cleanUp();
+ };
+}
diff --git a/src/components/MediaRecorder/classes/__tests__/AmplitudeRecorder.test.js b/src/components/MediaRecorder/classes/__tests__/AmplitudeRecorder.test.js
new file mode 100644
index 000000000..1cef13a0d
--- /dev/null
+++ b/src/components/MediaRecorder/classes/__tests__/AmplitudeRecorder.test.js
@@ -0,0 +1,113 @@
+import {
+ AmplitudeRecorder,
+ AmplitudeRecorderState,
+ DEFAULT_AMPLITUDE_RECORDER_CONFIG,
+} from '../AmplitudeRecorder';
+import { AudioContextMock } from '../../../../mock-builders/browser';
+
+// eslint-disable-next-line
+window.AudioContext = AudioContextMock;
+
+const intervalID = 1;
+jest.spyOn(window, 'setInterval').mockReturnValue(intervalID);
+
+describe('AmplitudeRecorder', () => {
+ it('is initiated with defaults', () => {
+ const ar = new AmplitudeRecorder({ stream: {} });
+ expect(ar.config).toStrictEqual(expect.objectContaining(DEFAULT_AMPLITUDE_RECORDER_CONFIG));
+ });
+ it('is initiated with custom config', () => {
+ const customConfig = {
+ analyserConfig: {
+ fftSize: 64,
+ maxDecibels: -6,
+ minDecibels: -90,
+ },
+ sampleCount: 50,
+ samplingFrequencyMs: 30,
+ };
+
+ const mixedConfig = {
+ analyserConfig: customConfig.analyserConfig,
+ sampleCount: DEFAULT_AMPLITUDE_RECORDER_CONFIG.sampleCount,
+ samplingFrequencyMs: DEFAULT_AMPLITUDE_RECORDER_CONFIG.samplingFrequencyMs,
+ };
+
+ let ar = new AmplitudeRecorder({ config: customConfig });
+ expect(ar.config).toStrictEqual(expect.objectContaining(customConfig));
+ ar = new AmplitudeRecorder({ config: { analyserConfig: customConfig.analyserConfig } });
+ expect(ar.config).toStrictEqual(expect.objectContaining(mixedConfig));
+ });
+
+ describe('start', () => {
+ it('throws error if MediaStream is not available', () => {
+ const ar = new AmplitudeRecorder({ stream: {} });
+ ar.stream = undefined;
+ expect(ar.start).toThrow('Missing MediaStream instance. Cannot to start amplitude recording');
+ });
+
+ it('initiates the recorder state', () => {
+ const ar = new AmplitudeRecorder({ stream: {} });
+ ar.start({});
+ expect(ar.audioContext).toBeDefined();
+ expect(ar.analyserNode).toStrictEqual(
+ expect.objectContaining(DEFAULT_AMPLITUDE_RECORDER_CONFIG.analyserConfig),
+ );
+ expect(ar.microphone).toBeDefined();
+ expect(ar.microphone.connect).toHaveBeenCalledWith(ar.analyserNode);
+ expect(ar.state.value).toBe(AmplitudeRecorderState.RECORDING);
+ expect(ar.amplitudeSamplingInterval).toBe(intervalID);
+ });
+ });
+
+ it('stops the recording', () => {
+ const ar = new AmplitudeRecorder({ stream: {} });
+ ar.start({});
+ ar.stop();
+ expect(ar.audioContext).toBeDefined();
+ expect(ar.analyserNode).toStrictEqual(
+ expect.objectContaining(DEFAULT_AMPLITUDE_RECORDER_CONFIG.analyserConfig),
+ );
+ expect(ar.microphone).toBeDefined();
+ expect(ar.microphone.connect).toHaveBeenCalledWith(ar.analyserNode);
+ expect(ar.state.value).toBe(AmplitudeRecorderState.STOPPED);
+ expect(ar.amplitudeSamplingInterval).toBeUndefined();
+ });
+
+ describe('close', () => {
+ it('disconnects all the devices', () => {
+ const ar = new AmplitudeRecorder({ stream: {} });
+ const stopSpy = jest.spyOn(ar, 'stop');
+ ar.start({});
+ ar.stop();
+ ar.close();
+
+ expect(stopSpy).toHaveBeenCalledTimes(1);
+ expect(ar.state.value).toBe(AmplitudeRecorderState.CLOSED);
+ expect(ar.microphone.disconnect).toHaveBeenCalledWith();
+ expect(ar.analyserNode.disconnect).toHaveBeenCalledWith();
+ expect(ar.audioContext.close).toHaveBeenCalledWith();
+ });
+
+ it('stops the recording if not already stopped', () => {
+ const ar = new AmplitudeRecorder({ stream: {} });
+ const stopSpy = jest.spyOn(ar, 'stop');
+ ar.start({});
+ ar.close();
+ expect(stopSpy).toHaveBeenCalledWith();
+ expect(ar.state.value).toBe(AmplitudeRecorderState.CLOSED);
+ expect(ar.microphone.disconnect).toHaveBeenCalledWith();
+ expect(ar.analyserNode.disconnect).toHaveBeenCalledWith();
+ expect(ar.audioContext.close).toHaveBeenCalledWith();
+ stopSpy.mockRestore();
+ });
+
+ it('cannot restart the recorder', () => {
+ const ar = new AmplitudeRecorder({ stream: {} });
+ ar.start({});
+ ar.close();
+ ar.start();
+ expect(ar.state.value).toBe(AmplitudeRecorderState.CLOSED);
+ });
+ });
+});
diff --git a/src/components/MediaRecorder/classes/__tests__/BrowserPermission.test.js b/src/components/MediaRecorder/classes/__tests__/BrowserPermission.test.js
new file mode 100644
index 000000000..d5c848171
--- /dev/null
+++ b/src/components/MediaRecorder/classes/__tests__/BrowserPermission.test.js
@@ -0,0 +1,110 @@
+import { BrowserPermission } from '../BrowserPermission';
+import { EventEmitterMock } from '../../../../mock-builders/browser';
+
+const defaultMockState = 'prompt';
+window.navigator.permissions = {
+ query: jest.fn(),
+};
+
+describe('BrowserPermission', () => {
+ afterEach(jest.clearAllMocks);
+
+ it('is initiated for microphone', () => {
+ const permission = new BrowserPermission({ mediaType: 'audio' });
+ expect(permission.name).toBe('microphone');
+ expect(permission.state.value).toBeUndefined();
+ expect(permission.status.value).toBeUndefined();
+ expect(permission.isWatching).toBe(false);
+ });
+
+ it('is initiated for camera', () => {
+ const permission = new BrowserPermission({ mediaType: 'video' });
+ expect(permission.name).toBe('camera');
+ expect(permission.state.value).toBeUndefined();
+ expect(permission.status.value).toBeUndefined();
+ expect(permission.isWatching).toBe(false);
+ });
+
+ describe('check', () => {
+ it('registers error and returns on checking unsupported permission', async () => {
+ const permission = new BrowserPermission({ mediaType: 'X' });
+ let error;
+ const errorSubscription = permission.error.subscribe((e) => {
+ error = e;
+ });
+ await permission.check();
+ expect(permission.state.value).toBeUndefined();
+ expect(permission.status.value).toBeUndefined();
+ expect(permission.isWatching).toBe(false);
+ expect(error.message).toBe('Unknown media recording permission');
+ errorSubscription.unsubscribe();
+ });
+
+ it('handles permission query error', async () => {
+ const permission = new BrowserPermission({ mediaType: 'audio' });
+ window.navigator.permissions.query.mockRejectedValueOnce('Query error');
+ await permission.check();
+ expect(permission.state.value).toBe('granted');
+ });
+
+ it('emits permission status and state', async () => {
+ const permission = new BrowserPermission({ mediaType: 'audio' });
+ const status = new EventEmitterMock();
+ status.state = defaultMockState;
+ window.navigator.permissions.query.mockResolvedValueOnce(status);
+ await permission.check();
+
+ expect(permission.status.value).toStrictEqual(status);
+ expect(permission.state.value).toBe(defaultMockState);
+ });
+ });
+
+ describe('listening to permission status change', () => {
+ it('is prevented for unsupported permission', async () => {
+ const permission = new BrowserPermission({ mediaType: 'X' });
+ let error;
+ const errorSubscription = permission.error.subscribe((e) => {
+ error = e;
+ });
+ await permission.watch();
+ expect(permission.state.value).toBeUndefined();
+ expect(permission.status.value).toBeUndefined();
+ expect(permission.isWatching).toBe(false);
+ expect(error.message).toBe('Unknown media recording permission');
+ errorSubscription.unsubscribe();
+ });
+
+ it('subscribes to permission status change event', async () => {
+ const permission = new BrowserPermission({ mediaType: 'audio' });
+ let error;
+ const errorSubscription = permission.error.subscribe((e) => {
+ error = e;
+ });
+ const status = new EventEmitterMock();
+ status.state = defaultMockState;
+ window.navigator.permissions.query.mockResolvedValueOnce(status);
+ await permission.watch();
+
+ expect(permission.state.value).toBe(defaultMockState);
+ expect(permission.isWatching).toBe(true);
+ expect(error).toBeUndefined();
+ errorSubscription.unsubscribe();
+
+ const registeredHandler = permission.status.value.addEventListener.mock.calls[0][1];
+ registeredHandler({ target: { state: 'granted' } });
+ expect(permission.state.value).toBe('granted');
+ });
+
+ it('allows to unsubscribe from watching permission status change event', async () => {
+ const permission = new BrowserPermission({ mediaType: 'audio' });
+ const status = new EventEmitterMock();
+ status.state = defaultMockState;
+ window.navigator.permissions.query.mockResolvedValueOnce(status);
+ await permission.watch();
+ expect(permission.status.value.removeEventListener).not.toHaveBeenCalled();
+ permission.unwatch();
+ expect(permission.status.value.removeEventListener).toHaveBeenCalledTimes(1);
+ expect(permission.isWatching).toBe(false);
+ });
+ });
+});
diff --git a/src/components/MediaRecorder/classes/__tests__/MediaRecorderController.test.js b/src/components/MediaRecorder/classes/__tests__/MediaRecorderController.test.js
new file mode 100644
index 000000000..fec26b8b6
--- /dev/null
+++ b/src/components/MediaRecorder/classes/__tests__/MediaRecorderController.test.js
@@ -0,0 +1,507 @@
+import fixWebmDuration from 'fix-webm-duration';
+import * as transcoder from '../../transcode';
+import {
+ DEFAULT_AUDIO_TRANSCODER_CONFIG,
+ DEFAULT_MEDIA_RECORDER_CONFIG,
+ MediaRecorderController,
+ MediaRecordingState,
+ RecordingAttachmentType,
+} from '../MediaRecorderController';
+import { AmplitudeRecorderState, DEFAULT_AMPLITUDE_RECORDER_CONFIG } from '../AmplitudeRecorder';
+import { defaultTranslatorFunction } from '../../../../i18n';
+import * as audioSampling from '../../../Attachment/audioSampling';
+import * as reactFileUtils from '../../../ReactFileUtilities/utils';
+import { generateVoiceRecordingAttachment } from '../../../../mock-builders';
+import { AudioContextMock, MediaRecorderMock } from '../../../../mock-builders/browser';
+import { generateDataavailableEvent } from '../../../../mock-builders/browser/events/dataavailable';
+
+const fileObjectURL = 'fileObjectURL';
+const nanoidMockValue = 'randomNanoId';
+const fileMock = { name: 'fileName' };
+
+const recordedChunkCount = 10;
+const dataPoints = Array.from({ length: recordedChunkCount }, (_, i) => i);
+jest.mock('nanoid', () => ({
+ nanoid: () => nanoidMockValue,
+}));
+
+jest.mock('fix-webm-duration', () => jest.fn((blob) => blob));
+
+const transcodeSpy = jest
+ .spyOn(transcoder, 'transcode')
+ .mockImplementation((opts) =>
+ Promise.resolve(new Blob([opts.blob], { type: opts.targetMimeType })),
+ );
+
+jest.spyOn(audioSampling, 'resampleWaveformData').mockReturnValue(dataPoints);
+
+jest.spyOn(reactFileUtils, 'createFileFromBlobs').mockReturnValue(fileMock);
+
+const consoleErrorSpy = jest.spyOn(console, 'error').mockImplementation();
+const expectRegistersError = async ({ action, controller, errorMsg, notificationMsg }) => {
+ let error, notification;
+ const errorSubscription = controller.error.subscribe((e) => {
+ error = e;
+ });
+ const notificationSubscription =
+ notificationMsg &&
+ controller.notification.subscribe((n) => {
+ notification = n;
+ });
+ await action();
+ expect(error.message).toBe(errorMsg);
+ expect(consoleErrorSpy.mock.calls[0][0]).toBe('[MEDIA RECORDER ERROR]');
+ expect(consoleErrorSpy.mock.calls[0][1].message).toBe(errorMsg);
+ if (notificationMsg)
+ expect(notification).toStrictEqual(
+ expect.objectContaining({ text: notificationMsg, type: 'error' }),
+ );
+ expect(controller.mediaRecorder).toBeUndefined();
+
+ errorSubscription.unsubscribe();
+ notificationSubscription?.unsubscribe();
+};
+
+// eslint-disable-next-line
+window.MediaRecorder = MediaRecorderMock;
+
+// eslint-disable-next-line
+window.AudioContext = AudioContextMock;
+
+// eslint-disable-next-line
+window.URL.createObjectURL = jest.fn(() => fileObjectURL);
+// eslint-disable-next-line
+window.URL.revokeObjectURL = jest.fn();
+
+describe('MediaRecorderController', () => {
+ beforeEach(() => {
+ window.navigator.mediaDevices = {
+ getUserMedia: jest.fn().mockResolvedValue({}),
+ };
+ });
+ afterEach(jest.clearAllMocks);
+
+ it('provides defaults on initiation', () => {
+ const controller = new MediaRecorderController();
+ expect(controller.mediaRecorderConfig).toStrictEqual(
+ expect.objectContaining(DEFAULT_MEDIA_RECORDER_CONFIG),
+ );
+ expect(controller.transcoderConfig).toStrictEqual(
+ expect.objectContaining(DEFAULT_AUDIO_TRANSCODER_CONFIG),
+ );
+ expect(controller.amplitudeRecorderConfig).toStrictEqual(
+ expect.objectContaining(DEFAULT_AMPLITUDE_RECORDER_CONFIG),
+ );
+ expect(controller.t).toStrictEqual(defaultTranslatorFunction);
+ expect(controller.mediaType).toStrictEqual('audio');
+ expect(controller.customGenerateRecordingTitle).toBeUndefined();
+ });
+
+ it('overrides the defaults on initiation', () => {
+ const config = {
+ amplitudeRecorderConfig: {
+ analyserConfig: {
+ fftSize: 64,
+ maxDecibels: -6,
+ minDecibels: -90,
+ },
+ sampleCount: 50,
+ samplingFrequencyMs: 30,
+ },
+ mediaRecorderConfig: { mimeType: 'audio/ogg' },
+ transcoderConfig: {
+ sampleRate: 22050,
+ targetMimeType: 'audio/wav',
+ },
+ };
+ const generateRecordingTitle = jest.fn();
+ const t = jest.fn();
+ const controller = new MediaRecorderController({
+ config,
+ generateRecordingTitle,
+ t,
+ });
+ expect(controller.mediaRecorderConfig).toStrictEqual(
+ expect.objectContaining(config.mediaRecorderConfig),
+ );
+ expect(controller.transcoderConfig).toStrictEqual(
+ expect.objectContaining(config.transcoderConfig),
+ );
+ expect(controller.amplitudeRecorderConfig).toStrictEqual(
+ expect.objectContaining(config.amplitudeRecorderConfig),
+ );
+ expect(controller.t).toStrictEqual(t);
+ expect(controller.mediaType).toStrictEqual('audio');
+ expect(controller.customGenerateRecordingTitle).toStrictEqual(generateRecordingTitle);
+ });
+
+ it('overrides the defaults on initiation partially', () => {
+ const generateRecordingTitle = jest.fn();
+ const controller = new MediaRecorderController({ generateRecordingTitle });
+ expect(controller.customGenerateRecordingTitle).toStrictEqual(generateRecordingTitle);
+ expect(controller.mediaRecorderConfig).toStrictEqual(
+ expect.objectContaining(DEFAULT_MEDIA_RECORDER_CONFIG),
+ );
+ expect(controller.transcoderConfig).toStrictEqual(
+ expect.objectContaining(DEFAULT_AUDIO_TRANSCODER_CONFIG),
+ );
+ expect(controller.amplitudeRecorderConfig).toStrictEqual(
+ expect.objectContaining(DEFAULT_AMPLITUDE_RECORDER_CONFIG),
+ );
+ expect(controller.t).toStrictEqual(defaultTranslatorFunction);
+ expect(controller.mediaType).toStrictEqual('audio');
+ });
+
+ it('generates a default recording audio title', () => {
+ const controller = new MediaRecorderController();
+ expect(controller.generateRecordingTitle('audio/webm')).toMatch(/audio_recording_.+\.webm/);
+ });
+
+ it('generates a custom recording audio title', () => {
+ const title = 'Test title';
+ const controller = new MediaRecorderController({ generateRecordingTitle: () => title });
+ expect(controller.generateRecordingTitle('audio/webm')).toBe(title);
+ });
+
+ describe('start', () => {
+ it('checks device permission if unknown', async () => {
+ const controller = new MediaRecorderController();
+ controller.permission.state.next(undefined);
+ const permissionCheckSpy = jest.spyOn(controller.permission, 'check').mockImplementation();
+ await controller.start();
+ expect(permissionCheckSpy).toHaveBeenCalledWith();
+ });
+
+ it.each([MediaRecordingState.RECORDING, MediaRecordingState.PAUSED])(
+ 'registers error if %s',
+ async (recordingState) => {
+ const controller = new MediaRecorderController();
+ controller.recordingState.next(recordingState);
+ await expectRegistersError({
+ action: controller.start,
+ controller,
+ errorMsg: 'Cannot start recording. Recording already in progress',
+ });
+ },
+ );
+
+ describe.each([undefined, MediaRecordingState.STOPPED])('recording in state %s', () => {
+ describe.each([['denied'], ['prompt'], ['granted']])('with permission "%s"', (permission) => {
+ it('registers error on unavailable navigator.mediaDevices', async () => {
+ window.navigator.mediaDevices = undefined;
+ const controller = new MediaRecorderController();
+ controller.permission.state.next(permission);
+ await expectRegistersError({
+ action: controller.start,
+ controller,
+ errorMsg: 'Media recording is not supported',
+ notificationMsg: 'Error starting recording',
+ });
+ expect(controller.recordingState.value).toBeUndefined();
+ });
+
+ it('registers error for video recording', async () => {
+ const controller = new MediaRecorderController({
+ config: { mediaRecorderConfig: { mimeType: 'video/webm' } },
+ });
+ controller.permission.state.next(permission);
+ await expectRegistersError({
+ action: controller.start,
+ controller,
+ errorMsg: 'Video recording is not supported. Provided MIME type: video/webm',
+ notificationMsg: 'Error starting recording',
+ });
+ expect(controller.recordingState.value).toBeUndefined();
+ });
+
+ it('does not check device permission', async () => {
+ const controller = new MediaRecorderController();
+ controller.permission.state.next(permission);
+ const permissionCheckSpy = jest
+ .spyOn(controller.permission, 'check')
+ .mockImplementation();
+ await controller.start();
+ expect(permissionCheckSpy).not.toHaveBeenCalledWith();
+ });
+
+ it.each([['prevents accessing'], ['accesses'], ['accesses']])(
+ '%s media devices',
+ async () => {
+ const controller = new MediaRecorderController();
+ controller.permission.state.next(permission);
+ await controller.start();
+ if (permission === 'denied') {
+ expect(window.navigator.mediaDevices.getUserMedia).not.toHaveBeenCalled();
+ expect(controller.recordingState.value).toBeUndefined();
+ } else {
+ expect(window.navigator.mediaDevices.getUserMedia).toHaveBeenCalledWith(
+ expect.objectContaining({ audio: true }),
+ );
+ expect(controller.recordingState.value).toBe(MediaRecordingState.RECORDING);
+ }
+ },
+ );
+
+ it.each([['does not initiate'], ['initiates'], ['initiates']])(
+ '%s amplitude recording for audio recording',
+ async () => {
+ const controller = new MediaRecorderController();
+ controller.permission.state.next(permission);
+ await controller.start();
+ if (permission === 'denied') {
+ expect(controller.amplitudeRecorder).toBeUndefined();
+ } else {
+ expect(controller.amplitudeRecorder.state.value).toBe(
+ AmplitudeRecorderState.RECORDING,
+ );
+ }
+ },
+ );
+ it('starts MediaRecorder', async () => {
+ const controller = new MediaRecorderController();
+ await controller.start();
+ expect(controller.mediaRecorder.start).toHaveBeenCalledWith();
+ });
+
+ it('handles runtime error', async () => {
+ const controller = new MediaRecorderController();
+ const errorMsg = 'User media error';
+ window.navigator.mediaDevices.getUserMedia.mockRejectedValueOnce(new Error(errorMsg));
+ await expectRegistersError({
+ action: controller.start,
+ controller,
+ errorMsg,
+ notificationMsg: 'Error starting recording',
+ });
+ expect(controller.recordingState.value).toBeUndefined();
+ });
+ });
+ });
+ });
+
+ describe('pause', () => {
+ it('pauses recording', async () => {
+ const controller = new MediaRecorderController();
+ expect(controller.recordedChunkDurations).toHaveLength(0);
+ await controller.start();
+ controller.pause();
+ expect(controller.startTime).toBeUndefined();
+ expect(controller.recordingState.value).toBe(MediaRecordingState.PAUSED);
+ expect(controller.mediaRecorder.pause).toHaveBeenCalledWith();
+ expect(controller.amplitudeRecorder.state.value).toBe(AmplitudeRecorderState.STOPPED);
+ controller.resume();
+ controller.pause();
+ expect(controller.recordedChunkDurations).toHaveLength(2);
+ });
+
+ it.each([MediaRecordingState.PAUSED, MediaRecordingState.STOPPED, undefined])(
+ 'does nothing if recording state is %s',
+ (recordingState) => {
+ const controller = new MediaRecorderController();
+ controller.recordingState.next(recordingState);
+ controller.pause();
+ expect(controller.recordedChunkDurations).toHaveLength(0);
+ expect(controller.recordingState.value).toBe(recordingState);
+ },
+ );
+ });
+
+ describe('resume', () => {
+ it('resumes paused recording', async () => {
+ const controller = new MediaRecorderController();
+ await controller.start();
+ controller.pause();
+ controller.resume();
+ expect(controller.startTime).toBeDefined();
+ expect(controller.mediaRecorder.resume).toHaveBeenCalledWith();
+ expect(controller.amplitudeRecorder.state.value).toBe(AmplitudeRecorderState.RECORDING);
+ expect(controller.recordingState.value).toBe(MediaRecordingState.RECORDING);
+ });
+
+ it.each([MediaRecordingState.RECORDING, MediaRecordingState.STOPPED, undefined])(
+ 'does nothing if recording state is %s',
+ (recordingState) => {
+ const controller = new MediaRecorderController();
+ controller.recordingState.next(recordingState);
+ controller.resume();
+ expect(controller.recordedChunkDurations).toHaveLength(0);
+ expect(controller.recordingState.value).toBe(recordingState);
+ },
+ );
+ });
+
+ describe('stop', () => {
+ it('returns existing recording', async () => {
+ const controller = new MediaRecorderController();
+ const existingRecording = generateVoiceRecordingAttachment();
+ controller.recording.next(existingRecording);
+ expect(await controller.stop()).toStrictEqual(expect.objectContaining(existingRecording));
+ });
+
+ it.each([MediaRecordingState.STOPPED, undefined])(
+ 'does nothing if recording state is %s',
+ async (recordingState) => {
+ const controller = new MediaRecorderController();
+ controller.recordingState.next(recordingState);
+ expect(await controller.stop()).toBeUndefined();
+ expect(controller.recordingState.value).toBe(recordingState);
+ },
+ );
+
+ it.each([MediaRecordingState.RECORDING, MediaRecordingState.PAUSED])(
+ 'stops recording if recording state is %s',
+ async (recordingState) => {
+ const controller = new MediaRecorderController();
+ await controller.start();
+ controller.mediaRecorder.state = MediaRecordingState.RECORDING;
+ if (recordingState === MediaRecordingState.PAUSED) {
+ controller.pause();
+ controller.mediaRecorder.state = MediaRecordingState.PAUSED;
+ }
+ const voiceRecording = generateVoiceRecordingAttachment();
+ setTimeout(() => {
+ controller.signalRecordingReady(voiceRecording);
+ }, 0);
+ const stopResult = await controller.stop();
+ expect(stopResult).toStrictEqual(expect.objectContaining(voiceRecording));
+ expect(controller.startTime).toBeUndefined();
+ expect(controller.mediaRecorder.stop).toHaveBeenCalledWith();
+ expect(controller.amplitudeRecorder.state.value).toBe(AmplitudeRecorderState.STOPPED);
+ expect(controller.recordingState.value).toBe(MediaRecordingState.STOPPED);
+ },
+ );
+ });
+
+ describe('handleDataavailable event handler', () => {
+ it('does nothing if event does not contain data', () => {
+ const controller = new MediaRecorderController();
+ controller.handleDataavailableEvent(
+ generateDataavailableEvent({
+ dataOverrides: { data: new Blob([], { type: 'audio/webm' }) },
+ }),
+ );
+ expect(controller.recording.value).toBeUndefined();
+ });
+
+ it('handles error', () => {
+ const errorMsg = 'Error making voice recording';
+ const controller = new MediaRecorderController();
+ const makeVoiceRecordingSpy = jest
+ .spyOn(controller, 'makeVoiceRecording')
+ .mockRejectedValue(new Error(errorMsg));
+ expectRegistersError({
+ action: () => controller.handleDataavailableEvent(generateDataavailableEvent()),
+ controller,
+ errorMsg,
+ notificationMsg: 'An error has occurred during the recording processing',
+ });
+ makeVoiceRecordingSpy.mockRestore();
+ });
+
+ it('does not emit recording if generation was unsuccessful', async () => {
+ const controller = new MediaRecorderController();
+ const makeVoiceRecordingSpy = jest
+ .spyOn(controller, 'makeVoiceRecording')
+ .mockResolvedValue(undefined);
+ await controller.handleDataavailableEvent(generateDataavailableEvent());
+ expect(controller.recording.value).toBeUndefined();
+ makeVoiceRecordingSpy.mockRestore();
+ });
+
+ it('emits recording if generation was successful', async () => {
+ const controller = new MediaRecorderController();
+ const voiceRecording = generateVoiceRecordingAttachment();
+ const makeVoiceRecordingSpy = jest
+ .spyOn(controller, 'makeVoiceRecording')
+ .mockResolvedValue(voiceRecording);
+ await controller.handleDataavailableEvent(generateDataavailableEvent());
+ expect(controller.recording.value).toStrictEqual(expect.objectContaining(voiceRecording));
+ makeVoiceRecordingSpy.mockRestore();
+ });
+ });
+
+ describe('makeVoiceRecording', () => {
+ it('does not generate recording if no data was recorded', async () => {
+ const controller = new MediaRecorderController();
+ const recording = await controller.makeVoiceRecording();
+ expect(recording).toBeUndefined();
+ });
+
+ it('revokes URI of the previous recording', async () => {
+ const recordingUri = 'recordingUri';
+ const controller = new MediaRecorderController();
+ controller.recordingUri = recordingUri;
+ await controller.makeVoiceRecording();
+ expect(window.URL.revokeObjectURL).toHaveBeenCalledWith(recordingUri);
+ });
+
+ it.each([
+ ['does not add', 'audio/mp4'],
+ ['adds', 'audio/webm'],
+ ['does not add', 'audio/ogg'],
+ ])('%s recording duration to %s recording', async (_, mimeType) => {
+ const controller = new MediaRecorderController({
+ config: { mediaRecorderConfig: { mimeType } },
+ });
+ controller.recordedData.push(new Blob([1], { type: mimeType }));
+ await controller.makeVoiceRecording();
+ if (mimeType === 'audio/webm') {
+ expect(fixWebmDuration).toHaveBeenCalledTimes(1);
+ } else {
+ expect(fixWebmDuration).not.toHaveBeenCalled();
+ }
+ });
+
+ it.each([
+ ['does not transcode', 'audio/mp4'],
+ ['transcodes', 'audio/webm'],
+ ['transcodes', 'audio/ogg'],
+ ])('%s recording of MIME type %s', async (_, mimeType) => {
+ const controller = new MediaRecorderController({
+ config: { mediaRecorderConfig: { mimeType } },
+ });
+ controller.recordedData.push(new Blob([1], { type: mimeType }));
+ await controller.makeVoiceRecording();
+ if (mimeType === 'audio/mp4') {
+ expect(transcodeSpy).not.toHaveBeenCalled();
+ } else {
+ expect(transcodeSpy).toHaveBeenCalledTimes(1);
+ }
+ });
+
+ it.each([
+ ['audio/mp4', 'audio/mp4'],
+ ['audio/mp3', 'audio/webm'],
+ ['audio/mp3', 'audio/ogg'],
+ ])(
+ 'generates recording of MIME type %s for original recording of MIME type %s',
+ async (targetMimeType, recordedMimeType) => {
+ const controller = new MediaRecorderController({
+ config: { mediaRecorderConfig: { mimeType: recordedMimeType } },
+ });
+
+ controller.recordedData = [
+ new Blob(new Uint8Array(dataPoints), { type: recordedMimeType }),
+ ];
+ controller.recordedChunkDurations = dataPoints.map((n) => n * 1000);
+ const recording = await controller.makeVoiceRecording();
+
+ expect(recording).toStrictEqual(
+ expect.objectContaining({
+ $internal: {
+ file: fileMock,
+ id: nanoidMockValue,
+ },
+ asset_url: fileObjectURL,
+ duration: dataPoints.reduce((acc, n) => acc + n),
+ file_size: recordedChunkCount,
+ mime_type: targetMimeType,
+ title: fileMock.name,
+ type: RecordingAttachmentType.VOICE_RECORDING,
+ waveform_data: dataPoints,
+ }),
+ );
+ },
+ );
+ });
+});
diff --git a/src/components/MediaRecorder/classes/index.ts b/src/components/MediaRecorder/classes/index.ts
new file mode 100644
index 000000000..3cd112128
--- /dev/null
+++ b/src/components/MediaRecorder/classes/index.ts
@@ -0,0 +1,3 @@
+export * from './BrowserPermission';
+export * from './MediaRecorderController';
+export type { AmplitudeRecorderConfig } from './AmplitudeRecorder';
diff --git a/src/components/MediaRecorder/hooks/__tests__/useMediaRecorder.test.js b/src/components/MediaRecorder/hooks/__tests__/useMediaRecorder.test.js
new file mode 100644
index 000000000..d33922e7e
--- /dev/null
+++ b/src/components/MediaRecorder/hooks/__tests__/useMediaRecorder.test.js
@@ -0,0 +1,164 @@
+import { TranslationProvider } from '../../../../context';
+import { renderHook } from '@testing-library/react-hooks';
+import React from 'react';
+import { useMediaRecorder } from '../useMediaRecorder';
+import { EventEmitterMock } from '../../../../mock-builders/browser';
+import { act } from '@testing-library/react';
+import { DEFAULT_AMPLITUDE_RECORDER_CONFIG } from '../../classes/AmplitudeRecorder';
+import { DEFAULT_AUDIO_TRANSCODER_CONFIG } from '../../classes';
+import { generateVoiceRecordingAttachment } from '../../../../mock-builders';
+
+const handleSubmit = jest.fn();
+const uploadAttachment = jest.fn();
+
+const defaultMockPermissionState = 'prompt';
+const status = new EventEmitterMock();
+status.state = defaultMockPermissionState;
+window.navigator.permissions = {
+ query: jest.fn().mockResolvedValue(status),
+};
+
+const translationContext = {
+ t: (s) => s,
+};
+
+const render = async (params = {}) => {
+ const wrapper = ({ children }) => (
+    <TranslationProvider value={translationContext}>{children}</TranslationProvider>
+ );
+ let result;
+ await act(() => {
+ result = renderHook(() => useMediaRecorder({ enabled: true, ...params }), { wrapper });
+ });
+ return result;
+};
+
+describe('useMediaRecorder', () => {
+ afterEach(jest.clearAllMocks);
+
+ it('subscribes to MediaRecorderController state updates', async () => {
+ const {
+ result: {
+ current: { permissionState, recorder, recording, recordingState },
+ },
+ } = await render();
+ expect(recorder.permission.isWatching).toBe(true);
+ expect(permissionState).toBe(defaultMockPermissionState);
+ expect(recording).toBeUndefined();
+ expect(recordingState).toBeUndefined();
+ });
+
+ it('unsubscribes MediaRecorderController state updates on unmount', async () => {
+ const {
+ result: {
+ current: { recorder },
+ },
+ unmount,
+ } = await render();
+ unmount();
+ expect(recorder.permission.isWatching).toBe(false);
+ });
+
+ it('does not initiate MediaRecorderController instance when recording is disabled', async () => {
+ const {
+ result: {
+ current: { permissionState, recorder, recording, recordingState },
+ },
+ } = await render({ enabled: false });
+ expect(recorder).toBeUndefined();
+ expect(permissionState).toBeUndefined();
+ expect(recording).toBeUndefined();
+ expect(recordingState).toBeUndefined();
+ });
+
+ it('forwards recordingConfig to recorder instance', async () => {
+ const mediaRecorderConfig = { mimeType: 'audio/ogg' };
+ const {
+ result: {
+ current: { recorder },
+ },
+ } = await render({ recordingConfig: { mediaRecorderConfig } });
+ expect(recorder.mediaRecorderConfig).toStrictEqual(
+ expect.objectContaining(mediaRecorderConfig),
+ );
+ expect(recorder.amplitudeRecorderConfig).toStrictEqual(
+ expect.objectContaining(DEFAULT_AMPLITUDE_RECORDER_CONFIG),
+ );
+ expect(recorder.transcoderConfig).toStrictEqual(
+ expect.objectContaining(DEFAULT_AUDIO_TRANSCODER_CONFIG),
+ );
+ });
+
+ it('forwards custom function to generate recording title to recorder instance', async () => {
+ const customTitle = 'custom title';
+ const generateRecordingTitle = () => customTitle;
+ const {
+ result: {
+ current: { recorder },
+ },
+ } = await render({ generateRecordingTitle });
+ expect(recorder.generateRecordingTitle()).toBe(customTitle);
+ });
+
+ describe('completeRecording', () => {
+ it('does nothing if recording is disabled', async () => {
+ const {
+ result: {
+ current: { completeRecording },
+ },
+ } = await render({ enabled: false, handleSubmit, uploadAttachment });
+ await completeRecording();
+ expect(uploadAttachment).not.toHaveBeenCalled();
+ expect(handleSubmit).not.toHaveBeenCalled();
+ });
+
+ it('does nothing if recording attachment is not generated on stop', async () => {
+ const {
+ result: {
+ current: { completeRecording, recorder },
+ },
+ } = await render({ handleSubmit, uploadAttachment });
+ const recorderStopSpy = jest.spyOn(recorder, 'stop').mockResolvedValue(undefined);
+ const recorderCleanUpSpy = jest.spyOn(recorder, 'cleanUp').mockResolvedValue(undefined);
+ await completeRecording();
+ expect(recorderStopSpy).toHaveBeenCalledWith();
+ expect(recorderCleanUpSpy).not.toHaveBeenCalledWith();
+ expect(uploadAttachment).not.toHaveBeenCalled();
+ expect(handleSubmit).not.toHaveBeenCalled();
+ });
+
+ it('uploads and submits the attachment', async () => {
+ const generatedVoiceRecording = generateVoiceRecordingAttachment();
+ const {
+ result: {
+ current: { completeRecording, recorder },
+ },
+ } = await render({ handleSubmit, uploadAttachment });
+ jest.spyOn(recorder, 'stop').mockResolvedValue(generatedVoiceRecording);
+ const recorderCleanUpSpy = jest.spyOn(recorder, 'cleanUp').mockResolvedValue(undefined);
+ await act(() => {
+ completeRecording();
+ });
+ expect(uploadAttachment).toHaveBeenCalledWith(generatedVoiceRecording);
+ expect(handleSubmit).toHaveBeenCalledWith();
+ expect(recorderCleanUpSpy).toHaveBeenCalledWith();
+ });
+
+ it('uploads but does not submit the attachment if multiple async messages enabled', async () => {
+ const generatedVoiceRecording = generateVoiceRecordingAttachment();
+ const {
+ result: {
+ current: { completeRecording, recorder },
+ },
+ } = await render({ asyncMessagesMultiSendEnabled: true, handleSubmit, uploadAttachment });
+ jest.spyOn(recorder, 'stop').mockResolvedValue(generatedVoiceRecording);
+ const recorderCleanUpSpy = jest.spyOn(recorder, 'cleanUp').mockResolvedValue(undefined);
+ await act(() => {
+ completeRecording();
+ });
+ expect(uploadAttachment).toHaveBeenCalledWith(generatedVoiceRecording);
+ expect(handleSubmit).not.toHaveBeenCalled();
+ expect(recorderCleanUpSpy).toHaveBeenCalledWith();
+ });
+ });
+});
diff --git a/src/components/MediaRecorder/hooks/index.ts b/src/components/MediaRecorder/hooks/index.ts
new file mode 100644
index 000000000..f2b882e3b
--- /dev/null
+++ b/src/components/MediaRecorder/hooks/index.ts
@@ -0,0 +1 @@
+export type { CustomAudioRecordingConfig, RecordingController } from './useMediaRecorder';
diff --git a/src/components/MediaRecorder/hooks/useMediaRecorder.ts b/src/components/MediaRecorder/hooks/useMediaRecorder.ts
new file mode 100644
index 000000000..e4e0ca931
--- /dev/null
+++ b/src/components/MediaRecorder/hooks/useMediaRecorder.ts
@@ -0,0 +1,99 @@
+import { useCallback, useEffect, useMemo, useState } from 'react';
+import { MessageInputContextValue, useTranslationContext } from '../../../context';
+import { AudioRecorderConfig, MediaRecorderController, MediaRecordingState } from '../classes';
+
+import type { LocalVoiceRecordingAttachment } from '../../MessageInput';
+import type { DefaultStreamChatGenerics } from '../../../types';
+
+export type CustomAudioRecordingConfig = Partial<AudioRecorderConfig>;
+
+export type RecordingController<StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics> = {
+  completeRecording: () => void;
+  permissionState?: PermissionState;
+  recorder?: MediaRecorderController;
+  recording?: LocalVoiceRecordingAttachment<StreamChatGenerics>;
+  recordingState?: MediaRecordingState;
+};
+
+type UseMediaRecorderParams<
+ StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics
+> = Pick<
+  MessageInputContextValue<StreamChatGenerics>,
+ 'asyncMessagesMultiSendEnabled' | 'handleSubmit' | 'uploadAttachment'
+> & {
+ enabled: boolean;
+ generateRecordingTitle?: (mimeType: string) => string;
+ recordingConfig?: CustomAudioRecordingConfig;
+};
+
+export const useMediaRecorder = <
+ StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics
+>({
+ asyncMessagesMultiSendEnabled,
+ enabled,
+ generateRecordingTitle,
+ handleSubmit,
+ recordingConfig,
+ uploadAttachment,
+}: UseMediaRecorderParams<StreamChatGenerics>): RecordingController<StreamChatGenerics> => {
+ const { t } = useTranslationContext('useMediaRecorder');
+
+  const [recording, setRecording] = useState<LocalVoiceRecordingAttachment<StreamChatGenerics>>();
+  const [recordingState, setRecordingState] = useState<MediaRecordingState>();
+  const [permissionState, setPermissionState] = useState<PermissionState>();
+ const [isScheduledForSubmit, scheduleForSubmit] = useState(false);
+
+ const recorder = useMemo(
+ () =>
+ enabled
+ ? new MediaRecorderController({
+ config: recordingConfig ?? {},
+ generateRecordingTitle,
+ t,
+ })
+ : undefined,
+ [recordingConfig, enabled, generateRecordingTitle, t],
+ );
+
+ const completeRecording = useCallback(async () => {
+ if (!recorder) return;
+ const recording = await recorder.stop();
+ if (!recording) return;
+ await uploadAttachment(recording);
+ if (!asyncMessagesMultiSendEnabled) {
+ // FIXME: cannot call handleSubmit() directly as the function has stale reference to attachments
+ scheduleForSubmit(true);
+ }
+ recorder.cleanUp();
+ }, [asyncMessagesMultiSendEnabled, recorder, uploadAttachment]);
+
+ useEffect(() => {
+ if (!isScheduledForSubmit) return;
+ handleSubmit();
+ scheduleForSubmit(false);
+ }, [handleSubmit, isScheduledForSubmit]);
+
+ useEffect(() => {
+ if (!recorder) return;
+ recorder.permission.watch();
+ const recordingSubscription = recorder.recording.subscribe(setRecording);
+ const recordingStateSubscription = recorder.recordingState.subscribe(setRecordingState);
+ const permissionStateSubscription = recorder.permission.state.subscribe(setPermissionState);
+
+ return () => {
+ recorder.cancel();
+ recorder.permission.unwatch();
+ recordingSubscription.unsubscribe();
+ recordingStateSubscription.unsubscribe();
+ permissionStateSubscription.unsubscribe();
+ };
+ }, [recorder]);
+
+ return {
+ completeRecording,
+ permissionState,
+ recorder,
+ recording,
+ recordingState,
+ };
+};
diff --git a/src/components/MediaRecorder/index.ts b/src/components/MediaRecorder/index.ts
new file mode 100644
index 000000000..9c40539fc
--- /dev/null
+++ b/src/components/MediaRecorder/index.ts
@@ -0,0 +1,5 @@
+export * from './RecordingPermissionDeniedNotification';
+export * from './AudioRecorder';
+export * from './hooks';
+export { MediaRecordingState } from './classes/MediaRecorderController';
+export { RecordingPermission } from './classes/BrowserPermission';
diff --git a/src/components/MediaRecorder/observable/BehaviorSubject.ts b/src/components/MediaRecorder/observable/BehaviorSubject.ts
new file mode 100644
index 000000000..024f0425d
--- /dev/null
+++ b/src/components/MediaRecorder/observable/BehaviorSubject.ts
@@ -0,0 +1,28 @@
+import { Subject } from './Subject';
+import { createObserver, ObserverOrNext } from './Observer';
+import { Subscription } from './Subscription';
+
+export class BehaviorSubject<T> extends Subject<T> {
+ constructor(private _value: T) {
+ super();
+ }
+
+ get value(): T {
+ const { _value, thrownError } = this;
+ if (thrownError) {
+ throw thrownError;
+ }
+ return _value;
+ }
+
+  subscribe(observerOrNext: ObserverOrNext<T>): Subscription {
+ const observer = createObserver(observerOrNext);
+ const subscription = super.subscribe(observerOrNext);
+ if (!subscription.closed) observer.next(this._value);
+ return subscription;
+ }
+
+ next(value: T): void {
+ super.next((this._value = value));
+ }
+}
diff --git a/src/components/MediaRecorder/observable/Observable.ts b/src/components/MediaRecorder/observable/Observable.ts
new file mode 100644
index 000000000..577f51a04
--- /dev/null
+++ b/src/components/MediaRecorder/observable/Observable.ts
@@ -0,0 +1,35 @@
+import { createObserver, ObserverOrNext } from './Observer';
+import { Subscription } from './Subscription';
+
+export interface Unsubscribable {
+ unsubscribe(): void;
+}
+
+type Producer<T> = (observer: ObserverOrNext<T>) => Subscription;
+
+export interface Subscribable<T> {
+  subscribe(observerOrNext: ObserverOrNext<T>): Unsubscribable;
+}
+
+export class Observable<T> implements Subscribable<T> {
+  protected _closed = false;
+  private _producer: Producer<T> | undefined;
+
+  constructor(producer?: Producer<T>) {
+    if (producer) this._producer = producer;
+  }
+
+  get closed() {
+    return this._closed;
+  }
+
+  subscribe(observerOrNext: ObserverOrNext<T>): Subscription {
+ const observer = createObserver(observerOrNext);
+ if (!this.closed) {
+ this._producer?.(observer);
+ }
+ return new Subscription(() => {
+ this._closed = true;
+ });
+ }
+}
diff --git a/src/components/MediaRecorder/observable/Observer.ts b/src/components/MediaRecorder/observable/Observer.ts
new file mode 100644
index 000000000..abcb1531f
--- /dev/null
+++ b/src/components/MediaRecorder/observable/Observer.ts
@@ -0,0 +1,11 @@
+type Next<T> = (value: T) => void;
+export type Observer<T> = {
+  next(value: T): void;
+  complete?(): void;
+  error?(error: Error): void;
+};
+export type ObserverOrNext<T> = Next<T> | Observer<T>;
+
+export function createObserver<T>(observerOrNext: ObserverOrNext<T>): Observer<T> {
+  return typeof observerOrNext === 'function' ? { next: observerOrNext } : observerOrNext;
+}
diff --git a/src/components/MediaRecorder/observable/Subject.ts b/src/components/MediaRecorder/observable/Subject.ts
new file mode 100644
index 000000000..213258b24
--- /dev/null
+++ b/src/components/MediaRecorder/observable/Subject.ts
@@ -0,0 +1,65 @@
+import { Observable } from './Observable';
+import { Subscription, SubscriptionLike } from './Subscription';
+import { createObserver, Observer, ObserverOrNext } from './Observer';
+
+export class Subject<T> extends Observable<T> implements SubscriptionLike {
+  private _observers: Map<number, Observer<T>> = new Map();
+ private _observerCounter = 0;
+ thrownError: Error | undefined;
+
+ constructor() {
+ super();
+ }
+
+ get observers() {
+ return Array.from(this._observers.values());
+ }
+
+ next(value: T) {
+ if (this.closed) return;
+ const observers = this.observers;
+ for (let i = 0; i < observers.length; i++) {
+ observers[i].next(value);
+ }
+ }
+
+ error(err: Error) {
+ if (this.closed) return;
+ this.thrownError = err;
+ const { observers } = this;
+ for (let i = 0; i < observers.length; i++) {
+ observers[i].error?.(err);
+ }
+ this._observers.clear();
+ }
+
+ complete() {
+ if (this.closed) return;
+ this._closed = true;
+ const { observers } = this;
+ for (let i = 0; i < observers.length; i++) {
+ observers[i].complete?.();
+ }
+ this._observers.clear();
+ }
+
+ subscribe(observerOrNext: ObserverOrNext): Subscription {
+ const observer = createObserver(observerOrNext);
+ if (this.thrownError || this.closed) {
+ const subscription = new Subscription();
+ subscription.closed = true;
+ return subscription;
+ }
+
+ const observerId = this._observerCounter++;
+ this._observers.set(observerId, observer);
+ return new Subscription(() => {
+ this._observers.delete(observerId);
+ });
+ }
+
+ unsubscribe(): void {
+ this._closed = true;
+ this._observers.clear();
+ }
+}
diff --git a/src/components/MediaRecorder/observable/Subscription.ts b/src/components/MediaRecorder/observable/Subscription.ts
new file mode 100644
index 000000000..c92b86b42
--- /dev/null
+++ b/src/components/MediaRecorder/observable/Subscription.ts
@@ -0,0 +1,19 @@
+export interface SubscriptionLike {
+ closed: boolean;
+
+ unsubscribe(): void;
+}
+
+export class Subscription implements SubscriptionLike {
+ closed = false;
+ private _unsubscribe: (() => void) | undefined;
+
+ constructor(unsubscribe?: () => void) {
+ this._unsubscribe = unsubscribe;
+ }
+
+ unsubscribe() {
+ this.closed = true;
+ this._unsubscribe?.();
+ }
+}
diff --git a/src/components/MediaRecorder/observable/__tests__/BehaviorSubject.test.js b/src/components/MediaRecorder/observable/__tests__/BehaviorSubject.test.js
new file mode 100644
index 000000000..e20542c46
--- /dev/null
+++ b/src/components/MediaRecorder/observable/__tests__/BehaviorSubject.test.js
@@ -0,0 +1,126 @@
+import { BehaviorSubject } from '../BehaviorSubject';
+
+const emittedValues = ['emitted-1', 'emitted-2'];
+const errors = emittedValues.map((val) => new Error(val));
+const initialValue = 'init';
+
+describe('BehaviorSubject', () => {
+ it('allows access to current value', () => {
+ const subject = new BehaviorSubject(initialValue);
+ expect(subject.value).toBe(initialValue);
+ subject.next(emittedValues[0]);
+ expect(subject.value).toBe(emittedValues[0]);
+ });
+
+ it('emits current value on subscribe', () => {
+ const subject = new BehaviorSubject(initialValue);
+ const observers = Array.from({ length: 5 }, () => jest.fn());
+ observers.forEach((observer) => subject.subscribe(observer));
+ observers.forEach((observer) => expect(observer).toHaveBeenCalledWith(initialValue));
+ });
+
+ it('emits value to all observers', () => {
+ const subject = new BehaviorSubject();
+ const observers = Array.from({ length: 5 }, () => jest.fn());
+ observers.forEach((observer) => subject.subscribe(observer));
+ emittedValues.forEach((emitted) => {
+ subject.next(emitted);
+ observers.forEach((observer) => expect(observer).toHaveBeenCalledWith(emitted));
+ });
+ expect(subject.observers).toHaveLength(5);
+ expect(subject.thrownError).toBeUndefined();
+ });
+
+ it('emits error to all observers', () => {
+ const subject = new BehaviorSubject();
+ const observers = Array.from({ length: 5 }, () => ({
+ error: jest.fn(),
+ next: jest.fn(),
+ }));
+ observers.forEach((observer) => subject.subscribe(observer));
+
+ errors.forEach((emitted) => {
+ subject.error(emitted);
+ });
+
+ observers.forEach((observer) => {
+ expect(observer.error).toHaveBeenCalledWith(errors[0]);
+ expect(observer.error).toHaveBeenCalledTimes(1);
+ });
+ expect(subject.observers).toHaveLength(0);
+ expect(subject.thrownError).toBe(errors[1]);
+ });
+
+ it('completes all subscriptions', () => {
+ const subject = new BehaviorSubject();
+ const observers = Array.from({ length: 5 }, () => ({
+ complete: jest.fn(),
+ next: jest.fn(),
+ }));
+ observers.forEach((observer) => subject.subscribe(observer));
+ Array.from({ length: 2 }, () => subject.complete());
+ observers.forEach((observer) => {
+ expect(observer.complete).toHaveBeenCalledTimes(1);
+ });
+ expect(subject.observers).toHaveLength(0);
+ expect(subject.thrownError).toBeUndefined();
+ });
+
+ it('unsubscribes observers', () => {
+ const subject = new BehaviorSubject(initialValue);
+ const observers = Array.from({ length: 5 }, () => ({
+ complete: jest.fn(),
+ error: jest.fn(),
+ next: jest.fn(),
+ }));
+ const subscriptions = observers.map((observer) => subject.subscribe(observer));
+ subscriptions.slice(3).forEach((subscription) => subscription.unsubscribe());
+ expect(subject.observers).toHaveLength(3);
+
+ subject.next(emittedValues[0]);
+ subject.error(errors[1]);
+ subject.complete();
+
+ observers.slice(0, 3).forEach((observer) => {
+ expect(observer.next).toHaveBeenCalledTimes(2);
+ expect(observer.next.mock.calls[0][0]).toBe(initialValue);
+ expect(observer.next.mock.calls[1][0]).toBe(emittedValues[0]);
+ expect(observer.error).toHaveBeenCalledTimes(1);
+ expect(observer.error).toHaveBeenCalledWith(errors[1]);
+ expect(observer.complete).not.toHaveBeenCalled();
+ });
+ observers.slice(3).forEach((observer) => {
+ expect(observer.next).toHaveBeenCalledTimes(1);
+ expect(observer.next.mock.calls[0][0]).toBe(initialValue);
+ expect(observer.error).not.toHaveBeenCalled();
+ expect(observer.complete).not.toHaveBeenCalled();
+ });
+ expect(subject.observers).toHaveLength(0);
+ expect(subject.thrownError).toBe(errors[1]);
+ });
+
+ it('unsubscribes', () => {
+ const subject = new BehaviorSubject(initialValue);
+ const observers = Array.from({ length: 5 }, () => ({
+ complete: jest.fn(),
+ error: jest.fn(),
+ next: jest.fn(),
+ }));
+ observers.map((observer) => subject.subscribe(observer));
+ subject.unsubscribe();
+
+ subject.next(emittedValues[0]);
+ subject.error(errors[1]);
+ subject.complete();
+
+ observers.slice(3).forEach((observer) => {
+ expect(observer.next).toHaveBeenCalledTimes(1);
+ expect(observer.next).toHaveBeenCalledWith(initialValue);
+ expect(observer.error).not.toHaveBeenCalled();
+ expect(observer.complete).not.toHaveBeenCalled();
+ expect(observer.complete).not.toHaveBeenCalled();
+ });
+ expect(subject.observers).toHaveLength(0);
+ expect(subject.thrownError).toBeUndefined();
+ });
+});
diff --git a/src/components/MediaRecorder/observable/__tests__/Subject.test.js b/src/components/MediaRecorder/observable/__tests__/Subject.test.js
new file mode 100644
index 000000000..fdd1fd42b
--- /dev/null
+++ b/src/components/MediaRecorder/observable/__tests__/Subject.test.js
@@ -0,0 +1,106 @@
+import { Subject } from '../Subject';
+
+const emittedValues = ['emitted-1', 'emitted-2'];
+const errors = emittedValues.map((val) => new Error(val));
+
+describe('Subject', () => {
+ it('emits value to all observers', () => {
+ const subject = new Subject();
+ const observers = Array.from({ length: 5 }, () => jest.fn());
+ observers.forEach((observer) => subject.subscribe(observer));
+ emittedValues.forEach((emitted) => {
+ subject.next(emitted);
+ observers.forEach((observer) => expect(observer).toHaveBeenCalledWith(emitted));
+ });
+ expect(subject.observers).toHaveLength(5);
+ });
+
+ it('emits error to all observers', () => {
+ const subject = new Subject();
+ const observers = Array.from({ length: 5 }, () => ({
+ error: jest.fn(),
+ next: jest.fn(),
+ }));
+ observers.forEach((observer) => subject.subscribe(observer));
+
+ errors.forEach((emitted) => {
+ subject.error(emitted);
+ });
+
+ observers.forEach((observer) => {
+ expect(observer.error).toHaveBeenCalledWith(errors[0]);
+ expect(observer.error).toHaveBeenCalledTimes(1);
+ });
+
+ expect(subject.observers).toHaveLength(0);
+ expect(subject.thrownError).toBe(errors[1]);
+ });
+
+ it('completes all subscriptions', () => {
+ const subject = new Subject();
+ const observers = Array.from({ length: 5 }, () => ({
+ complete: jest.fn(),
+ next: jest.fn(),
+ }));
+ observers.forEach((observer) => subject.subscribe(observer));
+ Array.from({ length: 2 }, () => subject.complete());
+ observers.forEach((observer) => {
+ expect(observer.complete).toHaveBeenCalledTimes(1);
+ });
+ expect(subject.observers).toHaveLength(0);
+ expect(subject.thrownError).toBeUndefined();
+ });
+
+ it('unsubscribes observers', () => {
+ const subject = new Subject();
+ const observers = Array.from({ length: 5 }, () => ({
+ complete: jest.fn(),
+ error: jest.fn(),
+ next: jest.fn(),
+ }));
+ const subscriptions = observers.map((observer) => subject.subscribe(observer));
+ subscriptions.slice(3).forEach((subscription) => subscription.unsubscribe());
+
+ subject.next(emittedValues[0]);
+ subject.error(errors[1]);
+ subject.complete();
+
+ observers.slice(0, 3).forEach((observer) => {
+ expect(observer.next).toHaveBeenCalledTimes(1);
+ expect(observer.next).toHaveBeenCalledWith(emittedValues[0]);
+ expect(observer.error).toHaveBeenCalledTimes(1);
+ expect(observer.error).toHaveBeenCalledWith(errors[1]);
+ expect(observer.complete).not.toHaveBeenCalled();
+ });
+ observers.slice(3).forEach((observer) => {
+ expect(observer.next).not.toHaveBeenCalled();
+ expect(observer.error).not.toHaveBeenCalled();
+ expect(observer.complete).not.toHaveBeenCalled();
+ });
+ expect(subject.observers).toHaveLength(0);
+ expect(subject.thrownError).toBe(errors[1]);
+ });
+
+ it('unsubscribes', () => {
+ const subject = new Subject();
+ const observers = Array.from({ length: 5 }, () => ({
+ complete: jest.fn(),
+ error: jest.fn(),
+ next: jest.fn(),
+ }));
+ observers.map((observer) => subject.subscribe(observer));
+ subject.unsubscribe();
+
+ subject.next(emittedValues[0]);
+ subject.error(errors[1]);
+ subject.complete();
+
+ observers.slice(3).forEach((observer) => {
+ expect(observer.next).not.toHaveBeenCalled();
+ expect(observer.error).not.toHaveBeenCalled();
+ expect(observer.complete).not.toHaveBeenCalled();
+ });
+ expect(subject.observers).toHaveLength(0);
+ expect(subject.thrownError).toBeUndefined();
+ });
+});
diff --git a/src/components/MediaRecorder/observable/index.ts b/src/components/MediaRecorder/observable/index.ts
new file mode 100644
index 000000000..f0057e203
--- /dev/null
+++ b/src/components/MediaRecorder/observable/index.ts
@@ -0,0 +1,5 @@
+export * from './BehaviorSubject';
+export * from './Observable';
+export * from './Observer';
+export * from './Subject';
+export * from './Subscription';
diff --git a/src/components/MediaRecorder/transcode/audioProcessing.ts b/src/components/MediaRecorder/transcode/audioProcessing.ts
new file mode 100644
index 000000000..289692429
--- /dev/null
+++ b/src/components/MediaRecorder/transcode/audioProcessing.ts
@@ -0,0 +1,37 @@
+import { readFileAsArrayBuffer } from '../../ReactFileUtilities';
+
+/**
+ * In the context of resampling audio data, AudioContext is used to decode the input audio file into an AudioBuffer,
+ * which is a fundamental data structure representing audio data.
+ * @param file
+ */
+export const toAudioBuffer = async (file: File) => {
+ const audioCtx = new AudioContext();
+
+ const arrayBuffer = await readFileAsArrayBuffer(file);
+ const decodedData = await audioCtx.decodeAudioData(arrayBuffer);
+ if (audioCtx.state !== 'closed') await audioCtx.close();
+ return decodedData;
+};
+
+/**
+ * OfflineAudioContext is a specialized type of AudioContext that does not render audio in real-time and is used for offline audio processing tasks.
+ * It allows performing audio processing and rendering without actually playing the audio through speakers or outputting it to a destination.
+ * In the context of resampling audio data, OfflineAudioContext is used to resample the decoded AudioBuffer from a file to the desired sample rate.
+ * It provides more flexibility and control over audio processing, as it can operate at different sample rates and durations compared to real-time audio contexts.
+ * @param audioBuffer
+ * @param sampleRate
+ */
+export const renderAudio = async (audioBuffer: AudioBuffer, sampleRate: number) => {
+ const offlineAudioCtx = new OfflineAudioContext(
+ audioBuffer.numberOfChannels,
+ audioBuffer.duration * sampleRate,
+ sampleRate,
+ );
+ const source = offlineAudioCtx.createBufferSource();
+ source.buffer = audioBuffer;
+ source.connect(offlineAudioCtx.destination);
+ source.start();
+
+ return await offlineAudioCtx.startRendering();
+};
diff --git a/src/components/MediaRecorder/transcode/index.ts b/src/components/MediaRecorder/transcode/index.ts
new file mode 100644
index 000000000..af28daff1
--- /dev/null
+++ b/src/components/MediaRecorder/transcode/index.ts
@@ -0,0 +1,25 @@
+import { encodeToWaw } from './wav';
+import { encodeToMp3 } from './mp3';
+import { createFileFromBlobs, getExtensionFromMimeType } from '../../ReactFileUtilities';
+
+type TranscodeParams = {
+ blob: Blob;
+ sampleRate: number;
+ targetMimeType: string;
+};
+export const transcode = ({ blob, sampleRate, targetMimeType }: TranscodeParams): Promise<Blob> => {
+ const file = createFileFromBlobs({
+ blobsArray: [blob],
+ fileName: `audio_recording_${new Date().toISOString()}.${getExtensionFromMimeType(blob.type)}`,
+ mimeType: blob.type,
+ });
+
+ if (targetMimeType.match('audio/wav')) {
+ return encodeToWaw(file, sampleRate);
+ }
+
+ if (targetMimeType.match('audio/mp3')) {
+ return encodeToMp3(file, sampleRate);
+ }
+ return Promise.resolve(blob);
+};
diff --git a/src/components/MediaRecorder/transcode/mp3.ts b/src/components/MediaRecorder/transcode/mp3.ts
new file mode 100644
index 000000000..b0066862f
--- /dev/null
+++ b/src/components/MediaRecorder/transcode/mp3.ts
@@ -0,0 +1,47 @@
+import { Mp3Encoder } from '@breezystack/lamejs';
+import { renderAudio, toAudioBuffer } from './audioProcessing';
+
+const ENCODING_BIT_RATE = 128; // kbps;
+const COUNT_SAMPLES_PER_ENCODED_BLOCK = 1152;
+
+const float32ArrayToInt16Array = (float32Arr: Float32Array) => {
+ const int16Arr = new Int16Array(float32Arr.length);
+ for (let i = 0; i < float32Arr.length; i++) {
+ const float32Value = float32Arr[i];
+ // Clamp the float value between -1 and 1
+ const clampedValue = Math.max(-1, Math.min(1, float32Value));
+ // Convert the float value to a signed 16-bit integer
+ int16Arr[i] = Math.round(clampedValue * 32767);
+ }
+ return int16Arr;
+};
+
+const splitDataByChannel = (audioBuffer: AudioBuffer) =>
+ Array.from({ length: audioBuffer.numberOfChannels }, (_, i) => audioBuffer.getChannelData(i)).map(
+ float32ArrayToInt16Array,
+ );
+
+export async function encodeToMp3(file: File, sampleRate: number) {
+ const audioBuffer = await renderAudio(await toAudioBuffer(file), sampleRate);
+ const channelCount = audioBuffer.numberOfChannels;
+ const dataByChannel = splitDataByChannel(audioBuffer);
+ const mp3Encoder = new Mp3Encoder(channelCount, sampleRate, ENCODING_BIT_RATE);
+
+ const dataBuffer: Int8Array[] = [];
+ let remaining = dataByChannel[0].length;
+ for (
+ let i = 0;
+ remaining >= COUNT_SAMPLES_PER_ENCODED_BLOCK;
+ i += COUNT_SAMPLES_PER_ENCODED_BLOCK
+ ) {
+ const [leftChannelBlock, rightChannelBlock] = dataByChannel.map((channel) =>
+ channel.subarray(i, i + COUNT_SAMPLES_PER_ENCODED_BLOCK),
+ );
+ dataBuffer.push(new Int8Array(mp3Encoder.encodeBuffer(leftChannelBlock, rightChannelBlock)));
+ remaining -= COUNT_SAMPLES_PER_ENCODED_BLOCK;
+ }
+
+ const lastBlock = mp3Encoder.flush();
+ if (lastBlock.length) dataBuffer.push(new Int8Array(lastBlock));
+ return new Blob(dataBuffer, { type: 'audio/mp3;sbu_type=voice' });
+}
diff --git a/src/components/MediaRecorder/transcode/wav.ts b/src/components/MediaRecorder/transcode/wav.ts
new file mode 100644
index 000000000..cf305ff20
--- /dev/null
+++ b/src/components/MediaRecorder/transcode/wav.ts
@@ -0,0 +1,139 @@
+import { renderAudio, toAudioBuffer } from './audioProcessing';
+
+const WAV_HEADER_LENGTH_BYTES = 44 as const;
+const BYTES_PER_SAMPLE = 2 as const;
+const RIFF_FILE_MAX_BYTES = 4294967295 as const;
+
+const HEADER = {
+ AUDIO_FORMAT: { offset: 20, value: 1 }, // PCM = 1
+ BITS_PER_SAMPLE: { offset: 34, value: BYTES_PER_SAMPLE * 8 }, // 16 bits encoding
+ BLOCK_ALIGN: { offset: 32 },
+ BYTE_RATE: { offset: 28 },
+ CHANNEL_COUNT: { offset: 22 }, // 1 - mono, 2 - stereo
+ CHUNK_ID: { offset: 0, value: 0x52494646 }, // hex representation of string "RIFF" (Resource Interchange File Format) - identifies the file structure that defines a class of more specific file formats, e.g. WAVE
+ CHUNK_SIZE: { offset: 4 },
+ FILE_FORMAT: { offset: 8, value: 0x57415645 }, // hex representation of string "WAVE"
+ SAMPLE_RATE: { offset: 24 },
+ SUBCHUNK1_ID: { offset: 12, value: 0x666d7420 }, // hex representation of string "fmt " - identifies the start of "format" section of the header
+ SUBCHUNK1_SIZE: { offset: 16, value: 16 }, // Subchunk1 Size without SUBCHUNK1_ID and SUBCHUNK1_SIZE fields
+ SUBCHUNK2_ID: { offset: 36, value: 0x64617461 }, // hex representation of string "data" - identifies the start of actual audio data section
+ SUBCHUNK2_SIZE: { offset: 40 }, // actual audio data size
+} as const;
+
+const fourCharsToInt = (chars: string) =>
+ (chars.charCodeAt(0) << 24) |
+ (chars.charCodeAt(1) << 16) |
+ (chars.charCodeAt(2) << 8) |
+ chars.charCodeAt(3);
+
+const WAV_HEADER_FLAGS = {
+ data: fourCharsToInt('data'),
+ fmt: fourCharsToInt('fmt '),
+ RIFF: fourCharsToInt('RIFF'),
+ WAVE: fourCharsToInt('WAVE'),
+};
+
+type WriteWaveHeaderParams = {
+ arrayBuffer: ArrayBuffer;
+ // 1 - mono, 2 - stereo
+ channelCount: number;
+ // Number of samples per second, e.g. 44100Hz
+ sampleRate: number;
+};
+const writeWavHeader = ({ arrayBuffer, channelCount, sampleRate }: WriteWaveHeaderParams) => {
+ const byteRate = sampleRate * channelCount * BYTES_PER_SAMPLE; // bytes/sec
+ const blockAlign = channelCount * BYTES_PER_SAMPLE;
+
+ const dataView = new DataView(arrayBuffer);
+ /*
+ * The maximum size of a RIFF file is 4294967295 bytes and since the header takes up 44 bytes there are 4294967251 bytes left for the
+ * data chunk.
+ */
+ const dataChunkSize = Math.min(
+ dataView.byteLength - WAV_HEADER_LENGTH_BYTES,
+ RIFF_FILE_MAX_BYTES - WAV_HEADER_LENGTH_BYTES,
+ );
+
+ dataView.setUint32(HEADER.CHUNK_ID.offset, HEADER.CHUNK_ID.value); // "RIFF"
+ dataView.setUint32(HEADER.CHUNK_SIZE.offset, arrayBuffer.byteLength - 8, true); // adjustment for the first two headers - chunk id + file size
+ dataView.setUint32(HEADER.FILE_FORMAT.offset, HEADER.FILE_FORMAT.value); // "WAVE"
+
+ dataView.setUint32(HEADER.SUBCHUNK1_ID.offset, HEADER.SUBCHUNK1_ID.value); // "fmt "
+ dataView.setUint32(HEADER.SUBCHUNK1_SIZE.offset, HEADER.SUBCHUNK1_SIZE.value, true);
+ dataView.setUint16(HEADER.AUDIO_FORMAT.offset, HEADER.AUDIO_FORMAT.value, true);
+ dataView.setUint16(HEADER.CHANNEL_COUNT.offset, channelCount, true);
+ dataView.setUint32(HEADER.SAMPLE_RATE.offset, sampleRate, true);
+ dataView.setUint32(HEADER.BYTE_RATE.offset, byteRate, true);
+ dataView.setUint16(HEADER.BLOCK_ALIGN.offset, blockAlign, true);
+ dataView.setUint16(HEADER.BITS_PER_SAMPLE.offset, HEADER.BITS_PER_SAMPLE.value, true);
+
+ dataView.setUint32(HEADER.SUBCHUNK2_ID.offset, HEADER.SUBCHUNK2_ID.value); // "data"
+ dataView.setUint32(HEADER.SUBCHUNK2_SIZE.offset, dataChunkSize, true);
+};
+
+export const readWavHeader = (dataView: DataView) => {
+ const header = dataView.getUint32(0, false);
+ if (WAV_HEADER_FLAGS.RIFF !== header) {
+ console.error('Missing RIFF header in WAVE file');
+ return;
+ }
+ if (WAV_HEADER_FLAGS.WAVE !== dataView.getUint32(HEADER.FILE_FORMAT.offset, false)) {
+ console.error('Missing WAVE header in WAVE file');
+ return;
+ }
+ if (WAV_HEADER_FLAGS.fmt !== dataView.getUint32(HEADER.SUBCHUNK1_ID.offset, false)) {
+ console.error('Missing fmt header in WAVE file');
+ return;
+ }
+
+ return {
+ audioDataSizeBytes: dataView.getUint32(HEADER.SUBCHUNK2_SIZE.offset, true),
+ audioDataStartOffset: WAV_HEADER_LENGTH_BYTES,
+ channelCount: dataView.getUint16(HEADER.CHANNEL_COUNT.offset, true),
+ sampleRate: dataView.getUint32(HEADER.SAMPLE_RATE.offset, true),
+ };
+};
+
+const splitDataByChannel = (audioBuffer: AudioBuffer) =>
+ Array.from({ length: audioBuffer.numberOfChannels }, (_, i) => audioBuffer.getChannelData(i));
+
+type WriteAudioDataParams = {
+ arrayBuffer: ArrayBuffer;
+ dataByChannel: Float32Array[];
+};
+
+/**
+ * In a WAV file, samples for each channel are usually interleaved, meaning samples from each channel are grouped together sequentially.
+ * For example, in a stereo audio file (2 channels), samples alternate between the left and right channels.
+ * @param arrayBuffer
+ * @param dataByChannel
+ */
+const writeWavAudioData = ({ arrayBuffer, dataByChannel }: WriteAudioDataParams) => {
+ const dataView = new DataView(arrayBuffer);
+ const channelCount = dataByChannel.length;
+
+ dataByChannel.forEach((channelData, channelIndex) => {
+ let writeOffset = WAV_HEADER_LENGTH_BYTES + channelCount * channelIndex;
+
+ channelData.forEach((float32Value) => {
+ dataView.setInt16(
+ writeOffset,
+ float32Value < 0 ? Math.max(-1, float32Value) * 32768 : Math.min(1, float32Value) * 32767,
+ true,
+ );
+ writeOffset += channelCount * BYTES_PER_SAMPLE;
+ });
+ });
+};
+
+export const encodeToWaw = async (file: File, sampleRate: number) => {
+ const audioBuffer = await renderAudio(await toAudioBuffer(file), sampleRate);
+ const numberOfSamples = audioBuffer.duration * sampleRate;
+ const fileSizeBytes =
+ numberOfSamples * audioBuffer.numberOfChannels * BYTES_PER_SAMPLE + WAV_HEADER_LENGTH_BYTES;
+
+ const arrayBuffer = new ArrayBuffer(fileSizeBytes);
+ writeWavHeader({ arrayBuffer, channelCount: audioBuffer.numberOfChannels, sampleRate });
+ writeWavAudioData({ arrayBuffer, dataByChannel: splitDataByChannel(audioBuffer) });
+ return new Blob([arrayBuffer], { type: 'audio/wav' });
+};
diff --git a/src/components/MessageInput/AttachmentPreviewList.tsx b/src/components/MessageInput/AttachmentPreviewList.tsx
index 9f71c5278..71eea1a4c 100644
--- a/src/components/MessageInput/AttachmentPreviewList.tsx
+++ b/src/components/MessageInput/AttachmentPreviewList.tsx
@@ -1,15 +1,32 @@
import clsx from 'clsx';
import React, { useCallback, useState } from 'react';
+import { CloseIcon, DownloadIcon, LoadingIndicatorIcon, RetryIcon } from './icons';
+import {
+ isAudioAttachment,
+ isMediaAttachment,
+ isVoiceRecordingAttachment,
+ PlayButton,
+} from '../Attachment';
import { BaseImage as DefaultBaseImage } from '../Gallery';
+import { useAudioController } from '../Attachment/hooks/useAudioController';
+import { RecordingTimer } from '../MediaRecorder';
import { FileIcon } from '../ReactFileUtilities';
import { useComponentContext, useMessageInputContext } from '../../context';
-import { useFileState } from './hooks/useFileState';
-import { CloseIcon, DownloadIcon, LoadingIndicatorIcon, RetryIcon } from './icons';
+import type { LocalAttachment } from './types';
+import type { DefaultStreamChatGenerics } from '../../types';
-export const AttachmentPreviewList = () => {
- const { fileOrder, imageOrder } = useMessageInputContext('AttachmentPreviewList');
+export const AttachmentPreviewList = <
+ StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics
+>() => {
+ const {
+ attachments,
+ fileOrder,
+ imageOrder,
+ removeAttachment,
+ uploadAttachment,
+ } = useMessageInputContext('AttachmentPreviewList');
return (
@@ -17,6 +34,31 @@ export const AttachmentPreviewList = () => {
className='str-chat__attachment-list-scroll-container'
data-testid='attachment-list-scroll-container'
>
+ {attachments.map((attachment) => {
+ if (isVoiceRecordingAttachment(attachment)) {
+ return (
+
+ );
+ } else if (isAudioAttachment(attachment) || isMediaAttachment(attachment)) {
+ // unnecessary to pass handleRetry as video and audio if among attachments is already uploaded
+ // - user looking at the edit message input
+ return (
+
+ );
+ }
+ return null;
+ })}
{imageOrder.map((id) => (
))}
@@ -28,6 +70,111 @@ export const AttachmentPreviewList = () => {
);
};
+type AttachmentPreviewProps
= {
+ attachment: A;
+ removeAttachment: (id: string) => void;
+ handleRetry?: (attachment: A) => void | Promise<void>;
+ mimeType?: string;
+};
+
+const VoiceRecordingPreview = ({
+ attachment,
+ handleRetry,
+ mimeType,
+ removeAttachment,
+}: AttachmentPreviewProps) => {
+ const { audioRef, isPlaying, secondsElapsed, togglePlay } = useAudioController({ mimeType });
+
+ return (
+
+
+
+
+
+
+
attachment.$internal?.id && removeAttachment(attachment.$internal.id)}
+ >
+
+
+
+ {attachment.$internal?.uploadState === 'failed' && !!handleRetry && (
+
handleRetry(attachment)}
+ >
+
+
+ )}
+
+
+
+ {attachment.title}
+
+ {typeof attachment.duration !== 'undefined' && (
+
+ )}
+ {attachment.$internal?.uploadState === 'uploading' &&
}
+
+
+
+
+
+ );
+};
+
+const FilePreview = ({ attachment, handleRetry, removeAttachment }: AttachmentPreviewProps) => (
+
+
+
+
+
+
attachment.$internal?.id && removeAttachment(attachment.$internal?.id)}
+ >
+
+
+
+ {attachment.$internal?.uploadState === 'failed' && !!handleRetry && (
+
handleRetry(attachment)}
+ >
+
+
+ )}
+
+
+
+ {attachment.title}
+
+ {attachment.$internal?.uploadState === 'finished' && (
+
+
+
+ )}
+ {attachment.$internal?.uploadState === 'uploading' &&
}
+
+
+);
+
type PreviewItemProps = { id: string };
export const ImagePreviewItem = ({ id }: PreviewItemProps) => {
@@ -98,60 +245,37 @@ export const ImagePreviewItem = ({ id }: PreviewItemProps) => {
const FilePreviewItem = ({ id }: PreviewItemProps) => {
const { fileUploads, removeFile, uploadFile } = useMessageInputContext('FilePreviewItem');
- const handleRemove: React.MouseEventHandler = useCallback(
- (e) => {
- e.stopPropagation();
+ const handleRemove = useCallback(
+ (id: string) => {
removeFile(id);
},
- [removeFile, id],
+ [removeFile],
+ );
+ const handleRetry = useCallback(
+ (attachment: LocalAttachment) => attachment.$internal && uploadFile(attachment.$internal.id),
+ [uploadFile],
);
- const handleRetry = useCallback(() => uploadFile(id), [uploadFile, id]);
const file = fileUploads[id];
- const state = useFileState(file);
if (!file) return null;
- return (
-
-
-
-
-
-
-
-
-
- {state.failed && (
-
-
-
- )}
+ const attachment: LocalAttachment = {
+ $internal: {
+ file: file.file as File,
+ id,
+ uploadState: file.state,
+ },
+ asset_url: file.url,
+ mime_type: file.file.type,
+ title: file.file.name,
+ };
-
-
{file.file.name}
- {state.finished && (
-
-
-
- )}
- {state.uploading &&
}
-
-
+ return (
+
);
};
diff --git a/src/components/MessageInput/CooldownTimer.tsx b/src/components/MessageInput/CooldownTimer.tsx
index 2fe1127ae..216324523 100644
--- a/src/components/MessageInput/CooldownTimer.tsx
+++ b/src/components/MessageInput/CooldownTimer.tsx
@@ -1,31 +1,16 @@
-import React, { useEffect, useState } from 'react';
+import React from 'react';
+import { useTimer } from './hooks/useTimer';
export type CooldownTimerProps = {
cooldownInterval: number;
setCooldownRemaining: React.Dispatch<React.SetStateAction<number | undefined>>;
};
export const CooldownTimer = ({ cooldownInterval }: CooldownTimerProps) => {
- const [seconds, setSeconds] = useState();
-
- useEffect(() => {
- let countdownTimeout: ReturnType<typeof setTimeout>;
- if (typeof seconds === 'number' && seconds > 0) {
- countdownTimeout = setTimeout(() => {
- setSeconds(seconds - 1);
- }, 1000);
- }
- return () => {
- clearTimeout(countdownTimeout);
- };
- }, [seconds]);
-
- useEffect(() => {
- setSeconds(cooldownInterval ?? 0);
- }, [cooldownInterval]);
+ const secondsLeft = useTimer({ startFrom: cooldownInterval });
return (
- {seconds}
+ {secondsLeft}
);
};
diff --git a/src/components/MessageInput/MessageInput.tsx b/src/components/MessageInput/MessageInput.tsx
index de5ff14ff..5326fb8c8 100644
--- a/src/components/MessageInput/MessageInput.tsx
+++ b/src/components/MessageInput/MessageInput.tsx
@@ -21,6 +21,7 @@ import type {
} from '../../types/types';
import type { URLEnrichmentConfig } from './hooks/useLinkPreviews';
import type { FileUpload, ImageUpload } from './types';
+import type { CustomAudioRecordingConfig } from '../MediaRecorder';
export type EmojiSearchIndexResult = {
id: string;
@@ -42,6 +43,15 @@ export type MessageInputProps<
> = {
/** Additional props to be passed to the underlying `AutoCompleteTextarea` component, [available props](https://www.npmjs.com/package/react-textarea-autosize) */
additionalTextareaProps?: React.TextareaHTMLAttributes<HTMLTextAreaElement>;
+ /**
+ * When enabled, recorded messages won’t be sent immediately.
+ * Instead, they will “stack up” with other attachments in the message composer allowing the user to send multiple attachments as part of the same message.
+ */
+ asyncMessagesMultiSendEnabled?: boolean;
+ /** Allows to configure the audio recording parameters for voice messages. */
+ audioRecordingConfig?: CustomAudioRecordingConfig;
+ /** Controls whether the users will be provided with the UI to record voice messages. */
+ audioRecordingEnabled?: boolean;
/** Function to clear the editing state while editing a message */
clearEditingState?: () => void;
/** If true, disables the text input */
diff --git a/src/components/MessageInput/MessageInputFlat.tsx b/src/components/MessageInput/MessageInputFlat.tsx
index b1ffb7146..b6272339b 100644
--- a/src/components/MessageInput/MessageInputFlat.tsx
+++ b/src/components/MessageInput/MessageInputFlat.tsx
@@ -1,4 +1,4 @@
-import React, { useEffect, useMemo } from 'react';
+import React, { useCallback, useEffect, useMemo, useState } from 'react';
import { FileUploadButton, ImageDropzone, UploadButton } from '../ReactFileUtilities';
import type { Event } from 'stream-chat';
import clsx from 'clsx';
@@ -7,9 +7,16 @@ import { nanoid } from 'nanoid';
import {
FileUploadIconFlat as DefaultFileUploadIcon,
- SendButton as DefaultSendButton,
UploadIcon as DefaultUploadIcon,
} from './icons';
+import { CooldownTimer as DefaultCooldownTimer } from './CooldownTimer';
+import { SendButton as DefaultSendButton } from './SendButton';
+import {
+ AudioRecorder as DefaultAudioRecorder,
+ RecordingPermissionDeniedNotification as DefaultRecordingPermissionDeniedNotification,
+ StartRecordingAudioButton as DefaultStartRecordingAudioButton,
+ RecordingPermission,
+} from '../MediaRecorder';
import {
QuotedMessagePreview as DefaultQuotedMessagePreview,
QuotedMessagePreviewHeader,
@@ -20,6 +27,7 @@ import { UploadsPreview } from './UploadsPreview';
import { ChatAutoComplete } from '../ChatAutoComplete/ChatAutoComplete';
import { Tooltip } from '../Tooltip/Tooltip';
+import { RecordingAttachmentType } from '../MediaRecorder/classes';
import { useChatContext } from '../../context/ChatContext';
import { useChannelActionContext } from '../../context/ChannelActionContext';
@@ -29,7 +37,6 @@ import { useMessageInputContext } from '../../context/MessageInputContext';
import { useComponentContext } from '../../context/ComponentContext';
import type { DefaultStreamChatGenerics } from '../../types/types';
-import { CooldownTimer as DefaultCooldownTimer } from './CooldownTimer';
export const MessageInputFlat = <
StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics
@@ -163,6 +170,8 @@ const MessageInputV2 = <
const { t } = useTranslationContext('MessageInputV2');
const {
+ asyncMessagesMultiSendEnabled,
+ attachments,
cooldownRemaining,
findAndEnqueueURLsToEnrich,
handleSubmit,
@@ -172,21 +181,33 @@ const MessageInputV2 = <
maxFilesLeft,
message,
numberOfUploads,
+ recordingController,
setCooldownRemaining,
text,
uploadNewFiles,
} = useMessageInputContext('MessageInputV2');
const {
+ AudioRecorder = DefaultAudioRecorder,
AttachmentPreviewList = DefaultAttachmentPreviewList,
CooldownTimer = DefaultCooldownTimer,
FileUploadIcon = DefaultUploadIcon,
LinkPreviewList = DefaultLinkPreviewList,
QuotedMessagePreview = DefaultQuotedMessagePreview,
+ RecordingPermissionDeniedNotification = DefaultRecordingPermissionDeniedNotification,
SendButton = DefaultSendButton,
+ StartRecordingAudioButton = DefaultStartRecordingAudioButton,
EmojiPicker,
} = useComponentContext('MessageInputV2');
+ const [
+ showRecordingPermissionDeniedNotification,
+ setShowRecordingPermissionDeniedNotification,
+ ] = useState(false);
+ const closePermissionDeniedNotification = useCallback(() => {
+ setShowRecordingPermissionDeniedNotification(false);
+ }, []);
+
const id = useMemo(() => nanoid(), []);
const accept = useMemo(
@@ -206,13 +227,25 @@ const MessageInputV2 = <
onDrop: uploadNewFiles,
});
+ if (recordingController.recordingState) return <AudioRecorder />;
+
// TODO: "!message" condition is a temporary fix for shared
// state when editing a message (fix shared state issue)
const displayQuotedMessage = !message && quotedMessage && !quotedMessage.parent_id;
+ const recordingEnabled = !!(recordingController.recorder && navigator.mediaDevices); // account for requirement on iOS as per this bug report: https://bugs.webkit.org/show_bug.cgi?id=252303
+ const isRecording = !!recordingController.recordingState;
return (
<>
+ {recordingEnabled &&
+ recordingController.permissionState === 'denied' &&
+ showRecordingPermissionDeniedNotification && (
+
+ )}
{findAndEnqueueURLsToEnrich && (
)}
@@ -226,7 +259,6 @@ const MessageInputV2 = <
{isDragReject &&
{t('Some of the files will not be accepted')}
}
)}
-
{displayQuotedMessage && }
@@ -247,7 +279,9 @@ const MessageInputV2 = <
{displayQuotedMessage &&
}
- {isUploadEnabled && !!numberOfUploads &&
}
+ {isUploadEnabled && !!(numberOfUploads || attachments.length) && (
+
+ )}
@@ -263,10 +297,27 @@ const MessageInputV2 = <
setCooldownRemaining={setCooldownRemaining}
/>
) : (
-
+ <>
+
+ {recordingEnabled && (
+
a.type === RecordingAttachmentType.VOICE_RECORDING,
+ ))
+ }
+ onClick={() => {
+ recordingController.recorder?.start();
+ setShowRecordingPermissionDeniedNotification(true);
+ }}
+ />
+ )}
+ >
)}
>
)}
diff --git a/src/components/MessageInput/MessageInputSmall.tsx b/src/components/MessageInput/MessageInputSmall.tsx
index 5269a3e74..e5275e515 100644
--- a/src/components/MessageInput/MessageInputSmall.tsx
+++ b/src/components/MessageInput/MessageInputSmall.tsx
@@ -2,13 +2,11 @@ import React, { useEffect } from 'react';
import { FileUploadButton, ImageDropzone } from '../ReactFileUtilities';
import type { Event } from 'stream-chat';
-import {
- FileUploadIconFlat as DefaultFileUploadIcon,
- SendButton as DefaultSendButton,
- EmojiIconSmall,
-} from './icons';
+import { FileUploadIconFlat as DefaultFileUploadIcon, EmojiIconSmall } from './icons';
import { UploadsPreview } from './UploadsPreview';
+import { CooldownTimer as DefaultCooldownTimer } from './CooldownTimer';
+import { SendButton as DefaultSendButton } from './SendButton';
import { ChatAutoComplete } from '../ChatAutoComplete/ChatAutoComplete';
import { Tooltip } from '../Tooltip/Tooltip';
@@ -22,7 +20,6 @@ import { useComponentContext } from '../../context/ComponentContext';
import { QuotedMessagePreview as DefaultQuotedMessagePreview } from './QuotedMessagePreview';
import type { CustomTrigger, DefaultStreamChatGenerics } from '../../types/types';
-import { CooldownTimer as DefaultCooldownTimer } from './CooldownTimer';
/**
* @deprecated This component has beend deprecated in favor of [`MessageInputFlat`](./MessageInputFlat.tsx) from which
diff --git a/src/components/MessageInput/SendButton.tsx b/src/components/MessageInput/SendButton.tsx
new file mode 100644
index 000000000..ab965080b
--- /dev/null
+++ b/src/components/MessageInput/SendButton.tsx
@@ -0,0 +1,35 @@
+import React from 'react';
+import { Message } from 'stream-chat';
+import { useChatContext } from '../../context';
+import { SendIconV1, SendIconV2 } from './icons';
+import type { DefaultStreamChatGenerics } from '../../types/types';
+
+export type SendButtonProps<
+ StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics
+> = {
+ sendMessage: (
+ event: React.BaseSyntheticEvent,
customMessageData?: Partial<Message<StreamChatGenerics>>,
+ ) => void;
+} & React.ComponentProps<'button'>;
+export const SendButton = <
+ StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics
+>({
+ sendMessage,
+ ...rest
+}: SendButtonProps) => {
+ const { themeVersion } = useChatContext('SendButton');
+
+ return (
+
+ {themeVersion === '2' ? : }
+
+ );
+};
diff --git a/src/components/MessageInput/__tests__/AttachmentPreviewList.test.js b/src/components/MessageInput/__tests__/AttachmentPreviewList.test.js
index 4bfde15fb..f16ad43f0 100644
--- a/src/components/MessageInput/__tests__/AttachmentPreviewList.test.js
+++ b/src/components/MessageInput/__tests__/AttachmentPreviewList.test.js
@@ -3,16 +3,25 @@
import React, { useEffect } from 'react';
import { act, fireEvent, render, screen } from '@testing-library/react';
-import renderer from 'react-test-renderer';
import '@testing-library/jest-dom';
import { Chat } from '../../Chat';
import { Channel } from '../../Channel';
import { AttachmentPreviewList, ImagePreviewItem } from '../AttachmentPreviewList';
-import { ComponentProvider, useChatContext } from '../../../context';
+import { ChannelActionProvider, ComponentProvider, useChatContext } from '../../../context';
import { MessageInputContextProvider } from '../../../context/MessageInputContext';
-import { generateUpload, initClientWithChannels } from '../../../mock-builders';
+import {
+ generateAudioAttachment,
+ generateUpload,
+ generateVideoAttachment,
+ generateVoiceRecordingAttachment,
+ initClientWithChannels,
+} from '../../../mock-builders';
+
+const RETRY_BTN_TEST_ID = 'file-preview-item-retry-button';
+const DELETE_BTN_TEST_ID = 'file-preview-item-delete-button';
+const LOADING_INDICATOR_TEST_ID = 'loading-indicator';
const uploadsReducer = (pv, cv) => {
pv[cv.id] = cv;
@@ -28,13 +37,16 @@ const capitalize = ([firstLetter, ...restOfTheWord]) =>
const orderMapper = ({ id }) => id;
-const generateMessageInputContextValue = ({ files = [], images = [] } = {}) => ({
+const generateMessageInputContextValue = ({ attachments = [], files = [], images = [] } = {}) => ({
+ attachments,
fileOrder: files.map(orderMapper),
fileUploads: files.reduce(uploadsReducer, {}),
imageOrder: images.map(orderMapper),
imageUploads: images.reduce(uploadsReducer, {}),
+ removeAttachment: jest.fn(),
removeFile: jest.fn(),
removeImage: jest.fn(),
+ uploadAttachment: jest.fn(),
uploadFile: jest.fn(),
uploadImage: jest.fn(),
});
@@ -42,9 +54,11 @@ const generateMessageInputContextValue = ({ files = [], images = [] } = {}) => (
const renderComponent = (value = {}, renderFunction = render) =>
renderFunction(
-
-
-
+
+
+
+
+
,
);
@@ -61,30 +75,47 @@ describe('AttachmentPreviewList', () => {
expect(attachmentList).toBeEmptyDOMElement();
});
- it.each(['uploading', 'failed', 'finished'])(
- 'renders with one image and one file with state "%s"',
- (state) => {
- const [file, image] = [
- generateUpload({
- fileOverrides: { name: 'file-upload' },
- objectOverrides: { state },
- }),
- generateUpload({
- fileOverrides: { name: 'image-upload', type: 'image' },
- objectOverrides: { state },
- }),
- ];
-
- const tree = renderComponent(
- generateMessageInputContextValue({ files: [file], images: [image] }),
- renderer.create,
- ).toJSON();
-
- expect(tree).toMatchSnapshot();
- },
- );
+ it.each(['uploading', 'failed', 'finished'])('renders previews with state "%s"', (state) => {
+ renderComponent(
+ generateMessageInputContextValue({
+ attachments: [
+ generateAudioAttachment({
+ $internal: { uploadState: state },
+ title: `audio-attachment-${state}`,
+ }),
+ generateVoiceRecordingAttachment({
+ $internal: { uploadState: state },
+ title: `voice-recording-attachment-${state}`,
+ }),
+ generateVideoAttachment({
+ $internal: { uploadState: state },
+ title: `video-attachment-${state}`,
+ }),
+ ],
+ files: [
+ generateUpload({
+ fileOverrides: { name: `file-upload-${state}` },
+ objectOverrides: { state },
+ }),
+ ],
+ images: [
+ generateUpload({
+ fileOverrides: { name: `image-upload-${state}`, type: 'image' },
+ objectOverrides: { state },
+ }),
+ ],
+ }),
+ render,
+ );
- it.each(['file', 'image'])('tests "retry" click on %s upload', (type) => {
+ expect(screen.getByTitle(`file-upload-${state}`)).toBeInTheDocument();
+ expect(screen.getByTitle(`image-upload-${state}`)).toBeInTheDocument();
+ expect(screen.getByTitle(`audio-attachment-${state}`)).toBeInTheDocument();
+ expect(screen.getByTitle(`voice-recording-attachment-${state}`)).toBeInTheDocument();
+ expect(screen.getByTitle(`video-attachment-${state}`)).toBeInTheDocument();
+ });
+
+ it.each(['file', 'image'])('retries upload on click with %s', (type) => {
const file = generateUpload({
fileOverrides: { type },
objectOverrides: { state: 'failed' },
@@ -101,6 +132,74 @@ describe('AttachmentPreviewList', () => {
expect(contextValue[`upload${capitalize(type)}`]).toHaveBeenCalledWith(file.id);
});
+ it.each(['audio', 'voiceRecording', 'video'])('retries upload on click with %s', (type) => {
+ const state = 'failed';
+ const title = `${type}-attachment-${state}`;
+ const generate = {
+ audio: generateAudioAttachment,
+ video: generateVideoAttachment,
+ voiceRecording: generateVoiceRecordingAttachment,
+ };
+ const uploadedAttachmentData = generate[type]({
+ title,
+ });
+ const localAttachment = { ...uploadedAttachmentData, $internal: { uploadState: state } };
+
+ const contextValue = generateMessageInputContextValue({
+ attachments: [localAttachment],
+ });
+
+ renderComponent(contextValue);
+
+ const retryButton = screen.getByTestId(RETRY_BTN_TEST_ID);
+
+ fireEvent.click(retryButton);
+
+ expect(contextValue.uploadAttachment).toHaveBeenCalledWith(
+ expect.objectContaining(uploadedAttachmentData),
+ );
+ });
+
+ it.each(['file', 'image'])('renders loading indicator for %s preview', (type) => {
+ const file = generateUpload({
+ fileOverrides: { type },
+ objectOverrides: { state: 'uploading' },
+ });
+
+ const contextValue = generateMessageInputContextValue({ [`${type}s`]: [file] });
+
+ renderComponent(contextValue);
+
+ expect(screen.queryByTestId(LOADING_INDICATOR_TEST_ID)).toBeInTheDocument();
+ expect(screen.queryByTestId(RETRY_BTN_TEST_ID)).not.toBeInTheDocument();
+ });
+
+ it.each(['audio', 'voiceRecording', 'video'])(
+ 'renders loading indicator for %s preview',
+ (type) => {
+ const state = 'uploading';
+ const title = `${type}-attachment-${state}`;
+ const generate = {
+ audio: generateAudioAttachment,
+ video: generateVideoAttachment,
+ voiceRecording: generateVoiceRecordingAttachment,
+ };
+ const uploadedAttachmentData = generate[type]({
+ title,
+ });
+ const localAttachment = { ...uploadedAttachmentData, $internal: { uploadState: state } };
+
+ const contextValue = generateMessageInputContextValue({
+ attachments: [localAttachment],
+ });
+
+ renderComponent(contextValue);
+
+ expect(screen.queryByTestId(LOADING_INDICATOR_TEST_ID)).toBeInTheDocument();
+ expect(screen.queryByTestId(RETRY_BTN_TEST_ID)).not.toBeInTheDocument();
+ },
+ );
+
it.each(['file', 'image'])('tests "remove" click on %s upload', (type) => {
const file = generateUpload({
fileOverrides: { type },
@@ -118,6 +217,56 @@ describe('AttachmentPreviewList', () => {
expect(contextValue[`remove${capitalize(type)}`]).toHaveBeenCalledWith(file.id);
});
+ it.each(['audio', 'voiceRecording', 'video'])(
+ 'removes retry button on %s successful upload',
+ (type) => {
+ const state = 'finished';
+ const title = `${type}-attachment-${state}`;
+ const generate = {
+ audio: generateAudioAttachment,
+ video: generateVideoAttachment,
+ voiceRecording: generateVoiceRecordingAttachment,
+ };
+ const uploadedAttachmentData = generate[type]({
+ title,
+ });
+ const localAttachment = { ...uploadedAttachmentData, $internal: { uploadState: state } };
+
+ const contextValue = generateMessageInputContextValue({
+ attachments: [localAttachment],
+ });
+
+ renderComponent(contextValue);
+
+ expect(screen.queryByTestId(RETRY_BTN_TEST_ID)).not.toBeInTheDocument();
+ },
+ );
+
+ it.each(['audio', 'voiceRecording', 'video'])('removes the %s preview', (type) => {
+ const state = 'finished';
+ const title = `${type}-attachment-${state}`;
+ const id = `${type}-id`;
+ const generate = {
+ audio: generateAudioAttachment,
+ video: generateVideoAttachment,
+ voiceRecording: generateVoiceRecordingAttachment,
+ };
+ const uploadedAttachmentData = generate[type]({
+ title,
+ });
+ const localAttachment = { ...uploadedAttachmentData, $internal: { id, uploadState: state } };
+
+ const contextValue = generateMessageInputContextValue({
+ attachments: [localAttachment],
+ });
+
+ renderComponent(contextValue);
+
+ fireEvent.click(screen.getByTestId(DELETE_BTN_TEST_ID));
+
+ expect(contextValue.removeAttachment).toHaveBeenCalledWith(localAttachment.$internal.id);
+ });
+
it('should render custom BaseImage component', async () => {
const ActiveChannelSetter = ({ activeChannel }) => {
const { setActiveChannel } = useChatContext();
@@ -195,7 +344,7 @@ describe('ImagePreviewItem', () => {
const { container } = renderImagePreviewItem({
imageUploads: { [defaultId]: { ...imageUploads[defaultId], og_scrape_url: 'og_scrape_url' } },
});
- expect(container).toBeEmpty();
+ expect(container).toBeEmptyDOMElement();
});
it('renders uploading state', () => {
const { container } = renderImagePreviewItem({
diff --git a/src/components/MessageInput/__tests__/SendButton.test.js b/src/components/MessageInput/__tests__/SendButton.test.js
index 737a08d7f..e55dbd846 100644
--- a/src/components/MessageInput/__tests__/SendButton.test.js
+++ b/src/components/MessageInput/__tests__/SendButton.test.js
@@ -5,7 +5,7 @@ import { toHaveNoViolations } from 'jest-axe';
import { axe } from '../../../../axe-helper';
expect.extend(toHaveNoViolations);
-import { SendButton } from '../icons';
+import { SendButton } from '../SendButton';
describe('SendButton', () => {
it('should call whatever callback was passed into the sendMessage prop when the button is pressed', async () => {
diff --git a/src/components/MessageInput/__tests__/__snapshots__/AttachmentPreviewList.test.js.snap b/src/components/MessageInput/__tests__/__snapshots__/AttachmentPreviewList.test.js.snap
index d6e930b57..6eb454095 100644
--- a/src/components/MessageInput/__tests__/__snapshots__/AttachmentPreviewList.test.js.snap
+++ b/src/components/MessageInput/__tests__/__snapshots__/AttachmentPreviewList.test.js.snap
@@ -1,553 +1,5 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
-exports[`AttachmentPreviewList renders with one image and one file with state "failed" 1`] = `
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-`;
-
-exports[`AttachmentPreviewList renders with one image and one file with state "finished" 1`] = `
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-`;
-
-exports[`AttachmentPreviewList renders with one image and one file with state "uploading" 1`] = `
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-`;
-
exports[`AttachmentPreviewList should render custom BaseImage component 1`] = `
>,
textareaRef: React.MutableRefObject
,
) => {
- const { noFiles } = props;
+ const { doFileUploadRequest, errorHandler, noFiles } = props;
const { fileUploads, imageUploads } = state;
-
- const { maxNumberOfFiles, multipleUploads } = useChannelStateContext(
+ const { getAppSettings } = useChatContext('useAttachments');
+ const { t } = useTranslationContext('useAttachments');
+ const { addNotification } = useChannelActionContext('useAttachments');
+ const { channel, maxNumberOfFiles, multipleUploads } = useChannelStateContext(
'useAttachments',
);
@@ -81,11 +89,109 @@ export const useAttachments = <
[maxFilesLeft, noFiles],
);
+ const removeAttachment = (id: string) => {
+ dispatch({ id, type: 'removeAttachment' });
+ };
+
+ const uploadAttachment = useCallback(
+ async (
+ att: LocalAttachment,
+ ): Promise> => {
+ const { $internal, ...attachment } = att;
+ if (!$internal?.file) return att;
+
+ const id = $internal?.id ?? nanoid();
+ const { file } = $internal;
+ const canUpload = await checkUploadPermissions({
+ addNotification,
+ file,
+ getAppSettings,
+ t,
+ uploadType: 'file',
+ });
+
+ if (!canUpload) {
+ const notificationText = t('Missing permissions to upload the attachment');
+ console.error(new Error(notificationText));
+ addNotification(notificationText, 'error');
+ return att;
+ }
+
+ dispatch({
+ attachment: {
+ ...attachment,
+ $internal: {
+ ...$internal,
+ id,
+ uploadState: 'uploading',
+ },
+ },
+ type: 'upsertAttachment',
+ });
+
+ try {
+ let response: SendFileAPIResponse;
+ if (doFileUploadRequest) {
+ response = await doFileUploadRequest(file, channel);
+ } else {
+ response = await channel.sendFile(file as File);
+ }
+ const uploadedAttachment = {
+ ...attachment,
+ $internal: {
+ ...$internal,
+ uploadState: 'finished',
+ },
+ asset_url: response.file,
+ } as LocalAttachment;
+
+ dispatch({
+ attachment: uploadedAttachment,
+ type: 'upsertAttachment',
+ });
+
+ return uploadedAttachment;
+ } catch (error) {
+ let finalError: Error = { message: t('Error uploading attachment'), name: 'Error' };
+ if (typeof (error as Error).message === 'string') {
+ finalError = error as Error;
+ } else if (typeof error === 'object') {
+ finalError = Object.assign(finalError, error);
+ }
+
+ console.error(finalError);
+ addNotification(finalError.message, 'error');
+
+ const failedAttachment = {
+ ...attachment,
+ $internal: {
+ ...$internal,
+ uploadState: 'failed',
+ },
+ } as LocalAttachment;
+
+ dispatch({
+ attachment: failedAttachment,
+ type: 'upsertAttachment',
+ });
+
+ if (errorHandler) {
+ errorHandler(finalError as Error, 'upload-attachment', file);
+ }
+
+ return failedAttachment;
+ }
+ },
+ [addNotification, channel, doFileUploadRequest, dispatch, errorHandler, getAppSettings, t],
+ );
+
return {
maxFilesLeft,
numberOfUploads,
+ removeAttachment,
removeFile,
removeImage,
+ uploadAttachment,
uploadFile,
uploadImage,
uploadNewFiles,
diff --git a/src/components/MessageInput/hooks/useCreateMessageInputContext.ts b/src/components/MessageInput/hooks/useCreateMessageInputContext.ts
index bae9a61c6..a4c4dabe7 100644
--- a/src/components/MessageInput/hooks/useCreateMessageInputContext.ts
+++ b/src/components/MessageInput/hooks/useCreateMessageInputContext.ts
@@ -11,7 +11,9 @@ export const useCreateMessageInputContext = <
) => {
const {
additionalTextareaProps,
+ asyncMessagesMultiSendEnabled,
attachments,
+ audioRecordingEnabled,
autocompleteTriggers,
cancelURLEnrichment,
clearEditingState,
@@ -54,6 +56,8 @@ export const useCreateMessageInputContext = <
overrideSubmitHandler,
parent,
publishTypingEvent,
+ recordingController,
+ removeAttachment,
removeFile,
removeImage,
setCooldownRemaining,
@@ -63,6 +67,7 @@ export const useCreateMessageInputContext = <
showMentionsList,
text,
textareaRef,
+ uploadAttachment,
uploadFile,
uploadImage,
uploadNewFiles,
@@ -85,7 +90,9 @@ export const useCreateMessageInputContext = <
const messageInputContext: MessageInputContextValue = useMemo(
() => ({
additionalTextareaProps,
+ asyncMessagesMultiSendEnabled,
attachments,
+ audioRecordingEnabled,
autocompleteTriggers,
cancelURLEnrichment,
clearEditingState,
@@ -128,6 +135,8 @@ export const useCreateMessageInputContext = <
overrideSubmitHandler,
parent,
publishTypingEvent,
+ recordingController,
+ removeAttachment,
removeFile,
removeImage,
setCooldownRemaining,
@@ -137,6 +146,7 @@ export const useCreateMessageInputContext = <
showMentionsList,
text,
textareaRef,
+ uploadAttachment,
uploadFile,
uploadImage,
uploadNewFiles,
@@ -144,6 +154,8 @@ export const useCreateMessageInputContext = <
}),
// eslint-disable-next-line react-hooks/exhaustive-deps
[
+ asyncMessagesMultiSendEnabled,
+ audioRecordingEnabled,
cancelURLEnrichment,
cooldownInterval,
cooldownRemaining,
@@ -152,6 +164,7 @@ export const useCreateMessageInputContext = <
emojiSearchIndex,
fileUploadsValue,
findAndEnqueueURLsToEnrich,
+ handleSubmit,
hideSendButton,
imageUploadsValue,
isUploadEnabled,
@@ -159,10 +172,12 @@ export const useCreateMessageInputContext = <
mentionedUsersLength,
parentId,
publishTypingEvent,
+ recordingController,
+ removeAttachment,
showCommandsList,
showMentionsList,
text,
- handleSubmit,
+ uploadAttachment,
],
);
diff --git a/src/components/MessageInput/hooks/useFileState.ts b/src/components/MessageInput/hooks/useFileState.ts
deleted file mode 100644
index f692a9619..000000000
--- a/src/components/MessageInput/hooks/useFileState.ts
+++ /dev/null
@@ -1,13 +0,0 @@
-import { useMemo } from 'react';
-
-import type { FileUpload } from '../types';
-
-export const useFileState = >(file: T) =>
- useMemo(
- () => ({
- failed: file.state === 'failed',
- finished: file.state === 'finished',
- uploading: file.state === 'uploading',
- }),
- [file.state],
- );
diff --git a/src/components/MessageInput/hooks/useMessageInputState.ts b/src/components/MessageInput/hooks/useMessageInputState.ts
index e12e300d7..e70a8dead 100644
--- a/src/components/MessageInput/hooks/useMessageInputState.ts
+++ b/src/components/MessageInput/hooks/useMessageInputState.ts
@@ -4,12 +4,15 @@ import { nanoid } from 'nanoid';
import { StreamMessage, useChannelStateContext } from '../../../context/ChannelStateContext';
import { useAttachments } from './useAttachments';
+import { EnrichURLsController, useLinkPreviews } from './useLinkPreviews';
import { useMessageInputText } from './useMessageInputText';
import { useSubmitHandler } from './useSubmitHandler';
import { usePasteHandler } from './usePasteHandler';
+import { RecordingController, useMediaRecorder } from '../../MediaRecorder/hooks/useMediaRecorder';
+import { LinkPreviewState, LocalAttachment, SetLinkPreviewMode } from '../types';
import type { FileLike } from '../../ReactFileUtilities';
-import type { Attachment, Message, OGAttachment, UserResponse } from 'stream-chat';
+import type { Message, OGAttachment, UserResponse } from 'stream-chat';
import type { MessageInputProps } from '../MessageInput';
@@ -18,14 +21,13 @@ import type {
DefaultStreamChatGenerics,
SendMessageOptions,
} from '../../../types/types';
-import { EnrichURLsController, useLinkPreviews } from './useLinkPreviews';
import type { FileUpload, ImageUpload, LinkPreviewMap } from '../types';
-import { LinkPreviewState, SetLinkPreviewMode } from '../types';
+import { mergeDeep } from '../../../utils/mergeDeep';
export type MessageInputState<
StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics
> = {
- attachments: Attachment[];
+ attachments: LocalAttachment[];
fileOrder: string[];
fileUploads: Record;
imageOrder: string[];
@@ -36,6 +38,16 @@ export type MessageInputState<
text: string;
};
+type UpsertAttachmentAction = {
+ attachment: LocalAttachment;
+ type: 'upsertAttachment';
+};
+
+type RemoveAttachmentAction = {
+ id: string;
+ type: 'removeAttachment';
+};
+
type SetTextAction = {
getNewText: (currentStateText: string) => string;
type: 'setText';
@@ -96,14 +108,16 @@ export type MessageInputReducerAction<
| SetLinkPreviewsAction
| RemoveImageUploadAction
| RemoveFileUploadAction
- | AddMentionedUserAction;
+ | AddMentionedUserAction
+ | UpsertAttachmentAction
+ | RemoveAttachmentAction;
export type MessageInputHookProps<
StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics
> = EnrichURLsController & {
handleChange: React.ChangeEventHandler;
handleSubmit: (
- event: React.BaseSyntheticEvent,
+ event?: React.BaseSyntheticEvent,
customMessageData?: Partial>,
options?: SendMessageOptions,
) => void;
@@ -113,9 +127,14 @@ export type MessageInputHookProps<
numberOfUploads: number;
onPaste: (event: React.ClipboardEvent) => void;
onSelectUser: (item: UserResponse) => void;
+ recordingController: RecordingController;
+ removeAttachment: (id: string) => void;
removeFile: (id: string) => void;
removeImage: (id: string) => void;
textareaRef: React.MutableRefObject;
+ uploadAttachment: (
+ attachment: LocalAttachment,
+ ) => Promise>;
uploadFile: (id: string) => void;
uploadImage: (id: string) => void;
uploadNewFiles: (files: FileList | File[]) => void;
@@ -211,7 +230,15 @@ const initState = <
const fileOrder = Object.keys(fileUploads);
const attachments =
- message.attachments?.filter(({ type }) => type !== 'file' && type !== 'image') || [];
+ message.attachments
+ ?.filter(({ type }) => type !== 'file' && type !== 'image')
+ .map(
+ (att) =>
+ ({
+ ...att,
+ $internal: { id: nanoid(), uploadState: 'finished' },
+ } as LocalAttachment),
+ ) || [];
const mentioned_users: StreamMessage['mentioned_users'] = message.mentioned_users || [];
@@ -244,6 +271,34 @@ const messageInputReducer = <
case 'clear':
return makeEmptyMessageInputState();
+ case 'upsertAttachment': {
+ const attachmentIndex = state.attachments.findIndex(
+ (att) => att.$internal?.id && att.$internal?.id === action.attachment.$internal?.id,
+ );
+ const upsertedAttachment = mergeDeep(
+ state.attachments[attachmentIndex] ?? {},
+ action.attachment,
+ );
+ const attachments = [...state.attachments];
+ attachments.splice(attachmentIndex, 1, upsertedAttachment);
+ return {
+ ...state,
+ attachments,
+ };
+ }
+
+ case 'removeAttachment': {
+ const attachmentIndex = state.attachments.findIndex(
+ (att) => att.$internal?.id && att.$internal?.id === action.id,
+ );
+ if (attachmentIndex === -1) return state;
+
+ return {
+ ...state,
+ attachments: [...state.attachments.splice(attachmentIndex, 1)],
+ };
+ }
+
case 'setImageUpload': {
const imageAlreadyExists = state.imageUploads[action.id];
if (!imageAlreadyExists && !action.file) return state;
@@ -365,7 +420,15 @@ export const useMessageInputState = <
MessageInputHookProps &
CommandsListState &
MentionsListState => {
- const { additionalTextareaProps, getDefaultValue, message, urlEnrichmentConfig } = props;
+ const {
+ additionalTextareaProps,
+ asyncMessagesMultiSendEnabled,
+ audioRecordingConfig,
+ audioRecordingEnabled,
+ getDefaultValue,
+ message,
+ urlEnrichmentConfig,
+ } = props;
const {
channelCapabilities = {},
@@ -430,8 +493,10 @@ export const useMessageInputState = <
const {
maxFilesLeft,
numberOfUploads,
+ removeAttachment,
removeFile,
removeImage,
+ uploadAttachment,
uploadFile,
uploadImage,
uploadNewFiles,
@@ -444,6 +509,15 @@ export const useMessageInputState = <
numberOfUploads,
enrichURLsController,
);
+ const recordingController = useMediaRecorder({
+ asyncMessagesMultiSendEnabled,
+ enabled: !!audioRecordingEnabled,
+ handleSubmit,
+ recordingConfig: audioRecordingConfig,
+ uploadAttachment,
+ });
+
+ // todo: remove the check for channelConfig?.uploads
const isUploadEnabled =
channelConfig?.uploads !== false && channelCapabilities['upload-file'] !== false;
@@ -477,12 +551,15 @@ export const useMessageInputState = <
onSelectUser,
openCommandsList,
openMentionsList,
+ recordingController,
+ removeAttachment,
removeFile,
removeImage,
setText,
showCommandsList,
showMentionsList,
textareaRef,
+ uploadAttachment,
uploadFile,
uploadImage,
uploadNewFiles,
diff --git a/src/components/MessageInput/hooks/useSubmitHandler.ts b/src/components/MessageInput/hooks/useSubmitHandler.ts
index adaf742b4..7d783a1dd 100644
--- a/src/components/MessageInput/hooks/useSubmitHandler.ts
+++ b/src/components/MessageInput/hooks/useSubmitHandler.ts
@@ -2,15 +2,19 @@ import { useEffect, useRef } from 'react';
import { useChannelActionContext } from '../../../context/ChannelActionContext';
import { useChannelStateContext } from '../../../context/ChannelStateContext';
import { useTranslationContext } from '../../../context/TranslationContext';
+import { LinkPreviewState } from '../types';
import type { Attachment, Message, UpdatedMessage } from 'stream-chat';
import type { MessageInputReducerAction, MessageInputState } from './useMessageInputState';
import type { MessageInputProps } from '../MessageInput';
-import type { CustomTrigger, DefaultStreamChatGenerics } from '../../../types/types';
+import type {
+ CustomTrigger,
+ DefaultStreamChatGenerics,
+ SendMessageOptions,
+} from '../../../types/types';
import type { EnrichURLsController } from './useLinkPreviews';
-import { LinkPreviewState } from '../types';
const getAttachmentTypeFromMime = (mime: string) => {
if (mime.includes('video/')) return 'video';
@@ -59,7 +63,7 @@ export const useSubmitHandler = <
textReference.current.hasChanged = text !== textReference.current.initialText;
}, [text]);
- const getAttachmentsFromUploads = () => {
+ const getAttachmentsFromUploads = (): Attachment[] => {
const imageAttachments = imageOrder
.map((id) => imageUploads[id])
.filter((upload) => upload.state !== 'failed')
@@ -97,19 +101,22 @@ export const useSubmitHandler = <
type: getAttachmentTypeFromMime(upload.file.type || ''),
}));
- return [
- ...attachments, // from state
- ...imageAttachments,
- ...fileAttachments,
- ];
+ const otherAttachments = attachments
+ .filter((att) => att.$internal?.uploadState !== 'failed')
+ .map((localAttachment) => {
+ const { $internal: _, ...attachment } = localAttachment;
+ return attachment as Attachment;
+ });
+
+ return [...otherAttachments, ...imageAttachments, ...fileAttachments];
};
const handleSubmit = async (
- event: React.BaseSyntheticEvent,
+ event?: React.BaseSyntheticEvent,
customMessageData?: Partial>,
+ options?: SendMessageOptions,
) => {
- event.preventDefault();
-
+ event?.preventDefault();
const trimmedMessage = text.trim();
const isEmptyMessage =
trimmedMessage === '' ||
@@ -121,12 +128,12 @@ export const useSubmitHandler = <
trimmedMessage === '__' ||
trimmedMessage === '****';
- if (isEmptyMessage && numberOfUploads === 0) return;
-
+ if (isEmptyMessage && numberOfUploads === 0 && attachments.length === 0) return;
// the channel component handles the actual sending of the message
const someAttachmentsUploading =
Object.values(imageUploads).some((upload) => upload.state === 'uploading') ||
- Object.values(fileUploads).some((upload) => upload.state === 'uploading');
+ Object.values(fileUploads).some((upload) => upload.state === 'uploading') ||
+ attachments.some((att) => att.$internal?.uploadState === 'uploading');
if (someAttachmentsUploading) {
return addNotification(t('Wait until all attachments have uploaded'), 'error');
@@ -188,7 +195,11 @@ export const useSubmitHandler = <
linkPreviewsEnabled &&
((!someLinkPreviewsLoading && attachmentsFromLinkPreviews.length > 0) ||
someLinkPreviewsDismissed);
- const sendOptions = linkPreviewsEnabled ? { skip_enrich_url } : undefined;
+ const sendOptions =
+ linkPreviewsEnabled || options
+ ? Object.assign(linkPreviewsEnabled ? { skip_enrich_url } : {}, options ?? {})
+ : undefined;
+
if (message && message.type !== 'error') {
delete message.i18n;
diff --git a/src/components/MessageInput/hooks/useTimeElapsed.ts b/src/components/MessageInput/hooks/useTimeElapsed.ts
new file mode 100644
index 000000000..930e6e4a3
--- /dev/null
+++ b/src/components/MessageInput/hooks/useTimeElapsed.ts
@@ -0,0 +1,37 @@
+import { useCallback, useEffect, useRef, useState } from 'react';
+
+type UseTimeElapsedParams = {
+ startOnMount?: boolean;
+};
+
+// todo: provide start timestamp
+export const useTimeElapsed = ({ startOnMount }: UseTimeElapsedParams = {}) => {
+ const [secondsElapsed, setSecondsElapsed] = useState(0);
+ const updateInterval = useRef>();
+
+ const startCounter = useCallback(() => {
+ updateInterval.current = setInterval(() => {
+ setSecondsElapsed((prev) => prev + 1);
+ }, 1000);
+ }, []);
+
+ const stopCounter = useCallback(() => {
+ clearInterval(updateInterval.current);
+ }, []);
+
+ useEffect(() => {
+ if (!startOnMount) return;
+ updateInterval.current = setInterval(() => {
+ setSecondsElapsed((prev) => prev + 1);
+ }, 1000);
+ return () => {
+ stopCounter();
+ };
+ }, [startOnMount, stopCounter]);
+
+ return {
+ secondsElapsed,
+ startCounter,
+ stopCounter,
+ };
+};
diff --git a/src/components/MessageInput/hooks/useTimer.ts b/src/components/MessageInput/hooks/useTimer.ts
new file mode 100644
index 000000000..4ea7f57cc
--- /dev/null
+++ b/src/components/MessageInput/hooks/useTimer.ts
@@ -0,0 +1,23 @@
+import { useEffect, useState } from 'react';
+
+export const useTimer = ({ startFrom }: { startFrom: number }) => {
+ const [secondsLeft, setSecondsLeft] = useState();
+
+ useEffect(() => {
+ let countdownTimeout: ReturnType;
+ if (typeof secondsLeft === 'number' && secondsLeft > 0) {
+ countdownTimeout = setTimeout(() => {
+ setSecondsLeft(secondsLeft - 1);
+ }, 1000);
+ }
+ return () => {
+ clearTimeout(countdownTimeout);
+ };
+ }, [secondsLeft]);
+
+ useEffect(() => {
+ setSecondsLeft(startFrom ?? 0);
+ }, [startFrom]);
+
+ return secondsLeft;
+};
diff --git a/src/components/MessageInput/icons.tsx b/src/components/MessageInput/icons.tsx
index 2571de2a4..575213757 100644
--- a/src/components/MessageInput/icons.tsx
+++ b/src/components/MessageInput/icons.tsx
@@ -2,11 +2,6 @@ import React, { useMemo } from 'react';
import { nanoid } from 'nanoid';
import { useTranslationContext } from '../../context/TranslationContext';
-import { useChatContext } from '../../context/ChatContext';
-
-import type { Message } from 'stream-chat';
-
-import type { DefaultStreamChatGenerics } from '../../types/types';
export const EmojiIconLarge = () => {
const { t } = useTranslationContext('EmojiIconLarge');
@@ -206,49 +201,45 @@ export const SendIconV2 = () => {
return (
{t('Send')}
-
+
);
};
-export type SendButtonProps<
- StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics
-> = {
- sendMessage: (
- event: React.BaseSyntheticEvent,
- customMessageData?: Partial>,
- ) => void;
-} & React.ComponentProps<'button'>;
-
-export const SendButton = <
- StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics
->({
- sendMessage,
- ...rest
-}: SendButtonProps) => {
- const { themeVersion } = useChatContext('SendButton');
- const { t } = useTranslationContext('SendButton');
+export const MicIcon = () => (
+
+
+
+
+);
- return (
-
- {themeVersion === '2' ? : }
-
- );
-};
+export const BinIcon = () => (
+
+
+
+);
+
+export const PauseIcon = () => (
+
+
+
+);
+
+export const PlayIcon = () => (
+
+
+
+);
+
+export const CheckSignIcon = () => (
+
+
+
+);
diff --git a/src/components/MessageInput/index.ts b/src/components/MessageInput/index.ts
index 6c9b0bb2b..c6672866f 100644
--- a/src/components/MessageInput/index.ts
+++ b/src/components/MessageInput/index.ts
@@ -9,5 +9,6 @@ export * from './MessageInput';
export * from './MessageInputFlat';
export * from './MessageInputSmall';
export * from './QuotedMessagePreview';
+export * from './SendButton';
export * from './UploadsPreview';
export * from './types';
diff --git a/src/components/MessageInput/types.ts b/src/components/MessageInput/types.ts
index 7c43be1d3..e69416290 100644
--- a/src/components/MessageInput/types.ts
+++ b/src/components/MessageInput/types.ts
@@ -1,4 +1,4 @@
-import type { Attachment, OGAttachment } from 'stream-chat';
+import type { Attachment, DefaultGenerics, ExtendableGenerics, OGAttachment } from 'stream-chat';
import type { DefaultStreamChatGenerics } from '../../types/types';
type AttachmentLoadingState = 'uploading' | 'finished' | 'failed';
@@ -65,3 +65,63 @@ export enum SetLinkPreviewMode {
}
export type LinkPreviewMap = Map;
+
+export type VoiceRecordingAttachment<
+ StreamChatGenerics extends ExtendableGenerics = DefaultGenerics
+> = Attachment & {
+ asset_url: string;
+ type: 'voiceRecording';
+ duration?: number;
+ file_size?: number;
+ mime_type?: string;
+ title?: string;
+ waveform_data?: Array;
+};
+
+export type AudioAttachment<
+ StreamChatGenerics extends ExtendableGenerics = DefaultGenerics
+> = Attachment & {
+ asset_url: string;
+ type: 'audio';
+ file_size?: number;
+ mime_type?: string;
+ title?: string;
+};
+
+export type VideoAttachment<
+ StreamChatGenerics extends ExtendableGenerics = DefaultGenerics
+> = Attachment & {
+ asset_url: string;
+ type: 'video';
+ mime_type?: string;
+ thumb_url?: string;
+ title?: string;
+};
+
+export type AttachmentInternalMetadata = {
+ id: string;
+ file?: File;
+ uploadState?: AttachmentLoadingState;
+};
+
+type LocalAttachmentCast = T & { $internal: AttachmentInternalMetadata };
+
+export type LocalVoiceRecordingAttachment<
+ StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics
+> = LocalAttachmentCast>;
+
+export type LocalAudioAttachment<
+ StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics
+> = LocalAttachmentCast>;
+
+export type LocalVideoAttachment<
+ StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics
+> = LocalAttachmentCast>;
+
+export type LocalAttachment<
+ StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics
+> =
+ | LocalAttachmentCast>
+ | LocalAudioAttachment
+ | LocalVideoAttachment
+ | LocalVoiceRecordingAttachment;
diff --git a/src/components/ReactFileUtilities/types.ts b/src/components/ReactFileUtilities/types.ts
index 3ea0ebe46..9f3bc0e04 100644
--- a/src/components/ReactFileUtilities/types.ts
+++ b/src/components/ReactFileUtilities/types.ts
@@ -1,3 +1,5 @@
+export type RecordedMediaType = 'audio' | 'video';
+
export type UploadState = 'uploading' | 'finished' | 'failed';
export type FileLike = Blob | File;
diff --git a/src/components/ReactFileUtilities/utils.ts b/src/components/ReactFileUtilities/utils.ts
index 46af21740..15cea1c8a 100644
--- a/src/components/ReactFileUtilities/utils.ts
+++ b/src/components/ReactFileUtilities/utils.ts
@@ -1,4 +1,4 @@
-import type { FileLike } from './types';
+import { FileLike, RecordedMediaType } from './types';
import { ChangeEvent, useCallback } from 'react';
export const useHandleFileChangeWrapper = (
@@ -97,3 +97,40 @@ const extractImageSources = (s: string) => {
const imageTags = new DOMParser().parseFromString(s, 'text/html').getElementsByTagName('img');
return Array.from(imageTags, (tag) => tag.src).filter((tag) => tag);
};
+
+export const createFileFromBlobs = ({
+ blobsArray,
+ fileName,
+ mimeType,
+}: {
+ blobsArray: Blob[];
+ fileName: string;
+ mimeType: string;
+}) => {
+ const concatenatedBlob = new Blob(blobsArray, { type: mimeType });
+ return new File([concatenatedBlob], fileName, { type: concatenatedBlob.type });
+};
+
+export const getExtensionFromMimeType = (mimeType: string) => {
+ const match = mimeType.match(/\/([^/;]+)/);
+ return match && match[1];
+};
+
+export const getRecordedMediaTypeFromMimeType = (mimeType: string): RecordedMediaType | null => {
+ const match = mimeType.match(/^(audio|video)\/.*$/);
+ return match && (match[1] as RecordedMediaType);
+};
+
+export const readFileAsArrayBuffer = (file: File): Promise =>
+ new Promise((resolve, reject) => {
+ const fileReader = new FileReader();
+ fileReader.onload = () => {
+ resolve(fileReader.result as ArrayBuffer);
+ };
+
+ fileReader.onerror = () => {
+ reject(fileReader.error);
+ };
+
+ fileReader.readAsArrayBuffer(file);
+ });
diff --git a/src/components/Reactions/utils/utils.tsx b/src/components/Reactions/utils/utils.ts
similarity index 68%
rename from src/components/Reactions/utils/utils.tsx
rename to src/components/Reactions/utils/utils.ts
index a8ff88229..5f793ea7a 100644
--- a/src/components/Reactions/utils/utils.tsx
+++ b/src/components/Reactions/utils/utils.ts
@@ -1,8 +1,8 @@
-export const isMutableRef = (
- ref: React.ForwardedRef | null,
-): ref is React.MutableRefObject => {
+import { ForwardedRef, MutableRefObject } from 'react';
+
+export const isMutableRef = (ref: ForwardedRef | null): ref is MutableRefObject => {
if (ref) {
- return (ref as React.MutableRefObject).current !== undefined;
+ return (ref as MutableRefObject).current !== undefined;
}
return false;
};
diff --git a/src/components/index.ts b/src/components/index.ts
index 7e87f741f..9b1a388cb 100644
--- a/src/components/index.ts
+++ b/src/components/index.ts
@@ -18,6 +18,7 @@ export * from './Gallery';
export * from './InfiniteScrollPaginator';
export * from './Loading';
export * from './LoadMore';
+export * from './MediaRecorder';
export * from './Message';
export * from './MessageActions';
export * from './MessageInput';
diff --git a/src/context/ComponentContext.tsx b/src/context/ComponentContext.tsx
index 2cef40532..07ab40e72 100644
--- a/src/context/ComponentContext.tsx
+++ b/src/context/ComponentContext.tsx
@@ -7,7 +7,11 @@ import type { EmptyStateIndicatorProps } from '../components/EmptyStateIndicator
import type { EventComponentProps } from '../components/EventComponent/EventComponent';
import type { LoadingIndicatorProps } from '../components/Loading/LoadingIndicator';
import type { FixedHeightMessageProps } from '../components/Message/FixedHeightMessage';
-import type { MessageUIComponentProps, PinIndicatorProps } from '../components/Message/types';
+import type {
+ MessageProps,
+ MessageUIComponentProps,
+ PinIndicatorProps,
+} from '../components/Message/types';
import type { MessageDeletedProps } from '../components/Message/MessageDeleted';
import type { GiphyPreviewMessageProps } from '../components/MessageList/GiphyPreviewMessage';
import type { MessageListNotificationsProps } from '../components/MessageList/MessageListNotifications';
@@ -15,7 +19,6 @@ import type { MessageNotificationProps } from '../components/MessageList/Message
import type { MessageOptionsProps } from '../components/Message/MessageOptions';
import type { EmojiSearchIndex, MessageInputProps } from '../components/MessageInput/MessageInput';
import type { QuotedMessagePreviewProps } from '../components/MessageInput/QuotedMessagePreview';
-import type { MessageProps } from '../components/Message/types';
import type { MessageRepliesCountButtonProps } from '../components/Message/MessageRepliesCountButton';
import type { MessageStatusProps } from '../components/Message/MessageStatus';
import type { MessageTimestampProps } from '../components/Message/MessageTimestamp';
@@ -28,20 +31,22 @@ import type {
} from '../components/ChatAutoComplete/ChatAutoComplete';
import { UnreadMessagesSeparatorProps } from '../components/MessageList/UnreadMessagesSeparator';
import type { SuggestionListHeaderProps } from '../components/AutoCompleteTextarea';
-import type { SendButtonProps } from '../components/MessageInput/icons';
import type { ThreadHeaderProps } from '../components/Thread/ThreadHeader';
import type { TypingIndicatorProps } from '../components/TypingIndicator/TypingIndicator';
import type { CustomTrigger, DefaultStreamChatGenerics, UnknownType } from '../types/types';
-import type {
+import {
BaseImageProps,
CooldownTimerProps,
CustomMessageActionsListProps,
+ StartRecordingAudioButtonProps,
} from '../components';
import type { LinkPreviewListProps } from '../components/MessageInput/LinkPreviewList';
import type { ReactionOptions } from '../components/Reactions/reactionOptions';
import type { MessageBouncePromptProps } from '../components/MessageBounce';
import type { UnreadMessagesNotificationProps } from '../components/MessageList/UnreadMessagesNotification';
+import type { SendButtonProps } from '../components/MessageInput/SendButton';
+import type { RecordingPermissionDeniedNotificationProps } from '../components';
import type { TimestampProps } from '../components/Message/Timestamp';
export type ComponentContextValue<
@@ -55,6 +60,7 @@ export type ComponentContextValue<
reactionOptions: ReactionOptions;
UnreadMessagesSeparator: React.ComponentType;
AttachmentPreviewList?: React.ComponentType;
+ AudioRecorder?: React.ComponentType;
AutocompleteSuggestionHeader?: React.ComponentType;
AutocompleteSuggestionItem?: React.ComponentType>;
AutocompleteSuggestionList?: React.ComponentType>;
@@ -86,7 +92,9 @@ export type ComponentContextValue<
QuotedMessagePreview?: React.ComponentType>;
ReactionSelector?: React.ForwardRefExoticComponent>;
ReactionsList?: React.ComponentType>;
+ RecordingPermissionDeniedNotification?: React.ComponentType;
SendButton?: React.ComponentType>;
+ StartRecordingAudioButton?: React.ComponentType;
ThreadHead?: React.ComponentType>;
ThreadHeader?: React.ComponentType>;
ThreadInput?: React.ComponentType>;
diff --git a/src/i18n/de.json b/src/i18n/de.json
index 2700e4bc0..78c990a68 100644
--- a/src/i18n/de.json
+++ b/src/i18n/de.json
@@ -1,6 +1,11 @@
{
+ "Allow access to camera": "Zugriff auf Kamera erlauben",
+ "Allow access to microphone": "Zugriff auf Mikrofon erlauben",
+ "An error has occurred during recording": "Ein Fehler ist während der Aufnahme aufgetreten",
+ "An error has occurred during the recording processing": "Ein Fehler ist während der Aufnahmeverarbeitung aufgetreten",
"Attach files": "Dateien anhängen",
"Cancel": "Stornieren",
+ "Cannot seek in the recording": "In der Aufnahme es kann nicht gesucht werden",
"Channel Missing": "Kanal fehlt",
"Close": "Schließen",
"Close emoji picker": "Emoji-Picker schließen",
@@ -24,13 +29,16 @@
"Error pinning message": "Fehler beim Pinnen der Nachricht",
"Error removing message pin": "Fehler beim Entfernen der gepinnten Nachricht",
"Error reproducing the recording": "Fehler bei der Wiedergabe der Aufnahme",
+ "Error starting recording": "Fehler beim Starten der Aufnahme",
"Error unmuting a user ...": "Stummschaltung des Nutzers fehlgeschlagen ...",
+ "Error uploading attachment": "Fehler beim Hochladen des Anhangs",
"Error uploading file": "Fehler beim Hochladen der Datei",
"Error uploading image": "Hochladen des Bildes fehlgeschlagen",
"Error · Unsent": "Fehler nicht gesendet",
"Error: {{ errorMessage }}": "Fehler: {{ errorMessage }}",
"Failed to jump to the first unread message": "Fehler beim Springen zur ersten ungelesenen Nachricht",
"Failed to mark channel as read": "Fehler beim Markieren des Kanals als gelesen",
+ "Failed to play the recording": "Wiedergabe der Aufnahme fehlgeschlagen",
"File is too large: {{ size }}, maximum upload size is {{ limit }}": "Datei ist zu groß: {{ size }}, maximale Upload-Größe beträgt {{ limit }}",
"Flag": "Meldung",
"Latest Messages": "Neueste Nachrichten",
@@ -43,16 +51,19 @@
"Message has been successfully flagged": "Nachricht wurde erfolgreich gemeldet",
"Message pinned": "Nachricht gepinnt",
"Messages have been marked unread.": "Nachrichten wurden als ungelesen markiert.",
+ "Missing permissions to upload the attachment": "Berechtigungen zum Hochladen des Anhangs fehlen",
"Mute": "Stumm schalten",
"New": "Neu",
"New Messages!": "Neue Nachrichten!",
"No chats here yet…": "Noch keine Chats hier...",
"No results found": "keine Ergebnisse gefunden",
"Nothing yet...": "Noch nichts...",
+ "Ok": "Ok",
"Open emoji picker": "Emoji-Picker öffnen",
"People matching": "Passende Personen",
"Pin": "Pin",
"Pinned by": "Gepinnt von",
+ "Recording format is not supported and cannot be reproduced": "Aufnahmeformat wird nicht unterstützt und kann nicht wiedergegeben werden",
"Reply": "Antworten",
"Reply to Message": "Auf Nachricht antworten",
"Search": "Suche",
@@ -67,6 +78,8 @@
"This message did not meet our content guidelines": "Diese Nachricht entsprach nicht unseren Inhaltsrichtlinien",
"This message was deleted...": "Diese Nachricht wurde gelöscht...",
"Thread": "Thread",
+ "To start recording, allow the camera access in your browser": "Um mit der Aufnahme zu beginnen, erlauben Sie den Zugriff auf die Kamera in Ihrem Browser",
+ "To start recording, allow the microphone access in your browser": "Um mit der Aufnahme zu beginnen, erlauben Sie den Zugriff auf das Mikrofon in Ihrem Browser",
"Type your message": "Nachricht eingeben",
"Unmute": "Stummschaltung aufheben",
"Unpin": "Pin entfernen",
diff --git a/src/i18n/en.json b/src/i18n/en.json
index 96661a75a..67da302ec 100644
--- a/src/i18n/en.json
+++ b/src/i18n/en.json
@@ -1,6 +1,11 @@
{
+ "Allow access to camera": "Allow access to camera",
+ "Allow access to microphone": "Allow access to microphone",
+ "An error has occurred during recording": "An error has occurred during recording",
+ "An error has occurred during the recording processing": "An error has occurred during the recording processing",
"Attach files": "Attach files",
"Cancel": "Cancel",
+ "Cannot seek in the recording": "Cannot seek in the recording",
"Channel Missing": "Channel Missing",
"Close": "Close",
"Close emoji picker": "Close emoji picker",
@@ -24,13 +29,16 @@
"Error pinning message": "Error pinning message",
"Error removing message pin": "Error removing message pin",
"Error reproducing the recording": "Error reproducing the recording",
+ "Error starting recording": "Error starting recording",
"Error unmuting a user ...": "Error unmuting a user ...",
+ "Error uploading attachment": "Error uploading attachment",
"Error uploading file": "Error uploading file",
"Error uploading image": "Error uploading image",
"Error · Unsent": "Error · Unsent",
"Error: {{ errorMessage }}": "Error: {{ errorMessage }}",
"Failed to jump to the first unread message": "Failed to jump to the first unread message",
"Failed to mark channel as read": "Failed to mark channel as read",
+ "Failed to play the recording": "Failed to play the recording",
"File is too large: {{ size }}, maximum upload size is {{ limit }}": "File is too large: {{ size }}, maximum upload size is {{ limit }}",
"Flag": "Flag",
"Latest Messages": "Latest Messages",
@@ -43,16 +51,19 @@
"Message has been successfully flagged": "Message has been successfully flagged",
"Message pinned": "Message pinned",
"Messages have been marked unread.": "Messages have been marked unread.",
+ "Missing permissions to upload the attachment": "Missing permissions to upload the attachment",
"Mute": "Mute",
"New": "New",
"New Messages!": "New Messages!",
"No chats here yet…": "No chats here yet…",
"No results found": "No results found",
"Nothing yet...": "Nothing yet...",
+ "Ok": "Ok",
"Open emoji picker": "Open emoji picker",
"People matching": "People matching",
"Pin": "Pin",
"Pinned by": "Pinned by",
+ "Recording format is not supported and cannot be reproduced": "Recording format is not supported and cannot be reproduced",
"Reply": "Reply",
"Reply to Message": "Reply to Message",
"Search": "Search",
@@ -67,6 +78,8 @@
"This message did not meet our content guidelines": "This message did not meet our content guidelines",
"This message was deleted...": "This message was deleted...",
"Thread": "Thread",
+ "To start recording, allow the camera access in your browser": "To start recording, allow the camera access in your browser",
+ "To start recording, allow the microphone access in your browser": "To start recording, allow the microphone access in your browser",
"Type your message": "Type your message",
"Unmute": "Unmute",
"Unpin": "Unpin",
diff --git a/src/i18n/es.json b/src/i18n/es.json
index 98aa43751..421110d32 100644
--- a/src/i18n/es.json
+++ b/src/i18n/es.json
@@ -1,6 +1,11 @@
{
+ "Allow access to camera": "Permitir acceso a la cámara",
+ "Allow access to microphone": "Permitir acceso al micrófono",
+ "An error has occurred during recording": "Se ha producido un error durante la grabación",
+ "An error has occurred during the recording processing": "Se ha producido un error durante el procesamiento de la grabación",
"Attach files": "Adjuntar archivos",
"Cancel": "Cancelar",
+ "Cannot seek in the recording": "No se puede buscar en la grabación",
"Channel Missing": "Falta canal",
"Close": "Cerca",
"Close emoji picker": "Cerrar el selector de emojis",
@@ -24,13 +29,16 @@
"Error pinning message": "Mensaje de error al fijar",
"Error removing message pin": "Error al quitar el pin del mensaje",
"Error reproducing the recording": "Error al reproducir la grabación",
+ "Error starting recording": "Error al iniciar la grabación",
"Error unmuting a user ...": "Error al activar el silencio de un usuario ...",
+ "Error uploading attachment": "Error al subir el archivo adjunto",
"Error uploading file": "Error al cargar el archivo",
"Error uploading image": "Error subiendo imagen",
"Error · Unsent": "Error · No enviado",
"Error: {{ errorMessage }}": "Error: {{ errorMessage }}",
"Failed to jump to the first unread message": "Error al saltar al primer mensaje no leído",
"Failed to mark channel as read": "Error al marcar el canal como leído",
+ "Failed to play the recording": "No se pudo reproducir la grabación",
"File is too large: {{ size }}, maximum upload size is {{ limit }}": "El archivo es demasiado grande: {{ size }}, el tamaño máximo de carga es de {{ limit }}",
"Flag": "Bandera",
"Latest Messages": "Últimos mensajes",
@@ -43,16 +51,19 @@
"Message has been successfully flagged": "El mensaje se marcó correctamente",
"Message pinned": "Mensaje fijado",
"Messages have been marked unread.": "Los mensajes han sido marcados como no leídos.",
+ "Missing permissions to upload the attachment": "Permisos faltantes para subir el archivo adjunto",
"Mute": "Mudo",
"New": "Nuevo",
"New Messages!": "¡Nuevos mensajes!",
"No chats here yet…": "Aún no hay mensajes aquí...",
"No results found": "No se han encontrado resultados",
"Nothing yet...": "Nada aún...",
+ "Ok": "Ok",
"Open emoji picker": "Selector de emoji abierto",
"People matching": "Personas que coinciden",
"Pin": "Alfiler",
"Pinned by": "Fijado por",
+ "Recording format is not supported and cannot be reproduced": "El formato de grabación no es compatible y no se puede reproducir",
"Reply": "Respuesta",
"Reply to Message": "Responder al mensaje",
"Search": "Buscar",
@@ -67,6 +78,8 @@
"This message did not meet our content guidelines": "Este mensaje no cumple nuestras directrices de contenido",
"This message was deleted...": "Este mensaje fue eliminado ...",
"Thread": "Hilo",
+ "To start recording, allow the camera access in your browser": "Para comenzar a grabar, permita el acceso a la cámara en su navegador",
+ "To start recording, allow the microphone access in your browser": "Para comenzar a grabar, permita el acceso al micrófono en su navegador",
"Type your message": "Escribe tu mensaje",
"Unmute": "Activar sonido",
"Unpin": "Desprender",
diff --git a/src/i18n/fr.json b/src/i18n/fr.json
index e9484d08d..a32c9756c 100644
--- a/src/i18n/fr.json
+++ b/src/i18n/fr.json
@@ -1,6 +1,11 @@
{
+ "Allow access to camera": "Autoriser l'accès à la caméra",
+ "Allow access to microphone": "Autoriser l'accès au microphone",
+ "An error has occurred during recording": "Une erreur s'est produite pendant l'enregistrement",
+ "An error has occurred during the recording processing": "Une erreur s'est produite pendant le traitement de l'enregistrement",
"Attach files": "Pièces jointes",
"Cancel": "Annuler",
+ "Cannot seek in the recording": "Impossible de rechercher dans l'enregistrement",
"Channel Missing": "Canal Manquant",
"Close": "Fermer",
"Close emoji picker": "Fermer le sélecteur d'emojis",
@@ -24,13 +29,16 @@
"Error pinning message": "Erreur d'épinglage du message",
"Error removing message pin": "Erreur lors de la suppression du code PIN du message",
"Error reproducing the recording": "Erreur lors de la reproduction de l'enregistrement",
+ "Error starting recording": "Erreur de démarrage de l'enregistrement",
"Error unmuting a user ...": "Erreur de désactivation de la fonction sourdine pour un utilisateur ...",
+ "Error uploading attachment": "Erreur lors du téléchargement de la pièce jointe",
"Error uploading file": "Erreur lors du téléchargement du fichier",
"Error uploading image": "Erreur lors de l'envoi de l'image",
"Error · Unsent": "Erreur - Non envoyé",
"Error: {{ errorMessage }}": "Erreur : {{ errorMessage }}",
"Failed to jump to the first unread message": "Échec de saut vers le premier message non lu",
"Failed to mark channel as read": "Échec de la marque du canal comme lu",
+ "Failed to play the recording": "Impossible de lire l'enregistrement",
"File is too large: {{ size }}, maximum upload size is {{ limit }}": "Le fichier est trop volumineux : {{ size }}, la taille de téléchargement maximale est de {{ limit }}",
"Flag": "Signaler",
"Latest Messages": "Derniers messages",
@@ -43,16 +51,19 @@
"Message has been successfully flagged": "Le message a été signalé avec succès",
"Message pinned": "Message épinglé",
"Messages have been marked unread.": "Les messages ont été marqués comme non lus.",
+ "Missing permissions to upload the attachment": "Autorisations manquantes pour télécharger la pièce jointe",
"Mute": "Muet",
"New": "Nouveaux",
"New Messages!": "Nouveaux Messages!",
"No chats here yet…": "Pas encore de messages ici...",
"No results found": "Aucun résultat trouvé",
"Nothing yet...": "Aucun message...",
+ "Ok": "D'accord",
"Open emoji picker": "Ouvrez le sélecteur d'emoji",
"People matching": "Correspondance de personnes",
"Pin": "Épingle",
"Pinned by": "Épinglé par",
+ "Recording format is not supported and cannot be reproduced": "Le format d'enregistrement n'est pas pris en charge et ne peut pas être reproduit",
"Reply": "Réponse",
"Reply to Message": "Répondre au message",
"Search": "Rechercher",
@@ -67,6 +78,8 @@
"This message did not meet our content guidelines": "Ce message n'est pas conforme à nos lignes directrices en matière de contenu",
"This message was deleted...": "Ce message a été supprimé...",
"Thread": "Fil de discussion",
+ "To start recording, allow the camera access in your browser": "Pour commencer l'enregistrement, autorisez l'accès à la caméra dans votre navigateur",
+ "To start recording, allow the microphone access in your browser": "Pour commencer l'enregistrement, autorisez l'accès au microphone dans votre navigateur",
"Type your message": "Saisissez votre message",
"Unmute": "Désactiver muet",
"Unpin": "Détacher",
diff --git a/src/i18n/hi.json b/src/i18n/hi.json
index df0d32846..9c8a3b8ee 100644
--- a/src/i18n/hi.json
+++ b/src/i18n/hi.json
@@ -1,6 +1,11 @@
{
+ "Allow access to camera": "कैमरा तक पहुँच दें",
+ "Allow access to microphone": "माइक्रोफ़ोन तक पहुँच दें",
+ "An error has occurred during recording": "रेकॉर्डिंग के दौरान एक त्रुटि आ गई है",
+ "An error has occurred during the recording processing": "रेकॉर्डिंग प्रोसेसिंग के दौरान एक त्रुटि आ गई है",
"Attach files": "फाइल्स अटैच करे",
"Cancel": "रद्द करें",
+ "Cannot seek in the recording": "रेकॉर्डिंग में खोज नहीं की जा सकती",
"Channel Missing": "चैनल उपलब्ध नहीं है",
"Close": "बंद करे",
"Close emoji picker": "इमोजी पिकर बंद करें",
@@ -25,13 +30,16 @@
"Error pinning message": "संदेश को पिन करने में त्रुटि",
"Error removing message pin": "संदेश पिन निकालने में त्रुटि",
"Error reproducing the recording": "रिकॉर्डिंग पुन: उत्पन्न करने में त्रुटि",
+ "Error starting recording": "रेकॉर्डिंग शुरू करने में त्रुटि",
"Error unmuting a user ...": "यूजर को अनम्यूट करने का प्रयास फेल हुआ",
+ "Error uploading attachment": "अटैचमेंट अपलोड करते समय त्रुटि",
"Error uploading file": "फ़ाइल अपलोड करने में त्रुटि",
"Error uploading image": "छवि अपलोड करने में त्रुटि",
"Error · Unsent": "फेल",
"Error: {{ errorMessage }}": "फेल: {{ errorMessage }}",
"Failed to jump to the first unread message": "पहले अपठित संदेश पर जाने में विफल",
"Failed to mark channel as read": "चैनल को पढ़ा हुआ चिह्नित करने में विफल।",
+ "Failed to play the recording": "रेकॉर्डिंग प्ले करने में विफल",
"File is too large: {{ size }}, maximum upload size is {{ limit }}": "फ़ाइल बहुत बड़ी है: {{ size }}, अधिकतम अपलोड साइज़ {{ limit }} है",
"Flag": "फ्लैग करे",
"Latest Messages": "नवीनतम संदेश",
@@ -44,16 +52,19 @@
"Message has been successfully flagged": "मैसेज को फ्लैग कर दिया गया है",
"Message pinned": "संदेश पिन किया गया",
"Messages have been marked unread.": "संदेशों को अपठित चिह्नित किया गया है।",
+ "Missing permissions to upload the attachment": "अटैचमेंट अपलोड करने के लिए अनुमतियां गायब",
"Mute": "म्यूट करे",
"New": "नए",
"New Messages!": "नए मैसेज!",
"No chats here yet…": "यहां अभी तक कोई चैट नहीं...",
"No results found": "कोई परिणाम नहीं मिला",
"Nothing yet...": "कोई मैसेज नहीं है",
+ "Ok": "ठीक है",
"Open emoji picker": "इमोजी पिकर खोलिये",
"People matching": "मेल खाते लोग",
"Pin": "पिन",
"Pinned by": "द्वारा पिन किया गया",
+ "Recording format is not supported and cannot be reproduced": "रेकॉर्डिंग फ़ॉर्मेट समर्थित नहीं है और पुनः उत्पन्न नहीं किया जा सकता",
"Reply": "जवाब दे दो",
"Reply to Message": "संदेश का जवाब दें",
"Search": "खोज",
@@ -68,6 +79,8 @@
"This message did not meet our content guidelines": "यह संदेश हमारे सामग्री दिशानिर्देशों के अनुरूप नहीं था",
"This message was deleted...": "मैसेज हटा दिया गया",
"Thread": "रिप्लाई थ्रेड",
+ "To start recording, allow the camera access in your browser": "रिकॉर्डिंग शुरू करने के लिए, अपने ब्राउज़र में कैमरा तक पहुँच दें",
+ "To start recording, allow the microphone access in your browser": "Tरिकॉर्डिंग शुरू करने के लिए, अपने ब्राउज़र में माइक्रोफ़ोन तक पहुँच दें",
"Type your message": "अपना मैसेज लिखे",
"Unmute": "अनम्यूट",
"Unpin": "अनपिन",
diff --git a/src/i18n/it.json b/src/i18n/it.json
index 2ce682348..54c8ce0ca 100644
--- a/src/i18n/it.json
+++ b/src/i18n/it.json
@@ -1,6 +1,11 @@
{
+ "Allow access to camera": "Consenti l'accesso alla fotocamera",
+ "Allow access to microphone": "Consenti l'accesso al microfono",
+ "An error has occurred during recording": "Si è verificato un errore durante la registrazione",
+ "An error has occurred during the recording processing": "Si è verificato un errore durante l'elaborazione della registrazione",
"Attach files": "Allega file",
"Cancel": "Annulla",
+ "Cannot seek in the recording": "Impossibile cercare nella registrazione",
"Channel Missing": "Il canale non esiste",
"Close": "Chiudi",
"Close emoji picker": "Chiudi il selettore di emoji",
@@ -24,13 +29,16 @@
"Error pinning message": "Errore durante il blocco del messaggio",
"Error removing message pin": "Errore durante la rimozione del PIN del messaggio",
"Error reproducing the recording": "Errore durante la riproduzione della registrazione",
+ "Error starting recording": "Errore durante l'avvio della registrazione",
"Error unmuting a user ...": "Errore riattivando le notifiche per l'utente ...",
+ "Error uploading attachment": "Errore durante il caricamento dell'allegato",
"Error uploading file": "Errore durante il caricamento del file",
"Error uploading image": "Errore durante il caricamento dell'immagine",
"Error · Unsent": "Errore · Non inviato",
"Error: {{ errorMessage }}": "Errore: {{ errorMessage }}",
"Failed to jump to the first unread message": "Impossibile passare al primo messaggio non letto",
"Failed to mark channel as read": "Impossibile contrassegnare il canale come letto",
+ "Failed to play the recording": "Impossibile riprodurre la registrazione",
"File is too large: {{ size }}, maximum upload size is {{ limit }}": "Il file è troppo grande: {{ size }}, la dimensione massima di caricamento è {{ limit }}",
"Flag": "Segnala",
"Latest Messages": "Ultimi messaggi",
@@ -43,16 +51,19 @@
"Message has been successfully flagged": "Il messaggio é stato segnalato con successo",
"Message pinned": "Messaggio bloccato",
"Messages have been marked unread.": "I messaggi sono stati contrassegnati come non letti.",
+ "Missing permissions to upload the attachment": "Autorizzazioni mancanti per caricare l'allegato",
"Mute": "Silenzia",
"New": "Nuovo",
"New Messages!": "Nuovo messaggio!",
"No chats here yet…": "Non ci sono ancora messaggi qui...",
"No results found": "Nessun risultato trovato",
"Nothing yet...": "Ancora niente...",
+ "Ok": "Ok",
"Open emoji picker": "Apri il selettore dellle emoji",
"People matching": "Persone che corrispondono",
"Pin": "Pin",
"Pinned by": "Appuntato da",
+ "Recording format is not supported and cannot be reproduced": "Il formato di registrazione non è supportato e non può essere riprodotto",
"Reply": "Rispondere",
"Reply to Message": "Rispondi al messaggio",
"Search": "Ricerca",
@@ -67,6 +78,8 @@
"This message did not meet our content guidelines": "Questo messaggio non soddisfa le nostre linee guida sui contenuti",
"This message was deleted...": "Questo messaggio é stato cancellato",
"Thread": "Thread",
+ "To start recording, allow the camera access in your browser": "Per iniziare la registrazione, consenti l'accesso alla fotocamera nel tuo browser",
+ "To start recording, allow the microphone access in your browser": "Per iniziare la registrazione, consenti l'accesso al microfono nel tuo browser",
"Type your message": "Scrivi il tuo messaggio",
"Unmute": "Riattiva le notifiche",
"Unpin": "Sblocca",
diff --git a/src/i18n/ja.json b/src/i18n/ja.json
index 8d0d58ea3..fef297786 100644
--- a/src/i18n/ja.json
+++ b/src/i18n/ja.json
@@ -1,6 +1,11 @@
{
+ "Allow access to camera": "カメラへのアクセスを許可する",
+ "Allow access to microphone": "マイクロフォンへのアクセスを許可する",
+ "An error has occurred during recording": "録音中にエラーが発生しました",
+ "An error has occurred during the recording processing": "録音処理中にエラーが発生しました",
"Attach files": "ファイルを添付する",
"Cancel": "キャンセル",
+ "Cannot seek in the recording": "録音中にシークできません",
"Channel Missing": "チャネルがありません",
"Close": "閉める",
"Close emoji picker": "絵文字ピッカーを閉める",
@@ -24,13 +29,16 @@
"Error pinning message": "メッセージをピンのエラーが発生しました",
"Error removing message pin": "メッセージのピンを削除のエラーが発生しました",
"Error reproducing the recording": "録音の再生中にエラーが発生しました",
+ "Error starting recording": "録音の開始時にエラーが発生しました",
"Error unmuting a user ...": "ユーザーの無音解除のエラーが発生しました...",
+ "Error uploading attachment": "添付ファイルのアップロード中にエラーが発生しました",
"Error uploading file": "ファイルをアップロードのエラーが発生しました",
"Error uploading image": "画像をアップロードのエラーが発生しました",
"Error · Unsent": "エラー・未送信",
"Error: {{ errorMessage }}": "エラー: {{ errorMessage }}",
"Failed to jump to the first unread message": "最初の未読メッセージにジャンプできませんでした",
"Failed to mark channel as read": "チャンネルを既読にすることができませんでした",
+ "Failed to play the recording": "録音の再生に失敗しました",
"File is too large: {{ size }}, maximum upload size is {{ limit }}": "ファイルが大きすぎます:{{ size }}、最大アップロードサイズは{{ limit }}です",
"Flag": "フラグ",
"Latest Messages": "最新のメッセージ",
@@ -43,16 +51,19 @@
"Message has been successfully flagged": "メッセージに正常にフラグが付けられました",
"Message pinned": "メッセージにピンが付けられました",
"Messages have been marked unread.": "メッセージは未読としてマークされました。",
+ "Missing permissions to upload the attachment": "添付ファイルをアップロードするための許可がありません",
"Mute": "無音",
"New": "新しい",
"New Messages!": "新しいメッセージ!",
"No chats here yet…": "ここにはまだチャットはありません…",
"No results found": "結果が見つかりません",
"Nothing yet...": "まだ何もありません...",
+ "Ok": "OK",
"Open emoji picker": "絵文字ピッカーを開く",
"People matching": "一致する人",
"Pin": "ピン",
"Pinned by": "ピンした方",
+ "Recording format is not supported and cannot be reproduced": "録音形式はサポートされておらず、再生できません",
"Reply": "返事",
"Reply to Message": "メッセージに返信",
"Search": "探す",
@@ -67,6 +78,8 @@
"This message did not meet our content guidelines": "このメッセージはコンテンツガイドラインに適合していません",
"This message was deleted...": "このメッセージは削除されました...",
"Thread": "スレッド",
+ "To start recording, allow the camera access in your browser": "録音を開始するには、ブラウザーでカメラへのアクセスを許可してください",
+ "To start recording, allow the microphone access in your browser": "録音を開始するには、ブラウザーでマイクロフォンへのアクセスを許可してください",
"Type your message": "メッセージを入力してください",
"Unmute": "無音を解除する",
"Unpin": "ピンを解除する",
diff --git a/src/i18n/ko.json b/src/i18n/ko.json
index b32462414..bf25fa2cd 100644
--- a/src/i18n/ko.json
+++ b/src/i18n/ko.json
@@ -1,6 +1,11 @@
{
+ "Allow access to camera": "카메라에 대한 액세스 허용",
+ "Allow access to microphone": "마이크로폰에 대한 액세스 허용",
+ "An error has occurred during recording": "녹음 중 오류가 발생했습니다",
+ "An error has occurred during the recording processing": "녹음 처리 중 오류가 발생했습니다",
"Attach files": "파일 첨부",
"Cancel": "취소",
+ "Cannot seek in the recording": "녹음에서 찾을 수 없습니다",
"Channel Missing": "채널 누락",
"Close": "닫기",
"Close emoji picker": "이모티콘 선택기 닫기",
@@ -24,13 +29,16 @@
"Error pinning message": "메시지를 핀하는 중에 오류가 발생했습니다.",
"Error removing message pin": "메시지 핀을 제거하는 중에 오류가 발생했습니다.",
"Error reproducing the recording": "녹음 재생 중 오류 발생",
+ "Error starting recording": "녹음 시작 중 오류가 발생했습니다",
"Error unmuting a user ...": "사용자 음소거 해제 중 오류 발생...",
+ "Error uploading attachment": "첨부 파일 업로드 중 오류가 발생했습니다",
"Error uploading file": "파일 업로드 오류",
"Error uploading image": "이미지를 업로드하는 동안 오류가 발생했습니다.",
"Error · Unsent": "오류 · 전송되지 않음",
"Error: {{ errorMessage }}": "오류: {{ errorMessage }}",
"Failed to jump to the first unread message": "첫 번째 읽지 않은 메시지로 이동하지 못했습니다",
"Failed to mark channel as read": "채널을 읽음으로 표시하는 데 실패했습니다",
+ "Failed to play the recording": "녹음을 재생하지 못했습니다",
"File is too large: {{ size }}, maximum upload size is {{ limit }}": "파일이 너무 큽니다: {{ size }}, 최대 업로드 크기는 {{ limit }}입니다",
"Flag": "플래그",
"Latest Messages": "최신 메시지",
@@ -43,16 +51,19 @@
"Message has been successfully flagged": "메시지에 플래그가 지정되었습니다.",
"Message pinned": "메시지 핀했습니다",
"Messages have been marked unread.": "메시지가 읽지 않음으로 표시되었습니다.",
+ "Missing permissions to upload the attachment": "첨부 파일을 업로드하려면 권한이 필요합니다",
"Mute": "무음",
"New": "새로운",
"New Messages!": "새 메시지!",
"No chats here yet…": "아직 채팅이 없습니다...",
"No results found": "검색 결과가 없습니다",
"Nothing yet...": "아직 아무것도...",
+ "Ok": "확인",
"Open emoji picker": "이모티콘 선택기 열기",
"People matching": "일치하는 사람",
"Pin": "핀",
"Pinned by": "핀했던 분:",
+ "Recording format is not supported and cannot be reproduced": "녹음 형식이 지원되지 않으므로 재생할 수 없습니다",
"Reply": "답장",
"Reply to Message": "메시지에 답장",
"Search": "찾다",
@@ -67,6 +78,8 @@
"This message did not meet our content guidelines": "이 메시지는 콘텐츠 가이드라인을 충족하지 않습니다.",
"This message was deleted...": "이 메시지는 삭제되었습니다...",
"Thread": "스레드",
+ "To start recording, allow the camera access in your browser": "브라우저에서 카메라 액세스를 허용하여 녹음을 시작합니다",
+ "To start recording, allow the microphone access in your browser": "브라우저에서 마이크로폰 액세스를 허용하여 녹음을 시작합니다",
"Type your message": "메시지 입력",
"Unmute": "음소거 해제",
"Unpin": "핀 해제",
diff --git a/src/i18n/nl.json b/src/i18n/nl.json
index 7e077813c..45483e46a 100644
--- a/src/i18n/nl.json
+++ b/src/i18n/nl.json
@@ -1,6 +1,11 @@
{
+ "Allow access to camera": "Toegang tot camera toestaan",
+ "Allow access to microphone": "Toegang tot microfoon toestaan",
+ "An error has occurred during recording": "Er is een fout opgetreden tijdens het opnemen",
+ "An error has occurred during the recording processing": "Er is een fout opgetreden tijdens de verwerking van de opname",
"Attach files": "Bijlage toevoegen",
"Cancel": "Annuleer",
+ "Cannot seek in the recording": "Kan niet zoeken in de opname",
"Channel Missing": "Kanaal niet gevonden",
"Close": "Sluit",
"Close emoji picker": "Sluit de emoji-kiezer",
@@ -24,13 +29,16 @@
"Error pinning message": "Fout bij vastzetten van bericht",
"Error removing message pin": "Fout bij verwijderen van berichtpin",
"Error reproducing the recording": "Fout bij het afspelen van de opname",
+ "Error starting recording": "Fout bij het starten van de opname",
"Error unmuting a user ...": "Fout bij het unmuten van de gebruiker",
+ "Error uploading attachment": "Fout bij het uploaden van de bijlage",
"Error uploading file": "Fout bij uploaden bestand",
"Error uploading image": "Fout bij uploaden afbeelding",
"Error · Unsent": "Error: · niet verzonden",
"Error: {{ errorMessage }}": "Error: {{ errorMessage }}",
"Failed to jump to the first unread message": "Niet gelukt om naar het eerste ongelezen bericht te springen",
"Failed to mark channel as read": "Kanaal kon niet als gelezen worden gemarkeerd",
+ "Failed to play the recording": "Kan de opname niet afspelen",
"File is too large: {{ size }}, maximum upload size is {{ limit }}": "Bestand is te groot: {{ size }}, maximale uploadgrootte is {{ limit }}",
"Flag": "Markeer",
"Latest Messages": "Laatste berichten",
@@ -43,16 +51,19 @@
"Message has been successfully flagged": "Bericht is succesvol gemarkeerd",
"Message pinned": "Bericht vastgezet",
"Messages have been marked unread.": "Berichten zijn gemarkeerd als ongelezen.",
+ "Missing permissions to upload the attachment": "Missende toestemmingen om de bijlage te uploaden",
"Mute": "Mute",
"New": "Nieuwe",
"New Messages!": "Nieuwe Berichten!",
"No chats here yet…": "Nog geen chats hier...",
"No results found": "Geen resultaten gevonden",
"Nothing yet...": "Nog niets ...",
+ "Ok": "Oké",
"Open emoji picker": "Open emojipicker",
"People matching": "Mensen die matchen",
"Pin": "Pin",
"Pinned by": "Vastgemaakt door",
+ "Recording format is not supported and cannot be reproduced": "Opnameformaat wordt niet ondersteund en kan niet worden gereproduceerd",
"Reply": "Antwoord",
"Reply to Message": "Antwoord op bericht",
"Search": "Zoeken",
@@ -67,6 +78,8 @@
"This message did not meet our content guidelines": "Dit bericht voldeed niet aan onze inhoudsrichtlijnen",
"This message was deleted...": "Dit bericht was verwijderd",
"Thread": "Draadje",
+ "To start recording, allow the camera access in your browser": "Om te beginnen met opnemen, sta toegang tot de camera toe in uw browser",
+ "To start recording, allow the microphone access in your browser": "Om te beginnen met opnemen, sta toegang tot de microfoon toe in uw browser",
"Type your message": "Type je bericht",
"Unmute": "Unmute",
"Unpin": "Losmaken",
diff --git a/src/i18n/pt.json b/src/i18n/pt.json
index 3718c733d..d66f86bf5 100644
--- a/src/i18n/pt.json
+++ b/src/i18n/pt.json
@@ -1,6 +1,11 @@
{
+ "Allow access to camera": "Permitir acesso à câmera",
+ "Allow access to microphone": "Permitir acesso ao microfone",
+ "An error has occurred during recording": "Ocorreu um erro durante a gravação",
+ "An error has occurred during the recording processing": "Ocorreu um erro durante o processamento da gravação",
"Attach files": "Anexar arquivos",
"Cancel": "Cancelar",
+ "Cannot seek in the recording": "Não é possível buscar na gravação",
"Channel Missing": "Canal ausente",
"Close": "Fechar",
"Close emoji picker": "Fechar seletor de emoji",
@@ -24,13 +29,16 @@
"Error pinning message": "Erro ao fixar mensagem",
"Error removing message pin": "Erro ao remover o PIN da mensagem",
"Error reproducing the recording": "Erro ao reproduzir a gravação",
+ "Error starting recording": "Erro ao iniciar a gravação",
"Error unmuting a user ...": "Erro ao ativar o som de um usuário...",
+ "Error uploading attachment": "Erro ao carregar o anexo",
"Error uploading file": "Erro ao enviar arquivo",
"Error uploading image": "Erro ao carregar a imagem",
"Error · Unsent": "Erro · Não enviado",
"Error: {{ errorMessage }}": "Erro: {{ errorMessage }}",
"Failed to jump to the first unread message": "Falha ao pular para a primeira mensagem não lida",
"Failed to mark channel as read": "Falha ao marcar o canal como lido",
+ "Failed to play the recording": "Permissões faltando para carregar o anexo",
"File is too large: {{ size }}, maximum upload size is {{ limit }}": "O arquivo é muito grande: {{ size }}, o tamanho máximo de upload é {{ limit }}",
"Flag": "Reportar",
"Latest Messages": "Mensagens mais recentes",
@@ -43,16 +51,19 @@
"Message has been successfully flagged": "A mensagem foi reportada com sucesso",
"Message pinned": "Mensagem fixada",
"Messages have been marked unread.": "Mensagens foram marcadas como não lidas.",
+ "Missing permissions to upload the attachment": "Missing permissions to upload the attachment",
"Mute": "Mudo",
"New": "Novo",
"New Messages!": "Novas mensagens!",
"No chats here yet…": "Ainda não há conversas aqui...",
"No results found": "Nenhum resultado encontrado",
"Nothing yet...": "Nada ainda...",
+ "Ok": "Ok",
"Open emoji picker": "Abrir seletor de emoji",
"People matching": "Pessoas correspondentes",
"Pin": "Fixar",
"Pinned by": "Fixado por",
+ "Recording format is not supported and cannot be reproduced": "Formato de gravação não é suportado e não pode ser reproduzido",
"Reply": "Responder",
"Reply to Message": "Responder a mensagem",
"Search": "Procurar",
@@ -67,6 +78,8 @@
"This message did not meet our content guidelines": "Esta mensagem não corresponde às nossas directrizes de conteúdo",
"This message was deleted...": "Esta mensagem foi excluída...",
"Thread": "Fio",
+ "To start recording, allow the camera access in your browser": "Para começar a gravar, permita o acesso à câmera no seu navegador",
+ "To start recording, allow the microphone access in your browser": "Para começar a gravar, permita o acesso ao microfone no seu navegador",
"Type your message": "Digite sua mensagem",
"Unmute": "Ativar som",
"Unpin": "Liberar",
diff --git a/src/i18n/ru.json b/src/i18n/ru.json
index c000ea885..acc259fd7 100644
--- a/src/i18n/ru.json
+++ b/src/i18n/ru.json
@@ -1,6 +1,11 @@
{
+ "Allow access to camera": "Разрешить доступ к камере",
+ "Allow access to microphone": "Разрешить доступ к микрофону",
+ "An error has occurred during recording": "Произошла ошибка во время записи",
+ "An error has occurred during the recording processing": "Произошла ошибка во время обработки записи",
"Attach files": "Прикрепить файлы",
"Cancel": "Отмена",
+ "Cannot seek in the recording": "Невозможно осуществить поиск в записи",
"Channel Missing": "Канал не найден",
"Close": "Закрыть",
"Close emoji picker": "Закрыть окно выбора смайлов",
@@ -24,13 +29,16 @@
"Error pinning message": "Сообщение об ошибке при закреплении",
"Error removing message pin": "Ошибка при удалении булавки сообщения",
"Error reproducing the recording": "Ошибка воспроизведения записи",
+ "Error starting recording": "Ошибка при запуске записи",
"Error unmuting a user ...": "Ошибка включения уведомлений...",
+ "Error uploading attachment": "Ошибка при загрузке вложения",
"Error uploading file": "Ошибка при загрузке файла",
"Error uploading image": "Ошибка загрузки изображения",
"Error · Unsent": "Ошибка · Не отправлено",
"Error: {{ errorMessage }}": "Ошибка: {{ errorMessage }}",
"Failed to jump to the first unread message": "Не удалось перейти к первому непрочитанному сообщению",
"Failed to mark channel as read": "Не удалось пометить канал как прочитанный",
+ "Failed to play the recording": "Не удалось воспроизвести запись",
"File is too large: {{ size }}, maximum upload size is {{ limit }}": "Файл слишком большой: {{ size }}, максимальный размер загрузки составляет {{ limit }}",
"Flag": "Пожаловаться",
"Latest Messages": "Последние сообщения",
@@ -43,16 +51,19 @@
"Message has been successfully flagged": "Жалоба на сообщение была принята",
"Message pinned": "Сообщение закреплено",
"Messages have been marked unread.": "Сообщения были отмечены как непрочитанные.",
+ "Missing permissions to upload the attachment": "Отсутствуют разрешения для загрузки вложения",
"Mute": "Отключить уведомления",
"New": "Новые",
"New Messages!": "Новые сообщения!",
"No chats here yet…": "Здесь еще нет чатов...",
"No results found": "результаты не найдены",
"Nothing yet...": "Пока ничего нет...",
+ "Ok": "Ok",
"Open emoji picker": "Выбрать emoji",
"People matching": "Соответствующие люди",
"Pin": "Штырь",
"Pinned by": "Закреплено",
+ "Recording format is not supported and cannot be reproduced": "Формат записи не поддерживается и не может быть воспроизведен",
"Reply": "Отвечать",
"Reply to Message": "Ответить на сообщение",
"Search": "Поиск",
@@ -67,6 +78,8 @@
"This message did not meet our content guidelines": "Сообщение не соответствует правилам",
"This message was deleted...": "Сообщение было удалено...",
"Thread": "Ветка",
+ "To start recording, allow the camera access in your browser": "Для начала записи разрешите доступ к камере в вашем браузере",
+ "To start recording, allow the microphone access in your browser": "Для начала записи разрешите доступ к микрофону в вашем браузере",
"Type your message": "Ваше сообщение",
"Unmute": "Включить уведомления",
"Unpin": "Открепить",
diff --git a/src/i18n/tr.json b/src/i18n/tr.json
index 2f80861ba..d094dce0f 100644
--- a/src/i18n/tr.json
+++ b/src/i18n/tr.json
@@ -1,6 +1,11 @@
{
+ "Allow access to camera": "Kameraya erişime izin ver",
+ "Allow access to microphone": "Mikrofona erişime izin ver",
+ "An error has occurred during recording": "Kayıt sırasında bir hata oluştu",
+ "An error has occurred during the recording processing": "Kayıt işlemi sırasında bir hata oluştu",
"Attach files": "Dosya ekle",
"Cancel": "İptal",
+ "Cannot seek in the recording": "Kayıtta arama yapılamıyor",
"Channel Missing": "Kanal bulunamıyor",
"Close": "Kapat",
"Close emoji picker": "Emoji seçiciyi kapat",
@@ -24,13 +29,16 @@
"Error pinning message": "Mesaj sabitlenirken hata oluştu",
"Error removing message pin": "Mesaj PIN'i kaldırılırken hata oluştu",
"Error reproducing the recording": "Kaydı yeniden üretme hatası",
+ "Error starting recording": "Kayıt başlatılırken hata oluştu",
"Error unmuting a user ...": "Kullanıcının sesini açarken hata oluştu ...",
+ "Error uploading attachment": "Ek yüklenirken hata oluştu",
"Error uploading file": "Dosya yüklenirken hata oluştu",
"Error uploading image": "Resmi yüklerken hata",
"Error · Unsent": "Hata · Gönderilemedi",
"Error: {{ errorMessage }}": "Hata: {{ errorMessage }}",
"Failed to jump to the first unread message": "İlk okunmamış mesaja atlamada hata oluştu",
"Failed to mark channel as read": "Kanalı okundu olarak işaretleme başarısız oldu",
+ "Failed to play the recording": "Kayıt oynatılamadı",
"File is too large: {{ size }}, maximum upload size is {{ limit }}": "Dosya çok büyük: {{ size }}, maksimum yükleme boyutu {{ limit }}",
"Flag": "Bayrak",
"Latest Messages": "Son Mesajlar",
@@ -43,16 +51,19 @@
"Message has been successfully flagged": "Mesaj başarıyla bayraklandı",
"Message pinned": "Mesaj sabitlendi",
"Messages have been marked unread.": "Mesajlar okunmamış olarak işaretlendi.",
+ "Missing permissions to upload the attachment": "Ek yüklemek için izinler eksik",
"Mute": "Sessiz",
"New": "Yeni",
"New Messages!": "Yeni Mesajlar!",
"No chats here yet…": "Henüz burada sohbet yok...",
"No results found": "Sonuç bulunamadı",
"Nothing yet...": "Şimdilik hiçbir şey...",
+ "Ok": "Tamam",
"Open emoji picker": "Emoji klavyesini aç",
"People matching": "Eşleşen kişiler",
"Pin": "Toplu iğne",
"Pinned by": "Sabitleyen",
+ "Recording format is not supported and cannot be reproduced": "Kayıt formatı desteklenmiyor ve çoğaltılamıyor",
"Reply": "Cevapla",
"Reply to Message": "Mesajı Cevapla",
"Search": "Arama",
@@ -67,6 +78,8 @@
"This message did not meet our content guidelines": "Bu mesaj içerik yönergelerimize uygun değil",
"This message was deleted...": "Bu mesaj silindi",
"Thread": "Konu",
+ "To start recording, allow the camera access in your browser": "Kayıt yapmaya başlamak için tarayıcınızda kameraya erişime izin verin",
+ "To start recording, allow the microphone access in your browser": "Kayıt yapmaya başlamak için tarayıcınızda mikrofona erişime izin verin",
"Type your message": "Mesajınızı yazın",
"Unmute": "Sesini aç",
"Unpin": "Sabitlemeyi kaldır",
diff --git a/src/mock-builders/browser/AnalyserNode.js b/src/mock-builders/browser/AnalyserNode.js
new file mode 100644
index 000000000..3aabfd0bb
--- /dev/null
+++ b/src/mock-builders/browser/AnalyserNode.js
@@ -0,0 +1,4 @@
+export class AnalyserNodeMock {
+ disconnect = jest.fn();
+ getByteFrequencyData = jest.fn();
+}
diff --git a/src/mock-builders/browser/AudioContext.js b/src/mock-builders/browser/AudioContext.js
new file mode 100644
index 000000000..ae1b187ca
--- /dev/null
+++ b/src/mock-builders/browser/AudioContext.js
@@ -0,0 +1,12 @@
+class Connectable {
+ connect = jest.fn();
+ disconnect = jest.fn();
+}
+
+export class AudioContextMock {
+ constructor() {}
+
+ createAnalyser = jest.fn(() => new Connectable());
+ createMediaStreamSource = jest.fn(() => new Connectable());
+ close = jest.fn();
+}
diff --git a/src/mock-builders/browser/EventEmitter.js b/src/mock-builders/browser/EventEmitter.js
new file mode 100644
index 000000000..ab6e94a0e
--- /dev/null
+++ b/src/mock-builders/browser/EventEmitter.js
@@ -0,0 +1,4 @@
+export class EventEmitterMock {
+ addEventListener = jest.fn();
+ removeEventListener = jest.fn();
+}
diff --git a/src/mock-builders/browser/HTMLMediaElement.js b/src/mock-builders/browser/HTMLMediaElement.js
new file mode 100644
index 000000000..feb79d7c0
--- /dev/null
+++ b/src/mock-builders/browser/HTMLMediaElement.js
@@ -0,0 +1 @@
+jest.spyOn(window.HTMLMediaElement.prototype, 'pause').mockImplementation();
diff --git a/src/mock-builders/browser/MediaRecorder.js b/src/mock-builders/browser/MediaRecorder.js
new file mode 100644
index 000000000..95906b3c7
--- /dev/null
+++ b/src/mock-builders/browser/MediaRecorder.js
@@ -0,0 +1,12 @@
+import { EventEmitterMock } from './EventEmitter';
+
+export class MediaRecorderMock extends EventEmitterMock {
+ constructor() {
+ super();
+ }
+
+ start = jest.fn();
+ pause = jest.fn();
+ resume = jest.fn();
+ stop = jest.fn();
+}
diff --git a/src/mock-builders/browser/events/dataavailable.js b/src/mock-builders/browser/events/dataavailable.js
new file mode 100644
index 000000000..85ff1c2a8
--- /dev/null
+++ b/src/mock-builders/browser/events/dataavailable.js
@@ -0,0 +1,23 @@
+export const generateDataavailableEvent = (
+ { dataOverrides, mediaRecorder } = {
+ dataOverrides: {},
+ mediaRecorder: new window.MediaRecorder(),
+ },
+) => ({
+ bubbles: false,
+ cancelable: false,
+ cancelBubble: false,
+ composed: false,
+ currentTarget: mediaRecorder,
+ data: new Blob([0x48], { type: 'audio/webm' }),
+ defaultPrevented: false,
+ eventPhase: 0,
+ isTrusted: true,
+ returnValue: true,
+ srcElement: mediaRecorder,
+ target: mediaRecorder,
+ timecode: 1713214079256.997,
+ timeStamp: 11853.20000000298,
+ type: 'dataavailable',
+ ...dataOverrides,
+});
diff --git a/src/mock-builders/browser/index.js b/src/mock-builders/browser/index.js
new file mode 100644
index 000000000..438c57ac2
--- /dev/null
+++ b/src/mock-builders/browser/index.js
@@ -0,0 +1,4 @@
+export * from './AnalyserNode';
+export * from './AudioContext';
+export * from './EventEmitter';
+export * from './MediaRecorder';
diff --git a/src/mock-builders/generator/upload.js b/src/mock-builders/generator/upload.js
index 2c82106b9..22817bb95 100644
--- a/src/mock-builders/generator/upload.js
+++ b/src/mock-builders/generator/upload.js
@@ -1,21 +1,22 @@
import { nanoid } from 'nanoid';
-export const generateUpload = ({ fileOverrides = {}, objectOverrides = {} } = {}) => {
+export const generateFile = (data = {}) => {
const date = new Date();
-
return {
- file: {
- lastModified: +date,
- lastModifiedDate: date,
- name: nanoid(),
- size: 10,
- type: 'file',
- uri: null,
- ...fileOverrides,
- },
- id: nanoid(),
- state: 'uploading',
- url: 'url',
- ...objectOverrides,
+ lastModified: +date,
+ lastModifiedDate: date,
+ name: nanoid(),
+ size: 10,
+ type: 'file',
+ uri: null,
+ ...data,
};
};
+
+export const generateUpload = ({ fileOverrides = {}, objectOverrides = {} } = {}) => ({
+ file: generateFile(fileOverrides),
+ id: nanoid(),
+ state: 'uploading',
+ url: 'url',
+ ...objectOverrides,
+});
diff --git a/src/utils/mergeDeep.ts b/src/utils/mergeDeep.ts
new file mode 100644
index 000000000..f22aa9a8e
--- /dev/null
+++ b/src/utils/mergeDeep.ts
@@ -0,0 +1,16 @@
+import mergeWith from 'lodash.mergewith';
+import { UR } from 'stream-chat';
+
+const overrideEverything = (_: unknown, source: unknown) => source;
+
+export const mergeDeep = (
+ target: TObject,
+ source: TSource,
+) => mergeWith(target, source, overrideEverything);
+
+const overrideUndefinedOnly = (object: unknown, source: unknown) => object ?? source;
+
+export const mergeDeepUndefined = (
+ target: TObject,
+ source: TSource,
+) => mergeWith(target, source, overrideUndefinedOnly);
diff --git a/yarn.lock b/yarn.lock
index bea1b84bb..9977f9e08 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1095,6 +1095,11 @@
resolved "https://registry.yarnpkg.com/@braintree/sanitize-url/-/sanitize-url-6.0.4.tgz#923ca57e173c6b232bbbb07347b1be982f03e783"
integrity sha512-s3jaWicZd0pkP0jf5ysyHUI/RE7MHos6qlToFcGWXVp+ykHOy77OUMrfbgJ9it2C5bow7OIQwYYaHjk9XlBQ2A==
+"@breezystack/lamejs@^1.2.7":
+ version "1.2.7"
+ resolved "https://registry.yarnpkg.com/@breezystack/lamejs/-/lamejs-1.2.7.tgz#c4779f7f0b6b685da675ebbaaff85e52187a51ad"
+ integrity sha512-6wc7ck65ctA75Hq7FYHTtTvGnYs6msgdxiSUICQ+A01nVOWg6rqouZB8IdyteRlfpYYiFovkf67dIeOgWIUzTA==
+
"@colors/colors@1.5.0":
version "1.5.0"
resolved "https://registry.yarnpkg.com/@colors/colors/-/colors-1.5.0.tgz#bb504579c1cae923e6576a4f5da43d25f97bdbd9"
@@ -2503,6 +2508,13 @@
dependencies:
"@types/lodash" "*"
+"@types/lodash.mergewith@^4.6.9":
+ version "4.6.9"
+ resolved "https://registry.yarnpkg.com/@types/lodash.mergewith/-/lodash.mergewith-4.6.9.tgz#7093028a36de3cae4495d03b9d92c351cab1f8bf"
+ integrity sha512-fgkoCAOF47K7sxrQ7Mlud2TH023itugZs2bUg8h/KzT+BnZNrR2jAOmaokbLunHNnobXVWOezAeNn/lZqwxkcw==
+ dependencies:
+ "@types/lodash" "*"
+
"@types/lodash.throttle@^4.1.7":
version "4.1.7"
resolved "https://registry.yarnpkg.com/@types/lodash.throttle/-/lodash.throttle-4.1.7.tgz#4ef379eb4f778068022310ef166625f420b6ba58"
@@ -6686,6 +6698,11 @@ findup-sync@^3.0.0:
micromatch "^3.0.4"
resolve-dir "^1.0.1"
+fix-webm-duration@^1.0.5:
+ version "1.0.5"
+ resolved "https://registry.yarnpkg.com/fix-webm-duration/-/fix-webm-duration-1.0.5.tgz#da4558a92196a677302bfc54780b494dd5336a96"
+ integrity sha512-b6oula3OfSknx0aWoLsxvp4DVIYbwsf+UAkr6EDAK3iuMYk/OSNKzmeSI61GXK0MmFTEuzle19BPvTxMIKjkZg==
+
flat-cache@^2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/flat-cache/-/flat-cache-2.0.1.tgz#5d296d6f04bda44a4630a301413bdbc2ec085ec0"