import React from 'react';
import { StyleSheet, Text, View } from 'react-native';
import dayjs from 'dayjs';
import {
MessageInputContextValue,
useMessageInputContext,
} from '../../../../contexts/messageInputContext/MessageInputContext';
import { useTheme } from '../../../../contexts/themeContext/ThemeContext';
import type { DefaultStreamChatGenerics } from '../../../../types/types';
/**
 * Props for the context-aware inner component: the `AudioRecordingWaveform`
 * component picked from the message input context, plus the recording data
 * supplied by the parent recorder.
 */
type AudioRecordingInProgressPropsWithContext<
  StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics,
> = Pick<MessageInputContextValue<StreamChatGenerics>, 'AudioRecordingWaveform'> & {
  /**
   * The waveform data to be presented to show the audio levels.
   */
  waveformData: number[];
  /**
   * Maximum number of waveform lines that should be rendered in the UI.
   */
  maxDataPointsDrawn?: number;
  /**
   * The duration of the voice recording.
   * Presumably in milliseconds, since it is passed straight to
   * `dayjs.duration(...)` (whose numeric default is ms) — TODO confirm with caller.
   */
  recordingDuration?: number;
};
/**
 * Inner (context-unaware) component rendering the elapsed-recording timer and
 * the live waveform while a voice recording is in progress.
 *
 * @param props.AudioRecordingWaveform - Waveform renderer from the message input context.
 * @param props.maxDataPointsDrawn - Cap on waveform bars drawn (defaults to 80).
 * @param props.recordingDuration - Elapsed recording time; upstream this is
 *   `durationMillis` on Expo apps and `currentPosition` on Native CLI apps.
 * @param props.waveformData - Audio level samples to visualize.
 */
const AudioRecordingInProgressWithContext = <
  StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics,
>(
  props: AudioRecordingInProgressPropsWithContext<StreamChatGenerics>,
) => {
  const {
    AudioRecordingWaveform,
    maxDataPointsDrawn = 80,
    recordingDuration,
    waveformData,
  } = props;

  const {
    theme: {
      colors: { grey_dark },
      messageInput: {
        audioRecordingInProgress: { container, durationText },
      },
    },
  } = useTheme();

  return (
    <View style={[styles.container, container]}>
      {/* NOTE(review): `dayjs.duration` requires the dayjs `duration` plugin to
          have been registered via `dayjs.extend` — presumably done at the
          package entry point; confirm. */}
      <Text style={[styles.durationText, { color: grey_dark }, durationText]}>
        {/* Nullish check (not truthiness) so a duration of 0 — the very first
            tick of a recording — still renders as "00:00" instead of blank. */}
        {recordingDuration != null ? dayjs.duration(recordingDuration).format('mm:ss') : null}
      </Text>
      <AudioRecordingWaveform maxDataPointsDrawn={maxDataPointsDrawn} waveformData={waveformData} />
    </View>
  );
};
/**
 * Memoization comparator for the recording view: a re-render is triggered only
 * when `recordingDuration` changes; no other prop is compared.
 */
const areEqual = <StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics>(
  prevProps: AudioRecordingInProgressPropsWithContext<StreamChatGenerics>,
  nextProps: AudioRecordingInProgressPropsWithContext<StreamChatGenerics>,
) => prevProps.recordingDuration === nextProps.recordingDuration;
/**
 * Memoized wrapper around the context-aware component; `areEqual` restricts
 * re-renders to `recordingDuration` changes. The cast restores the generic
 * call signature that `React.memo`'s return type would otherwise lose.
 */
const MemoizedAudioRecordingInProgress = React.memo(
  AudioRecordingInProgressWithContext,
  areEqual,
) as typeof AudioRecordingInProgressWithContext;
/**
 * Public props for `AudioRecordingInProgress`: everything from the
 * context-aware props becomes an optional override, while `waveformData`
 * remains required.
 */
export type AudioRecordingInProgressProps<
  StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics,
> = Partial<AudioRecordingInProgressPropsWithContext<StreamChatGenerics>> & {
  /** The waveform data to be presented to show the audio levels. */
  waveformData: number[];
};
/**
* Component displayed when the audio is in the recording state.
*/
export const AudioRecordingInProgress = <
StreamChatGenerics extends DefaultStreamChatGenerics = DefaultStreamChatGenerics,
>(
props: AudioRecordingInProgressProps<StreamChatGenerics>,
) => {
const { AudioRecordingWaveform } = useMessageInputContext<StreamChatGenerics>();
return <MemoizedAudioRecordingInProgress {...{ AudioRecordingWaveform }} {...props} />;
};
// Base styles; the theme's `messageInput.audioRecordingInProgress` entries
// (`container`, `durationText`) are layered on top of these at render time.
const styles = StyleSheet.create({
  container: {
    alignItems: 'center',
    flexDirection: 'row',
    justifyContent: 'space-between',
    paddingBottom: 8,
    paddingTop: 4,
  },
  durationText: {
    fontSize: 16,
  },
});

// NOTE(review): the `{messageInput}` suffix presumably encodes the theme
// namespace for debugging/theming tooling — confirm against sibling components.
AudioRecordingInProgress.displayName = 'AudioRecordingInProgress{messageInput}';