Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Implement async method decode_audio_data (however still blocking IO) #500

Open
wants to merge 3 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
135 changes: 133 additions & 2 deletions src/context/base.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,8 @@ use crate::param::AudioParamDescriptor;
use crate::periodic_wave::{PeriodicWave, PeriodicWaveOptions};
use crate::{node, AudioListener};

use std::future::Future;

/// The interface representing an audio-processing graph built from audio modules linked together,
/// each represented by an `AudioNode`.
///
Expand All @@ -29,11 +31,11 @@ pub trait BaseAudioContext {
///
/// In addition to the official spec, the input parameter can be any byte stream (not just an
/// array). This means you can decode audio data from a file, network stream, or in memory
/// buffer, and any other [`std::io::Read`] implementer. The data if buffered internally so you
/// buffer, and any other [`std::io::Read`] implementer. The data is buffered internally so you
/// should not wrap the source in a `BufReader`.
///
/// This function operates synchronously, which may be undesirable on the control thread. The
/// example shows how to avoid this. An async version is currently not implemented.
/// example shows how to avoid this. See also the async method [`Self::decode_audio_data`].
///
/// # Errors
///
Expand Down Expand Up @@ -82,6 +84,50 @@ pub trait BaseAudioContext {
Ok(buffer)
}

/// Decode an [`AudioBuffer`] from a given input stream.
///
/// The current implementation can decode FLAC, Opus, PCM, Vorbis, and Wav.
///
/// In addition to the official spec, the input parameter can be any byte stream (not just an
/// array). This means you can decode audio data from a file, network stream, or in memory
/// buffer, and any other [`std::io::Read`] implementer. The data is buffered internally so you
/// should not wrap the source in a `BufReader`.
///
/// Warning, the current implementation still uses blocking IO so it's best to use Tokio's
/// `spawn_blocking` to run the decoding on a thread dedicated to blocking operations. See also
/// the synchronous method [`Self::decode_audio_data_sync`].
///
/// # Errors
///
/// This method returns an Error in various cases (IO, mime sniffing, decoding).
// Use of `async fn` in public traits is discouraged as auto trait bounds cannot be specified,
// hence we use `-> impl Future + ..` instead.
fn decode_audio_data<R: std::io::Read + Send + Sync + 'static>(
    &self,
    input: R,
) -> impl Future<Output = Result<AudioBuffer, Box<dyn std::error::Error + Send + Sync>>>
       + Send
       + 'static {
    // Capture the sample rate up front: the returned future must be 'static
    // and therefore cannot borrow `self`.
    let sample_rate = self.sample_rate();
    async move {
        // Set up a media decoder, consume the stream in full and construct a single buffer out of it
        let mut buffer = MediaDecoder::try_new(input)?
            .collect::<Result<Vec<_>, _>>()?
            .into_iter()
            .reduce(|mut accum, item| {
                accum.extend(&item);
                accum
            })
            // if there are no samples decoded, return an empty buffer
            .unwrap_or_else(|| AudioBuffer::from(vec![vec![]], sample_rate));

        // resample to desired rate (no-op if already matching)
        buffer.resample(sample_rate);

        Ok(buffer)
    }
}

/// Create an new "in-memory" `AudioBuffer` with the given number of channels,
/// length (i.e. number of samples per channel) and sample rate.
///
Expand Down Expand Up @@ -339,3 +385,88 @@ pub trait BaseAudioContext {
}
}
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::context::OfflineAudioContext;

    use float_eq::assert_float_eq;

    fn require_send_sync_static<T: Send + Sync + 'static>(_: T) {}

    // Shared checks for a buffer decoded from `samples/sample.wav`: expected
    // rate, frame count, channel count, duration, and distinct channel data.
    fn verify_sample_wav(decoded: &AudioBuffer) {
        assert_eq!(decoded.sample_rate(), 44100.);
        assert_eq!(decoded.length(), 142_187);
        assert_eq!(decoded.number_of_channels(), 2);
        assert_float_eq!(decoded.duration(), 3.224, abs_all <= 0.001);

        // assert distinct two channel data
        assert!(decoded.get_channel_data(0)[0..100] != decoded.get_channel_data(1)[0..100]);
    }

    #[test]
    fn test_decode_audio_data_sync() {
        let ctx = OfflineAudioContext::new(1, 1, 44100.);
        let input = std::fs::File::open("samples/sample.wav").unwrap();
        let decoded = ctx.decode_audio_data_sync(input).unwrap();
        verify_sample_wav(&decoded);
    }

    #[test]
    fn test_decode_audio_data_future_send_static() {
        let ctx = OfflineAudioContext::new(1, 1, 44100.);
        let input = std::fs::File::open("samples/sample.wav").unwrap();
        // The returned future must be shippable to another thread/executor.
        require_send_sync_static(ctx.decode_audio_data(input));
    }

    #[test]
    fn test_decode_audio_data_async() {
        use futures::executor;
        let ctx = OfflineAudioContext::new(1, 1, 44100.);
        let input = std::fs::File::open("samples/sample.wav").unwrap();
        let decoded = executor::block_on(ctx.decode_audio_data(input)).unwrap();
        verify_sample_wav(&decoded);
    }

    // #[test]
    // disabled: symphonia cannot handle empty WAV-files
    #[allow(dead_code)]
    fn test_decode_audio_data_empty() {
        let ctx = OfflineAudioContext::new(1, 1, 44100.);
        let input = std::fs::File::open("samples/empty_2c.wav").unwrap();
        assert_eq!(ctx.decode_audio_data_sync(input).unwrap().length(), 0);
    }

    #[test]
    fn test_decode_audio_data_decoding_error() {
        let ctx = OfflineAudioContext::new(1, 1, 44100.);
        let input = std::fs::File::open("samples/corrupt.wav").unwrap();
        assert!(ctx.decode_audio_data_sync(input).is_err());
    }

    #[test]
    fn test_create_buffer() {
        let ctx = OfflineAudioContext::new(1, 1, 44100.);
        let buffer = ctx.create_buffer(3, 2000, 96_000.);

        assert_eq!(buffer.number_of_channels(), 3);
        assert_eq!(buffer.length(), 2000);
        assert_float_eq!(buffer.sample_rate(), 96000., abs_all <= 0.);
    }
}
65 changes: 9 additions & 56 deletions src/context/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -146,8 +146,6 @@ mod tests {
use super::*;
use crate::node::AudioNode;

use float_eq::assert_float_eq;

fn require_send_sync_static<T: Send + Sync + 'static>(_: T) {}

#[test]
Expand All @@ -156,7 +154,7 @@ mod tests {
let registration = context.mock_registration();

// we want to be able to ship AudioNodes to another thread, so the Registration should be
// Send Sync and 'static
// Send, Sync and 'static
require_send_sync_static(registration);
}

Expand All @@ -167,62 +165,17 @@ mod tests {
}

#[test]
fn test_sample_rate_length() {
let context = OfflineAudioContext::new(1, 48000, 96000.);
assert_float_eq!(context.sample_rate(), 96000., abs_all <= 0.);
assert_eq!(context.length(), 48000);
}

#[test]
fn test_decode_audio_data() {
let context = OfflineAudioContext::new(1, 1, 44100.);
let file = std::fs::File::open("samples/sample.wav").unwrap();
let audio_buffer = context.decode_audio_data_sync(file).unwrap();

assert_eq!(audio_buffer.sample_rate(), 44100.);
assert_eq!(audio_buffer.length(), 142_187);
assert_eq!(audio_buffer.number_of_channels(), 2);
assert_float_eq!(audio_buffer.duration(), 3.224, abs_all <= 0.001);

let left_start = &audio_buffer.get_channel_data(0)[0..100];
let right_start = &audio_buffer.get_channel_data(1)[0..100];
// assert distinct two channel data
assert!(left_start != right_start);
}

// #[test]
// disabled: symphonia cannot handle empty WAV-files
#[allow(dead_code)]
fn test_decode_audio_data_empty() {
let context = OfflineAudioContext::new(1, 1, 44100.);
let file = std::fs::File::open("samples/empty_2c.wav").unwrap();
let audio_buffer = context.decode_audio_data_sync(file).unwrap();
assert_eq!(audio_buffer.length(), 0);
}

#[test]
fn test_decode_audio_data_decoding_error() {
let context = OfflineAudioContext::new(1, 1, 44100.);
let file = std::fs::File::open("samples/corrupt.wav").unwrap();
assert!(context.decode_audio_data_sync(file).is_err());
}

#[test]
fn test_create_buffer() {
let number_of_channels = 3;
let length = 2000;
let sample_rate = 96_000.;

let context = OfflineAudioContext::new(1, 1, 44100.);
let buffer = context.create_buffer(number_of_channels, length, sample_rate);

assert_eq!(buffer.number_of_channels(), 3);
assert_eq!(buffer.length(), 2000);
assert_float_eq!(buffer.sample_rate(), 96000., abs_all <= 0.);
fn test_online_audio_context_send_sync() {
let options = AudioContextOptions {
sink_id: "none".into(),
..AudioContextOptions::default()
};
let context = AudioContext::new(options);
require_send_sync_static(context);
}

#[test]
fn test_registration() {
fn test_context_equals() {
let context = OfflineAudioContext::new(1, 48000, 96000.);
let dest = context.destination();
assert!(dest.context() == context.base());
Expand Down
7 changes: 7 additions & 0 deletions src/context/offline.rs
Original file line number Diff line number Diff line change
Expand Up @@ -433,6 +433,13 @@ mod tests {
use crate::node::AudioNode;
use crate::node::AudioScheduledSourceNode;

#[test]
fn test_sample_rate_length() {
    // The context must report back the exact length and sample rate it was
    // constructed with.
    let ctx = OfflineAudioContext::new(1, 48000, 96000.);
    assert_eq!(ctx.length(), 48000);
    assert_float_eq!(ctx.sample_rate(), 96000., abs_all <= 0.);
}

#[test]
fn render_empty_graph() {
let mut context = OfflineAudioContext::new(2, 555, 44_100.);
Expand Down
Loading