diff --git a/.github/workflows/msrv.yaml b/.github/workflows/msrv.yaml index 6b6de740..d657bc16 100644 --- a/.github/workflows/msrv.yaml +++ b/.github/workflows/msrv.yaml @@ -25,7 +25,7 @@ jobs: - name: Install Rust toolchain # Aligned with `rust-version` in `Cargo.toml` - uses: dtolnay/rust-toolchain@1.71 + uses: dtolnay/rust-toolchain@1.76 - name: Check out repository uses: actions/checkout@v3 diff --git a/Cargo.toml b/Cargo.toml index 43d407e8..8dc83329 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,7 +15,7 @@ include = [ "LICENSE", "README.md", ] -rust-version = "1.71" +rust-version = "1.76" [dependencies] almost = "0.2.0" diff --git a/src/context/base.rs b/src/context/base.rs index 9911a668..331c5f90 100644 --- a/src/context/base.rs +++ b/src/context/base.rs @@ -12,6 +12,8 @@ use crate::param::AudioParamDescriptor; use crate::periodic_wave::{PeriodicWave, PeriodicWaveOptions}; use crate::{node, AudioListener}; +use std::future::Future; + /// The interface representing an audio-processing graph built from audio modules linked together, /// each represented by an `AudioNode`. /// @@ -29,11 +31,11 @@ pub trait BaseAudioContext { /// /// In addition to the official spec, the input parameter can be any byte stream (not just an /// array). This means you can decode audio data from a file, network stream, or in memory - /// buffer, and any other [`std::io::Read`] implementer. The data if buffered internally so you + /// buffer, and any other [`std::io::Read`] implementer. The data is buffered internally so you /// should not wrap the source in a `BufReader`. /// /// This function operates synchronously, which may be undesirable on the control thread. The - /// example shows how to avoid this. An async version is currently not implemented. + /// example shows how to avoid this. See also the async method [`Self::decode_audio_data`]. 
/// /// # Errors /// /// This method returns an Error in various cases (IO, mime sniffing, decoding). @@ -82,6 +84,50 @@ pub trait BaseAudioContext { Ok(buffer) } + /// Decode an [`AudioBuffer`] from a given input stream. + /// + /// The current implementation can decode FLAC, Opus, PCM, Vorbis, and Wav. + /// + /// In addition to the official spec, the input parameter can be any byte stream (not just an + /// array). This means you can decode audio data from a file, network stream, or in memory + /// buffer, and any other [`std::io::Read`] implementer. The data is buffered internally so you + /// should not wrap the source in a `BufReader`. + /// + /// Warning: the current implementation still uses blocking IO so it's best to use Tokio's + /// `spawn_blocking` to run the decoding on a thread dedicated to blocking operations. See also + /// the synchronous method [`Self::decode_audio_data_sync`]. + /// + /// # Errors + /// + /// This method returns an Error in various cases (IO, mime sniffing, decoding). + // Use of `async fn` in public traits is discouraged as auto trait bounds cannot be specified, + // hence we use `-> impl Future + ..` instead. + fn decode_audio_data<R: std::io::Read + Send + Sync + 'static>( + &self, + input: R, + ) -> impl Future<Output = Result<AudioBuffer, Box<dyn Error + Send + Sync>>> + + Send + + 'static { + let sample_rate = self.sample_rate(); + async move { + // Set up a media decoder, consume the stream in full and construct a single buffer out of it + let mut buffer = MediaDecoder::try_new(input)? + .collect::<Result<Vec<_>, _>>()? + .into_iter() + .reduce(|mut accum, item| { + accum.extend(&item); + accum + }) + // if there are no samples decoded, return an empty buffer + .unwrap_or_else(|| AudioBuffer::from(vec![vec![]], sample_rate)); + + // resample to desired rate (no-op if already matching) + buffer.resample(sample_rate); + + Ok(buffer) + } + } + /// Create an new "in-memory" `AudioBuffer` with the given number of channels, /// length (i.e. number of samples per channel) and sample rate. 
/// @@ -339,3 +385,88 @@ pub trait BaseAudioContext { } } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::context::OfflineAudioContext; + + use float_eq::assert_float_eq; + + fn require_send_sync_static<T: Send + 'static>(_: T) {} + + #[test] + fn test_decode_audio_data_sync() { + let context = OfflineAudioContext::new(1, 1, 44100.); + let file = std::fs::File::open("samples/sample.wav").unwrap(); + let audio_buffer = context.decode_audio_data_sync(file).unwrap(); + + assert_eq!(audio_buffer.sample_rate(), 44100.); + assert_eq!(audio_buffer.length(), 142_187); + assert_eq!(audio_buffer.number_of_channels(), 2); + assert_float_eq!(audio_buffer.duration(), 3.224, abs_all <= 0.001); + + let left_start = &audio_buffer.get_channel_data(0)[0..100]; + let right_start = &audio_buffer.get_channel_data(1)[0..100]; + // assert distinct two channel data + assert!(left_start != right_start); + } + + #[test] + fn test_decode_audio_data_future_send_static() { + let context = OfflineAudioContext::new(1, 1, 44100.); + let file = std::fs::File::open("samples/sample.wav").unwrap(); + let future = context.decode_audio_data(file); + require_send_sync_static(future); + } + + #[test] + fn test_decode_audio_data_async() { + use futures::executor; + let context = OfflineAudioContext::new(1, 1, 44100.); + let file = std::fs::File::open("samples/sample.wav").unwrap(); + let future = context.decode_audio_data(file); + let audio_buffer = executor::block_on(future).unwrap(); + + assert_eq!(audio_buffer.sample_rate(), 44100.); + assert_eq!(audio_buffer.length(), 142_187); + assert_eq!(audio_buffer.number_of_channels(), 2); + assert_float_eq!(audio_buffer.duration(), 3.224, abs_all <= 0.001); + + let left_start = &audio_buffer.get_channel_data(0)[0..100]; + let right_start = &audio_buffer.get_channel_data(1)[0..100]; + // assert distinct two channel data + assert!(left_start != right_start); + } + + // #[test] + // disabled: symphonia cannot handle empty WAV-files + #[allow(dead_code)] + fn 
test_decode_audio_data_empty() { + let context = OfflineAudioContext::new(1, 1, 44100.); + let file = std::fs::File::open("samples/empty_2c.wav").unwrap(); + let audio_buffer = context.decode_audio_data_sync(file).unwrap(); + assert_eq!(audio_buffer.length(), 0); + } + + #[test] + fn test_decode_audio_data_decoding_error() { + let context = OfflineAudioContext::new(1, 1, 44100.); + let file = std::fs::File::open("samples/corrupt.wav").unwrap(); + assert!(context.decode_audio_data_sync(file).is_err()); + } + + #[test] + fn test_create_buffer() { + let number_of_channels = 3; + let length = 2000; + let sample_rate = 96_000.; + + let context = OfflineAudioContext::new(1, 1, 44100.); + let buffer = context.create_buffer(number_of_channels, length, sample_rate); + + assert_eq!(buffer.number_of_channels(), 3); + assert_eq!(buffer.length(), 2000); + assert_float_eq!(buffer.sample_rate(), 96000., abs_all <= 0.); + } +} diff --git a/src/context/mod.rs b/src/context/mod.rs index 621b632b..4bba9c69 100644 --- a/src/context/mod.rs +++ b/src/context/mod.rs @@ -146,8 +146,6 @@ mod tests { use super::*; use crate::node::AudioNode; - use float_eq::assert_float_eq; - fn require_send_sync_static<T: Send + Sync + 'static>(_: T) {} #[test] @@ -156,7 +154,7 @@ mod tests { let registration = context.mock_registration(); // we want to be able to ship AudioNodes to another thread, so the Registration should be - // Send Sync and 'static + // Send, Sync and 'static require_send_sync_static(registration); } @@ -167,62 +165,17 @@ mod tests { } #[test] - fn test_sample_rate_length() { - let context = OfflineAudioContext::new(1, 48000, 96000.); - assert_float_eq!(context.sample_rate(), 96000., abs_all <= 0.); - assert_eq!(context.length(), 48000); - } - - #[test] - fn test_decode_audio_data() { - let context = OfflineAudioContext::new(1, 1, 44100.); - let file = std::fs::File::open("samples/sample.wav").unwrap(); - let audio_buffer = context.decode_audio_data_sync(file).unwrap(); - - 
assert_eq!(audio_buffer.sample_rate(), 44100.); - assert_eq!(audio_buffer.length(), 142_187); - assert_eq!(audio_buffer.number_of_channels(), 2); - assert_float_eq!(audio_buffer.duration(), 3.224, abs_all <= 0.001); - - let left_start = &audio_buffer.get_channel_data(0)[0..100]; - let right_start = &audio_buffer.get_channel_data(1)[0..100]; - // assert distinct two channel data - assert!(left_start != right_start); - } - - // #[test] - // disabled: symphonia cannot handle empty WAV-files - #[allow(dead_code)] - fn test_decode_audio_data_empty() { - let context = OfflineAudioContext::new(1, 1, 44100.); - let file = std::fs::File::open("samples/empty_2c.wav").unwrap(); - let audio_buffer = context.decode_audio_data_sync(file).unwrap(); - assert_eq!(audio_buffer.length(), 0); - } - - #[test] - fn test_decode_audio_data_decoding_error() { - let context = OfflineAudioContext::new(1, 1, 44100.); - let file = std::fs::File::open("samples/corrupt.wav").unwrap(); - assert!(context.decode_audio_data_sync(file).is_err()); - } - - #[test] - fn test_create_buffer() { - let number_of_channels = 3; - let length = 2000; - let sample_rate = 96_000.; - - let context = OfflineAudioContext::new(1, 1, 44100.); - let buffer = context.create_buffer(number_of_channels, length, sample_rate); - - assert_eq!(buffer.number_of_channels(), 3); - assert_eq!(buffer.length(), 2000); - assert_float_eq!(buffer.sample_rate(), 96000., abs_all <= 0.); + fn test_online_audio_context_send_sync() { + let options = AudioContextOptions { + sink_id: "none".into(), + ..AudioContextOptions::default() + }; + let context = AudioContext::new(options); + require_send_sync_static(context); } #[test] - fn test_registration() { + fn test_context_equals() { let context = OfflineAudioContext::new(1, 48000, 96000.); let dest = context.destination(); assert!(dest.context() == context.base()); diff --git a/src/context/offline.rs b/src/context/offline.rs index 6661fe71..9925fe81 100644 --- a/src/context/offline.rs +++ 
b/src/context/offline.rs @@ -433,6 +433,13 @@ mod tests { use crate::node::AudioNode; use crate::node::AudioScheduledSourceNode; + #[test] + fn test_sample_rate_length() { + let context = OfflineAudioContext::new(1, 48000, 96000.); + assert_float_eq!(context.sample_rate(), 96000., abs_all <= 0.); + assert_eq!(context.length(), 48000); + } + #[test] fn render_empty_graph() { let mut context = OfflineAudioContext::new(2, 555, 44_100.);