From 1dfdeace25b6d61a55714c76795b5b7f09e789f0 Mon Sep 17 00:00:00 2001
From: mitchmindtree
Date: Mon, 25 May 2020 13:19:52 +0200
Subject: [PATCH 1/2] Add implementation of supported stream configs for
 webaudio

The `supported_stream_configs` method now returns the range of
configurations that are required to be supported for
`BaseAudioContext.createBuffer()` as mentioned here:

https://developer.mozilla.org/en-US/docs/Web/API/BaseAudioContext/createBuffer

That is, a stream configuration is now considered valid if it has:

- 1 <= channel_count <= 32,
- 8 kHz <= sample_rate <= 96 kHz, and
- sample_format == f32.

Closes #410.
Closes #411.
---
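Note (commentary, not part of the commit message): with this change,
cross-platform code can discover the advertised ranges through cpal's public
device traits. A minimal sketch of hypothetical user code, assuming the
`cpal` crate at this revision:

    use cpal::traits::{DeviceTrait, HostTrait};

    fn main() {
        let host = cpal::default_host();
        let device = host
            .default_output_device()
            .expect("no output device available");
        // On the webaudio host, each range now reports 1..=32 channels,
        // 8 kHz..=96 kHz and the f32 sample format.
        for range in device
            .supported_output_configs()
            .expect("error while querying configs")
        {
            println!("supported: {:?}", range);
        }
        // The default config is picked by heuristic and pinned to 44 100 Hz.
        let config = device.default_output_config().expect("no default config");
        println!("default: {:?}", config);
    }

On the scheduling change below: `buffer_time_step_secs` is simply
`buffer_size_frames / sample_rate`, so at the default 44 100 Hz the 1/3 s
proof-of-concept buffer is 14 700 frames and each worker's timeout is
staggered by roughly 333 ms, rather than the previously hard-coded `333 / 2`.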
 src/host/webaudio/mod.rs | 123 +++++++++++++++++++++++----------------
 1 file changed, 74 insertions(+), 49 deletions(-)

diff --git a/src/host/webaudio/mod.rs b/src/host/webaudio/mod.rs
index 2d27dff..3153a90 100644
--- a/src/host/webaudio/mod.rs
+++ b/src/host/webaudio/mod.rs
@@ -7,15 +7,14 @@ use self::wasm_bindgen::prelude::*;
 use self::wasm_bindgen::JsCast;
 use self::web_sys::{AudioContext, AudioContextOptions};
 use crate::{
-    BuildStreamError, Data, DefaultStreamConfigError, DeviceNameError, DevicesError,
-    InputCallbackInfo, OutputCallbackInfo, PauseStreamError, PlayStreamError, SampleRate,
-    StreamConfig, StreamError, SupportedStreamConfig, SupportedStreamConfigRange,
-    SupportedStreamConfigsError,
+    BackendSpecificError, BuildStreamError, Data, DefaultStreamConfigError, DeviceNameError,
+    DevicesError, InputCallbackInfo, OutputCallbackInfo, PauseStreamError, PlayStreamError,
+    SampleFormat, SampleRate, StreamConfig, StreamError, SupportedStreamConfig,
+    SupportedStreamConfigRange, SupportedStreamConfigsError,
 };
 use std::ops::DerefMut;
 use std::sync::{Arc, Mutex, RwLock};
 use traits::{DeviceTrait, HostTrait, StreamTrait};
-use {BackendSpecificError, SampleFormat};
 
 /// Content is false if the iterator is empty.
 pub struct Devices(bool);
@@ -28,11 +27,20 @@ pub struct Host;
 pub struct Stream {
     ctx: Arc<AudioContext>,
     on_ended_closures: Vec<Arc<RwLock<Option<Closure<dyn FnMut()>>>>>,
+    config: StreamConfig,
+    buffer_size_frames: usize,
 }
 
 pub type SupportedInputConfigs = ::std::vec::IntoIter<SupportedStreamConfigRange>;
 pub type SupportedOutputConfigs = ::std::vec::IntoIter<SupportedStreamConfigRange>;
 
+const MIN_CHANNELS: u16 = 1;
+const MAX_CHANNELS: u16 = 32;
+const MIN_SAMPLE_RATE: SampleRate = SampleRate(8_000);
+const MAX_SAMPLE_RATE: SampleRate = SampleRate(96_000);
+const DEFAULT_SAMPLE_RATE: SampleRate = SampleRate(44_100);
+const SUPPORTED_SAMPLE_FORMAT: SampleFormat = SampleFormat::F32;
+
 impl Host {
     pub fn new() -> Result<Self, crate::HostUnavailable> {
         Ok(Host)
     }
 }
@@ -84,21 +92,15 @@ impl Device {
     fn supported_output_configs(
         &self,
     ) -> Result<SupportedOutputConfigs, SupportedStreamConfigsError> {
-        // TODO: right now cpal's API doesn't allow flexibility here
-        //       "44100" and "2" (channels) have also been hard-coded in the rest of the code ; if
-        //       this ever becomes more flexible, don't forget to change that
-        //       According to https://developer.mozilla.org/en-US/docs/Web/API/BaseAudioContext/createBuffer
-        //       browsers must support 1 to 32 channels at leats and 8,000 Hz to 96,000 Hz.
-        //
-        //       UPDATE: We can do this now. Might be best to use `crate::COMMON_SAMPLE_RATES` and
-        //       filter out those that lay outside the range specified above.
-        Ok(vec![SupportedStreamConfigRange {
-            channels: 2,
-            min_sample_rate: SampleRate(44100),
-            max_sample_rate: SampleRate(44100),
-            sample_format: ::SampleFormat::F32,
-        }]
-        .into_iter())
+        let configs: Vec<_> = (MIN_CHANNELS..=MAX_CHANNELS)
+            .map(|channels| SupportedStreamConfigRange {
+                channels,
+                min_sample_rate: MIN_SAMPLE_RATE,
+                max_sample_rate: MAX_SAMPLE_RATE,
+                sample_format: SUPPORTED_SAMPLE_FORMAT,
+            })
+            .collect();
+        Ok(configs.into_iter())
     }
 
     #[inline]
@@ -108,12 +110,15 @@
 
     #[inline]
     fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
-        // TODO: because it is hard coded, see supported_output_formats.
-        Ok(SupportedStreamConfig {
-            channels: 2,
-            sample_rate: ::SampleRate(44100),
-            sample_format: ::SampleFormat::F32,
-        })
+        const EXPECT: &str = "expected at least one valid webaudio stream config";
+        let mut configs: Vec<_> = self.supported_output_configs().expect(EXPECT).collect();
+        configs.sort_by(|a, b| a.cmp_default_heuristics(b));
+        let config = configs
+            .into_iter()
+            .next()
+            .expect(EXPECT)
+            .with_sample_rate(DEFAULT_SAMPLE_RATE);
+        Ok(config)
     }
 }
@@ -177,14 +182,16 @@
         D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
         E: FnMut(StreamError) + Send + 'static,
     {
-        assert_eq!(
-            sample_format,
-            SampleFormat::F32,
-            "WebAudio backend currently only supports `f32` data",
-        );
+        if !valid_config(config, sample_format) {
+            return Err(BuildStreamError::StreamConfigNotSupported);
+        }
+
+        let n_channels = config.channels as usize;
 
         // Use a buffer period of 1/3s for this early proof of concept.
-        let buffer_length = (config.sample_rate.0 as f64 / 3.0).round() as usize;
+        // TODO: Change this to the requested buffer size when updating for the buffer size API.
+        let buffer_size_frames = (config.sample_rate.0 as f64 / 3.0).round() as usize;
+        let buffer_size_samples = buffer_size_frames * n_channels;
+        let buffer_time_step_secs = buffer_time_step_secs(buffer_size_frames, config.sample_rate);
 
         let data_callback = Arc::new(Mutex::new(Box::new(data_callback)));
 
         // Create the WebAudio stream.
@@ -206,22 +213,23 @@
         // A cursor keeping track of the current time at which new frames should be scheduled.
         let time = Arc::new(RwLock::new(0f64));
 
-        // Create a set of closures / callbacks which will continuously fetch and schedule sample playback.
-        // Starting with two workers, eg a front and back buffer so that audio frames can be fetched in the background.
+        // Create a set of closures / callbacks which will continuously fetch and schedule sample
+        // playback. Starting with two workers, e.g. a front and back buffer so that audio frames
+        // can be fetched in the background.
         for _i in 0..2 {
             let data_callback_handle = data_callback.clone();
             let ctx_handle = ctx.clone();
             let time_handle = time.clone();
 
             // A set of temporary buffers to be used for intermediate sample transformation steps.
-            let mut temporary_buffer = vec![0f32; buffer_length * config.channels as usize];
-            let mut temporary_channel_buffer = vec![0f32; buffer_length];
+            let mut temporary_buffer = vec![0f32; buffer_size_samples];
+            let mut temporary_channel_buffer = vec![0f32; buffer_size_frames];
 
             // Create a webaudio buffer which will be reused to avoid allocations.
             let ctx_buffer = ctx
                 .create_buffer(
                     config.channels as u32,
-                    buffer_length as u32,
+                    buffer_size_frames as u32,
                     config.sample_rate.0 as f32,
                 )
                 .map_err(|err| -> BuildStreamError {
@@ -235,9 +243,6 @@
                 Arc::new(RwLock::new(None));
             let on_ended_closure_handle = on_ended_closure.clone();
 
-            let n_channels = config.channels as usize;
-            let sample_rate = config.sample_rate.0 as f64;
-
             on_ended_closure
                 .write()
                 .unwrap()
@@ -247,11 +252,13 @@
                        let time_at_start_of_buffer = time_handle
                            .read()
                            .expect("Unable to get a read lock on the time cursor");
-                        // Synchronise first buffer as necessary (eg. keep the time value referenced to the context clock).
+                        // Synchronise first buffer as necessary (e.g. keep the time value
+                        // referenced to the context clock).
                        if *time_at_start_of_buffer > 0.001 {
                            *time_at_start_of_buffer
                        } else {
-                            // 25ms of time to fetch the first sample data, increase to avoid initial underruns.
+                            // 25ms of time to fetch the first sample data, increase to avoid
+                            // initial underruns.
                            now + 0.025
                        }
                    };
@@ -260,7 +267,6 @@
                    {
                        let len = temporary_buffer.len();
                        let data = temporary_buffer.as_mut_ptr() as *mut ();
-                        let sample_format = SampleFormat::F32;
                        let mut data = unsafe { Data::from_parts(data, len, sample_format) };
                        let mut data_callback = data_callback_handle.lock().unwrap();
                        let callback = crate::StreamInstant::from_secs_f64(now);
@@ -274,7 +280,7 @@
                    // We do not reference the audio context buffer directly eg getChannelData.
                    // As wasm-bindgen only gives us a copy, not a direct reference.
                    for channel in 0..n_channels {
-                        for i in 0..buffer_length {
+                        for i in 0..buffer_size_frames {
                            temporary_channel_buffer[i] =
                                temporary_buffer[n_channels * i + channel];
                        }
@@ -283,7 +289,8 @@
                            .expect("Unable to write sample data into the audio context buffer");
                    }
 
-                    // Create an AudioBufferSourceNode, scheduled it to playback the reused buffer in the future.
+                    // Create an AudioBufferSourceNode, schedule it to play back the reused buffer
+                    // in the future.
                    let source = ctx_handle
                        .create_buffer_source()
                        .expect("Unable to create a webaudio buffer source");
@@ -308,8 +315,7 @@
                        .expect("Unable to start the webaudio buffer source");
 
                    // Keep track of when the next buffer worth of samples should be played.
-                    *time_handle.write().unwrap() =
-                        time_at_start_of_buffer + (buffer_length as f64 / sample_rate);
+                    *time_handle.write().unwrap() = time_at_start_of_buffer + buffer_time_step_secs;
                }) as Box<dyn FnMut()>));
 
            on_ended_closures.push(on_ended_closure);
@@ -318,6 +324,8 @@
        Ok(Stream {
            ctx,
            on_ended_closures,
+            config: config.clone(),
+            buffer_size_frames,
        })
    }
 }
@@ -327,8 +335,12 @@ impl StreamTrait for Stream {
        let window = web_sys::window().unwrap();
        match self.ctx.resume() {
            Ok(_) => {
-                // Begin webaudio playback, initially scheduling the closures to fire on a timeout event.
+                // Begin webaudio playback, initially scheduling the closures to fire on a timeout
+                // event.
                let mut offset_ms = 10;
+                let time_step_secs =
+                    buffer_time_step_secs(self.buffer_size_frames, self.config.sample_rate);
+                let time_step_ms = (time_step_secs * 1_000.0) as i32;
                for on_ended_closure in self.on_ended_closures.iter() {
                    window
                        .set_timeout_with_callback_and_timeout_and_arguments_0(
@@ -342,7 +354,7 @@ impl StreamTrait for Stream {
                            offset_ms,
                        )
                        .unwrap();
-                    offset_ms += 333 / 2;
+                    offset_ms += time_step_ms;
                }
                Ok(())
            }
@@ -414,3 +426,16 @@ fn is_webaudio_available() -> bool {
         false
     }
 }
+
+// Whether or not the given stream configuration is valid for building a stream.
+fn valid_config(conf: &StreamConfig, sample_format: SampleFormat) -> bool {
+    conf.channels <= MAX_CHANNELS
+        && conf.channels >= MIN_CHANNELS
+        && conf.sample_rate <= MAX_SAMPLE_RATE
+        && conf.sample_rate >= MIN_SAMPLE_RATE
+        && sample_format == SUPPORTED_SAMPLE_FORMAT
+}
+
+fn buffer_time_step_secs(buffer_size_frames: usize, sample_rate: SampleRate) -> f64 {
+    buffer_size_frames as f64 / sample_rate.0 as f64
+}

From f03fd69b654d554ca9a7d4cd4b680354ca058af6 Mon Sep 17 00:00:00 2001
From: mitchmindtree
Date: Mon, 25 May 2020 19:22:23 +0200
Subject: [PATCH 2/2] [webaudio] Return Err instead of panicking on input
 device requests

We have yet to implement input stream support for CPAL's webaudio host.
Instead of panicking, we now return an error, `None`, or an empty iterator
as appropriate, so that users can write well-behaved cross-platform
applications that report the missing capability rather than crash.
---
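Note (commentary, not part of the commit message): a sketch of how
hypothetical cross-platform user code might now handle the missing input
support without crashing, assuming the `cpal` crate at this revision:

    use cpal::traits::{DeviceTrait, HostTrait};

    fn main() {
        let host = cpal::default_host();
        // On the webaudio host this is now `None` rather than a panic.
        match host.default_input_device() {
            None => eprintln!("input is not (yet) supported on this host"),
            Some(device) => match device.default_input_config() {
                Ok(config) => println!("default input config: {:?}", config),
                Err(err) => eprintln!("no default input config: {}", err),
            },
        }
    }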
 src/host/webaudio/mod.rs | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/src/host/webaudio/mod.rs b/src/host/webaudio/mod.rs
index 3153a90..94818a9 100644
--- a/src/host/webaudio/mod.rs
+++ b/src/host/webaudio/mod.rs
@@ -85,7 +92,8 @@ impl Device {
     fn supported_input_configs(
         &self,
     ) -> Result<SupportedInputConfigs, SupportedStreamConfigsError> {
-        unimplemented!();
+        // TODO
+        Ok(Vec::new().into_iter())
     }
 
     #[inline]
@@ -105,7 +106,8 @@ impl Device {
 
     #[inline]
     fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
-        unimplemented!();
+        // TODO
+        Err(DefaultStreamConfigError::StreamTypeNotSupported)
     }
 
     #[inline]
@@ -167,7 +169,8 @@ impl DeviceTrait for Device {
         D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
         E: FnMut(StreamError) + Send + 'static,
     {
-        unimplemented!()
+        // TODO
+        Err(BuildStreamError::StreamConfigNotSupported)
     }
 
     /// Create an output stream.
@@ -406,7 +409,8 @@ impl Iterator for Devices {
 
 #[inline]
 fn default_input_device() -> Option<Device> {
-    unimplemented!();
+    // TODO
+    None
 }
 
 #[inline]