diff --git a/src/host/alsa/mod.rs b/src/host/alsa/mod.rs index 90cc34a..e4ef012 100644 --- a/src/host/alsa/mod.rs +++ b/src/host/alsa/mod.rs @@ -346,7 +346,6 @@ impl Device { let buffer_size_range = SupportedBufferSize::Range { min: min_buffer_size as u32, max: max_buffer_size as u32, - requires_power_of_two: false, }; let mut output = Vec::with_capacity( diff --git a/src/host/asio/device.rs b/src/host/asio/device.rs index 05aa723..1a1a1aa 100644 --- a/src/host/asio/device.rs +++ b/src/host/asio/device.rs @@ -135,7 +135,6 @@ impl Device { let buffer_size = SupportedBufferSize::Range { min: min as u32, max: max as u32, - requires_power_of_two: false, }; // Map th ASIO sample type to a CPAL sample type let data_type = self.driver.input_data_type().map_err(default_config_err)?; @@ -157,7 +156,6 @@ impl Device { let buffer_size = SupportedBufferSize::Range { min: min as u32, max: max as u32, - requires_power_of_two: false, }; let data_type = self.driver.output_data_type().map_err(default_config_err)?; let sample_format = convert_data_type(&data_type) diff --git a/src/host/coreaudio/mod.rs b/src/host/coreaudio/mod.rs index 640370b..b674b40 100644 --- a/src/host/coreaudio/mod.rs +++ b/src/host/coreaudio/mod.rs @@ -925,6 +925,5 @@ fn get_io_buffer_frame_size_range( Ok(SupportedBufferSize::Range { min: buffer_size_range.mMinimum as u32, max: buffer_size_range.mMaximum as u32, - requires_power_of_two: false, }) } diff --git a/src/host/emscripten/mod.rs b/src/host/emscripten/mod.rs index de5ac0a..d9e9de8 100644 --- a/src/host/emscripten/mod.rs +++ b/src/host/emscripten/mod.rs @@ -8,10 +8,10 @@ use stdweb::web::TypedArray; use stdweb::Reference; use crate::{ - BuildStreamError, Data, DefaultStreamConfigError, DeviceNameError, DevicesError, + BufferSize, BuildStreamError, Data, DefaultStreamConfigError, DeviceNameError, DevicesError, InputCallbackInfo, OutputCallbackInfo, PauseStreamError, PlayStreamError, SampleFormat, - StreamConfig, StreamError, SupportedStreamConfig, 
SupportedStreamConfigRange, - SupportedStreamConfigsError, + SampleRate, StreamConfig, StreamError, SupportedBufferSize, SupportedStreamConfig, + SupportedStreamConfigRange, SupportedStreamConfigsError, }; use traits::{DeviceTrait, HostTrait, StreamTrait}; @@ -41,6 +41,16 @@ pub struct StreamId(usize); pub type SupportedInputConfigs = ::std::vec::IntoIter; pub type SupportedOutputConfigs = ::std::vec::IntoIter; +const MIN_CHANNELS: u16 = 1; +const MAX_CHANNELS: u16 = 32; +const MIN_SAMPLE_RATE: SampleRate = SampleRate(8_000); +const MAX_SAMPLE_RATE: SampleRate = SampleRate(96_000); +const DEFAULT_SAMPLE_RATE: SampleRate = SampleRate(44_100); +const MIN_BUFFER_SIZE: u32 = 1; +const MAX_BUFFER_SIZE: u32 = std::u32::MAX; +const DEFAULT_BUFFER_SIZE: usize = 2048; +const SUPPORTED_SAMPLE_FORMAT: SampleFormat = SampleFormat::F32; + impl Host { pub fn new() -> Result { stdweb::initialize(); @@ -71,21 +81,20 @@ impl Device { fn supported_output_configs( &self, ) -> Result { - // TODO: right now cpal's API doesn't allow flexibility here - // "44100" and "2" (channels) have also been hard-coded in the rest of the code ; if - // this ever becomes more flexible, don't forget to change that - // According to https://developer.mozilla.org/en-US/docs/Web/API/BaseAudioContext/createBuffer - // browsers must support 1 to 32 channels at leats and 8,000 Hz to 96,000 Hz. - // - // UPDATE: We can do this now. Might be best to use `crate::COMMON_SAMPLE_RATES` and - // filter out those that lay outside the range specified above. 
- Ok(vec![SupportedStreamConfigRange { - channels: 2, - min_sample_rate: ::SampleRate(44100), - max_sample_rate: ::SampleRate(44100), - sample_format: ::SampleFormat::F32, - }] - .into_iter()) + let buffer_size = SupportedBufferSize::Range { + min: MIN_BUFFER_SIZE, + max: MAX_BUFFER_SIZE, + }; + let configs: Vec<_> = (MIN_CHANNELS..=MAX_CHANNELS) + .map(|channels| SupportedStreamConfigRange { + channels, + min_sample_rate: MIN_SAMPLE_RATE, + max_sample_rate: MAX_SAMPLE_RATE, + buffer_size: buffer_size.clone(), + sample_format: SUPPORTED_SAMPLE_FORMAT, + }) + .collect(); + Ok(configs.into_iter()) } fn default_input_config(&self) -> Result { @@ -93,12 +102,15 @@ impl Device { } fn default_output_config(&self) -> Result { - // TODO: because it is hard coded, see supported_output_configs. - Ok(SupportedStreamConfig { - channels: 2, - sample_rate: ::SampleRate(44100), - sample_format: ::SampleFormat::F32, - }) + const EXPECT: &str = "expected at least one valid webaudio stream config"; + let mut configs: Vec<_> = self.supported_output_configs().expect(EXPECT).collect(); + configs.sort_by(|a, b| a.cmp_default_heuristics(b)); + let config = configs + .into_iter() + .next() + .expect(EXPECT) + .with_sample_rate(DEFAULT_SAMPLE_RATE); + Ok(config) } } @@ -169,7 +181,7 @@ impl DeviceTrait for Device { fn build_output_stream_raw( &self, - _config: &StreamConfig, + config: &StreamConfig, sample_format: SampleFormat, data_callback: D, error_callback: E, @@ -178,11 +190,20 @@ impl DeviceTrait for Device { D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static, E: FnMut(StreamError) + Send + 'static, { - assert_eq!( - sample_format, - SampleFormat::F32, - "emscripten backend currently only supports `f32` data", - ); + if !valid_config(config, sample_format) { + return Err(BuildStreamError::StreamConfigNotSupported); + } + + let buffer_size_frames = match config.buffer_size { + BufferSize::Fixed(v) => { + if v == 0 { + return Err(BuildStreamError::StreamConfigNotSupported); + } 
else { + v as usize + } + } + BufferSize::Default => DEFAULT_BUFFER_SIZE, + }; // Create the stream. let audio_ctxt_ref = js!(return new AudioContext()).into_reference().unwrap(); @@ -199,7 +220,14 @@ impl DeviceTrait for Device { // See also: The call to `set_timeout` at the end of the `audio_callback_fn` which creates // the loop. set_timeout( - || audio_callback_fn::(user_data_ptr as *mut c_void), + || { + audio_callback_fn::( + user_data_ptr as *mut c_void, + config, + sample_format, + buffer_size_frames, + ) + }, 10, ); @@ -223,12 +251,18 @@ impl StreamTrait for Stream { // The first argument of the callback function (a `void*`) is a casted pointer to `self` // and to the `callback` parameter that was passed to `run`. -fn audio_callback_fn(user_data_ptr: *mut c_void) -where +fn audio_callback_fn( + user_data_ptr: *mut c_void, + config: &StreamConfig, + sample_format: SampleFormat, + buffer_size_frames: usize, +) where D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static, E: FnMut(StreamError) + Send + 'static, { - const SAMPLE_RATE: usize = 44100; + let num_channels = config.channels as usize; + let sample_rate = config.sample_rate.0; + let buffer_size_samples = buffer_size_frames * num_channels; unsafe { let user_data_ptr2 = user_data_ptr as *mut (&Stream, D, E); @@ -237,12 +271,11 @@ where let audio_ctxt = &stream.audio_ctxt_ref; // TODO: We should be re-using a buffer. - let mut temporary_buffer = vec![0.0; SAMPLE_RATE * 2 / 3]; + let mut temporary_buffer = vec![0f32; buffer_size_samples]; { let len = temporary_buffer.len(); let data = temporary_buffer.as_mut_ptr() as *mut (); - let sample_format = SampleFormat::F32; let mut data = Data::from_parts(data, len, sample_format); let now_secs: f64 = js!(@{audio_ctxt}.getOutputTimestamp().currentTime) @@ -253,7 +286,7 @@ where // we estimate based on buffer size instead. Probably should use this, but it's only // supported by firefox (2020-04-28). 
// let latency_secs: f64 = js!(@{audio_ctxt}.outputLatency).try_into().unwrap(); - let buffer_duration = frames_to_duration(len, SAMPLE_RATE); + let buffer_duration = frames_to_duration(len, sample_rate as usize); let playback = callback .add(buffer_duration) .expect("`playback` occurs beyond representation supported by `StreamInstant`"); @@ -273,19 +306,19 @@ where typed_array }; - let num_channels = 2u32; // TODO: correct value debug_assert_eq!(temporary_buffer.len() % num_channels as usize, 0); js!( var src_buffer = new Float32Array(@{typed_array}.buffer); var context = @{audio_ctxt}; - var buf_len = @{temporary_buffer.len() as u32}; - var num_channels = @{num_channels}; + var buffer_size_frames = @{buffer_size_frames as u32}; + var num_channels = @{num_channels as u32}; + var sample_rate = @{sample_rate}; - var buffer = context.createBuffer(num_channels, buf_len / num_channels, 44100); + var buffer = context.createBuffer(num_channels, buffer_size_frames, sample_rate); for (var channel = 0; channel < num_channels; ++channel) { var buffer_content = buffer.getChannelData(channel); - for (var i = 0; i < buf_len / num_channels; ++i) { + for (var i = 0; i < buffer_size_frames; ++i) { buffer_content[i] = src_buffer[i * num_channels + channel]; } } @@ -299,7 +332,10 @@ where // TODO: handle latency better ; right now we just use setInterval with the amount of sound // data that is in each buffer ; this is obviously bad, and also the schedule is too tight // and there may be underflows - set_timeout(|| audio_callback_fn::(user_data_ptr), 330); + set_timeout( + || audio_callback_fn::(user_data_ptr, config, sample_format, buffer_size_frames), + 330, + ); } } @@ -348,6 +384,15 @@ fn is_webaudio_available() -> bool { .unwrap() } +// Whether or not the given stream configuration is valid for building a stream. 
+fn valid_config(conf: &StreamConfig, sample_format: SampleFormat) -> bool { + conf.channels <= MAX_CHANNELS + && conf.channels >= MIN_CHANNELS + && conf.sample_rate <= MAX_SAMPLE_RATE + && conf.sample_rate >= MIN_SAMPLE_RATE + && sample_format == SUPPORTED_SAMPLE_FORMAT +} + // Convert the given duration in frames at the given sample rate to a `std::time::Duration`. fn frames_to_duration(frames: usize, rate: usize) -> std::time::Duration { let secsf = frames as f64 / rate as f64; diff --git a/src/host/wasapi/device.rs b/src/host/wasapi/device.rs index 8a176b8..6173b82 100644 --- a/src/host/wasapi/device.rs +++ b/src/host/wasapi/device.rs @@ -1,7 +1,7 @@ use crate::{ - BackendSpecificError, BufferSize, Data, DefaultStreamConfigError, DeviceNameError, - DevicesError, InputCallbackInfo, OutputCallbackInfo, SampleFormat, SampleRate, - StreamConfig, SupportedBufferSize, SupportedStreamConfig, SupportedStreamConfigRange, + BackendSpecificError, BufferSize, Data, DefaultStreamConfigError, DeviceNameError, + DevicesError, InputCallbackInfo, OutputCallbackInfo, SampleFormat, SampleRate, StreamConfig, + SupportedBufferSize, SupportedStreamConfig, SupportedStreamConfigRange, SupportedStreamConfigsError, COMMON_SAMPLE_RATES, }; use std; @@ -652,8 +652,8 @@ impl Device { // TO DO: We need IAudioClient3 to get buffersize ranges first // Otherwise the supported ranges are unknown. In the mean time // the smallest buffersize is selected and used. - return Err(BuildStreamError::StreamConfigNotSupported) - }, + return Err(BuildStreamError::StreamConfigNotSupported); + } BufferSize::Default => (), }; @@ -814,8 +814,8 @@ impl Device { // TO DO: We need IAudioClient3 to get buffersize ranges first // Otherwise the supported ranges are unknown. In the mean time // the smallest buffersize is selected and used. 
- return Err(BuildStreamError::StreamConfigNotSupported) - }, + return Err(BuildStreamError::StreamConfigNotSupported); + } BufferSize::Default => (), }; diff --git a/src/host/webaudio/mod.rs b/src/host/webaudio/mod.rs index 94818a9..f58f18c 100644 --- a/src/host/webaudio/mod.rs +++ b/src/host/webaudio/mod.rs @@ -7,10 +7,10 @@ use self::wasm_bindgen::prelude::*; use self::wasm_bindgen::JsCast; use self::web_sys::{AudioContext, AudioContextOptions}; use crate::{ - BackendSpecificError, BuildStreamError, Data, DefaultStreamConfigError, DeviceNameError, - DevicesError, InputCallbackInfo, OutputCallbackInfo, PauseStreamError, PlayStreamError, - SampleFormat, SampleRate, StreamConfig, StreamError, SupportedStreamConfig, - SupportedStreamConfigRange, SupportedStreamConfigsError, + BackendSpecificError, BufferSize, BuildStreamError, Data, DefaultStreamConfigError, + DeviceNameError, DevicesError, InputCallbackInfo, OutputCallbackInfo, PauseStreamError, + PlayStreamError, SampleFormat, SampleRate, StreamConfig, StreamError, SupportedBufferSize, + SupportedStreamConfig, SupportedStreamConfigRange, SupportedStreamConfigsError, }; use std::ops::DerefMut; use std::sync::{Arc, Mutex, RwLock}; @@ -39,6 +39,9 @@ const MAX_CHANNELS: u16 = 32; const MIN_SAMPLE_RATE: SampleRate = SampleRate(8_000); const MAX_SAMPLE_RATE: SampleRate = SampleRate(96_000); const DEFAULT_SAMPLE_RATE: SampleRate = SampleRate(44_100); +const MIN_BUFFER_SIZE: u32 = 1; +const MAX_BUFFER_SIZE: u32 = std::u32::MAX; +const DEFAULT_BUFFER_SIZE: usize = 2048; const SUPPORTED_SAMPLE_FORMAT: SampleFormat = SampleFormat::F32; impl Host { @@ -93,11 +96,16 @@ impl Device { fn supported_output_configs( &self, ) -> Result { + let buffer_size = SupportedBufferSize::Range { + min: MIN_BUFFER_SIZE, + max: MAX_BUFFER_SIZE, + }; let configs: Vec<_> = (MIN_CHANNELS..=MAX_CHANNELS) .map(|channels| SupportedStreamConfigRange { channels, min_sample_rate: MIN_SAMPLE_RATE, max_sample_rate: MAX_SAMPLE_RATE, + buffer_size: 
buffer_size.clone(), sample_format: SUPPORTED_SAMPLE_FORMAT, }) .collect(); @@ -190,11 +198,20 @@ impl DeviceTrait for Device { } let n_channels = config.channels as usize; - // Use a buffer period of 1/3s for this early proof of concept. - // TODO: Change this to the requested buffer size when updating for the buffer size API. - let buffer_size_frames = (config.sample_rate.0 as f64 / 3.0).round() as usize; + + let buffer_size_frames = match config.buffer_size { + BufferSize::Fixed(v) => { + if v == 0 { + return Err(BuildStreamError::StreamConfigNotSupported); + } else { + v as usize + } + } + BufferSize::Default => DEFAULT_BUFFER_SIZE, + }; let buffer_size_samples = buffer_size_frames * n_channels; let buffer_time_step_secs = buffer_time_step_secs(buffer_size_frames, config.sample_rate); + let data_callback = Arc::new(Mutex::new(Box::new(data_callback))); // Create the WebAudio stream. diff --git a/src/lib.rs b/src/lib.rs index a5c3a76..0a3b749 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -200,13 +200,11 @@ pub struct StreamConfig { } /// Describes the minimum and maximum supported buffer size for the device -/// and if requested buffersize must be a power of 2 value. #[derive(Clone, Debug, Eq, PartialEq)] pub enum SupportedBufferSize { Range { min: FrameCount, max: FrameCount, - requires_power_of_two: bool, }, /// In the case that the platform provides no way of getting the default /// buffersize before starting a stream.