From 6f43fc2b6083890d1a167036c704dab77df834d4 Mon Sep 17 00:00:00 2001
From: Damian Peckett
Date: Tue, 28 Jan 2020 22:00:53 +0100
Subject: [PATCH 1/3] Add a wasm-bindgen based generic Web Audio backend.

---
 .github/workflows/cpal.yml |  19 ++
 Cargo.toml                 |   5 +
 src/host/mod.rs            |   2 +
 src/host/webaudio/mod.rs   | 401 +++++++++++++++++++++++++++++++++++++
 src/platform/mod.rs        |  21 +-
 5 files changed, 447 insertions(+), 1 deletion(-)
 create mode 100644 src/host/webaudio/mod.rs

diff --git a/.github/workflows/cpal.yml b/.github/workflows/cpal.yml
index 539cc27..b2f8e85 100644
--- a/.github/workflows/cpal.yml
+++ b/.github/workflows/cpal.yml
@@ -108,6 +108,25 @@ jobs:
     - name: Build beep example
       run: cargo build --example beep --target ${{ matrix.target }}

+  wasm32-bindgen-test:
+
+    strategy:
+      matrix:
+        target: [wasm32-unknown-unknown]
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v1
+    - name: Install stable
+      uses: actions-rs/toolchain@v1
+      with:
+        profile: minimal
+        toolchain: stable
+        target: ${{ matrix.target }}
+    - name: Build beep example
+      run: cargo build --example beep --target ${{ matrix.target }} --features=wasm-bindgen
+
   windows-test:
     strategy:
       matrix:
diff --git a/Cargo.toml b/Cargo.toml
index 65db108..56140d8 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -38,3 +38,8 @@ mach = "0.3" # For access to mach_timebase type.

 [target.'cfg(target_os = "emscripten")'.dependencies]
 stdweb = { version = "0.1.3", default-features = false }
+
+[target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dependencies]
+wasm-bindgen = { version = "0.2.58", optional = true }
+js-sys = { version = "0.3.35" }
+web-sys = { version = "0.3.35", features = [ "AudioContext", "AudioContextOptions", "AudioBuffer", "AudioBufferSourceNode", "AudioNode", "AudioDestinationNode", "Window", "AudioContextState"] }
diff --git a/src/host/mod.rs b/src/host/mod.rs
index b64fa12..9c70253 100644
--- a/src/host/mod.rs
+++ b/src/host/mod.rs
@@ -9,3 +9,5 @@ pub(crate) mod emscripten;
 pub(crate) mod null;
 #[cfg(windows)]
 pub(crate) mod wasapi;
+#[cfg(all(target_arch = "wasm32", feature = "wasm-bindgen"))]
+pub(crate) mod webaudio;
diff --git a/src/host/webaudio/mod.rs b/src/host/webaudio/mod.rs
new file mode 100644
index 0000000..12a78ca
--- /dev/null
+++ b/src/host/webaudio/mod.rs
@@ -0,0 +1,401 @@
+extern crate js_sys;
+extern crate wasm_bindgen;
+extern crate web_sys;
+
+use self::js_sys::eval;
+use self::wasm_bindgen::prelude::*;
+use self::wasm_bindgen::JsCast;
+use self::web_sys::{AudioContext, AudioContextOptions};
+use crate::{
+    BuildStreamError, Data, DefaultFormatError, DeviceNameError, DevicesError, Format,
+    PauseStreamError, PlayStreamError, StreamError, SupportedFormat, SupportedFormatsError,
+};
+use std::ops::DerefMut;
+use std::sync::{Arc, Mutex, RwLock};
+use traits::{DeviceTrait, HostTrait, StreamTrait};
+use {BackendSpecificError, SampleFormat};
+
+/// Content is false if the iterator is empty.
+pub struct Devices(bool);
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct Device;
+
+pub struct Host;
+
+pub struct Stream {
+    ctx: Arc<AudioContext>,
+    on_ended_closures: Vec<Arc<RwLock<Option<Closure<dyn FnMut()>>>>>,
+}
+
+pub type SupportedInputFormats = ::std::vec::IntoIter<SupportedFormat>;
+pub type SupportedOutputFormats = ::std::vec::IntoIter<SupportedFormat>;
+
+impl Host {
+    pub fn new() -> Result<Self, crate::HostUnavailable> {
+        Ok(Host)
+    }
+}
+
+impl HostTrait for Host {
+    type Devices = Devices;
+    type Device = Device;
+
+    fn is_available() -> bool {
+        // Assume this host is always available on webaudio.
+        true
+    }
+
+    fn devices(&self) -> Result<Self::Devices, DevicesError> {
+        Devices::new()
+    }
+
+    fn default_input_device(&self) -> Option<Device> {
+        default_input_device()
+    }
+
+    fn default_output_device(&self) -> Option<Device> {
+        default_output_device()
+    }
+}
+
+impl Devices {
+    fn new() -> Result<Self, DevicesError> {
+        Ok(Self::default())
+    }
+}
+
+impl Device {
+    #[inline]
+    fn name(&self) -> Result<String, DeviceNameError> {
+        Ok("Default Device".to_owned())
+    }
+
+    #[inline]
+    fn supported_input_formats(&self) -> Result<SupportedInputFormats, SupportedFormatsError> {
+        unimplemented!();
+    }
+
+    #[inline]
+    fn supported_output_formats(&self) -> Result<SupportedOutputFormats, SupportedFormatsError> {
+        // TODO: right now cpal's API doesn't allow flexibility here;
+        // "44100" and "2" (channels) have also been hard-coded in the rest of the code. If
+        // this ever becomes more flexible, don't forget to change that.
+        // According to https://developer.mozilla.org/en-US/docs/Web/API/BaseAudioContext/createBuffer
+        // browsers must support at least 1 to 32 channels and sample rates from 8,000 Hz to 96,000 Hz.
+        //
+        // UPDATE: We can do this now. Might be best to use `crate::COMMON_SAMPLE_RATES` and
+        // filter out those that lie outside the range specified above.
+        Ok(vec![SupportedFormat {
+            channels: 2,
+            min_sample_rate: ::SampleRate(44100),
+            max_sample_rate: ::SampleRate(44100),
+            data_type: ::SampleFormat::F32,
+        }]
+        .into_iter())
+    }
+
+    #[inline]
+    fn default_input_format(&self) -> Result<Format, DefaultFormatError> {
+        unimplemented!();
+    }
+
+    #[inline]
+    fn default_output_format(&self) -> Result<Format, DefaultFormatError> {
+        // TODO: this is hard-coded; see `supported_output_formats`.
+        Ok(Format {
+            channels: 2,
+            sample_rate: ::SampleRate(44100),
+            data_type: ::SampleFormat::F32,
+        })
+    }
+}
+
+impl DeviceTrait for Device {
+    type SupportedInputFormats = SupportedInputFormats;
+    type SupportedOutputFormats = SupportedOutputFormats;
+    type Stream = Stream;
+
+    #[inline]
+    fn name(&self) -> Result<String, DeviceNameError> {
+        Device::name(self)
+    }
+
+    #[inline]
+    fn supported_input_formats(
+        &self,
+    ) -> Result<Self::SupportedInputFormats, SupportedFormatsError> {
+        Device::supported_input_formats(self)
+    }
+
+    #[inline]
+    fn supported_output_formats(
+        &self,
+    ) -> Result<Self::SupportedOutputFormats, SupportedFormatsError> {
+        Device::supported_output_formats(self)
+    }
+
+    #[inline]
+    fn default_input_format(&self) -> Result<Format, DefaultFormatError> {
+        Device::default_input_format(self)
+    }
+
+    #[inline]
+    fn default_output_format(&self) -> Result<Format, DefaultFormatError> {
+        Device::default_output_format(self)
+    }
+
+    fn build_input_stream_raw<D, E>(
+        &self,
+        _format: &Format,
+        _data_callback: D,
+        _error_callback: E,
+    ) -> Result<Self::Stream, BuildStreamError>
+    where
+        D: FnMut(&Data) + Send + 'static,
+        E: FnMut(StreamError) + Send + 'static,
+    {
+        unimplemented!()
+    }
+
+    /// Create an output stream.
+    fn build_output_stream_raw<D, E>(
+        &self,
+        format: &Format,
+        data_callback: D,
+        _error_callback: E,
+    ) -> Result<Self::Stream, BuildStreamError>
+    where
+        D: FnMut(&mut Data) + Send + 'static,
+        E: FnMut(StreamError) + Send + 'static,
+    {
+        assert_eq!(
+            format.data_type,
+            SampleFormat::F32,
+            "WebAudio backend currently only supports `f32` data",
+        );
+
+        // Use a buffer period of 1/3 s for this early proof of concept.
+        let buffer_length = (format.sample_rate.0 as f64 / 3.0).round() as usize;
+        let data_callback = Arc::new(Mutex::new(Box::new(data_callback)));
+
+        // Create the WebAudio stream.
+        let mut stream_opts = AudioContextOptions::new();
+        stream_opts.sample_rate(format.sample_rate.0 as f32);
+        let ctx = Arc::new(
+            AudioContext::new_with_context_options(&stream_opts).map_err(
+                |err| -> BuildStreamError {
+                    let description = format!("{:?}", err);
+                    let err = BackendSpecificError { description };
+                    err.into()
+                },
+            )?,
+        );
+
+        // A container for managing the lifecycle of the audio callbacks.
+        let mut on_ended_closures: Vec<Arc<RwLock<Option<Closure<dyn FnMut()>>>>> = Vec::new();
+
+        // A cursor keeping track of the current time at which new frames should be scheduled.
+        let time = Arc::new(RwLock::new(0f64));
+
+        // Create a set of closures / callbacks which will continuously fetch and schedule sample playback.
+        // Starting with two workers, e.g. a front and back buffer, so that audio frames can be fetched in the background.
+        for _i in 0..2 {
+            let format = format.clone();
+            let data_callback_handle = data_callback.clone();
+            let ctx_handle = ctx.clone();
+            let time_handle = time.clone();
+
+            // A set of temporary buffers to be used for intermediate sample transformation steps.
+            let mut temporary_buffer = vec![0f32; buffer_length * format.channels as usize];
+            let mut temporary_channel_buffer = vec![0f32; buffer_length];
+
+            // Create a webaudio buffer which will be reused to avoid allocations.
+            let ctx_buffer = ctx
+                .create_buffer(
+                    format.channels as u32,
+                    buffer_length as u32,
+                    format.sample_rate.0 as f32,
+                )
+                .map_err(|err| -> BuildStreamError {
+                    let description = format!("{:?}", err);
+                    let err = BackendSpecificError { description };
+                    err.into()
+                })?;
+
+            // A self-reference to this closure for passing to future audio event calls.
+            let on_ended_closure: Arc<RwLock<Option<Closure<dyn FnMut()>>>> =
+                Arc::new(RwLock::new(None));
+            let on_ended_closure_handle = on_ended_closure.clone();
+
+            on_ended_closure
+                .write()
+                .unwrap()
+                .replace(Closure::wrap(Box::new(move || {
+                    let time_at_start_of_buffer = {
+                        let time_at_start_of_buffer = time_handle
+                            .read()
+                            .expect("Unable to get a read lock on the time cursor");
+                        // Synchronise the first buffer as necessary (e.g. keep the time value referenced to the context clock).
+                        if *time_at_start_of_buffer > 0.001 {
+                            *time_at_start_of_buffer
+                        } else {
+                            // 25ms of time to fetch the first sample data, increase to avoid initial underruns.
+                            ctx_handle.current_time() + 0.025
+                        }
+                    };
+
+                    // Populate the sample data into an interleaved temporary buffer.
+                    {
+                        let len = temporary_buffer.len();
+                        let data = temporary_buffer.as_mut_ptr() as *mut ();
+                        let sample_format = SampleFormat::F32;
+                        let mut data = unsafe { Data::from_parts(data, len, sample_format) };
+                        let mut data_callback = data_callback_handle.lock().unwrap();
+                        (data_callback.deref_mut())(&mut data);
+                    }
+
+                    // Deinterleave the sample data and copy it into the audio context buffer.
+                    // We do not reference the audio context buffer directly (e.g. via getChannelData),
+                    // as wasm-bindgen only gives us a copy, not a direct reference.
+                    for channel in 0..(format.channels as usize) {
+                        for i in 0..buffer_length {
+                            temporary_channel_buffer[i] =
+                                temporary_buffer[(format.channels as usize) * i + channel];
+                        }
+                        ctx_buffer
+                            .copy_to_channel(&mut temporary_channel_buffer, channel as i32)
+                            .expect("Unable to write sample data into the audio context buffer");
+                    }
+
+                    // Create an AudioBufferSourceNode and schedule it to play back the reused buffer in the future.
+                    let source = ctx_handle
+                        .create_buffer_source()
+                        .expect("Unable to create a webaudio buffer source");
+                    source.set_buffer(Some(&ctx_buffer));
+                    source
+                        .connect_with_audio_node(&ctx_handle.destination())
+                        .expect(
+                            "Unable to connect the web audio buffer source to the context destination",
+                        );
+                    source.set_onended(Some(
+                        on_ended_closure_handle
+                            .read()
+                            .unwrap()
+                            .as_ref()
+                            .unwrap()
+                            .as_ref()
+                            .unchecked_ref(),
+                    ));
+
+                    source
+                        .start_with_when(time_at_start_of_buffer)
+                        .expect("Unable to start the webaudio buffer source");
+
+                    // Keep track of when the next buffer worth of samples should be played.
+                    *time_handle.write().unwrap() = time_at_start_of_buffer
+                        + (buffer_length as f64 / format.sample_rate.0 as f64);
+                }) as Box<dyn FnMut()>));
+
+            on_ended_closures.push(on_ended_closure);
+        }
+
+        Ok(Stream {
+            ctx,
+            on_ended_closures,
+        })
+    }
+}
+
+impl StreamTrait for Stream {
+    fn play(&self) -> Result<(), PlayStreamError> {
+        let window = web_sys::window().unwrap();
+        match self.ctx.resume() {
+            Ok(_) => {
+                // Begin webaudio playback, initially scheduling the closures to fire on a timeout event.
+                let mut offset_ms = 10;
+                for on_ended_closure in self.on_ended_closures.iter() {
+                    window
+                        .set_timeout_with_callback_and_timeout_and_arguments_0(
+                            on_ended_closure
+                                .read()
+                                .unwrap()
+                                .as_ref()
+                                .unwrap()
+                                .as_ref()
+                                .unchecked_ref(),
+                            offset_ms,
+                        )
+                        .unwrap();
+                    offset_ms += 333 / 2;
+                }
+                Ok(())
+            }
+            Err(err) => {
+                let description = format!("{:?}", err);
+                let err = BackendSpecificError { description };
+                Err(err.into())
+            }
+        }
+    }
+
+    fn pause(&self) -> Result<(), PauseStreamError> {
+        match self.ctx.suspend() {
+            Ok(_) => Ok(()),
+            Err(err) => {
+                let description = format!("{:?}", err);
+                let err = BackendSpecificError { description };
+                Err(err.into())
+            }
+        }
+    }
+}
+
+impl Drop for Stream {
+    fn drop(&mut self) {
+        let _ = self.ctx.close();
+    }
+}
+
+impl Default for Devices {
+    fn default() -> Devices {
+        // We produce an empty iterator if the WebAudio API isn't available.
+        Devices(is_webaudio_available())
+    }
+}
+
+impl Iterator for Devices {
+    type Item = Device;
+    #[inline]
+    fn next(&mut self) -> Option<Device> {
+        if self.0 {
+            self.0 = false;
+            Some(Device)
+        } else {
+            None
+        }
+    }
+}
+
+#[inline]
+fn default_input_device() -> Option<Device> {
+    unimplemented!();
+}
+
+#[inline]
+fn default_output_device() -> Option<Device> {
+    if is_webaudio_available() {
+        Some(Device)
+    } else {
+        None
+    }
+}
+
+// Detects whether the `AudioContext` global variable is available.
+fn is_webaudio_available() -> bool {
+    if let Ok(audio_context_is_defined) = eval("typeof AudioContext !== 'undefined'") {
+        audio_context_is_defined.as_bool().unwrap()
+    } else {
+        false
+    }
+}
diff --git a/src/platform/mod.rs b/src/platform/mod.rs
index 3c6db74..f3be04c 100644
--- a/src/platform/mod.rs
+++ b/src/platform/mod.rs
@@ -500,6 +500,24 @@ mod platform_impl {
     }
 }

+#[cfg(all(target_arch = "wasm32", feature = "wasm-bindgen"))]
+mod platform_impl {
+    pub use crate::host::webaudio::{
+        Device as WebAudioDevice, Devices as WebAudioDevices, Host as WebAudioHost,
+        Stream as WebAudioStream, SupportedInputFormats as WebAudioSupportedInputFormats,
+        SupportedOutputFormats as WebAudioSupportedOutputFormats,
+    };
+
+    impl_platform_host!(WebAudio webaudio "WebAudio");
+
+    /// The default host for the current compilation target platform.
+    pub fn default_host() -> Host {
+        WebAudioHost::new()
+            .expect("the default host should always be available")
+            .into()
+    }
+}
+
 #[cfg(windows)]
 mod platform_impl {
     #[cfg(feature = "asio")]
@@ -535,7 +553,8 @@ mod platform_impl {
         target_os = "freebsd",
         target_os = "macos",
         target_os = "ios",
-        target_os = "emscripten"
+        target_os = "emscripten",
+        all(target_arch = "wasm32", feature = "wasm-bindgen"),
     )))]
 mod platform_impl {
     pub use crate::host::null::{

From 640a1d39edcf53091cf1e87f45c11539dee9431b Mon Sep 17 00:00:00 2001
From: mitchmindtree
Date: Thu, 21 May 2020 16:05:13 +0200
Subject: [PATCH 2/3] Rebase/Update webaudio PR for recent breaking changes

This rebases #372, addressing the recent changes introduced by #397, #395,
and #371 in the process.

TODO:

- [ ] Complete implementation of `callback` and `playback` timestamps in
      the output stream callback.
---
 src/host/webaudio/mod.rs | 98 +++++++++++++++++++++++-----------------
 src/platform/mod.rs      |  4 +-
 2 files changed, 58 insertions(+), 44 deletions(-)

diff --git a/src/host/webaudio/mod.rs b/src/host/webaudio/mod.rs
index 12a78ca..8b2f293 100644
--- a/src/host/webaudio/mod.rs
+++ b/src/host/webaudio/mod.rs
@@ -7,8 +7,10 @@ use self::wasm_bindgen::prelude::*;
 use self::wasm_bindgen::JsCast;
 use self::web_sys::{AudioContext, AudioContextOptions};
 use crate::{
-    BuildStreamError, Data, DefaultFormatError, DeviceNameError, DevicesError, Format,
-    PauseStreamError, PlayStreamError, StreamError, SupportedFormat, SupportedFormatsError,
+    BuildStreamError, Data, DefaultStreamConfigError, DeviceNameError, DevicesError,
+    InputCallbackInfo, OutputCallbackInfo, PauseStreamError, PlayStreamError, SampleRate,
+    StreamConfig, StreamError, SupportedStreamConfig, SupportedStreamConfigRange,
+    SupportedStreamConfigsError,
 };
 use std::ops::DerefMut;
 use std::sync::{Arc, Mutex, RwLock};
@@ -28,8 +30,8 @@ pub struct Stream {
     on_ended_closures: Vec<Arc<RwLock<Option<Closure<dyn FnMut()>>>>>,
 }

-pub type SupportedInputFormats = ::std::vec::IntoIter<SupportedFormat>;
-pub type SupportedOutputFormats = ::std::vec::IntoIter<SupportedFormat>;
+pub type SupportedInputConfigs = ::std::vec::IntoIter<SupportedStreamConfigRange>;
+pub type SupportedOutputConfigs = ::std::vec::IntoIter<SupportedStreamConfigRange>;

 impl Host {
     pub fn new() -> Result<Self, crate::HostUnavailable> {
@@ -72,12 +74,16 @@ impl Device {
     }

     #[inline]
-    fn supported_input_formats(&self) -> Result<SupportedInputFormats, SupportedFormatsError> {
+    fn supported_input_configs(
+        &self,
+    ) -> Result<SupportedInputConfigs, SupportedStreamConfigsError> {
         unimplemented!();
     }

     #[inline]
-    fn supported_output_formats(&self) -> Result<SupportedOutputFormats, SupportedFormatsError> {
+    fn supported_output_configs(
+        &self,
+    ) -> Result<SupportedOutputConfigs, SupportedStreamConfigsError> {
         // TODO: right now cpal's API doesn't allow flexibility here;
         // "44100" and "2" (channels) have also been hard-coded in the rest of the code. If
         // this ever becomes more flexible, don't forget to change that.
@@ -86,34 +92,34 @@ impl Device {
         // According to https://developer.mozilla.org/en-US/docs/Web/API/BaseAudioContext/createBuffer
         // browsers must support at least 1 to 32 channels and sample rates from 8,000 Hz to 96,000 Hz.
         //
         // UPDATE: We can do this now. Might be best to use `crate::COMMON_SAMPLE_RATES` and
         // filter out those that lie outside the range specified above.
-        Ok(vec![SupportedFormat {
+        Ok(vec![SupportedStreamConfigRange {
             channels: 2,
-            min_sample_rate: ::SampleRate(44100),
-            max_sample_rate: ::SampleRate(44100),
-            data_type: ::SampleFormat::F32,
+            min_sample_rate: SampleRate(44100),
+            max_sample_rate: SampleRate(44100),
+            sample_format: ::SampleFormat::F32,
         }]
         .into_iter())
     }

     #[inline]
-    fn default_input_format(&self) -> Result<Format, DefaultFormatError> {
+    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
         unimplemented!();
     }

     #[inline]
-    fn default_output_format(&self) -> Result<Format, DefaultFormatError> {
+    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
         // TODO: this is hard-coded; see `supported_output_formats`.
-        Ok(Format {
+        Ok(SupportedStreamConfig {
             channels: 2,
             sample_rate: ::SampleRate(44100),
-            data_type: ::SampleFormat::F32,
+            sample_format: ::SampleFormat::F32,
         })
     }
 }

 impl DeviceTrait for Device {
-    type SupportedInputFormats = SupportedInputFormats;
-    type SupportedOutputFormats = SupportedOutputFormats;
+    type SupportedInputConfigs = SupportedInputConfigs;
+    type SupportedOutputConfigs = SupportedOutputConfigs;
     type Stream = Stream;

     #[inline]
@@ -122,37 +128,38 @@ impl DeviceTrait for Device {
     }

     #[inline]
-    fn supported_input_formats(
+    fn supported_input_configs(
         &self,
-    ) -> Result<Self::SupportedInputFormats, SupportedFormatsError> {
-        Device::supported_input_formats(self)
+    ) -> Result<Self::SupportedInputConfigs, SupportedStreamConfigsError> {
+        Device::supported_input_configs(self)
     }

     #[inline]
-    fn supported_output_formats(
+    fn supported_output_configs(
         &self,
-    ) -> Result<Self::SupportedOutputFormats, SupportedFormatsError> {
-        Device::supported_output_formats(self)
+    ) -> Result<Self::SupportedOutputConfigs, SupportedStreamConfigsError> {
+        Device::supported_output_configs(self)
     }

     #[inline]
-    fn default_input_format(&self) -> Result<Format, DefaultFormatError> {
-        Device::default_input_format(self)
+    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
+        Device::default_input_config(self)
     }

     #[inline]
-    fn default_output_format(&self) -> Result<Format, DefaultFormatError> {
-        Device::default_output_format(self)
+    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
+        Device::default_output_config(self)
     }

     fn build_input_stream_raw<D, E>(
         &self,
-        _format: &Format,
+        _config: &StreamConfig,
+        _sample_format: SampleFormat,
         _data_callback: D,
         _error_callback: E,
     ) -> Result<Self::Stream, BuildStreamError>
     where
-        D: FnMut(&Data) + Send + 'static,
+        D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
         E: FnMut(StreamError) + Send + 'static,
     {
         unimplemented!()
@@ -161,27 +168,28 @@ impl DeviceTrait for Device {
     }

     /// Create an output stream.
     fn build_output_stream_raw<D, E>(
         &self,
-        format: &Format,
+        config: &StreamConfig,
+        sample_format: SampleFormat,
         data_callback: D,
         _error_callback: E,
     ) -> Result<Self::Stream, BuildStreamError>
     where
-        D: FnMut(&mut Data) + Send + 'static,
+        D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
         E: FnMut(StreamError) + Send + 'static,
     {
         assert_eq!(
-            format.data_type,
+            sample_format,
             SampleFormat::F32,
             "WebAudio backend currently only supports `f32` data",
         );

         // Use a buffer period of 1/3 s for this early proof of concept.
-        let buffer_length = (format.sample_rate.0 as f64 / 3.0).round() as usize;
+        let buffer_length = (config.sample_rate.0 as f64 / 3.0).round() as usize;
         let data_callback = Arc::new(Mutex::new(Box::new(data_callback)));

         // Create the WebAudio stream.
         let mut stream_opts = AudioContextOptions::new();
-        stream_opts.sample_rate(format.sample_rate.0 as f32);
+        stream_opts.sample_rate(config.sample_rate.0 as f32);
         let ctx = Arc::new(
             AudioContext::new_with_context_options(&stream_opts).map_err(
                 |err| -> BuildStreamError {
@@ -201,21 +209,20 @@ impl DeviceTrait for Device {

         // Create a set of closures / callbacks which will continuously fetch and schedule sample playback.
         // Starting with two workers, e.g. a front and back buffer, so that audio frames can be fetched in the background.
         for _i in 0..2 {
-            let format = format.clone();
             let data_callback_handle = data_callback.clone();
             let ctx_handle = ctx.clone();
             let time_handle = time.clone();

             // A set of temporary buffers to be used for intermediate sample transformation steps.
-            let mut temporary_buffer = vec![0f32; buffer_length * format.channels as usize];
+            let mut temporary_buffer = vec![0f32; buffer_length * config.channels as usize];
             let mut temporary_channel_buffer = vec![0f32; buffer_length];

             // Create a webaudio buffer which will be reused to avoid allocations.
             let ctx_buffer = ctx
                 .create_buffer(
-                    format.channels as u32,
+                    config.channels as u32,
                     buffer_length as u32,
-                    format.sample_rate.0 as f32,
+                    config.sample_rate.0 as f32,
                 )
                 .map_err(|err| -> BuildStreamError {
                     let description = format!("{:?}", err);
                     let err = BackendSpecificError { description };
                     err.into()
                 })?;
@@ -228,6 +235,9 @@ impl DeviceTrait for Device {
                 Arc::new(RwLock::new(None));
             let on_ended_closure_handle = on_ended_closure.clone();

+            let n_channels = config.channels as usize;
+            let sample_rate = config.sample_rate.0 as f64;
+
             on_ended_closure
                 .write()
                 .unwrap()
@@ -252,16 +262,20 @@ impl DeviceTrait for Device {
                         let sample_format = SampleFormat::F32;
                         let mut data = unsafe { Data::from_parts(data, len, sample_format) };
                         let mut data_callback = data_callback_handle.lock().unwrap();
-                        (data_callback.deref_mut())(&mut data);
+                        let callback = unimplemented!();
+                        let playback = unimplemented!();
+                        let timestamp = crate::OutputStreamTimestamp { callback, playback };
+                        let info = OutputCallbackInfo { timestamp };
+                        (data_callback.deref_mut())(&mut data, &info);
                     }

                     // Deinterleave the sample data and copy it into the audio context buffer.
                     // We do not reference the audio context buffer directly (e.g. via getChannelData),
                     // as wasm-bindgen only gives us a copy, not a direct reference.
-                    for channel in 0..(format.channels as usize) {
+                    for channel in 0..n_channels {
                         for i in 0..buffer_length {
                             temporary_channel_buffer[i] =
-                                temporary_buffer[(format.channels as usize) * i + channel];
+                                temporary_buffer[n_channels * i + channel];
                         }
                         ctx_buffer
                             .copy_to_channel(&mut temporary_channel_buffer, channel as i32)
@@ -293,8 +307,8 @@ impl DeviceTrait for Device {
                     source
                         .start_with_when(time_at_start_of_buffer)
                         .expect("Unable to start the webaudio buffer source");

                     // Keep track of when the next buffer worth of samples should be played.
-                    *time_handle.write().unwrap() = time_at_start_of_buffer
-                        + (buffer_length as f64 / format.sample_rate.0 as f64);
+                    *time_handle.write().unwrap() =
+                        time_at_start_of_buffer + (buffer_length as f64 / sample_rate);
                 }) as Box<dyn FnMut()>));

             on_ended_closures.push(on_ended_closure);
diff --git a/src/platform/mod.rs b/src/platform/mod.rs
index f3be04c..033c91e 100644
--- a/src/platform/mod.rs
+++ b/src/platform/mod.rs
@@ -504,8 +504,8 @@ mod platform_impl {
 mod platform_impl {
     pub use crate::host::webaudio::{
         Device as WebAudioDevice, Devices as WebAudioDevices, Host as WebAudioHost,
-        Stream as WebAudioStream, SupportedInputFormats as WebAudioSupportedInputFormats,
-        SupportedOutputFormats as WebAudioSupportedOutputFormats,
+        Stream as WebAudioStream, SupportedInputConfigs as WebAudioSupportedInputConfigs,
+        SupportedOutputConfigs as WebAudioSupportedOutputConfigs,
     };

     impl_platform_host!(WebAudio webaudio "WebAudio");

From 78e1796ba8d6fc10e51801d2f5fa2b58016328c4 Mon Sep 17 00:00:00 2001
From: mitchmindtree
Date: Fri, 22 May 2020 14:35:29 +0200
Subject: [PATCH 3/3] Add callback and playback timestamp implementation for
 webaudio stream

---
 src/host/emscripten/mod.rs | 6 +-----
 src/host/webaudio/mod.rs   | 7 ++++---
 src/lib.rs                 | 9 +++++++++
 3 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/src/host/emscripten/mod.rs b/src/host/emscripten/mod.rs
index d3a31bd..de5ac0a 100644
--- a/src/host/emscripten/mod.rs
+++ b/src/host/emscripten/mod.rs
@@ -248,11 +248,7 @@ where
     let now_secs: f64 = js!(@{audio_ctxt}.getOutputTimestamp().currentTime)
         .try_into()
         .expect("failed to retrieve Value as f64");
-    let callback = {
-        let secs = now_secs as i64;
-        let nanos = ((now_secs * 1_000_000_000.0) - secs as f64 * 1_000_000_000.0) as u32;
-        crate::StreamInstant::new(secs, nanos)
-    };
+    let callback = crate::StreamInstant::from_secs_f64(now_secs);
     // TODO: Use proper latency instead. Currently unsupported on most browsers though so
     // we estimate based on buffer size instead. Probably should use this, but it's only
     // supported by firefox (2020-04-28).
diff --git a/src/host/webaudio/mod.rs b/src/host/webaudio/mod.rs
index 8b2f293..2d27dff 100644
--- a/src/host/webaudio/mod.rs
+++ b/src/host/webaudio/mod.rs
@@ -242,6 +242,7 @@ impl DeviceTrait for Device {
                 .write()
                 .unwrap()
                 .replace(Closure::wrap(Box::new(move || {
+                    let now = ctx_handle.current_time();
                     let time_at_start_of_buffer = {
                         let time_at_start_of_buffer = time_handle
                             .read()
@@ -251,7 +252,7 @@ impl DeviceTrait for Device {
                             *time_at_start_of_buffer
                         } else {
                             // 25ms of time to fetch the first sample data, increase to avoid initial underruns.
-                            ctx_handle.current_time() + 0.025
+                            now + 0.025
                         }
                     };
@@ -262,8 +263,8 @@ impl DeviceTrait for Device {
                         let sample_format = SampleFormat::F32;
                         let mut data = unsafe { Data::from_parts(data, len, sample_format) };
                         let mut data_callback = data_callback_handle.lock().unwrap();
-                        let callback = unimplemented!();
-                        let playback = unimplemented!();
+                        let callback = crate::StreamInstant::from_secs_f64(now);
+                        let playback = crate::StreamInstant::from_secs_f64(time_at_start_of_buffer);
                         let timestamp = crate::OutputStreamTimestamp { callback, playback };
                         let info = OutputCallbackInfo { timestamp };
                         (data_callback.deref_mut())(&mut data, &info);
diff --git a/src/lib.rs b/src/lib.rs
index 79cefe7..1c3e2c8 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -341,12 +341,14 @@ impl StreamInstant {
         (self.secs as i128 * 1_000_000_000) + self.nanos as i128
     }

+    #[allow(dead_code)]
     fn from_nanos(nanos: i64) -> Self {
         let secs = nanos / 1_000_000_000;
         let subsec_nanos = nanos - secs * 1_000_000_000;
         Self::new(secs as i64, subsec_nanos as u32)
     }

+    #[allow(dead_code)]
     fn from_nanos_i128(nanos: i128) -> Option<Self> {
         let secs = nanos / 1_000_000_000;
         if secs > std::i64::MAX as i128 || secs < std::i64::MIN as i128 {
@@ -358,6 +360,13 @@ impl StreamInstant {
         }
     }

+    #[allow(dead_code)]
+    fn from_secs_f64(secs: f64) -> crate::StreamInstant {
+        let s = secs.floor() as i64;
+        let ns = ((secs - s as f64) * 1_000_000_000.0) as u32;
+        Self::new(s, ns)
+    }
+
     fn new(secs: i64, nanos: u32) -> Self {
         StreamInstant { secs, nanos }
     }
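
For reference, the arithmetic behind the two-worker scheme in these patches:
with the hard-coded 44,100 Hz config, `buffer_length` is round(44_100 / 3) =
14_700 frames, i.e. a buffer period of roughly 333 ms. `Stream::play`
staggers the two closures' initial timeouts by 333 / 2 ms, so the front and
back buffers come due half a period apart and one buffer can be refilled
while the other is playing.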
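
The `onended` closure deinterleaves cpal's frame-major sample data one
channel at a time, because `copy_to_channel` accepts a single planar channel
and wasm-bindgen cannot hand out a direct reference into the `AudioBuffer`.
The same step as a free-standing sketch (the helper name is illustrative,
not part of the patches):

    /// Copy channel `channel` out of an interleaved buffer holding
    /// `planar.len() * n_channels` samples into a planar scratch buffer,
    /// ready to be passed to `AudioBuffer::copy_to_channel`.
    fn deinterleave_channel(
        interleaved: &[f32],
        planar: &mut [f32],
        n_channels: usize,
        channel: usize,
    ) {
        for (frame, out) in planar.iter_mut().enumerate() {
            *out = interleaved[frame * n_channels + channel];
        }
    }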
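
The UPDATE note in `supported_output_configs` suggests deriving the
advertised configs from `crate::COMMON_SAMPLE_RATES` instead of hard-coding
44,100 Hz. A minimal sketch of that follow-up, assuming
`COMMON_SAMPLE_RATES` is a slice of ordered `SampleRate` values; the helper
name is illustrative and the bounds mirror the 8,000-96,000 Hz range quoted
from MDN above:

    use crate::{SampleFormat, SampleRate, SupportedStreamConfigRange};

    /// One config range per common sample rate that falls inside the range
    /// WebAudio requires browsers to support.
    fn webaudio_output_configs() -> Vec<SupportedStreamConfigRange> {
        crate::COMMON_SAMPLE_RATES
            .iter()
            .copied()
            .filter(|rate| *rate >= SampleRate(8_000) && *rate <= SampleRate(96_000))
            .map(|rate| SupportedStreamConfigRange {
                channels: 2,
                min_sample_rate: rate,
                max_sample_rate: rate,
                sample_format: SampleFormat::F32,
            })
            .collect()
    }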
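
`StreamInstant::from_secs_f64` splits the `AudioContext`'s floating-point
clock with `floor`, which keeps the nanosecond part in 0..1_000_000_000 even
for negative inputs. A hypothetical check of that behaviour — it would have
to live inside `src/lib.rs` since `new` is private, and it assumes
`StreamInstant` derives `PartialEq`:

    #[test]
    fn from_secs_f64_splits_secs_and_nanos() {
        // 1.25 s -> 1 whole second + 0.25 s expressed in nanoseconds.
        assert_eq!(
            StreamInstant::from_secs_f64(1.25),
            StreamInstant::new(1, 250_000_000),
        );
        // floor() rounds toward negative infinity, so the nanos stay positive.
        assert_eq!(
            StreamInstant::from_secs_f64(-0.5),
            StreamInstant::new(-1, 500_000_000),
        );
    }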