From 6fc2185c99307aa7dbf9e37d1bdbc2b64819826c Mon Sep 17 00:00:00 2001
From: mitchmindtree
Date: Tue, 14 Jan 2020 21:56:38 +0100
Subject: [PATCH] Update emscripten backend for removal of `UnknownTypeBuffer`

---
 src/host/emscripten/mod.rs | 55 ++++++++++++++++++++++----------------
 1 file changed, 32 insertions(+), 23 deletions(-)

diff --git a/src/host/emscripten/mod.rs b/src/host/emscripten/mod.rs
index eb51b58..176671a 100644
--- a/src/host/emscripten/mod.rs
+++ b/src/host/emscripten/mod.rs
@@ -7,18 +7,22 @@ use stdweb::unstable::TryInto;
 use stdweb::web::TypedArray;
 use stdweb::web::set_timeout;
 
-use BuildStreamError;
-use DefaultFormatError;
-use DeviceNameError;
-use DevicesError;
-use Format;
-use PauseStreamError;
-use PlayStreamError;
-use SupportedFormatsError;
-use StreamData;
-use StreamError;
-use SupportedFormat;
-use UnknownTypeOutputBuffer;
+use crate::{
+    BuildStreamError,
+    DefaultFormatError,
+    DeviceNameError,
+    DevicesError,
+    Format,
+    InputData,
+    OutputData,
+    PauseStreamError,
+    PlayStreamError,
+    Sample,
+    SampleFormat,
+    StreamError,
+    SupportedFormat,
+    SupportedFormatsError,
+};
 use traits::{DeviceTrait, HostTrait, StreamTrait};
 
 // The emscripten backend currently works by instantiating an `AudioContext` object per `Stream`.
@@ -156,29 +160,33 @@ impl DeviceTrait for Device {
         Device::default_output_format(self)
     }
 
-    fn build_input_stream<D, E>(
+    fn build_input_stream<T, D, E>(
         &self,
         _format: &Format,
         _data_callback: D,
         _error_callback: E,
     ) -> Result<Self::Stream, BuildStreamError>
     where
-        D: FnMut(StreamData) + Send + 'static,
+        T: Sample,
+        D: FnMut(InputData<T>) + Send + 'static,
         E: FnMut(StreamError) + Send + 'static,
     {
         unimplemented!()
     }
 
-    fn build_output_stream<D, E>(
+    fn build_output_stream<T, D, E>(
         &self,
         _format: &Format,
         data_callback: D,
         error_callback: E,
     ) -> Result<Self::Stream, BuildStreamError>
     where
-        D: FnMut(StreamData) + Send + 'static,
+        T: Sample,
+        D: FnMut(OutputData<T>) + Send + 'static,
         E: FnMut(StreamError) + Send + 'static,
     {
+        assert_eq!(T::FORMAT, SampleFormat::F32, "emscripten backend only supports `f32` data");
+
         // Create the stream.
         let audio_ctxt_ref = js!(return new AudioContext()).into_reference().unwrap();
         let stream = Stream { audio_ctxt_ref };
@@ -193,7 +201,7 @@ impl DeviceTrait for Device {
         //
         // See also: The call to `set_timeout` at the end of the `audio_callback_fn` which creates
         // the loop.
-        set_timeout(|| audio_callback_fn::<D, E>(user_data_ptr as *mut c_void), 10);
+        set_timeout(|| audio_callback_fn::<T, D, E>(user_data_ptr as *mut c_void), 10);
 
         Ok(stream)
     }
@@ -215,9 +223,10 @@ impl StreamTrait for Stream {
 
 // The first argument of the callback function (a `void*`) is a casted pointer to `self`
 // and to the `callback` parameter that was passed to `run`.
-fn audio_callback_fn<D, E>(user_data_ptr: *mut c_void)
+fn audio_callback_fn<T, D, E>(user_data_ptr: *mut c_void)
 where
-    D: FnMut(StreamData) + Send + 'static,
+    T: Sample,
+    D: FnMut(OutputData<T>) + Send + 'static,
     E: FnMut(StreamError) + Send + 'static,
 {
     unsafe {
@@ -227,11 +236,11 @@ where
         let audio_ctxt = &stream.audio_ctxt_ref;
 
         // TODO: We should be re-using a buffer.
-        let mut temporary_buffer = vec![0.0; 44100 * 2 / 3];
+        let mut temporary_buffer: Vec<_> = (0..44100 * 2 / 3).map(|_| T::from(&0.0)).collect();
 
         {
-            let buffer = UnknownTypeOutputBuffer::F32(::OutputBuffer { buffer: &mut temporary_buffer });
-            let data = StreamData::Output { buffer: buffer };
+            let buffer = &mut temporary_buffer;
+            let data = OutputData { buffer };
             data_cb(data);
         }
 
@@ -272,7 +281,7 @@ where
         // TODO: handle latency better ; right now we just use setInterval with the amount of sound
         // data that is in each buffer ; this is obviously bad, and also the schedule is too tight
         // and there may be underflows
-        set_timeout(|| audio_callback_fn::<D, E>(user_data_ptr), 330);
+        set_timeout(|| audio_callback_fn::<T, D, E>(user_data_ptr), 330);
     }
 }
 
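Note for reviewers (not part of the patch): the sketch below illustrates the caller-facing effect of this change, namely that the output data callback now receives a typed `OutputData<f32>` instead of matching on `UnknownTypeOutputBuffer`. It is a rough, uncompiled example; the `default_host()` / `default_output_device()` / `default_output_format()` entry points, the exact shape of `OutputData` (assumed here to expose its samples through the `buffer` field used in the diff), and the error handling via `expect` come from the surrounding crate API and are not introduced by this patch.

use cpal::OutputData;
use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};

fn main() {
    let host = cpal::default_host();
    let device = host.default_output_device().expect("no output device available");
    let format = device.default_output_format().expect("no default output format");

    // `build_output_stream` is now generic over the sample type `T: Sample`; the
    // emscripten backend asserts at stream-build time that `T` is `f32`.
    let stream = device
        .build_output_stream(
            &format,
            |data: OutputData<f32>| {
                // Write silence; a real callback would render audio into `data.buffer`.
                for sample in data.buffer.iter_mut() {
                    *sample = 0.0;
                }
            },
            |err| eprintln!("an error occurred on the output stream: {:?}", err),
        )
        .expect("failed to build output stream");

    stream.play().expect("failed to play the stream");
}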