From 37d80b98cff6ac067c19f702936f3422256755a9 Mon Sep 17 00:00:00 2001
From: mitchmindtree
Date: Sat, 14 Dec 2019 11:50:57 +0100
Subject: [PATCH] Update the `emscripten` backend for the removal of the `EventLoop`

For the most part, behaviour should be largely unchanged; however, each
individual stream now has its own `set_timeout` callback loop, rather than
using a single loop to process all streams at once.

Many TODOs remain within the `emscripten` backend. These were left untouched
for the most part, in favour of addressing them in a future, more web-focused
PR.
---
 src/host/emscripten/mod.rs | 444 ++++++++++++++++---------------------
 src/platform/mod.rs        |   3 +-
 2 files changed, 189 insertions(+), 258 deletions(-)

diff --git a/src/host/emscripten/mod.rs b/src/host/emscripten/mod.rs
index 136c88f..eb51b58 100644
--- a/src/host/emscripten/mod.rs
+++ b/src/host/emscripten/mod.rs
@@ -1,7 +1,6 @@
 use std::mem;
 use std::os::raw::c_void;
 use std::slice::from_raw_parts;
-use std::sync::Mutex;
 use stdweb;
 use stdweb::Reference;
 use stdweb::unstable::TryInto;
@@ -17,25 +16,102 @@
 use PauseStreamError;
 use PlayStreamError;
 use SupportedFormatsError;
 use StreamData;
-use StreamDataResult;
+use StreamError;
 use SupportedFormat;
 use UnknownTypeOutputBuffer;
-use traits::{DeviceTrait, EventLoopTrait, HostTrait, StreamIdTrait};
+use traits::{DeviceTrait, HostTrait, StreamTrait};
+
+// The emscripten backend currently works by instantiating an `AudioContext` object per `Stream`.
+// Creating a stream creates a new `AudioContext`. Destroying a stream destroys it. Creation of a
+// `Host` instance initializes the `stdweb` context.

 /// The default emscripten host type.
 #[derive(Debug)]
 pub struct Host;

+/// Content is false if the iterator is empty.
+pub struct Devices(bool);
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct Device;
+
+pub struct Stream {
+    // A reference to an `AudioContext` object.
+    audio_ctxt_ref: Reference,
+}
+
+// Index within the `streams` array of the events loop.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct StreamId(usize);
+
+pub type SupportedInputFormats = ::std::vec::IntoIter<SupportedFormat>;
+pub type SupportedOutputFormats = ::std::vec::IntoIter<SupportedFormat>;
+
 impl Host {
     pub fn new() -> Result<Self, crate::HostUnavailable> {
+        stdweb::initialize();
         Ok(Host)
     }
 }

+impl Devices {
+    fn new() -> Result<Self, DevicesError> {
+        Ok(Self::default())
+    }
+}
+
+impl Device {
+    #[inline]
+    fn name(&self) -> Result<String, DeviceNameError> {
+        Ok("Default Device".to_owned())
+    }
+
+    #[inline]
+    fn supported_input_formats(&self) -> Result<SupportedInputFormats, SupportedFormatsError> {
+        unimplemented!();
+    }
+
+    #[inline]
+    fn supported_output_formats(&self) -> Result<SupportedOutputFormats, SupportedFormatsError> {
+        // TODO: right now cpal's API doesn't allow flexibility here;
+        // "44100" and "2" (channels) have also been hard-coded in the rest of the code; if
+        // this ever becomes more flexible, don't forget to change that.
+        // According to https://developer.mozilla.org/en-US/docs/Web/API/BaseAudioContext/createBuffer
+        // browsers must support at least 1 to 32 channels and sample rates of 8,000 Hz to 96,000 Hz.
+        //
+        // UPDATE: We can do this now. Might be best to use `crate::COMMON_SAMPLE_RATES` and
+        // filter out those that lie outside the range specified above.
+        Ok(
+            vec![
+                SupportedFormat {
+                    channels: 2,
+                    min_sample_rate: ::SampleRate(44100),
+                    max_sample_rate: ::SampleRate(44100),
+                    data_type: ::SampleFormat::F32,
+                },
+            ].into_iter(),
+        )
+    }
+
+    fn default_input_format(&self) -> Result<Format, DefaultFormatError> {
+        unimplemented!();
+    }
+
+    fn default_output_format(&self) -> Result<Format, DefaultFormatError> {
+        // TODO: because it is hard-coded; see `supported_output_formats`.
+        Ok(
+            Format {
+                channels: 2,
+                sample_rate: ::SampleRate(44100),
+                data_type: ::SampleFormat::F32,
+            },
+        )
+    }
+}
+
 impl HostTrait for Host {
     type Devices = Devices;
     type Device = Device;
-    type EventLoop = EventLoop;

     fn is_available() -> bool {
         // Assume this host is always available on emscripten.
@@ -53,15 +129,12 @@
     fn default_output_device(&self) -> Option<Self::Device> {
         default_output_device()
     }
-
-    fn event_loop(&self) -> Self::EventLoop {
-        EventLoop::new()
-    }
 }

 impl DeviceTrait for Device {
     type SupportedInputFormats = SupportedInputFormats;
     type SupportedOutputFormats = SupportedOutputFormats;
+    type Stream = Stream;

     fn name(&self) -> Result<String, DeviceNameError> {
         Device::name(self)
@@ -82,224 +155,124 @@
     fn default_output_format(&self) -> Result<Format, DefaultFormatError> {
         Device::default_output_format(self)
     }
-}

-impl EventLoopTrait for EventLoop {
-    type Device = Device;
-    type StreamId = StreamId;
-
-    fn build_input_stream(
+    fn build_input_stream<D, E>(
         &self,
-        device: &Self::Device,
-        format: &Format,
-    ) -> Result<Self::StreamId, BuildStreamError> {
-        EventLoop::build_input_stream(self, device, format)
-    }
-
-    fn build_output_stream(
-        &self,
-        device: &Self::Device,
-        format: &Format,
-    ) -> Result<Self::StreamId, BuildStreamError> {
-        EventLoop::build_output_stream(self, device, format)
-    }
-
-    fn play_stream(&self, stream: Self::StreamId) -> Result<(), PlayStreamError> {
-        EventLoop::play_stream(self, stream)
-    }
-
-    fn pause_stream(&self, stream: Self::StreamId) -> Result<(), PauseStreamError> {
-        EventLoop::pause_stream(self, stream)
-    }
-
-    fn destroy_stream(&self, stream: Self::StreamId) {
-        EventLoop::destroy_stream(self, stream)
-    }
-
-    fn run<F>(&self, callback: F) -> !
+        _format: &Format,
+        _data_callback: D,
+        _error_callback: E,
+    ) -> Result<Self::Stream, BuildStreamError>
     where
-        F: FnMut(Self::StreamId, StreamDataResult) + Send,
+        D: FnMut(StreamData) + Send + 'static,
+        E: FnMut(StreamError) + Send + 'static,
     {
-        EventLoop::run(self, callback)
+        unimplemented!()
+    }
+
+    fn build_output_stream<D, E>(
+        &self,
+        _format: &Format,
+        data_callback: D,
+        error_callback: E,
+    ) -> Result<Self::Stream, BuildStreamError>
+    where
+        D: FnMut(StreamData) + Send + 'static,
+        E: FnMut(StreamError) + Send + 'static,
+    {
+        // Create the stream.
+        let audio_ctxt_ref = js!(return new AudioContext()).into_reference().unwrap();
+        let stream = Stream { audio_ctxt_ref };
+
+        // Specify the callback.
+        let mut user_data = (self, data_callback, error_callback);
+        let user_data_ptr = &mut user_data as *mut (_, _, _);
+
+        // Use `set_timeout` to invoke a Rust callback repeatedly.
+        //
+        // The job of this callback is to fill the content of the audio buffers.
+        //
+        // See also: The call to `set_timeout` at the end of the `audio_callback_fn` which creates
+        // the loop.
+        set_timeout(|| audio_callback_fn::<D, E>(user_data_ptr as *mut c_void), 10);
+
+        Ok(stream)
     }
 }

-impl StreamIdTrait for StreamId {}
-
-// The emscripten backend works by having a global variable named `_cpal_audio_contexts`, which
-// is an array of `AudioContext` objects. A stream ID corresponds to an entry in this array.
-//
-// Creating a stream creates a new `AudioContext`. Destroying a stream destroys it.
-
-// TODO: handle latency better ; right now we just use setInterval with the amount of sound data
-// that is in each buffer ; this is obviously bad, and also the schedule is too tight and there may
-// be underflows
-
-pub struct EventLoop {
-    streams: Mutex<Vec<Option<Reference>>>,
-}
-
-impl EventLoop {
-    #[inline]
-    pub fn new() -> EventLoop {
-        stdweb::initialize();
-        EventLoop {
-            streams: Mutex::new(Vec::new()),
-        }
+impl StreamTrait for Stream {
+    fn play(&self) -> Result<(), PlayStreamError> {
+        let audio_ctxt = &self.audio_ctxt_ref;
+        js!(@{audio_ctxt}.resume());
+        Ok(())
     }

-    #[inline]
-    fn run<F>(&self, callback: F) -> !
-        where F: FnMut(StreamId, StreamDataResult),
-    {
-        // The `run` function uses `set_timeout` to invoke a Rust callback repeatidely. The job
-        // of this callback is to fill the content of the audio buffers.
+    fn pause(&self) -> Result<(), PauseStreamError> {
+        let audio_ctxt = &self.audio_ctxt_ref;
+        js!(@{audio_ctxt}.suspend());
+        Ok(())
+    }
+}

-        // The first argument of the callback function (a `void*`) is a casted pointer to `self`
-        // and to the `callback` parameter that was passed to `run`.
+// The first argument of the callback function (a `void*`) is a casted pointer to `self`
+// and to the `callback` parameter that was passed to `run`.
+fn audio_callback_fn<D, E>(user_data_ptr: *mut c_void)
+where
+    D: FnMut(StreamData) + Send + 'static,
+    E: FnMut(StreamError) + Send + 'static,
+{
+    unsafe {
+        let user_data_ptr2 = user_data_ptr as *mut (&Stream, D, E);
+        let user_data = &mut *user_data_ptr2;
+        let (ref stream, ref mut data_cb, ref mut _err_cb) = user_data;
+        let audio_ctxt = &stream.audio_ctxt_ref;
+
+        // TODO: We should be re-using a buffer.
+        let mut temporary_buffer = vec![0.0; 44100 * 2 / 3];

-        fn callback_fn<F>(user_data_ptr: *mut c_void)
-            where F: FnMut(StreamId, StreamDataResult)
-        {
-            unsafe {
-                let user_data_ptr2 = user_data_ptr as *mut (&EventLoop, F);
-                let user_data = &mut *user_data_ptr2;
-                let user_cb = &mut user_data.1;
-
-                let streams = user_data.0.streams.lock().unwrap().clone();
-                for (stream_id, stream) in streams.iter().enumerate() {
-                    let stream = match stream.as_ref() {
-                        Some(v) => v,
-                        None => continue,
-                    };
-
-                    let mut temporary_buffer = vec![0.0; 44100 * 2 / 3];
-
-                    {
-                        let buffer = UnknownTypeOutputBuffer::F32(::OutputBuffer { buffer: &mut temporary_buffer });
-                        let data = StreamData::Output { buffer: buffer };
-                        user_cb(StreamId(stream_id), Ok(data));
-                        // TODO: directly use a TypedArray<f32> once this is supported by stdweb
-                    }
-
-                    let typed_array = {
-                        let f32_slice = temporary_buffer.as_slice();
-                        let u8_slice: &[u8] = from_raw_parts(
-                            f32_slice.as_ptr() as *const _,
-                            f32_slice.len() * mem::size_of::<f32>(),
-                        );
-                        let typed_array: TypedArray<u8> = u8_slice.into();
-                        typed_array
-                    };
-
-                    let num_channels = 2u32; // TODO: correct value
-                    debug_assert_eq!(temporary_buffer.len() % num_channels as usize, 0);
-
-                    js!(
-                        var src_buffer = new Float32Array(@{typed_array}.buffer);
-                        var context = @{stream};
-                        var buf_len = @{temporary_buffer.len() as u32};
-                        var num_channels = @{num_channels};
-
-                        var buffer = context.createBuffer(num_channels, buf_len / num_channels, 44100);
-                        for (var channel = 0; channel < num_channels; ++channel) {
-                            var buffer_content = buffer.getChannelData(channel);
-                            for (var i = 0; i < buf_len / num_channels; ++i) {
-                                buffer_content[i] = src_buffer[i * num_channels + channel];
-                            }
-                        }
-
-                        var node = context.createBufferSource();
-                        node.buffer = buffer;
-                        node.connect(context.destination);
-                        node.start();
-                    );
-                }
-
-                set_timeout(|| callback_fn::<F>(user_data_ptr), 330);
-            }
-        }

+        let buffer = UnknownTypeOutputBuffer::F32(::OutputBuffer { buffer: &mut temporary_buffer });
+        let data = StreamData::Output { buffer: buffer };
+        data_cb(data);

-        let mut user_data = (self, callback);
-        let user_data_ptr = &mut user_data as *mut (_, _);
-
-        set_timeout(|| callback_fn::<F>(user_data_ptr as *mut _), 10);
-
-        stdweb::event_loop();
-    }
-
-    #[inline]
-    fn build_input_stream(&self, _: &Device, _format: &Format) -> Result<StreamId, BuildStreamError> {
-        unimplemented!();
-    }
-
-    #[inline]
-    fn build_output_stream(&self, _: &Device, _format: &Format) -> Result<StreamId, BuildStreamError> {
-        let stream = js!(return new AudioContext()).into_reference().unwrap();
-
-        let mut streams = self.streams.lock().unwrap();
-        let stream_id = if let Some(pos) = streams.iter().position(|v| v.is_none()) {
-            streams[pos] = Some(stream);
-            pos
-        } else {
-            let l = streams.len();
-            streams.push(Some(stream));
-            l
+        // TODO: directly use a TypedArray<f32> once this is supported by stdweb
+        let typed_array = {
+            let f32_slice = temporary_buffer.as_slice();
+            let u8_slice: &[u8] = from_raw_parts(
+                f32_slice.as_ptr() as *const _,
+                f32_slice.len() * mem::size_of::<f32>(),
+            );
+            let typed_array: TypedArray<u8> = u8_slice.into();
+            typed_array
         };
-        Ok(StreamId(stream_id))
-    }

+        let num_channels = 2u32; // TODO: correct value
+        debug_assert_eq!(temporary_buffer.len() % num_channels as usize, 0);

-    #[inline]
-    fn destroy_stream(&self, stream_id: StreamId) {
-        self.streams.lock().unwrap()[stream_id.0] = None;
-    }
+        js!(
+            var src_buffer = new Float32Array(@{typed_array}.buffer);
+            var context = @{audio_ctxt};
+            var buf_len = @{temporary_buffer.len() as u32};
+            var num_channels = @{num_channels};

-    #[inline]
-    fn play_stream(&self, stream_id: StreamId) -> Result<(), PlayStreamError> {
-        let streams = self.streams.lock().unwrap();
-        let stream = streams
-            .get(stream_id.0)
-            .and_then(|v| v.as_ref())
-            .expect("invalid stream ID");
-        js!(@{stream}.resume());
-        Ok(())
-    }
+            var buffer = context.createBuffer(num_channels, buf_len / num_channels, 44100);
+            for (var channel = 0; channel < num_channels; ++channel) {
+                var buffer_content = buffer.getChannelData(channel);
+                for (var i = 0; i < buf_len / num_channels; ++i) {
+                    buffer_content[i] = src_buffer[i * num_channels + channel];
+                }
+            }

-    #[inline]
-    fn pause_stream(&self, stream_id: StreamId) -> Result<(), PauseStreamError> {
-        let streams = self.streams.lock().unwrap();
-        let stream = streams
-            .get(stream_id.0)
-            .and_then(|v| v.as_ref())
-            .expect("invalid stream ID");
-        js!(@{stream}.suspend());
-        Ok(())
-    }
-}
+            var node = context.createBufferSource();
+            node.buffer = buffer;
+            node.connect(context.destination);
+            node.start();
+        );

-// Index within the `streams` array of the events loop.
-#[derive(Debug, Clone, PartialEq, Eq, Hash)]
-pub struct StreamId(usize);
-
-// Detects whether the `AudioContext` global variable is available.
-fn is_webaudio_available() -> bool {
-    stdweb::initialize();
-
-    js!(if (!AudioContext) {
-        return false;
-    } else {
-        return true;
-    }).try_into()
-        .unwrap()
-}
-
-// Content is false if the iterator is empty.
-pub struct Devices(bool);
-
-impl Devices {
-    fn new() -> Result<Self, DevicesError> {
-        Ok(Self::default())
+        // TODO: handle latency better; right now we just use setInterval with the amount of
+        // sound data that is in each buffer; this is obviously bad, and also the schedule is
+        // too tight and there may be underflows
+        set_timeout(|| audio_callback_fn::<D, E>(user_data_ptr), 330);
     }
 }

@@ -336,54 +309,13 @@ fn default_output_device() -> Option<Device> {
     }
 }

-#[derive(Clone, Debug, PartialEq, Eq)]
-pub struct Device;
-
-impl Device {
-    #[inline]
-    fn name(&self) -> Result<String, DeviceNameError> {
-        Ok("Default Device".to_owned())
-    }
-
-    #[inline]
-    fn supported_input_formats(&self) -> Result<SupportedInputFormats, SupportedFormatsError> {
-        unimplemented!();
-    }
-
-    #[inline]
-    fn supported_output_formats(&self) -> Result<SupportedOutputFormats, SupportedFormatsError> {
-        // TODO: right now cpal's API doesn't allow flexibility here
-        // "44100" and "2" (channels) have also been hard-coded in the rest of the code ; if
-        // this ever becomes more flexible, don't forget to change that
-        // According to https://developer.mozilla.org/en-US/docs/Web/API/BaseAudioContext/createBuffer
-        // browsers must support 1 to 32 channels at leats and 8,000 Hz to 96,000 Hz.
-        Ok(
-            vec![
-                SupportedFormat {
-                    channels: 2,
-                    min_sample_rate: ::SampleRate(44100),
-                    max_sample_rate: ::SampleRate(44100),
-                    data_type: ::SampleFormat::F32,
-                },
-            ].into_iter(),
-        )
-    }
-
-    fn default_input_format(&self) -> Result<Format, DefaultFormatError> {
-        unimplemented!();
-    }
-
-    fn default_output_format(&self) -> Result<Format, DefaultFormatError> {
-        // TODO: because it is hard coded, see supported_output_formats.
-        Ok(
-            Format {
-                channels: 2,
-                sample_rate: ::SampleRate(44100),
-                data_type: ::SampleFormat::F32,
-            },
-        )
-    }
+// Detects whether the `AudioContext` global variable is available.
+fn is_webaudio_available() -> bool {
+    stdweb::initialize();
+    js!(if (!AudioContext) {
+        return false;
+    } else {
+        return true;
+    }).try_into()
+        .unwrap()
 }
-
-pub type SupportedInputFormats = ::std::vec::IntoIter<SupportedFormat>;
-pub type SupportedOutputFormats = ::std::vec::IntoIter<SupportedFormat>;
diff --git a/src/platform/mod.rs b/src/platform/mod.rs
index cfa88c4..1571c11 100644
--- a/src/platform/mod.rs
+++ b/src/platform/mod.rs
@@ -435,9 +435,8 @@ mod platform_impl {
     pub use crate::host::emscripten::{
         Device as EmscriptenDevice,
         Devices as EmscriptenDevices,
-        EventLoop as EmscriptenEventLoop,
         Host as EmscriptenHost,
-        StreamId as EmscriptenStreamId,
+        Stream as EmscriptenStream,
         SupportedInputFormats as EmscriptenSupportedInputFormats,
         SupportedOutputFormats as EmscriptenSupportedOutputFormats,
     };
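--
Reviewer note, not part of the patch: a minimal sketch of how a stream is
driven once this change lands, assuming the `cpal::default_host()` entry
point and the `DeviceTrait`/`StreamTrait` methods as modified above. The
closures follow the `D: FnMut(StreamData)` and `E: FnMut(StreamError)` bounds
introduced here; on emscripten the format is the hard-coded 2-channel,
44100 Hz, `F32` one.

    use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
    use cpal::{StreamData, UnknownTypeOutputBuffer};

    fn main() {
        // On emscripten, creating the host also initializes the `stdweb` context.
        let host = cpal::default_host();
        let device = host
            .default_output_device()
            .expect("no output device available");
        let format = device
            .default_output_format()
            .expect("no default output format");

        // Building the stream creates an `AudioContext` and schedules the
        // per-stream `set_timeout` callback loop described in the commit message.
        let stream = device
            .build_output_stream(
                &format,
                |data: StreamData| {
                    // Fill the requested output buffer; here, with silence.
                    if let StreamData::Output {
                        buffer: UnknownTypeOutputBuffer::F32(mut buffer),
                    } = data
                    {
                        for sample in buffer.iter_mut() {
                            *sample = 0.0;
                        }
                    }
                },
                |err| eprintln!("an error occurred on the stream: {:?}", err),
            )
            .expect("failed to build output stream");

        // `play` resumes the stream's `AudioContext`; `pause` would suspend it.
        stream.play().expect("failed to play the stream");
    }

Note that each stream now owns its `AudioContext`, so dropping the `Stream`
tears down the context; under the old `EventLoop` design, all contexts lived
in a shared `streams` array driven by a single callback loop.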