use std::mem;
use std::os::raw::c_void;
use std::slice::from_raw_parts;
use std::sync::{Arc, Mutex};
use stdweb;
use stdweb::Reference;
use stdweb::unstable::TryInto;
use stdweb::web::TypedArray;
use stdweb::web::set_timeout;

use BuildStreamError;
use DefaultFormatError;
use DeviceNameError;
use DevicesError;
use Format;
use PauseStreamError;
use PlayStreamError;
use SupportedFormatsError;
use StreamCloseCause;
use StreamData;
use StreamEvent;
use SupportedFormat;
use UnknownTypeOutputBuffer;

// The emscripten backend stores one `AudioContext` object per stream in the `streams` field of
// the `EventLoop`. A stream ID corresponds to an entry in this array.
//
// Creating a stream creates a new `AudioContext`. Destroying a stream destroys it.

// TODO: handle latency better; right now we just re-arm `set_timeout` with the amount of sound
// data that is in each buffer; this is obviously bad, and also the schedule is too tight and
// there may be underflows

pub struct EventLoop {
    streams: Mutex<Vec<Option<Reference>>>,
    // The `EventLoop` requires a handle to the callbacks in order to be able to emit necessary
    // events for `Play`, `Pause` and `Close`.
    user_callback: Arc<Mutex<UserCallback>>,
}

enum UserCallback {
    // When `run` is called with a callback, that callback will be stored here.
    //
    // It is essential for the safety of the program that this callback is removed before `run`
    // returns (not possible with the current CPAL API).
    Active(&'static mut (dyn FnMut(StreamId, StreamEvent) + Send)),
    // A queue of events that have occurred but that have not yet been emitted to the user as we
    // don't yet have a callback to do so.
    Inactive {
        pending_events: Vec<(StreamId, StreamEvent<'static>)>,
    },
}

impl EventLoop {
    #[inline]
    pub fn new() -> EventLoop {
        stdweb::initialize();
        EventLoop {
            streams: Mutex::new(Vec::new()),
            user_callback: Arc::new(Mutex::new(UserCallback::Inactive { pending_events: vec![] })),
        }
    }

    #[inline]
    pub fn run<F>(&self, mut callback: F) -> !
        where F: FnMut(StreamId, StreamEvent) + Send,
    {
        // Retrieve and process any pending events.
        //
        // Then, set the callback ready to be shared between audio processing and the event loop
        // handle.
        {
            let mut guard = self.user_callback.lock().unwrap();
            let pending_events = match *guard {
                UserCallback::Inactive { ref mut pending_events } => {
                    mem::replace(pending_events, vec![])
                }
                UserCallback::Active(_) => {
                    panic!("`EventLoop::run` was called when the event loop was already running");
                }
            };
            let callback: &mut (dyn FnMut(StreamId, StreamEvent) + Send) = &mut callback;
            for (stream_id, event) in pending_events {
                callback(stream_id, event);
            }
            *guard = UserCallback::Active(unsafe { mem::transmute(callback) });
        }

        // The `run` function uses `set_timeout` to invoke a Rust callback repeatedly. The job
        // of this callback is to fill the content of the audio buffers.

        // The first argument of the callback function (a `void*`) is a casted pointer to `self`
        // and to the `callback` parameter that was passed to `run`.
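        // For illustration, the pointer round-trip works roughly like this (a sketch of the
        // code that follows, not additional machinery):
        //
        //     let mut user_data = (self, callback);          // lives on `run`'s stack
        //     let ptr = &mut user_data as *mut (_, _);       // erased to `*mut c_void`
        //     // ...later, inside `callback_fn`...
        //     let user_data = &mut *(ptr as *mut (&EventLoop, F));
        //
        // This is only sound because `run` never returns, so `user_data` is never deallocated
        // while timeouts referring to it are still scheduled.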
        fn callback_fn<F>(user_data_ptr: *mut c_void)
            where F: FnMut(StreamId, StreamEvent)
        {
            unsafe {
                let user_data_ptr2 = user_data_ptr as *mut (&EventLoop, F);
                let user_data = &mut *user_data_ptr2;
                let user_cb = &mut user_data.1;

                let streams = user_data.0.streams.lock().unwrap().clone();
                for (stream_id, stream) in streams.iter().enumerate() {
                    let stream = match stream.as_ref() {
                        Some(v) => v,
                        None => continue,
                    };

                    // One third of a second of interleaved stereo audio at 44100 Hz.
                    let mut temporary_buffer = vec![0.0; 44100 * 2 / 3];

                    {
                        let buffer = UnknownTypeOutputBuffer::F32(::OutputBuffer { buffer: &mut temporary_buffer });
                        let data = StreamData::Output { buffer: buffer };
                        let event = StreamEvent::Data(data);
                        user_cb(StreamId(stream_id), event);
                        // TODO: directly use a TypedArray<f32> once this is supported by stdweb
                    }

                    let typed_array = {
                        let f32_slice = temporary_buffer.as_slice();
                        let u8_slice: &[u8] = from_raw_parts(
                            f32_slice.as_ptr() as *const _,
                            f32_slice.len() * mem::size_of::<f32>(),
                        );
                        let typed_array: TypedArray<u8> = u8_slice.into();
                        typed_array
                    };

                    let num_channels = 2u32; // TODO: correct value
                    debug_assert_eq!(temporary_buffer.len() % num_channels as usize, 0);

                    // De-interleave the samples into one `Float32Array` per channel, as required
                    // by `AudioBuffer`, then schedule the buffer for playback.
                    js!(
                        var src_buffer = new Float32Array(@{typed_array}.buffer);
                        var context = @{stream};
                        var buf_len = @{temporary_buffer.len() as u32};
                        var num_channels = @{num_channels};

                        var buffer = context.createBuffer(num_channels, buf_len / num_channels, 44100);
                        for (var channel = 0; channel < num_channels; ++channel) {
                            var buffer_content = buffer.getChannelData(channel);
                            for (var i = 0; i < buf_len / num_channels; ++i) {
                                buffer_content[i] = src_buffer[i * num_channels + channel];
                            }
                        }

                        var node = context.createBufferSource();
                        node.buffer = buffer;
                        node.connect(context.destination);
                        node.start();
                    );
                }

                // Re-arm the timeout; 330 ms is slightly less than the duration of one buffer.
                set_timeout(|| callback_fn::<F>(user_data_ptr), 330);
            }
        }

        let mut user_data = (self, callback);
        let user_data_ptr = &mut user_data as *mut (_, _);

        set_timeout(|| callback_fn::<F>(user_data_ptr as *mut _), 10);

        stdweb::event_loop();

        // It is critical that we remove the callback before returning (currently not possible).
        // *self.user_callback.lock().unwrap() = UserCallback::Inactive { pending_events: vec![] };
    }
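    // For illustration, a caller is expected to drive this backend roughly as follows (a
    // sketch; `device` and `format` come from the device API at the bottom of this module):
    //
    //     let event_loop = EventLoop::new();
    //     let id = event_loop.build_output_stream(&device, &format).unwrap();
    //     event_loop.play_stream(id).unwrap();
    //     event_loop.run(move |id, event| {
    //         if let StreamEvent::Data(StreamData::Output { buffer }) = event {
    //             // write samples into `buffer`
    //         }
    //     }); // `run` never returns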
    #[inline]
    pub fn build_input_stream(&self, _: &Device, _format: &Format) -> Result<StreamId, BuildStreamError> {
        unimplemented!();
    }

    #[inline]
    pub fn build_output_stream(&self, _: &Device, _format: &Format) -> Result<StreamId, BuildStreamError> {
        let stream = js!(return new AudioContext()).into_reference().unwrap();

        let mut streams = self.streams.lock().unwrap();
        // Reuse the first free slot if there is one, otherwise grow the array.
        let stream_id = if let Some(pos) = streams.iter().position(|v| v.is_none()) {
            streams[pos] = Some(stream);
            pos
        } else {
            let l = streams.len();
            streams.push(Some(stream));
            l
        };

        Ok(StreamId(stream_id))
    }

    fn emit_or_enqueue_event(&self, id: StreamId, event: StreamEvent<'static>) {
        let mut guard = self.user_callback.lock().unwrap();
        match *guard {
            UserCallback::Active(ref mut callback) => callback(id, event),
            UserCallback::Inactive { ref mut pending_events } => pending_events.push((id, event)),
        }
    }

    #[inline]
    pub fn destroy_stream(&self, stream_id: StreamId) {
        self.streams.lock().unwrap()[stream_id.0] = None;
        let event = StreamEvent::Close(StreamCloseCause::UserDestroyed);
        self.emit_or_enqueue_event(stream_id, event);
    }

    #[inline]
    pub fn play_stream(&self, stream_id: StreamId) -> Result<(), PlayStreamError> {
        let streams = self.streams.lock().unwrap();
        let stream = streams
            .get(stream_id.0)
            .and_then(|v| v.as_ref())
            .expect("invalid stream ID");
        self.emit_or_enqueue_event(stream_id, StreamEvent::Play);
        js!(@{stream}.resume());
        Ok(())
    }

    #[inline]
    pub fn pause_stream(&self, stream_id: StreamId) -> Result<(), PauseStreamError> {
        let streams = self.streams.lock().unwrap();
        let stream = streams
            .get(stream_id.0)
            .and_then(|v| v.as_ref())
            .expect("invalid stream ID");
        js!(@{stream}.suspend());
        self.emit_or_enqueue_event(stream_id, StreamEvent::Pause);
        Ok(())
    }
}

// Index within the `streams` array of the event loop.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct StreamId(usize);

// Detects whether the `AudioContext` global variable is available.
fn is_webaudio_available() -> bool {
    stdweb::initialize();

    js!(if (!AudioContext) {
        return false;
    } else {
        return true;
    }).try_into()
        .unwrap()
}

// The inner bool is `false` once the iterator has been exhausted.
pub struct Devices(bool);

impl Devices {
    pub fn new() -> Result<Self, DevicesError> {
        Ok(Self::default())
    }
}

impl Default for Devices {
    fn default() -> Devices {
        // We produce an empty iterator if the WebAudio API isn't available.
        Devices(is_webaudio_available())
    }
}

impl Iterator for Devices {
    type Item = Device;

    #[inline]
    fn next(&mut self) -> Option<Device> {
        if self.0 {
            self.0 = false;
            Some(Device)
        } else {
            None
        }
    }
}

#[inline]
pub fn default_input_device() -> Option<Device> {
    unimplemented!();
}

#[inline]
pub fn default_output_device() -> Option<Device> {
    if is_webaudio_available() {
        Some(Device)
    } else {
        None
    }
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Device;

impl Device {
    #[inline]
    pub fn name(&self) -> Result<String, DeviceNameError> {
        Ok("Default Device".to_owned())
    }

    #[inline]
    pub fn supported_input_formats(&self) -> Result<SupportedInputFormats, SupportedFormatsError> {
        unimplemented!();
    }

    #[inline]
    pub fn supported_output_formats(&self) -> Result<SupportedOutputFormats, SupportedFormatsError> {
        // TODO: right now cpal's API doesn't allow flexibility here; "44100" and "2" (channels)
        // have also been hard-coded in the rest of the code, so if this ever becomes more
        // flexible, don't forget to change that.
        // According to https://developer.mozilla.org/en-US/docs/Web/API/BaseAudioContext/createBuffer
        // browsers must support at least 1 to 32 channels and 8,000 Hz to 96,000 Hz.
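        // Until that flexibility exists, we advertise exactly one format, matching the values
        // hard-coded in `callback_fn` above (stereo, 44100 Hz, `f32` samples).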
        Ok(
            vec![
                SupportedFormat {
                    channels: 2,
                    min_sample_rate: ::SampleRate(44100),
                    max_sample_rate: ::SampleRate(44100),
                    data_type: ::SampleFormat::F32,
                },
            ].into_iter(),
        )
    }

    pub fn default_input_format(&self) -> Result<Format, DefaultFormatError> {
        unimplemented!();
    }

    pub fn default_output_format(&self) -> Result<Format, DefaultFormatError> {
        // TODO: hard-coded; see `supported_output_formats`.
        Ok(
            Format {
                channels: 2,
                sample_rate: ::SampleRate(44100),
                data_type: ::SampleFormat::F32,
            },
        )
    }
}

pub type SupportedInputFormats = ::std::vec::IntoIter<SupportedFormat>;
pub type SupportedOutputFormats = ::std::vec::IntoIter<SupportedFormat>;