Update the `emscripten` backend for the removal of the `EventLoop`

Behaviour should be largely unchanged. However, each individual stream now
runs its own `set_timeout` callback loop, rather than sharing a single loop
that processed every stream at once.
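
A minimal sketch of that shape follows. Here `set_timeout` is a thread-based stand-in for the emscripten timer binding the backend actually calls (the real backend schedules on the JS event loop, not on threads), and `Stream`/`audio_callback` are simplified stand-ins; only the 10 ms kick-off and 330 ms re-arm delays mirror the values in the diff below:

```rust
use std::sync::Arc;
use std::time::Duration;

// Stand-in for the emscripten `set_timeout` binding: run `callback` once
// after `ms` milliseconds.
fn set_timeout<F: FnOnce() + Send + 'static>(callback: F, ms: u64) {
    std::thread::spawn(move || {
        std::thread::sleep(Duration::from_millis(ms));
        callback();
    });
}

struct Stream {
    id: usize,
}

// Each stream's callback does its work and then re-arms itself, forming an
// independent per-stream loop; no shared `EventLoop` iterates all streams.
fn audio_callback(stream: Arc<Stream>) {
    println!("filling buffers for stream {}", stream.id);
    set_timeout(move || audio_callback(stream), 330);
}

fn main() {
    for id in 0..2 {
        let stream = Arc::new(Stream { id });
        // Building a stream kicks off its own callback loop shortly after.
        set_timeout(move || audio_callback(stream), 10);
    }
    // Let the illustrative loops run briefly before exiting.
    std::thread::sleep(Duration::from_secs(1));
}
```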

Many TODOs remain within the `emscripten` backend. These were mostly left
untouched in favour of addressing them in a future, more web-focused PR.

mitchmindtree committed 2019-12-14 11:50:57 +01:00
parent 70dcf2390a
commit 37d80b98cf
2 changed files with 189 additions and 258 deletions

The first changed file is the emscripten backend module itself:

```diff
@@ -1,7 +1,6 @@
 use std::mem;
 use std::os::raw::c_void;
 use std::slice::from_raw_parts;
-use std::sync::Mutex;
 use stdweb;
 use stdweb::Reference;
 use stdweb::unstable::TryInto;
@@ -17,25 +16,102 @@ use PauseStreamError;
 use PlayStreamError;
 use SupportedFormatsError;
 use StreamData;
-use StreamDataResult;
+use StreamError;
 use SupportedFormat;
 use UnknownTypeOutputBuffer;
-use traits::{DeviceTrait, EventLoopTrait, HostTrait, StreamIdTrait};
+use traits::{DeviceTrait, HostTrait, StreamTrait};
+
+// The emscripten backend currently works by instantiating an `AudioContext` object per `Stream`.
+// Creating a stream creates a new `AudioContext`. Destroying a stream destroys it. Creation of a
+// `Host` instance initializes the `stdweb` context.
 
 /// The default emscripten host type.
 #[derive(Debug)]
 pub struct Host;
+
+/// Content is false if the iterator is empty.
+pub struct Devices(bool);
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct Device;
+
+pub struct Stream {
+    // A reference to an `AudioContext` object.
+    audio_ctxt_ref: Reference,
+}
+
+// Index within the `streams` array of the events loop.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct StreamId(usize);
+
+pub type SupportedInputFormats = ::std::vec::IntoIter<SupportedFormat>;
+pub type SupportedOutputFormats = ::std::vec::IntoIter<SupportedFormat>;
 
 impl Host {
     pub fn new() -> Result<Self, crate::HostUnavailable> {
+        stdweb::initialize();
         Ok(Host)
     }
 }
+
+impl Devices {
+    fn new() -> Result<Self, DevicesError> {
+        Ok(Self::default())
+    }
+}
+
+impl Device {
+    #[inline]
+    fn name(&self) -> Result<String, DeviceNameError> {
+        Ok("Default Device".to_owned())
+    }
+
+    #[inline]
+    fn supported_input_formats(&self) -> Result<SupportedInputFormats, SupportedFormatsError> {
+        unimplemented!();
+    }
+
+    #[inline]
+    fn supported_output_formats(&self) -> Result<SupportedOutputFormats, SupportedFormatsError> {
+        // TODO: right now cpal's API doesn't allow flexibility here
+        // "44100" and "2" (channels) have also been hard-coded in the rest of the code ; if
+        // this ever becomes more flexible, don't forget to change that
+        // According to https://developer.mozilla.org/en-US/docs/Web/API/BaseAudioContext/createBuffer
+        // browsers must support 1 to 32 channels at least and 8,000 Hz to 96,000 Hz.
+        //
+        // UPDATE: We can do this now. Might be best to use `crate::COMMON_SAMPLE_RATES` and
+        // filter out those that lay outside the range specified above.
+        Ok(
+            vec![
+                SupportedFormat {
+                    channels: 2,
+                    min_sample_rate: ::SampleRate(44100),
+                    max_sample_rate: ::SampleRate(44100),
+                    data_type: ::SampleFormat::F32,
+                },
+            ].into_iter(),
+        )
+    }
+
+    fn default_input_format(&self) -> Result<Format, DefaultFormatError> {
+        unimplemented!();
+    }
+
+    fn default_output_format(&self) -> Result<Format, DefaultFormatError> {
+        // TODO: because it is hard coded, see supported_output_formats.
+        Ok(
+            Format {
+                channels: 2,
+                sample_rate: ::SampleRate(44100),
+                data_type: ::SampleFormat::F32,
+            },
+        )
+    }
+}
 
 impl HostTrait for Host {
     type Devices = Devices;
     type Device = Device;
-    type EventLoop = EventLoop;
 
     fn is_available() -> bool {
         // Assume this host is always available on emscripten.
@@ -53,15 +129,12 @@ impl HostTrait for Host {
     fn default_output_device(&self) -> Option<Self::Device> {
         default_output_device()
     }
-
-    fn event_loop(&self) -> Self::EventLoop {
-        EventLoop::new()
-    }
 }
 
 impl DeviceTrait for Device {
     type SupportedInputFormats = SupportedInputFormats;
     type SupportedOutputFormats = SupportedOutputFormats;
+    type Stream = Stream;
 
     fn name(&self) -> Result<String, DeviceNameError> {
         Device::name(self)
@@ -82,224 +155,124 @@ impl DeviceTrait for Device {
     fn default_output_format(&self) -> Result<Format, DefaultFormatError> {
         Device::default_output_format(self)
     }
-}
-
-impl EventLoopTrait for EventLoop {
-    type Device = Device;
-    type StreamId = StreamId;
-
-    fn build_input_stream(
-        &self,
-        device: &Self::Device,
-        format: &Format,
-    ) -> Result<Self::StreamId, BuildStreamError> {
-        EventLoop::build_input_stream(self, device, format)
-    }
-
-    fn build_output_stream(
-        &self,
-        device: &Self::Device,
-        format: &Format,
-    ) -> Result<Self::StreamId, BuildStreamError> {
-        EventLoop::build_output_stream(self, device, format)
-    }
-
-    fn play_stream(&self, stream: Self::StreamId) -> Result<(), PlayStreamError> {
-        EventLoop::play_stream(self, stream)
-    }
-
-    fn pause_stream(&self, stream: Self::StreamId) -> Result<(), PauseStreamError> {
-        EventLoop::pause_stream(self, stream)
-    }
-
-    fn destroy_stream(&self, stream: Self::StreamId) {
-        EventLoop::destroy_stream(self, stream)
-    }
-
-    fn run<F>(&self, callback: F) -> !
-    where
-        F: FnMut(Self::StreamId, StreamDataResult) + Send,
-    {
-        EventLoop::run(self, callback)
-    }
-}
-
-impl StreamIdTrait for StreamId {}
-
-// The emscripten backend works by having a global variable named `_cpal_audio_contexts`, which
-// is an array of `AudioContext` objects. A stream ID corresponds to an entry in this array.
-//
-// Creating a stream creates a new `AudioContext`. Destroying a stream destroys it.
-
-// TODO: handle latency better ; right now we just use setInterval with the amount of sound data
-// that is in each buffer ; this is obviously bad, and also the schedule is too tight and there may
-// be underflows
-
-pub struct EventLoop {
-    streams: Mutex<Vec<Option<Reference>>>,
-}
-
-impl EventLoop {
-    #[inline]
-    pub fn new() -> EventLoop {
-        stdweb::initialize();
-        EventLoop {
-            streams: Mutex::new(Vec::new()),
-        }
-    }
-
-    #[inline]
-    fn run<F>(&self, callback: F) -> !
-        where F: FnMut(StreamId, StreamDataResult),
-    {
-        // The `run` function uses `set_timeout` to invoke a Rust callback repeatedly. The job
-        // of this callback is to fill the content of the audio buffers.
-
-        // The first argument of the callback function (a `void*`) is a casted pointer to `self`
-        // and to the `callback` parameter that was passed to `run`.
-        fn callback_fn<F>(user_data_ptr: *mut c_void)
-            where F: FnMut(StreamId, StreamDataResult)
-        {
-            unsafe {
-                let user_data_ptr2 = user_data_ptr as *mut (&EventLoop, F);
-                let user_data = &mut *user_data_ptr2;
-                let user_cb = &mut user_data.1;
-
-                let streams = user_data.0.streams.lock().unwrap().clone();
-                for (stream_id, stream) in streams.iter().enumerate() {
-                    let stream = match stream.as_ref() {
-                        Some(v) => v,
-                        None => continue,
-                    };
-
-                    let mut temporary_buffer = vec![0.0; 44100 * 2 / 3];
-
-                    {
-                        let buffer = UnknownTypeOutputBuffer::F32(::OutputBuffer { buffer: &mut temporary_buffer });
-                        let data = StreamData::Output { buffer: buffer };
-                        user_cb(StreamId(stream_id), Ok(data));
-                    }
-
-                    // TODO: directly use a TypedArray<f32> once this is supported by stdweb
-                    let typed_array = {
-                        let f32_slice = temporary_buffer.as_slice();
-                        let u8_slice: &[u8] = from_raw_parts(
-                            f32_slice.as_ptr() as *const _,
-                            f32_slice.len() * mem::size_of::<f32>(),
-                        );
-                        let typed_array: TypedArray<u8> = u8_slice.into();
-                        typed_array
-                    };
-
-                    let num_channels = 2u32; // TODO: correct value
-                    debug_assert_eq!(temporary_buffer.len() % num_channels as usize, 0);
-
-                    js!(
-                        var src_buffer = new Float32Array(@{typed_array}.buffer);
-                        var context = @{stream};
-                        var buf_len = @{temporary_buffer.len() as u32};
-                        var num_channels = @{num_channels};
-
-                        var buffer = context.createBuffer(num_channels, buf_len / num_channels, 44100);
-                        for (var channel = 0; channel < num_channels; ++channel) {
-                            var buffer_content = buffer.getChannelData(channel);
-                            for (var i = 0; i < buf_len / num_channels; ++i) {
-                                buffer_content[i] = src_buffer[i * num_channels + channel];
-                            }
-                        }
-
-                        var node = context.createBufferSource();
-                        node.buffer = buffer;
-                        node.connect(context.destination);
-                        node.start();
-                    );
-                }
-
-                set_timeout(|| callback_fn::<F>(user_data_ptr), 330);
-            }
-        }
-
-        let mut user_data = (self, callback);
-        let user_data_ptr = &mut user_data as *mut (_, _);
-
-        set_timeout(|| callback_fn::<F>(user_data_ptr as *mut _), 10);
-
-        stdweb::event_loop();
-    }
-
-    #[inline]
-    fn build_input_stream(&self, _: &Device, _format: &Format) -> Result<StreamId, BuildStreamError> {
-        unimplemented!();
-    }
-
-    #[inline]
-    fn build_output_stream(&self, _: &Device, _format: &Format) -> Result<StreamId, BuildStreamError> {
-        let stream = js!(return new AudioContext()).into_reference().unwrap();
-
-        let mut streams = self.streams.lock().unwrap();
-        let stream_id = if let Some(pos) = streams.iter().position(|v| v.is_none()) {
-            streams[pos] = Some(stream);
-            pos
-        } else {
-            let l = streams.len();
-            streams.push(Some(stream));
-            l
-        };
-
-        Ok(StreamId(stream_id))
-    }
-
-    #[inline]
-    fn destroy_stream(&self, stream_id: StreamId) {
-        self.streams.lock().unwrap()[stream_id.0] = None;
-    }
-
-    #[inline]
-    fn play_stream(&self, stream_id: StreamId) -> Result<(), PlayStreamError> {
-        let streams = self.streams.lock().unwrap();
-        let stream = streams
-            .get(stream_id.0)
-            .and_then(|v| v.as_ref())
-            .expect("invalid stream ID");
-        js!(@{stream}.resume());
-        Ok(())
-    }
-
-    #[inline]
-    fn pause_stream(&self, stream_id: StreamId) -> Result<(), PauseStreamError> {
-        let streams = self.streams.lock().unwrap();
-        let stream = streams
-            .get(stream_id.0)
-            .and_then(|v| v.as_ref())
-            .expect("invalid stream ID");
-        js!(@{stream}.suspend());
-        Ok(())
-    }
-}
-
-// Index within the `streams` array of the events loop.
-#[derive(Debug, Clone, PartialEq, Eq, Hash)]
-pub struct StreamId(usize);
-
-// Detects whether the `AudioContext` global variable is available.
-fn is_webaudio_available() -> bool {
-    stdweb::initialize();
-    js!(if (!AudioContext) {
-        return false;
-    } else {
-        return true;
-    }).try_into()
-        .unwrap()
-}
-
-// Content is false if the iterator is empty.
-pub struct Devices(bool);
-
-impl Devices {
-    fn new() -> Result<Self, DevicesError> {
-        Ok(Self::default())
-    }
-}
+
+    fn build_input_stream<D, E>(
+        &self,
+        _format: &Format,
+        _data_callback: D,
+        _error_callback: E,
+    ) -> Result<Self::Stream, BuildStreamError>
+    where
+        D: FnMut(StreamData) + Send + 'static,
+        E: FnMut(StreamError) + Send + 'static,
+    {
+        unimplemented!()
+    }
+
+    fn build_output_stream<D, E>(
+        &self,
+        _format: &Format,
+        data_callback: D,
+        error_callback: E,
+    ) -> Result<Self::Stream, BuildStreamError>
+    where
+        D: FnMut(StreamData) + Send + 'static,
+        E: FnMut(StreamError) + Send + 'static,
+    {
+        // Create the stream.
+        let audio_ctxt_ref = js!(return new AudioContext()).into_reference().unwrap();
+        let stream = Stream { audio_ctxt_ref };
+
+        // Specify the callback.
+        let mut user_data = (self, data_callback, error_callback);
+        let user_data_ptr = &mut user_data as *mut (_, _, _);
+
+        // Use `set_timeout` to invoke a Rust callback repeatedly.
+        //
+        // The job of this callback is to fill the content of the audio buffers.
+        //
+        // See also: The call to `set_timeout` at the end of the `audio_callback_fn` which creates
+        // the loop.
+        set_timeout(|| audio_callback_fn::<D, E>(user_data_ptr as *mut c_void), 10);
+
+        Ok(stream)
+    }
+}
+
+impl StreamTrait for Stream {
+    fn play(&self) -> Result<(), PlayStreamError> {
+        let audio_ctxt = &self.audio_ctxt_ref;
+        js!(@{audio_ctxt}.resume());
+        Ok(())
+    }
+
+    fn pause(&self) -> Result<(), PauseStreamError> {
+        let audio_ctxt = &self.audio_ctxt_ref;
+        js!(@{audio_ctxt}.suspend());
+        Ok(())
+    }
+}
+
+// The first argument of the callback function (a `void*`) is a casted pointer to `self`
+// and to the `callback` parameter that was passed to `run`.
+fn audio_callback_fn<D, E>(user_data_ptr: *mut c_void)
+where
+    D: FnMut(StreamData) + Send + 'static,
+    E: FnMut(StreamError) + Send + 'static,
+{
+    unsafe {
+        let user_data_ptr2 = user_data_ptr as *mut (&Stream, D, E);
+        let user_data = &mut *user_data_ptr2;
+        let (ref stream, ref mut data_cb, ref mut _err_cb) = user_data;
+
+        let audio_ctxt = &stream.audio_ctxt_ref;
+
+        // TODO: We should be re-using a buffer.
+        let mut temporary_buffer = vec![0.0; 44100 * 2 / 3];
+
+        {
+            let buffer = UnknownTypeOutputBuffer::F32(::OutputBuffer { buffer: &mut temporary_buffer });
+            let data = StreamData::Output { buffer: buffer };
+            data_cb(data);
+        }
+
+        // TODO: directly use a TypedArray<f32> once this is supported by stdweb
+        let typed_array = {
+            let f32_slice = temporary_buffer.as_slice();
+            let u8_slice: &[u8] = from_raw_parts(
+                f32_slice.as_ptr() as *const _,
+                f32_slice.len() * mem::size_of::<f32>(),
+            );
+            let typed_array: TypedArray<u8> = u8_slice.into();
+            typed_array
+        };
+
+        let num_channels = 2u32; // TODO: correct value
+        debug_assert_eq!(temporary_buffer.len() % num_channels as usize, 0);
+
+        js!(
+            var src_buffer = new Float32Array(@{typed_array}.buffer);
+            var context = @{audio_ctxt};
+            var buf_len = @{temporary_buffer.len() as u32};
+            var num_channels = @{num_channels};
+
+            var buffer = context.createBuffer(num_channels, buf_len / num_channels, 44100);
+            for (var channel = 0; channel < num_channels; ++channel) {
+                var buffer_content = buffer.getChannelData(channel);
+                for (var i = 0; i < buf_len / num_channels; ++i) {
+                    buffer_content[i] = src_buffer[i * num_channels + channel];
+                }
+            }
+
+            var node = context.createBufferSource();
+            node.buffer = buffer;
+            node.connect(context.destination);
+            node.start();
+        );
+
+        // TODO: handle latency better ; right now we just use setInterval with the amount of
+        // sound data that is in each buffer ; this is obviously bad, and also the schedule is
+        // too tight and there may be underflows
+        set_timeout(|| audio_callback_fn::<D, E>(user_data_ptr), 330);
+    }
+}
@@ -336,54 +309,13 @@ fn default_output_device() -> Option<Device> {
     }
 }
 
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub struct Device;
-
-impl Device {
-    #[inline]
-    fn name(&self) -> Result<String, DeviceNameError> {
-        Ok("Default Device".to_owned())
-    }
-
-    #[inline]
-    fn supported_input_formats(&self) -> Result<SupportedInputFormats, SupportedFormatsError> {
-        unimplemented!();
-    }
-
-    #[inline]
-    fn supported_output_formats(&self) -> Result<SupportedOutputFormats, SupportedFormatsError> {
-        // TODO: right now cpal's API doesn't allow flexibility here
-        // "44100" and "2" (channels) have also been hard-coded in the rest of the code ; if
-        // this ever becomes more flexible, don't forget to change that
-        // According to https://developer.mozilla.org/en-US/docs/Web/API/BaseAudioContext/createBuffer
-        // browsers must support 1 to 32 channels at least and 8,000 Hz to 96,000 Hz.
-        Ok(
-            vec![
-                SupportedFormat {
-                    channels: 2,
-                    min_sample_rate: ::SampleRate(44100),
-                    max_sample_rate: ::SampleRate(44100),
-                    data_type: ::SampleFormat::F32,
-                },
-            ].into_iter(),
-        )
-    }
-
-    fn default_input_format(&self) -> Result<Format, DefaultFormatError> {
-        unimplemented!();
-    }
-
-    fn default_output_format(&self) -> Result<Format, DefaultFormatError> {
-        // TODO: because it is hard coded, see supported_output_formats.
-        Ok(
-            Format {
-                channels: 2,
-                sample_rate: ::SampleRate(44100),
-                data_type: ::SampleFormat::F32,
-            },
-        )
-    }
-}
-
-pub type SupportedInputFormats = ::std::vec::IntoIter<SupportedFormat>;
-pub type SupportedOutputFormats = ::std::vec::IntoIter<SupportedFormat>;
+// Detects whether the `AudioContext` global variable is available.
+fn is_webaudio_available() -> bool {
+    stdweb::initialize();
+    js!(if (!AudioContext) {
+        return false;
+    } else {
+        return true;
+    }).try_into()
+        .unwrap()
+}
```
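
One detail of the new `audio_callback_fn` worth noting: because `stdweb` did not yet support a `TypedArray<f32>`, the `f32` sample buffer is reinterpreted as raw bytes before crossing into JS, where `new Float32Array(typed_array.buffer)` views the same bytes as floats again. A small self-contained sketch of that reinterpretation (the helper name `f32_as_bytes` is ours, not the backend's):

```rust
use std::mem;
use std::slice::from_raw_parts;

// View a slice of f32 samples as raw bytes without copying, mirroring the
// cast performed in `audio_callback_fn`.
fn f32_as_bytes(samples: &[f32]) -> &[u8] {
    unsafe {
        from_raw_parts(
            samples.as_ptr() as *const u8,
            samples.len() * mem::size_of::<f32>(),
        )
    }
}

fn main() {
    let samples = vec![0.0_f32, 0.5, -0.5, 1.0];
    let bytes = f32_as_bytes(&samples);
    assert_eq!(bytes.len(), samples.len() * 4);
    // In the backend these bytes become a `TypedArray<u8>`, and the JS side
    // rebuilds the floats with `new Float32Array(typed_array.buffer)`.
    println!("{} bytes for {} samples", bytes.len(), samples.len());
}
```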

The second changed file updates the platform module's re-exports to match:

```diff
@@ -435,9 +435,8 @@ mod platform_impl {
     pub use crate::host::emscripten::{
         Device as EmscriptenDevice,
         Devices as EmscriptenDevices,
-        EventLoop as EmscriptenEventLoop,
         Host as EmscriptenHost,
-        StreamId as EmscriptenStreamId,
+        Stream as EmscriptenStream,
         SupportedInputFormats as EmscriptenSupportedInputFormats,
         SupportedOutputFormats as EmscriptenSupportedOutputFormats,
     };
```
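
For context, a caller now drives this backend through the per-stream traits rather than through an `EventLoop`. The following is a schematic sketch written against the signatures in the diff above; it uses crate-internal names, elides error handling with `expect`, and is not runnable outside the crate:

```rust
use traits::{DeviceTrait, HostTrait, StreamTrait};

fn play_output_stream() {
    let host = Host::new().expect("emscripten host unavailable");
    let device = host.default_output_device().expect("no default output device");
    let format = device.default_output_format().expect("no default output format");

    // The data callback belongs to this stream alone; each stream built this
    // way runs its own `set_timeout` callback loop.
    let stream = device
        .build_output_stream(
            &format,
            |_data: StreamData| {
                // Write samples into the `StreamData::Output` buffer here.
            },
            |_err| {
                // Handle a `StreamError` here.
            },
        )
        .expect("failed to build output stream");

    stream.play().expect("failed to play the stream");
}
```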