cpal/src/host/emscripten/mod.rs


use std::mem;
use std::os::raw::c_void;
use std::slice::from_raw_parts;
use stdweb;
use stdweb::unstable::TryInto;
use stdweb::web::set_timeout;
use stdweb::web::TypedArray;
use stdweb::Reference;
use crate::{
    BuildStreamError, Data, DefaultStreamConfigError, DeviceNameError, DevicesError,
    InputCallbackInfo, OutputCallbackInfo, PauseStreamError, PlayStreamError, SampleFormat,
    StreamConfig, StreamError, SupportedStreamConfig, SupportedStreamConfigRange,
    SupportedStreamConfigsError,
};
use crate::traits::{DeviceTrait, HostTrait, StreamTrait};

// The emscripten backend currently works by instantiating an `AudioContext` object per `Stream`.
// Creating a stream creates a new `AudioContext`. Destroying a stream destroys it. Creation of a
// `Host` instance initializes the `stdweb` context.
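//
// A minimal usage sketch, assuming this backend is the host cpal selects on the
// `wasm32-unknown-emscripten` target. Everything below is cpal's public API rather than items
// from this module; it is an untested illustration, not part of the backend itself:
//
//     use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
//
//     let host = cpal::default_host();
//     let device = host.default_output_device().expect("no output device available");
//     let config: cpal::StreamConfig = device.default_output_config().unwrap().into();
//     let stream = device
//         .build_output_stream(
//             &config,
//             |data: &mut [f32], _info: &cpal::OutputCallbackInfo| {
//                 // Fill `data` with interleaved `f32` samples here.
//             },
//             |err| eprintln!("stream error: {}", err),
//         )
//         .unwrap();
//     stream.play().unwrap();
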
/// The default emscripten host type.
#[derive(Debug)]
pub struct Host;

/// Content is false if the iterator is empty.
pub struct Devices(bool);

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Device;

pub struct Stream {
    // A reference to an `AudioContext` object.
    audio_ctxt_ref: Reference,
}

// Index within the `streams` array of the events loop.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct StreamId(usize);

pub type SupportedInputConfigs = ::std::vec::IntoIter<SupportedStreamConfigRange>;
pub type SupportedOutputConfigs = ::std::vec::IntoIter<SupportedStreamConfigRange>;

impl Host {
    pub fn new() -> Result<Self, crate::HostUnavailable> {
        stdweb::initialize();
        Ok(Host)
    }
}

impl Devices {
    fn new() -> Result<Self, DevicesError> {
        Ok(Self::default())
    }
}

impl Device {
    #[inline]
    fn name(&self) -> Result<String, DeviceNameError> {
        Ok("Default Device".to_owned())
    }

    #[inline]
    fn supported_input_configs(
        &self,
    ) -> Result<SupportedInputConfigs, SupportedStreamConfigsError> {
        unimplemented!();
    }

    #[inline]
    fn supported_output_configs(
        &self,
    ) -> Result<SupportedOutputConfigs, SupportedStreamConfigsError> {
        // TODO: right now cpal's API doesn't allow flexibility here.
        // "44100" (sample rate) and "2" (channels) have also been hard-coded in the rest of the
        // code; if this ever becomes more flexible, don't forget to change that.
        // According to https://developer.mozilla.org/en-US/docs/Web/API/BaseAudioContext/createBuffer
        // browsers must support at least 1 to 32 channels and sample rates of 8,000 Hz to 96,000 Hz.
        //
        // UPDATE: We can do this now. Might be best to use `crate::COMMON_SAMPLE_RATES` and
        // filter out those that lie outside the range specified above.
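        //
        // A rough sketch of that approach (untested; it assumes `crate::COMMON_SAMPLE_RATES` is
        // a slice of `SampleRate` values, as elsewhere in cpal):
        //
        //     let configs: Vec<_> = crate::COMMON_SAMPLE_RATES
        //         .iter()
        //         .filter(|rate| rate.0 >= 8_000 && rate.0 <= 96_000)
        //         .map(|&rate| SupportedStreamConfigRange {
        //             channels: 2,
        //             min_sample_rate: rate,
        //             max_sample_rate: rate,
        //             sample_format: SampleFormat::F32,
        //         })
        //         .collect();
        //     return Ok(configs.into_iter());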
        Ok(vec![SupportedStreamConfigRange {
            channels: 2,
            min_sample_rate: crate::SampleRate(44100),
            max_sample_rate: crate::SampleRate(44100),
            sample_format: SampleFormat::F32,
        }]
        .into_iter())
    }

    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        unimplemented!();
    }

    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        // TODO: because it is hard-coded, see `supported_output_configs`.
        Ok(SupportedStreamConfig {
            channels: 2,
            sample_rate: crate::SampleRate(44100),
            sample_format: SampleFormat::F32,
        })
    }
}

impl HostTrait for Host {
    type Devices = Devices;
    type Device = Device;

    fn is_available() -> bool {
        // Assume this host is always available on emscripten.
        true
    }

    fn devices(&self) -> Result<Self::Devices, DevicesError> {
        Devices::new()
    }

    fn default_input_device(&self) -> Option<Self::Device> {
        default_input_device()
    }

    fn default_output_device(&self) -> Option<Self::Device> {
        default_output_device()
    }
}

impl DeviceTrait for Device {
    type SupportedInputConfigs = SupportedInputConfigs;
    type SupportedOutputConfigs = SupportedOutputConfigs;
    type Stream = Stream;

    fn name(&self) -> Result<String, DeviceNameError> {
        Device::name(self)
    }

    fn supported_input_configs(
        &self,
    ) -> Result<Self::SupportedInputConfigs, SupportedStreamConfigsError> {
        Device::supported_input_configs(self)
    }

    fn supported_output_configs(
        &self,
    ) -> Result<Self::SupportedOutputConfigs, SupportedStreamConfigsError> {
        Device::supported_output_configs(self)
    }

    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        Device::default_input_config(self)
    }

    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        Device::default_output_config(self)
    }

    fn build_input_stream_raw<D, E>(
        &self,
        _config: &StreamConfig,
        _sample_format: SampleFormat,
        _data_callback: D,
        _error_callback: E,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        unimplemented!()
    }

    fn build_output_stream_raw<D, E>(
        &self,
        _config: &StreamConfig,
        sample_format: SampleFormat,
        data_callback: D,
        error_callback: E,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        assert_eq!(
            sample_format,
            SampleFormat::F32,
            "emscripten backend currently only supports `f32` data",
        );

        // Create the stream.
        let audio_ctxt_ref = js!(return new AudioContext()).into_reference().unwrap();
        let stream = Stream { audio_ctxt_ref };

        // Specify the callback.
        let mut user_data = (self, data_callback, error_callback);
        let user_data_ptr = &mut user_data as *mut (_, _, _);

        // Use `set_timeout` to invoke a Rust callback repeatedly.
        //
        // The job of this callback is to fill the content of the audio buffers.
        //
        // See also: The call to `set_timeout` at the end of `audio_callback_fn`, which creates
        // the loop.
        set_timeout(
            || audio_callback_fn::<D, E>(user_data_ptr as *mut c_void),
            10,
        );

        Ok(stream)
    }
}
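
// For context: user code normally calls the typed `DeviceTrait::build_output_stream` method
// provided by cpal, which wraps `build_output_stream_raw` roughly as sketched below. This is a
// simplified illustration, not the exact cpal implementation; it assumes `Sample::FORMAT` and
// `Data::as_slice_mut` as found in cpal's public API:
//
//     fn build_output_stream<T, D, E>(
//         &self,
//         config: &StreamConfig,
//         mut data_callback: D,
//         error_callback: E,
//     ) -> Result<Self::Stream, BuildStreamError>
//     where
//         T: Sample,
//         D: FnMut(&mut [T], &OutputCallbackInfo) + Send + 'static,
//         E: FnMut(StreamError) + Send + 'static,
//     {
//         self.build_output_stream_raw(
//             config,
//             T::FORMAT,
//             move |data, info| {
//                 let slice = data.as_slice_mut().expect("unexpected sample format");
//                 data_callback(slice, info)
//             },
//             error_callback,
//         )
//     }
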
impl StreamTrait for Stream {
    fn play(&self) -> Result<(), PlayStreamError> {
        let audio_ctxt = &self.audio_ctxt_ref;
        js!(@{audio_ctxt}.resume());
        Ok(())
    }

    fn pause(&self) -> Result<(), PauseStreamError> {
        let audio_ctxt = &self.audio_ctxt_ref;
        js!(@{audio_ctxt}.suspend());
        Ok(())
    }
}

// The first argument of the callback function (a `void*`) is a cast pointer to `self`
// and to the callbacks that were passed to `build_output_stream_raw`.
fn audio_callback_fn<D, E>(user_data_ptr: *mut c_void)
where
    D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
    E: FnMut(StreamError) + Send + 'static,
{
    unsafe {
        let user_data_ptr2 = user_data_ptr as *mut (&Stream, D, E);
        let user_data = &mut *user_data_ptr2;
        let (ref stream, ref mut data_cb, ref mut _err_cb) = user_data;
        let audio_ctxt = &stream.audio_ctxt_ref;

        // TODO: We should be re-using a buffer.
        // 44100 Hz * 2 channels / 3 is roughly 330 ms of interleaved `f32` samples, matching
        // the `set_timeout` interval below.
        let mut temporary_buffer = vec![0.0; 44100 * 2 / 3];

        {
            let len = temporary_buffer.len();
            let data = temporary_buffer.as_mut_ptr() as *mut ();
            let sample_format = SampleFormat::F32;
            let mut data = Data::from_parts(data, len, sample_format);
            let info = OutputCallbackInfo {};
            data_cb(&mut data, &info);
        }

        // TODO: directly use a TypedArray<f32> once this is supported by stdweb
        let typed_array = {
            let f32_slice = temporary_buffer.as_slice();
            let u8_slice: &[u8] = from_raw_parts(
                f32_slice.as_ptr() as *const _,
                f32_slice.len() * mem::size_of::<f32>(),
            );
            let typed_array: TypedArray<u8> = u8_slice.into();
            typed_array
        };

        let num_channels = 2u32; // TODO: correct value
        debug_assert_eq!(temporary_buffer.len() % num_channels as usize, 0);

        // De-interleave the samples into one `Float32Array` per channel and play them through
        // a one-shot `AudioBufferSourceNode`.
        js!(
            var src_buffer = new Float32Array(@{typed_array}.buffer);
            var context = @{audio_ctxt};
            var buf_len = @{temporary_buffer.len() as u32};
            var num_channels = @{num_channels};

            var buffer = context.createBuffer(num_channels, buf_len / num_channels, 44100);
            for (var channel = 0; channel < num_channels; ++channel) {
                var buffer_content = buffer.getChannelData(channel);
                for (var i = 0; i < buf_len / num_channels; ++i) {
                    buffer_content[i] = src_buffer[i * num_channels + channel];
                }
            }

            var node = context.createBufferSource();
            node.buffer = buffer;
            node.connect(context.destination);
            node.start();
        );

        // TODO: handle latency better; right now we just re-arm a `set_timeout` with the amount
        // of sound data that is in each buffer; this is obviously bad, and the schedule is also
        // too tight, so there may be underflows.
        set_timeout(|| audio_callback_fn::<D, E>(user_data_ptr), 330);
    }
}

impl Default for Devices {
    fn default() -> Devices {
        // We produce an empty iterator if the WebAudio API isn't available.
        Devices(is_webaudio_available())
    }
}

impl Iterator for Devices {
    type Item = Device;

    #[inline]
    fn next(&mut self) -> Option<Device> {
        if self.0 {
            self.0 = false;
            Some(Device)
        } else {
            None
        }
    }
}

#[inline]
fn default_input_device() -> Option<Device> {
    unimplemented!();
}

#[inline]
fn default_output_device() -> Option<Device> {
    if is_webaudio_available() {
        Some(Device)
    } else {
        None
    }
}

// Detects whether the `AudioContext` global variable is available.
fn is_webaudio_available() -> bool {
    stdweb::initialize();

    js!(if (!AudioContext) {
        return false;
    } else {
        return true;
    })
    .try_into()
    .unwrap()
}