Docs and style improvements (#174)

* Improve the crate root documentation

* Add entry in CHANGELOG

* Run rustfmt on the code

* More improvements to documentation
Authored by tomaka on 2017-10-23 16:41:38 +02:00; committed by GitHub
parent c524f63000
commit 91adc3e380
10 changed files with 271 additions and 145 deletions

View File

@@ -1,6 +1,7 @@
 # Unreleased

 - Changed the emscripten backend to consume less CPU.
+- Added improvements to the crate documentation.

 # Version 0.5.1 (2017-10-21)

View File

@@ -1,7 +1,7 @@
 use super::Endpoint;
 use super::alsa;
-use super::libc;
 use super::check_errors;
+use super::libc;

 use std::ffi::CStr;
 use std::ffi::CString;

View File

@@ -67,9 +67,7 @@ impl Drop for Trigger {
 pub struct Endpoint(String);

 impl Endpoint {
-    pub fn supported_formats(
-        &self)
-        -> Result<SupportedFormatsIterator, FormatsEnumerationError> {
+    pub fn supported_formats(&self) -> Result<SupportedFormatsIterator, FormatsEnumerationError> {
         unsafe {
             let mut playback_handle = mem::uninitialized();
             let device_name = ffi::CString::new(self.0.clone()).expect("Unable to get device name");
@@ -105,8 +103,7 @@ impl Endpoint {
                 SND_PCM_FORMAT_S32_BE,
                 SND_PCM_FORMAT_U32_LE,
                 SND_PCM_FORMAT_U32_BE,*/
-                (SampleFormat::F32, alsa::SND_PCM_FORMAT_FLOAT_LE)
-                /*SND_PCM_FORMAT_FLOAT_BE,
+                (SampleFormat::F32, alsa::SND_PCM_FORMAT_FLOAT_LE) /*SND_PCM_FORMAT_FLOAT_BE,
                 SND_PCM_FORMAT_FLOAT64_LE,
                 SND_PCM_FORMAT_FLOAT64_BE,
                 SND_PCM_FORMAT_IEC958_SUBFRAME_LE,
@@ -154,7 +151,11 @@ impl Endpoint {
             let samples_rates = if min_rate == max_rate {
                 vec![(min_rate, max_rate)]
-            } else if alsa::snd_pcm_hw_params_test_rate(playback_handle, hw_params.0, min_rate + 1, 0) == 0 {
+            } else if alsa::snd_pcm_hw_params_test_rate(playback_handle,
+                                                        hw_params.0,
+                                                        min_rate + 1,
+                                                        0) == 0
+            {
                 vec![(min_rate, max_rate)]
             } else {
                 const RATES: [libc::c_uint; 13] = [
@@ -252,7 +253,7 @@ impl Endpoint {
 pub struct EventLoop {
     // Each newly-created voice gets a new ID from this counter. The counter is then incremented.
     next_voice_id: AtomicUsize, // TODO: use AtomicU64 when stable?

     // A trigger that uses a `pipe()` as backend. Signalled whenever a new command is ready, so
     // that `poll()` can wake up and pick the changes.
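The `Trigger` mentioned in this comment is the classic self-pipe trick: the run loop `poll()`s the pipe's read end alongside the PCM descriptors, and any thread can wake it up by writing one byte. A minimal standalone sketch of that pattern (illustrative only, built directly on the `libc` crate; the method names mirror the comment above but this is not the crate's actual private type):

```rust
// Illustrative self-pipe wakeup, similar in spirit to the `Trigger` used by this backend.
extern crate libc;

struct Trigger {
    read_fd: libc::c_int,
    write_fd: libc::c_int,
}

impl Trigger {
    fn new() -> Trigger {
        let mut fds = [0 as libc::c_int; 2];
        // Create the pipe; the read end is later registered with poll().
        assert_eq!(unsafe { libc::pipe(fds.as_mut_ptr()) }, 0);
        Trigger { read_fd: fds[0], write_fd: fds[1] }
    }

    /// Wakes up whoever is blocked in `poll()` on `read_fd`.
    fn wakeup(&self) {
        let byte = 1u8;
        unsafe { libc::write(self.write_fd, &byte as *const _ as *const libc::c_void, 1) };
    }

    /// Drains the pipe so the next `poll()` blocks again.
    fn clear_pipe(&self) {
        let mut byte = 0u8;
        unsafe { libc::read(self.read_fd, &mut byte as *mut _ as *mut libc::c_void, 1) };
    }
}

fn main() {
    let trigger = Trigger::new();
    trigger.wakeup();

    // Block until the read end becomes readable, exactly like the event loop does
    // before it re-reads its command queue.
    let mut fd = libc::pollfd { fd: trigger.read_fd, events: libc::POLLIN, revents: 0 };
    let ret = unsafe { libc::poll(&mut fd, 1, -1) };
    assert_eq!(ret, 1);
    trigger.clear_pipe();
}
```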
@@ -325,9 +326,9 @@ impl EventLoop {
         let pending_trigger = Trigger::new();

         let run_context = Mutex::new(RunContext {
             descriptors: Vec::new(), // TODO: clearify in doc initial value not necessary
             voices: Vec::new(),
         });

         EventLoop {
             next_voice_id: AtomicUsize::new(0),
@@ -369,13 +370,14 @@ impl EventLoop {
                     fd: self.pending_trigger.read_fd(),
                     events: libc::POLLIN,
                     revents: 0,
-                }
+                },
             ];

             for voice in run_context.voices.iter() {
                 run_context.descriptors.reserve(voice.num_descriptors);
                 let len = run_context.descriptors.len();
                 let filled = alsa::snd_pcm_poll_descriptors(voice.channel,
-                                                            run_context.descriptors
+                                                            run_context
+                                                                .descriptors
                                                                 .as_mut_ptr()
                                                                 .offset(len as isize),
                                                             voice.num_descriptors as
@@ -413,9 +415,13 @@ impl EventLoop {
                     {
                         let num_descriptors = voice_inner.num_descriptors as libc::c_uint;
-                        check_errors(alsa::snd_pcm_poll_descriptors_revents(voice_inner.channel, run_context.descriptors
-                                                                            .as_mut_ptr().offset(i_descriptor),
-                                                                            num_descriptors, &mut revent)).unwrap();
+                        let desc_ptr =
+                            run_context.descriptors.as_mut_ptr().offset(i_descriptor);
+                        let res = alsa::snd_pcm_poll_descriptors_revents(voice_inner.channel,
+                                                                         desc_ptr,
+                                                                         num_descriptors,
+                                                                         &mut revent);
+                        check_errors(res).unwrap();
                     }

                     if (revent as libc::c_short & libc::POLLOUT) == 0 {
@@ -433,10 +439,12 @@ impl EventLoop {
                             // buffer underrun
                             voice_inner.buffer_len
                         } else if available < 0 {
-                            check_errors(available as libc::c_int).expect("buffer is not available");
+                            check_errors(available as libc::c_int)
+                                .expect("buffer is not available");
                             unreachable!()
                         } else {
-                            (available * voice_inner.num_channels as alsa::snd_pcm_sframes_t) as usize
+                            (available * voice_inner.num_channels as alsa::snd_pcm_sframes_t) as
+                                usize
                         }
                     };
@@ -473,9 +481,8 @@ impl EventLoop {
                         SampleFormat::F32 => {
                             let buffer = Buffer {
                                 voice_inner: voice_inner,
-                                buffer: iter::repeat(0.0) // we don't use mem::uninitialized in case of sNaN
-                                    .take(available)
-                                    .collect(),
+                                // Note that we don't use `mem::uninitialized` because of sNaN.
+                                buffer: iter::repeat(0.0).take(available).collect(),
                             };

                             UnknownTypeBuffer::F32(::Buffer { target: Some(buffer) })
@@ -580,7 +587,7 @@ impl EventLoop {
         };

         let new_voice_id = VoiceId(self.next_voice_id.fetch_add(1, Ordering::Relaxed));
         assert_ne!(new_voice_id.0, usize::max_value()); // check for overflows

         let voice_inner = VoiceInner {
             id: new_voice_id.clone(),
@@ -594,7 +601,10 @@ impl EventLoop {
             resume_trigger: Trigger::new(),
         };

-        self.commands.lock().unwrap().push(Command::NewVoice(voice_inner));
+        self.commands
+            .lock()
+            .unwrap()
+            .push(Command::NewVoice(voice_inner));
         self.pending_trigger.wakeup();
         Ok(new_voice_id)
     }
@@ -602,7 +612,10 @@ impl EventLoop {
     #[inline]
     pub fn destroy_voice(&self, voice_id: VoiceId) {
-        self.commands.lock().unwrap().push(Command::DestroyVoice(voice_id));
+        self.commands
+            .lock()
+            .unwrap()
+            .push(Command::DestroyVoice(voice_id));
         self.pending_trigger.wakeup();
     }
@@ -670,8 +683,9 @@ impl<'a, T> Buffer<'a, T> {
         unsafe {
             loop {
-                let result =
-                    alsa::snd_pcm_writei(self.voice_inner.channel, self.buffer.as_ptr() as *const _, to_write);
+                let result = alsa::snd_pcm_writei(self.voice_inner.channel,
+                                                  self.buffer.as_ptr() as *const _,
+                                                  to_write);

                 if result == -32 {
                     // buffer underrun
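A note on the `-32` check above: that value is ALSA's `-EPIPE`, which reports a buffer underrun. The usual recovery before retrying the write is `snd_pcm_prepare()`; the helper below is an illustrative sketch against the raw `alsa-sys` bindings, not code from this commit:

```rust
// Illustrative underrun recovery for the raw ALSA API (not part of this commit).
extern crate alsa_sys;
extern crate libc;

/// Returns `true` if the error was an underrun (-EPIPE) and the PCM was re-prepared,
/// in which case the caller can simply retry `snd_pcm_writei()`.
unsafe fn try_recover_underrun(pcm: *mut alsa_sys::snd_pcm_t, err: libc::c_long) -> bool {
    if err == -(libc::EPIPE as libc::c_long) {
        // snd_pcm_prepare() moves the device back to the PREPARED state after an xrun.
        alsa_sys::snd_pcm_prepare(pcm) == 0
    } else {
        false
    }
}

fn main() {}
```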

View File

@@ -26,9 +26,7 @@ pub use self::enumerate::{EndpointsIterator, SupportedFormatsIterator, default_e
 pub struct Endpoint;

 impl Endpoint {
-    pub fn supported_formats(
-        &self)
-        -> Result<SupportedFormatsIterator, FormatsEnumerationError> {
+    pub fn supported_formats(&self) -> Result<SupportedFormatsIterator, FormatsEnumerationError> {
         Ok(
             vec![
                 SupportedFormat {
@@ -70,9 +68,7 @@ impl EventLoop {
     #[inline]
     pub fn new() -> EventLoop {
         EventLoop {
-            active_callbacks: Arc::new(ActiveCallbacks {
-                callbacks: Mutex::new(Vec::new()),
-            }),
+            active_callbacks: Arc::new(ActiveCallbacks { callbacks: Mutex::new(Vec::new()) }),
             voices: Mutex::new(Vec::new()),
         }
     }
@@ -82,7 +78,11 @@ impl EventLoop {
         where F: FnMut(VoiceId, UnknownTypeBuffer)
     {
         let callback: &mut FnMut(VoiceId, UnknownTypeBuffer) = &mut callback;
-        self.active_callbacks.callbacks.lock().unwrap().push(unsafe { mem::transmute(callback) });
+        self.active_callbacks
+            .callbacks
+            .lock()
+            .unwrap()
+            .push(unsafe { mem::transmute(callback) });

         loop {
             // So the loop does not get optimised out in --release
@@ -95,8 +95,7 @@ impl EventLoop {
     #[inline]
     pub fn build_voice(&self, endpoint: &Endpoint, format: &Format)
-                       -> Result<VoiceId, CreationError>
-    {
+                       -> Result<VoiceId, CreationError> {
         fn convert_error(err: coreaudio::Error) -> CreationError {
             match err {
                 coreaudio::Error::RenderCallbackBufferFormatDoesNotMatchAudioUnitStreamFormat |
@@ -110,9 +109,9 @@ impl EventLoop {
         let mut audio_unit = {
             let au_type = if cfg!(target_os = "ios") {
-                // The DefaultOutput unit isn't available in iOS unfortunately. RemoteIO is a sensible replacement.
-                // See
-                // https://developer.apple.com/library/content/documentation/MusicAudio/Conceptual/AudioUnitHostingGuide_iOS/UsingSpecificAudioUnits/UsingSpecificAudioUnits.html
+                // The DefaultOutput unit isn't available in iOS unfortunately.
+                // RemoteIO is a sensible replacement.
+                // See https://goo.gl/CWwRTx
                 coreaudio::audio_unit::IOType::RemoteIO
             } else {
                 coreaudio::audio_unit::IOType::DefaultOutput
@@ -123,7 +122,10 @@ impl EventLoop {
         // Determine the future ID of the voice.
         let mut voices_lock = self.voices.lock().unwrap();
-        let voice_id = voices_lock.iter().position(|n| n.is_none()).unwrap_or(voices_lock.len());
+        let voice_id = voices_lock
+            .iter()
+            .position(|n| n.is_none())
+            .unwrap_or(voices_lock.len());

         // TODO: iOS uses integer and fixed-point data

View File

@@ -5,8 +5,8 @@ use std::sync::Mutex;
 use stdweb;
 use stdweb::Reference;
 use stdweb::unstable::TryInto;
-use stdweb::web::set_timeout;
 use stdweb::web::TypedArray;
+use stdweb::web::set_timeout;

 use CreationError;
 use Format;
@@ -33,9 +33,7 @@ impl EventLoop {
     pub fn new() -> EventLoop {
         stdweb::initialize();

-        EventLoop {
-            voices: Mutex::new(Vec::new()),
-        }
+        EventLoop { voices: Mutex::new(Vec::new()) }
     }

     #[inline]
@@ -68,7 +66,8 @@ impl EventLoop {
                     voice: &voice,
                 };

-                user_cb(VoiceId(voice_id), ::UnknownTypeBuffer::F32(::Buffer { target: Some(buffer) }));
+                user_cb(VoiceId(voice_id),
+                        ::UnknownTypeBuffer::F32(::Buffer { target: Some(buffer) }));
             }

             set_timeout(|| callback_fn::<F>(user_data_ptr), 330);
@@ -84,9 +83,7 @@ impl EventLoop {
     }

     #[inline]
-    pub fn build_voice(&self, _: &Endpoint, _format: &Format)
-                       -> Result<VoiceId, CreationError>
-    {
+    pub fn build_voice(&self, _: &Endpoint, _format: &Format) -> Result<VoiceId, CreationError> {
         let voice = js!(return new AudioContext()).into_reference().unwrap();

         let mut voices = self.voices.lock().unwrap();
@@ -110,14 +107,20 @@ impl EventLoop {
     #[inline]
     pub fn play(&self, voice_id: VoiceId) {
         let voices = self.voices.lock().unwrap();
-        let voice = voices.get(voice_id.0).and_then(|v| v.as_ref()).expect("invalid voice ID");
+        let voice = voices
+            .get(voice_id.0)
+            .and_then(|v| v.as_ref())
+            .expect("invalid voice ID");
         js!(@{voice}.resume());
     }

     #[inline]
     pub fn pause(&self, voice_id: VoiceId) {
         let voices = self.voices.lock().unwrap();
-        let voice = voices.get(voice_id.0).and_then(|v| v.as_ref()).expect("invalid voice ID");
+        let voice = voices
+            .get(voice_id.0)
+            .and_then(|v| v.as_ref())
+            .expect("invalid voice ID");
         js!(@{voice}.suspend());
     }
 }
@@ -130,9 +133,12 @@ pub struct VoiceId(usize);
 fn is_webaudio_available() -> bool {
     stdweb::initialize();

-    js!(
-        if (!AudioContext) { return false; } else { return true; }
-    ).try_into().unwrap()
+    js!(if (!AudioContext) {
+            return false;
+        } else {
+            return true;
+        }).try_into()
+        .unwrap()
 }

 // Content is false if the iterator is empty.
@@ -170,18 +176,20 @@ pub struct Endpoint;
 impl Endpoint {
     #[inline]
-    pub fn supported_formats(
-        &self)
-        -> Result<SupportedFormatsIterator, FormatsEnumerationError> {
+    pub fn supported_formats(&self) -> Result<SupportedFormatsIterator, FormatsEnumerationError> {
         // TODO: right now cpal's API doesn't allow flexibility here
         // "44100" and "2" (channels) have also been hard-coded in the rest of the code ; if
         // this ever becomes more flexible, don't forget to change that
-        Ok(vec![SupportedFormat {
-                    channels: vec![::ChannelPosition::BackLeft, ::ChannelPosition::BackRight],
-                    min_samples_rate: ::SamplesRate(44100),
-                    max_samples_rate: ::SamplesRate(44100),
-                    data_type: ::SampleFormat::F32,
-                }].into_iter())
+        Ok(
+            vec![
+                SupportedFormat {
+                    channels: vec![::ChannelPosition::BackLeft, ::ChannelPosition::BackRight],
+                    min_samples_rate: ::SamplesRate(44100),
+                    max_samples_rate: ::SamplesRate(44100),
+                    data_type: ::SampleFormat::F32,
+                },
+            ].into_iter(),
+        )
     }

     #[inline]
@@ -192,12 +200,16 @@ impl Endpoint {
 pub type SupportedFormatsIterator = ::std::vec::IntoIter<SupportedFormat>;

-pub struct Buffer<'a, T: 'a> where T: Sample {
+pub struct Buffer<'a, T: 'a>
+    where T: Sample
+{
     temporary_buffer: Vec<T>,
     voice: &'a Reference,
 }

-impl<'a, T> Buffer<'a, T> where T: Sample {
+impl<'a, T> Buffer<'a, T>
+    where T: Sample
+{
     #[inline]
     pub fn buffer(&mut self) -> &mut [T] {
         &mut self.temporary_buffer
@@ -214,12 +226,15 @@ impl<'a, T> Buffer<'a, T> where T: Sample {
         let typed_array = {
             let t_slice: &[T] = self.temporary_buffer.as_slice();
-            let u8_slice: &[u8] = unsafe { from_raw_parts(t_slice.as_ptr() as *const _, t_slice.len() * mem::size_of::<T>()) };
+            let u8_slice: &[u8] = unsafe {
+                from_raw_parts(t_slice.as_ptr() as *const _,
+                               t_slice.len() * mem::size_of::<T>())
+            };
             let typed_array: TypedArray<u8> = u8_slice.into();
             typed_array
         };

         let num_channels = 2u32; // TODO: correct value
         debug_assert_eq!(self.temporary_buffer.len() % num_channels as usize, 0);

         js!(

View File

@@ -1,42 +1,113 @@
-/*!
-# How to use cpal
-
-In order to play a sound, first you need to create an `EventLoop` and a voice.
-
-```no_run
-// getting the default sound output of the system (can return `None` if nothing is supported)
-let endpoint = cpal::default_endpoint().unwrap();
-
-// note that the user can at any moment disconnect the device, therefore all operations return
-// a `Result` to handle this situation
-
-// getting a format for the PCM
-let supported_formats_range = endpoint.supported_formats().unwrap().next().unwrap();
-let format = supported_formats_range.with_max_samples_rate();
-
-let event_loop = cpal::EventLoop::new();
-
-let voice_id = event_loop.build_voice(&endpoint, &format).unwrap();
-event_loop.play(voice_id);
-```
-
-`voice_id` is an identifier for the voice can be used to control the play/pause of the output.
-
-Once that's done, you can call `run()` on the `event_loop`.
-
-```no_run
-# let event_loop = cpal::EventLoop::new();
-event_loop.run(move |_voice_id, _buffer| {
-    // write data to `buffer` here
-});
-```
-
-Calling `run()` will block the thread forever, so it's usually best done in a separate thread.
-
-While `run()` is running, the audio device of the user will call the callbacks you registered
-from time to time.
-*/
+//! # How to use cpal
+//!
+//! Here are some concepts cpal exposes:
+//!
+//! - An endpoint is a target where the data of the audio channel will be played.
+//! - A voice is an open audio channel which you can stream audio data to. You have to choose which
+//!   endpoint your voice targets before you create one.
+//! - An event loop is a collection of voices. Each voice must belong to an event loop, and all the
+//!   voices that belong to an event loop are managed together.
+//!
+//! In order to play a sound, you first need to create an event loop:
+//!
+//! ```
+//! use cpal::EventLoop;
+//! let event_loop = EventLoop::new();
+//! ```
+//!
+//! Then choose an endpoint. You can either use the default endpoint with the `default_endpoint()`
+//! function, or enumerate all the available endpoints with the `endpoints()` function. Beware that
+//! `default_endpoint()` returns an `Option` in case no endpoint is available on the system.
+//!
+//! ```
+//! // Note: we call `unwrap()` because it is convenient, but you should avoid doing that in a real
+//! // code.
+//! let endpoint = cpal::default_endpoint().expect("no endpoint is available");
+//! ```
+//!
+//! Before we can create a voice, we must decide what the format of the audio samples is going to
+//! be. You can query all the supported formats with the `supported_formats()` method, which
+//! produces a list of `SupportedFormat` structs which can later be turned into actual `Format`
+//! structs. If you don't want to query the list of formats, you can also build your own `Format`
+//! manually, but doing so could lead to an error when building the voice if the format ends up not
+//! being supported.
+//!
+//! > **Note**: the `supported_formats()` method could return an error for example if the device
+//! > has been disconnected.
+//!
+//! ```no_run
+//! # let endpoint = cpal::default_endpoint().unwrap();
//! let mut supported_formats_range = endpoint.supported_formats()
+//!     .expect("error while querying formats");
+//! let format = supported_formats_range.next().expect("no supported format?!")
+//!     .with_max_samples_rate();
+//! ```
+//!
+//! Now that we have everything, we can create a voice from that event loop:
+//!
+//! ```no_run
+//! # let endpoint = cpal::default_endpoint().unwrap();
+//! # let format = endpoint.supported_formats().unwrap().next().unwrap().with_max_samples_rate();
+//! # let event_loop = cpal::EventLoop::new();
+//! let voice_id = event_loop.build_voice(&endpoint, &format).unwrap();
+//! ```
+//!
+//! The value returned by `build_voice()` is of type `VoiceId` and is an identifier that will
+//! allow you to control the voice.
+//!
+//! There is a last step to perform before going forward, which is to start the voice. This is done
+//! with the `play()` method on the event loop.
+//!
+//! ```
+//! # let event_loop: cpal::EventLoop = return;
+//! # let voice_id: cpal::VoiceId = return;
+//! event_loop.play(voice_id);
+//! ```
+//!
+//! Once everything is done, you must call `run()` on the `event_loop`.
+//!
+//! ```no_run
+//! # let event_loop = cpal::EventLoop::new();
+//! event_loop.run(move |_voice_id, _buffer| {
+//!     // write data to `buffer` here
+//! });
+//! ```
+//!
+//! > **Note**: Calling `run()` will block the thread forever, so it's usually best done in a
+//! > separate thread.
+//!
+//! While `run()` is running, the audio device of the user will from time to time call the callback
+//! that you passed to this function. The callback gets passed the voice ID, and a struct of type
+//! `UnknownTypeBuffer` that represents the buffer that must be filled with audio samples. The
+//! `UnknownTypeBuffer` can be one of `I16`, `U16` or `F32` depending on the format that was passed
+//! to `build_voice`.
+//!
+//! In this example, we simply fill the buffer with zeroes.
+//!
+//! ```no_run
+//! use cpal::UnknownTypeBuffer;
+//!
+//! # let event_loop = cpal::EventLoop::new();
+//! event_loop.run(move |_voice_id, mut buffer| {
+//!     match buffer {
+//!         UnknownTypeBuffer::U16(mut buffer) => {
+//!             for elem in buffer.iter_mut() {
+//!                 *elem = u16::max_value() / 2;
+//!             }
+//!         },
+//!         UnknownTypeBuffer::I16(mut buffer) => {
+//!             for elem in buffer.iter_mut() {
+//!                 *elem = 0;
+//!             }
+//!         },
+//!         UnknownTypeBuffer::F32(mut buffer) => {
+//!             for elem in buffer.iter_mut() {
+//!                 *elem = 0.0;
+//!             }
+//!         },
+//!     }
+//! });
+//! ```

 #![recursion_limit = "512"]
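Putting the fragments of this new module documentation together, a complete (if silent) playback program looks roughly like the sketch below. It is assembled from the snippets above and is not an official example:

```rust
// Sketch: the full pipeline from the docs above, i.e. endpoint, format, voice, then run().
extern crate cpal;

use cpal::UnknownTypeBuffer;

fn main() {
    let event_loop = cpal::EventLoop::new();

    let endpoint = cpal::default_endpoint().expect("no endpoint is available");
    let format = endpoint
        .supported_formats()
        .expect("error while querying formats")
        .next()
        .expect("no supported format?!")
        .with_max_samples_rate();

    let voice_id = event_loop.build_voice(&endpoint, &format).expect("failed to build voice");
    event_loop.play(voice_id);

    // Blocks forever; run it on a dedicated thread in a real application.
    event_loop.run(move |_voice_id, buffer| {
        match buffer {
            UnknownTypeBuffer::U16(mut buffer) => {
                for elem in buffer.iter_mut() {
                    *elem = u16::max_value() / 2;
                }
            },
            UnknownTypeBuffer::I16(mut buffer) => {
                for elem in buffer.iter_mut() {
                    *elem = 0;
                }
            },
            UnknownTypeBuffer::F32(mut buffer) => {
                for elem in buffer.iter_mut() {
                    *elem = 0.0;
                }
            },
        }
    });
}
```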
@@ -78,6 +149,8 @@ mod cpal_impl;
 mod cpal_impl;

 /// An iterator for the list of formats that are supported by the backend.
+///
+/// See [`endpoints()`](fn.endpoints.html).
 pub struct EndpointsIterator(cpal_impl::EndpointsIterator);

 impl Iterator for EndpointsIterator {
@@ -95,6 +168,8 @@ impl Iterator for EndpointsIterator {
 }

 /// Return an iterator to the list of formats that are supported by the system.
+///
+/// Can be empty if the system doesn't support audio in general.
 #[inline]
 pub fn endpoints() -> EndpointsIterator {
     EndpointsIterator(Default::default())
@@ -120,12 +195,18 @@ pub fn get_default_endpoint() -> Option<Endpoint> {
     default_endpoint()
 }

-/// An opaque type that identifies an end point.
+/// An opaque type that identifies an endpoint that is capable of playing audio.
+///
+/// Please note that endpoints may become invalid if they get disconnected. Therefore all the
+/// methods that involve an endpoint return a `Result`.
 #[derive(Clone, PartialEq, Eq)]
 pub struct Endpoint(cpal_impl::Endpoint);

 impl Endpoint {
     /// Returns an iterator that produces the list of formats that are supported by the backend.
+    ///
+    /// Can return an error if the endpoint is no longer valid (eg. it has been disconnected).
+    /// The returned iterator should never be empty.
     #[inline]
     pub fn supported_formats(&self) -> Result<SupportedFormatsIterator, FormatsEnumerationError> {
         Ok(SupportedFormatsIterator(self.0.supported_formats()?))
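As a quick illustration of the endpoint API documented in this hunk (not part of the commit), enumerating the available endpoints and printing their names looks like this:

```rust
// Sketch: list the playback endpoints the system currently exposes.
extern crate cpal;

fn main() {
    for endpoint in cpal::endpoints() {
        // `name()` returns a `String`; whether it is the human-readable or the system name
        // is still an open TODO in the crate (see the next hunk).
        println!("endpoint: {}", endpoint.name());
    }

    match cpal::default_endpoint() {
        Some(endpoint) => println!("default endpoint: {}", endpoint.name()),
        None => println!("no default endpoint available"),
    }
}
```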
@@ -141,6 +222,7 @@ impl Endpoint {
     }

     /// Returns the name of the endpoint.
+    // TODO: human-readable or system name?
     #[inline]
     pub fn name(&self) -> String {
         self.0.name()
@@ -193,6 +275,8 @@ pub struct Format {
 }

 /// An iterator that produces a list of formats supported by the endpoint.
+///
+/// See [`Endpoint::supported_formats()`](struct.Endpoint.html#method.supported_formats).
 pub struct SupportedFormatsIterator(cpal_impl::SupportedFormatsIterator);

 impl Iterator for SupportedFormatsIterator {
@@ -209,17 +293,20 @@ impl Iterator for SupportedFormatsIterator {
     }
 }

-/// Describes a format.
+/// Describes a range of supported formats.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct SupportedFormat {
     pub channels: Vec<ChannelPosition>,
+    /// Minimum value for the samples rate of the supported formats.
     pub min_samples_rate: SamplesRate,
+    /// Maximum value for the samples rate of the supported formats.
     pub max_samples_rate: SamplesRate,
+    /// Type of data expected by the endpoint.
     pub data_type: SampleFormat,
 }

 impl SupportedFormat {
-    /// Builds a corresponding `Format` corresponding to the maximum samples rate.
+    /// Turns this `SupportedFormat` into a `Format` corresponding to the maximum samples rate.
     #[inline]
     pub fn with_max_samples_rate(self) -> Format {
         Format {
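To make the `SupportedFormat` range concrete, here is a short sketch (again not part of the commit) that prints every supported range of the default endpoint and then collapses one of them into a `Format`; `SupportedFormat` derives `Debug`, so it can be printed directly:

```rust
// Sketch: inspect the supported format ranges of the default endpoint, then pick one.
extern crate cpal;

fn main() {
    let endpoint = cpal::default_endpoint().expect("no endpoint is available");

    for supported in endpoint.supported_formats().expect("endpoint disconnected?") {
        // Prints the channel positions, the min/max samples rate and the data type.
        println!("{:?}", supported);
    }

    // Collapse one of the ranges into a concrete `Format` for `build_voice()`.
    let _format = endpoint
        .supported_formats()
        .unwrap()
        .next()
        .expect("the iterator should never be empty")
        .with_max_samples_rate();
}
```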
@@ -242,6 +329,9 @@ impl From<Format> for SupportedFormat {
     }
 }

+/// Collection of voices managed together.
+///
+/// Created with the [`new`](struct.EventLoop.html#method.new) method.
 pub struct EventLoop(cpal_impl::EventLoop);

 impl EventLoop {
@@ -254,10 +344,12 @@ impl EventLoop {
     /// Creates a new voice that will play on the given endpoint and with the given format.
     ///
     /// On success, returns an identifier for the voice.
+    ///
+    /// Can return an error if the endpoint is no longer valid, or if the format is not supported
+    /// by the endpoint.
     #[inline]
     pub fn build_voice(&self, endpoint: &Endpoint, format: &Format)
-                       -> Result<VoiceId, CreationError>
-    {
+                       -> Result<VoiceId, CreationError> {
         self.0.build_voice(&endpoint.0, format).map(VoiceId)
     }
@@ -274,9 +366,11 @@ impl EventLoop {
     /// Takes control of the current thread and processes the sounds.
     ///
+    /// > **Note**: Since it takes control of the thread, this method is best called on a separate
+    /// > thread.
+    ///
     /// Whenever a voice needs to be fed some data, the closure passed as parameter is called.
-    /// **Note**: Calling other methods of the events loop from the callback will most likely
-    /// deadlock. Don't do that. Maybe this will change in the future.
+    /// You can call the other methods of `EventLoop` without getting a deadlock.
     #[inline]
     pub fn run<F>(&self, mut callback: F) -> !
         where F: FnMut(VoiceId, UnknownTypeBuffer)
@@ -284,7 +378,7 @@ impl EventLoop {
         self.0.run(move |id, buf| callback(VoiceId(id), buf))
     }

-    /// Sends a command to the audio device that it should start playing.
+    /// Instructs the audio device that it should start playing.
     ///
     /// Has no effect is the voice was already playing.
     ///
@@ -300,11 +394,11 @@ impl EventLoop {
         self.0.play(voice.0)
     }

-    /// Sends a command to the audio device that it should stop playing.
+    /// Instructs the audio device that it should stop playing.
     ///
     /// Has no effect is the voice was already paused.
     ///
-    /// If you call `play` afterwards, the playback will resume exactly where it was.
+    /// If you call `play` afterwards, the playback will resume where it was.
     ///
     /// # Panic
     ///
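Since `play()` and `pause()` are documented as having no effect when the voice is already in the requested state, toggling a voice before entering `run()` is harmless. The sketch below illustrates that; it assumes the public `VoiceId` is `Clone`, which the backend code in this commit suggests but which is not shown in this diff:

```rust
// Sketch: a freshly built voice starts paused; `play()` starts it, `pause()` suspends it,
// and both are no-ops if the voice is already in the requested state.
extern crate cpal;

fn main() {
    let event_loop = cpal::EventLoop::new();
    let endpoint = cpal::default_endpoint().expect("no endpoint is available");
    let format = endpoint
        .supported_formats()
        .expect("endpoint disconnected?")
        .next()
        .expect("no supported format")
        .with_max_samples_rate();
    let voice_id = event_loop.build_voice(&endpoint, &format).expect("failed to build voice");

    event_loop.play(voice_id.clone());  // start playback
    event_loop.pause(voice_id.clone()); // suspend; playback will resume where it was
    event_loop.play(voice_id);          // resume

    event_loop.run(move |_voice_id, _buffer| {
        // fill `_buffer` here
    });
}
```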
@@ -405,8 +499,12 @@ impl Error for CreationError {
 /// Represents a buffer that must be filled with audio data.
 ///
-/// You should destroy this object as soon as possible. Data is only committed when it
-/// is destroyed.
+/// You should destroy this object as soon as possible. Data is only sent to the audio device when
+/// this object is destroyed.
+///
+/// This struct implements the `Deref` and `DerefMut` traits to `[T]`. Therefore writing to this
+/// buffer is done in the same way as writing to a `Vec` or any other kind of Rust array.
+// TODO: explain audio stuff in general
 #[must_use]
 pub struct Buffer<'a, T: 'a>
     where T: Sample
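Because of the `Deref`/`DerefMut` impls described above, the buffer behaves like a plain `[T]` slice inside the `run()` callback, so slice methods such as `chunks_mut()` work directly on it. A sketch (not from the commit; the channel count of 2 is a hypothetical value that should really come from the `Format` used to build the voice):

```rust
// Sketch: write interleaved silence frame by frame, using slice methods through DerefMut.
extern crate cpal;

use cpal::UnknownTypeBuffer;

fn main() {
    let event_loop = cpal::EventLoop::new();
    // Endpoint/format/voice setup elided; see the crate-level docs above.
    event_loop.run(move |_voice_id, buffer| {
        match buffer {
            UnknownTypeBuffer::F32(mut buffer) => {
                let channels = 2; // hypothetical; derive it from the `Format` you used
                for frame in buffer.chunks_mut(channels) {
                    for sample in frame.iter_mut() {
                        *sample = 0.0;
                    }
                }
            },
            // The other sample formats would be handled the same way.
            _ => (),
        }
    });
}
```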

View File

@@ -23,9 +23,7 @@ impl EventLoop {
     }

     #[inline]
-    pub fn build_voice(&self, _: &Endpoint, _: &Format)
-                       -> Result<VoiceId, CreationError>
-    {
+    pub fn build_voice(&self, _: &Endpoint, _: &Format) -> Result<VoiceId, CreationError> {
         Err(CreationError::DeviceNotAvailable)
     }
@@ -70,9 +68,7 @@ pub struct Endpoint;
 impl Endpoint {
     #[inline]
-    pub fn supported_formats(
-        &self)
-        -> Result<SupportedFormatsIterator, FormatsEnumerationError> {
+    pub fn supported_formats(&self) -> Result<SupportedFormatsIterator, FormatsEnumerationError> {
         unreachable!()
     }

View File

@@ -110,9 +110,7 @@ impl Endpoint {
         Ok(client)
     }

-    pub fn supported_formats(
-        &self)
-        -> Result<SupportedFormatsIterator, FormatsEnumerationError> {
+    pub fn supported_formats(&self) -> Result<SupportedFormatsIterator, FormatsEnumerationError> {
         // We always create voices in shared mode, therefore all samples go through an audio
         // processor to mix them together.
         // However there is no way to query the list of all formats that are supported by the
@@ -299,7 +297,7 @@ lazy_static! {
     // building the devices enumerator object
     unsafe {
         let mut enumerator: *mut winapi::IMMDeviceEnumerator = mem::uninitialized();

         let hresult = ole32::CoCreateInstance(&winapi::CLSID_MMDeviceEnumerator,
                                               ptr::null_mut(), winapi::CLSCTX_ALL,
                                               &winapi::IID_IMMDeviceEnumerator,

View File

@@ -4,7 +4,7 @@ extern crate kernel32;
 use std::io::Error as IoError;

-pub use self::endpoint::{Endpoint, EndpointsIterator, default_endpoint, SupportedFormatsIterator};
+pub use self::endpoint::{Endpoint, EndpointsIterator, SupportedFormatsIterator, default_endpoint};
 pub use self::voice::{Buffer, EventLoop, VoiceId};

 mod com;

View File

@@ -1,4 +1,3 @@
 use super::Endpoint;
 use super::check_result;
 use super::com;
@@ -10,9 +9,9 @@ use std::marker::PhantomData;
 use std::mem;
 use std::ptr;
 use std::slice;
-use std::sync::Mutex;
 use std::sync::atomic::AtomicUsize;
 use std::sync::atomic::Ordering;
+use std::sync::Mutex;

 use ChannelPosition;
 use CreationError;
@@ -81,9 +80,9 @@ impl EventLoop {
         EventLoop {
             pending_scheduled_event: pending_scheduled_event,
             run_context: Mutex::new(RunContext {
                 voices: Vec::new(),
                 handles: vec![pending_scheduled_event],
             }),
             next_voice_id: AtomicUsize::new(0),
             commands: Mutex::new(Vec::new()),
         }
@@ -226,20 +225,20 @@ impl EventLoop {
         };

         let new_voice_id = VoiceId(self.next_voice_id.fetch_add(1, Ordering::Relaxed));
         assert_ne!(new_voice_id.0, usize::max_value()); // check for overflows

         // Once we built the `VoiceInner`, we add a command that will be picked up by the
         // `run()` method and added to the `RunContext`.
         {
             let inner = VoiceInner {
                 id: new_voice_id.clone(),
                 audio_client: audio_client,
                 render_client: render_client,
                 event: event,
                 playing: false,
                 max_frames_in_buffer: max_frames_in_buffer,
                 bytes_per_frame: format.nBlockAlign,
             };

             self.commands.lock().unwrap().push(Command::NewVoice(inner));
@@ -254,7 +253,10 @@ impl EventLoop {
     #[inline]
     pub fn destroy_voice(&self, voice_id: VoiceId) {
         unsafe {
-            self.commands.lock().unwrap().push(Command::DestroyVoice(voice_id));
+            self.commands
+                .lock()
+                .unwrap()
+                .push(Command::DestroyVoice(voice_id));
             let result = kernel32::SetEvent(self.pending_scheduled_event);
             assert!(result != 0);
         }
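Both the ALSA and the WASAPI backends share the shape visible here: public methods push a `Command` onto a `Mutex<Vec<Command>>` and then poke a wakeup primitive (the pipe trigger on Linux, `SetEvent` on Windows) so the blocked `run()` loop picks the change up. A portable, simplified analogue of that shape, using a `Condvar` instead of an OS handle (illustrative only, not the crate's code):

```rust
// Illustrative command queue with a wakeup, mirroring the structure used by the backends.
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

enum Command {
    NewVoice(usize),
    DestroyVoice(usize),
    Shutdown, // the real event loop never stops; this only lets the example terminate
}

struct Shared {
    commands: Mutex<Vec<Command>>,
    // Stand-in for the pipe trigger / Win32 event that wakes the blocked `run()` loop.
    wakeup: Condvar,
}

fn main() {
    let shared = Arc::new(Shared { commands: Mutex::new(Vec::new()), wakeup: Condvar::new() });

    let worker_shared = shared.clone();
    let worker = thread::spawn(move || {
        let mut commands = worker_shared.commands.lock().unwrap();
        loop {
            // Drain whatever commands are pending, then go back to sleep until signalled.
            for command in commands.drain(..) {
                match command {
                    Command::NewVoice(id) => println!("add voice {}", id),
                    Command::DestroyVoice(id) => println!("remove voice {}", id),
                    Command::Shutdown => return,
                }
            }
            commands = worker_shared.wakeup.wait(commands).unwrap();
        }
    });

    // `build_voice()`/`destroy_voice()` only push a command and signal the wakeup primitive.
    for command in vec![Command::NewVoice(0), Command::DestroyVoice(0), Command::Shutdown] {
        shared.commands.lock().unwrap().push(command);
        shared.wakeup.notify_one();
    }

    worker.join().unwrap();
}
```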
@@ -356,8 +358,8 @@ impl EventLoop {
                     debug_assert!(!buffer.is_null());

                     (buffer as *mut _,
                      frames_available as usize * voice.bytes_per_frame as usize /
                          mem::size_of::<f32>()) // FIXME: correct size when not f32
                 };

                 let buffer = Buffer {
@@ -368,7 +370,7 @@ impl EventLoop {
                     marker: PhantomData,
                 };

                 let buffer = UnknownTypeBuffer::F32(::Buffer { target: Some(buffer) }); // FIXME: not always f32

                 callback(voice_id, buffer);
             }
         }