Docs and style improvements (#174)

* Improve the crate root documentation

* Add entry in CHANGELOG

* Run rustfmt on the code

* More improvements to documentation
This commit is contained in:
tomaka 2017-10-23 16:41:38 +02:00 committed by GitHub
parent c524f63000
commit 91adc3e380
10 changed files with 271 additions and 145 deletions

View File

@ -1,6 +1,7 @@
# Unreleased
- Changed the emscripten backend to consume less CPU.
- Added improvements to the crate documentation.
# Version 0.5.1 (2017-10-21)

View File

@ -1,7 +1,7 @@
use super::Endpoint;
use super::alsa;
use super::libc;
use super::check_errors;
use super::libc;
use std::ffi::CStr;
use std::ffi::CString;

View File

@ -67,9 +67,7 @@ impl Drop for Trigger {
pub struct Endpoint(String);
impl Endpoint {
pub fn supported_formats(
&self)
-> Result<SupportedFormatsIterator, FormatsEnumerationError> {
pub fn supported_formats(&self) -> Result<SupportedFormatsIterator, FormatsEnumerationError> {
unsafe {
let mut playback_handle = mem::uninitialized();
let device_name = ffi::CString::new(self.0.clone()).expect("Unable to get device name");
@ -105,8 +103,7 @@ impl Endpoint {
SND_PCM_FORMAT_S32_BE,
SND_PCM_FORMAT_U32_LE,
SND_PCM_FORMAT_U32_BE,*/
(SampleFormat::F32, alsa::SND_PCM_FORMAT_FLOAT_LE)
/*SND_PCM_FORMAT_FLOAT_BE,
(SampleFormat::F32, alsa::SND_PCM_FORMAT_FLOAT_LE) /*SND_PCM_FORMAT_FLOAT_BE,
SND_PCM_FORMAT_FLOAT64_LE,
SND_PCM_FORMAT_FLOAT64_BE,
SND_PCM_FORMAT_IEC958_SUBFRAME_LE,
@ -154,7 +151,11 @@ impl Endpoint {
let samples_rates = if min_rate == max_rate {
vec![(min_rate, max_rate)]
} else if alsa::snd_pcm_hw_params_test_rate(playback_handle, hw_params.0, min_rate + 1, 0) == 0 {
} else if alsa::snd_pcm_hw_params_test_rate(playback_handle,
hw_params.0,
min_rate + 1,
0) == 0
{
vec![(min_rate, max_rate)]
} else {
const RATES: [libc::c_uint; 13] = [
@ -369,13 +370,14 @@ impl EventLoop {
fd: self.pending_trigger.read_fd(),
events: libc::POLLIN,
revents: 0,
}
},
];
for voice in run_context.voices.iter() {
run_context.descriptors.reserve(voice.num_descriptors);
let len = run_context.descriptors.len();
let filled = alsa::snd_pcm_poll_descriptors(voice.channel,
run_context.descriptors
run_context
.descriptors
.as_mut_ptr()
.offset(len as isize),
voice.num_descriptors as
@ -413,9 +415,13 @@ impl EventLoop {
{
let num_descriptors = voice_inner.num_descriptors as libc::c_uint;
check_errors(alsa::snd_pcm_poll_descriptors_revents(voice_inner.channel, run_context.descriptors
.as_mut_ptr().offset(i_descriptor),
num_descriptors, &mut revent)).unwrap();
let desc_ptr =
run_context.descriptors.as_mut_ptr().offset(i_descriptor);
let res = alsa::snd_pcm_poll_descriptors_revents(voice_inner.channel,
desc_ptr,
num_descriptors,
&mut revent);
check_errors(res).unwrap();
}
if (revent as libc::c_short & libc::POLLOUT) == 0 {
@ -433,10 +439,12 @@ impl EventLoop {
// buffer underrun
voice_inner.buffer_len
} else if available < 0 {
check_errors(available as libc::c_int).expect("buffer is not available");
check_errors(available as libc::c_int)
.expect("buffer is not available");
unreachable!()
} else {
(available * voice_inner.num_channels as alsa::snd_pcm_sframes_t) as usize
(available * voice_inner.num_channels as alsa::snd_pcm_sframes_t) as
usize
}
};
@ -473,9 +481,8 @@ impl EventLoop {
SampleFormat::F32 => {
let buffer = Buffer {
voice_inner: voice_inner,
buffer: iter::repeat(0.0) // we don't use mem::uninitialized in case of sNaN
.take(available)
.collect(),
// Note that we don't use `mem::uninitialized` because of sNaN.
buffer: iter::repeat(0.0).take(available).collect(),
};
UnknownTypeBuffer::F32(::Buffer { target: Some(buffer) })
@ -594,7 +601,10 @@ impl EventLoop {
resume_trigger: Trigger::new(),
};
self.commands.lock().unwrap().push(Command::NewVoice(voice_inner));
self.commands
.lock()
.unwrap()
.push(Command::NewVoice(voice_inner));
self.pending_trigger.wakeup();
Ok(new_voice_id)
}
@ -602,7 +612,10 @@ impl EventLoop {
#[inline]
pub fn destroy_voice(&self, voice_id: VoiceId) {
self.commands.lock().unwrap().push(Command::DestroyVoice(voice_id));
self.commands
.lock()
.unwrap()
.push(Command::DestroyVoice(voice_id));
self.pending_trigger.wakeup();
}
@ -670,8 +683,9 @@ impl<'a, T> Buffer<'a, T> {
unsafe {
loop {
let result =
alsa::snd_pcm_writei(self.voice_inner.channel, self.buffer.as_ptr() as *const _, to_write);
let result = alsa::snd_pcm_writei(self.voice_inner.channel,
self.buffer.as_ptr() as *const _,
to_write);
if result == -32 {
// buffer underrun

View File

@ -26,9 +26,7 @@ pub use self::enumerate::{EndpointsIterator, SupportedFormatsIterator, default_e
pub struct Endpoint;
impl Endpoint {
pub fn supported_formats(
&self)
-> Result<SupportedFormatsIterator, FormatsEnumerationError> {
pub fn supported_formats(&self) -> Result<SupportedFormatsIterator, FormatsEnumerationError> {
Ok(
vec![
SupportedFormat {
@ -70,9 +68,7 @@ impl EventLoop {
#[inline]
pub fn new() -> EventLoop {
EventLoop {
active_callbacks: Arc::new(ActiveCallbacks {
callbacks: Mutex::new(Vec::new()),
}),
active_callbacks: Arc::new(ActiveCallbacks { callbacks: Mutex::new(Vec::new()) }),
voices: Mutex::new(Vec::new()),
}
}
@ -82,7 +78,11 @@ impl EventLoop {
where F: FnMut(VoiceId, UnknownTypeBuffer)
{
let callback: &mut FnMut(VoiceId, UnknownTypeBuffer) = &mut callback;
self.active_callbacks.callbacks.lock().unwrap().push(unsafe { mem::transmute(callback) });
self.active_callbacks
.callbacks
.lock()
.unwrap()
.push(unsafe { mem::transmute(callback) });
loop {
// So the loop does not get optimised out in --release
@ -95,8 +95,7 @@ impl EventLoop {
#[inline]
pub fn build_voice(&self, endpoint: &Endpoint, format: &Format)
-> Result<VoiceId, CreationError>
{
-> Result<VoiceId, CreationError> {
fn convert_error(err: coreaudio::Error) -> CreationError {
match err {
coreaudio::Error::RenderCallbackBufferFormatDoesNotMatchAudioUnitStreamFormat |
@ -110,9 +109,9 @@ impl EventLoop {
let mut audio_unit = {
let au_type = if cfg!(target_os = "ios") {
// The DefaultOutput unit isn't available in iOS unfortunately. RemoteIO is a sensible replacement.
// See
// https://developer.apple.com/library/content/documentation/MusicAudio/Conceptual/AudioUnitHostingGuide_iOS/UsingSpecificAudioUnits/UsingSpecificAudioUnits.html
// The DefaultOutput unit isn't available in iOS unfortunately.
// RemoteIO is a sensible replacement.
// See https://goo.gl/CWwRTx
coreaudio::audio_unit::IOType::RemoteIO
} else {
coreaudio::audio_unit::IOType::DefaultOutput
@ -123,7 +122,10 @@ impl EventLoop {
// Determine the future ID of the voice.
let mut voices_lock = self.voices.lock().unwrap();
let voice_id = voices_lock.iter().position(|n| n.is_none()).unwrap_or(voices_lock.len());
let voice_id = voices_lock
.iter()
.position(|n| n.is_none())
.unwrap_or(voices_lock.len());
// TODO: iOS uses integer and fixed-point data

View File

@ -5,8 +5,8 @@ use std::sync::Mutex;
use stdweb;
use stdweb::Reference;
use stdweb::unstable::TryInto;
use stdweb::web::set_timeout;
use stdweb::web::TypedArray;
use stdweb::web::set_timeout;
use CreationError;
use Format;
@ -33,9 +33,7 @@ impl EventLoop {
pub fn new() -> EventLoop {
stdweb::initialize();
EventLoop {
voices: Mutex::new(Vec::new()),
}
EventLoop { voices: Mutex::new(Vec::new()) }
}
#[inline]
@ -68,7 +66,8 @@ impl EventLoop {
voice: &voice,
};
user_cb(VoiceId(voice_id), ::UnknownTypeBuffer::F32(::Buffer { target: Some(buffer) }));
user_cb(VoiceId(voice_id),
::UnknownTypeBuffer::F32(::Buffer { target: Some(buffer) }));
}
set_timeout(|| callback_fn::<F>(user_data_ptr), 330);
@ -84,9 +83,7 @@ impl EventLoop {
}
#[inline]
pub fn build_voice(&self, _: &Endpoint, _format: &Format)
-> Result<VoiceId, CreationError>
{
pub fn build_voice(&self, _: &Endpoint, _format: &Format) -> Result<VoiceId, CreationError> {
let voice = js!(return new AudioContext()).into_reference().unwrap();
let mut voices = self.voices.lock().unwrap();
@ -110,14 +107,20 @@ impl EventLoop {
#[inline]
pub fn play(&self, voice_id: VoiceId) {
let voices = self.voices.lock().unwrap();
let voice = voices.get(voice_id.0).and_then(|v| v.as_ref()).expect("invalid voice ID");
let voice = voices
.get(voice_id.0)
.and_then(|v| v.as_ref())
.expect("invalid voice ID");
js!(@{voice}.resume());
}
#[inline]
pub fn pause(&self, voice_id: VoiceId) {
let voices = self.voices.lock().unwrap();
let voice = voices.get(voice_id.0).and_then(|v| v.as_ref()).expect("invalid voice ID");
let voice = voices
.get(voice_id.0)
.and_then(|v| v.as_ref())
.expect("invalid voice ID");
js!(@{voice}.suspend());
}
}
@ -130,9 +133,12 @@ pub struct VoiceId(usize);
fn is_webaudio_available() -> bool {
stdweb::initialize();
js!(
if (!AudioContext) { return false; } else { return true; }
).try_into().unwrap()
js!(if (!AudioContext) {
return false;
} else {
return true;
}).try_into()
.unwrap()
}
// Content is false if the iterator is empty.
@ -170,18 +176,20 @@ pub struct Endpoint;
impl Endpoint {
#[inline]
pub fn supported_formats(
&self)
-> Result<SupportedFormatsIterator, FormatsEnumerationError> {
pub fn supported_formats(&self) -> Result<SupportedFormatsIterator, FormatsEnumerationError> {
// TODO: right now cpal's API doesn't allow flexibility here
//! // "44100" and "2" (channels) have also been hard-coded in the rest of the code; if
// this ever becomes more flexible, don't forget to change that
Ok(vec![SupportedFormat {
Ok(
vec![
SupportedFormat {
channels: vec![::ChannelPosition::BackLeft, ::ChannelPosition::BackRight],
min_samples_rate: ::SamplesRate(44100),
max_samples_rate: ::SamplesRate(44100),
data_type: ::SampleFormat::F32,
}].into_iter())
},
].into_iter(),
)
}
#[inline]
@ -192,12 +200,16 @@ impl Endpoint {
pub type SupportedFormatsIterator = ::std::vec::IntoIter<SupportedFormat>;
pub struct Buffer<'a, T: 'a> where T: Sample {
pub struct Buffer<'a, T: 'a>
where T: Sample
{
temporary_buffer: Vec<T>,
voice: &'a Reference,
}
impl<'a, T> Buffer<'a, T> where T: Sample {
impl<'a, T> Buffer<'a, T>
where T: Sample
{
#[inline]
pub fn buffer(&mut self) -> &mut [T] {
&mut self.temporary_buffer
@ -214,7 +226,10 @@ impl<'a, T> Buffer<'a, T> where T: Sample {
let typed_array = {
let t_slice: &[T] = self.temporary_buffer.as_slice();
let u8_slice: &[u8] = unsafe { from_raw_parts(t_slice.as_ptr() as *const _, t_slice.len() * mem::size_of::<T>()) };
let u8_slice: &[u8] = unsafe {
from_raw_parts(t_slice.as_ptr() as *const _,
t_slice.len() * mem::size_of::<T>())
};
let typed_array: TypedArray<u8> = u8_slice.into();
typed_array
};

View File

@ -1,42 +1,113 @@
/*!
# How to use cpal
In order to play a sound, first you need to create an `EventLoop` and a voice.
```no_run
// getting the default sound output of the system (can return `None` if nothing is supported)
let endpoint = cpal::default_endpoint().unwrap();
// note that the user can at any moment disconnect the device, therefore all operations return
// a `Result` to handle this situation
// getting a format for the PCM
let supported_formats_range = endpoint.supported_formats().unwrap().next().unwrap();
let format = supported_formats_range.with_max_samples_rate();
let event_loop = cpal::EventLoop::new();
let voice_id = event_loop.build_voice(&endpoint, &format).unwrap();
event_loop.play(voice_id);
```
`voice_id` is an identifier for the voice can be used to control the play/pause of the output.
Once that's done, you can call `run()` on the `event_loop`.
```no_run
# let event_loop = cpal::EventLoop::new();
event_loop.run(move |_voice_id, _buffer| {
// write data to `buffer` here
});
```
Calling `run()` will block the thread forever, so it's usually best done in a separate thread.
While `run()` is running, the audio device of the user will call the callbacks you registered
from time to time.
*/
//! # How to use cpal
//!
//! Here are some concepts cpal exposes:
//!
//! - An endpoint is a target where the data of the audio channel will be played.
//! - A voice is an open audio channel which you can stream audio data to. You have to choose which
//! endpoint your voice targets before you create one.
//! - An event loop is a collection of voices. Each voice must belong to an event loop, and all the
//! voices that belong to an event loop are managed together.
//!
//! In order to play a sound, you first need to create an event loop:
//!
//! ```
//! use cpal::EventLoop;
//! let event_loop = EventLoop::new();
//! ```
//!
//! Then choose an endpoint. You can either use the default endpoint with the `default_endpoint()`
//! function, or enumerate all the available endpoints with the `endpoints()` function. Beware that
//! `default_endpoint()` returns an `Option` in case no endpoint is available on the system.
//!
//! ```
//! // Note: we call `unwrap()` because it is convenient, but you should avoid doing that in
//! // real code.
//! let endpoint = cpal::default_endpoint().expect("no endpoint is available");
//! ```
//!
//! Before we can create a voice, we must decide what the format of the audio samples is going to
//! be. You can query all the supported formats with the `supported_formats()` method, which
//! produces a list of `SupportedFormat` structs which can later be turned into actual `Format`
//! structs. If you don't want to query the list of formats, you can also build your own `Format`
//! manually, but doing so could lead to an error when building the voice if the format ends up not
//! being supported.
//!
//! > **Note**: the `supported_formats()` method could return an error for example if the device
//! > has been disconnected.
//!
//! ```no_run
//! # let endpoint = cpal::default_endpoint().unwrap();
//! let mut supported_formats_range = endpoint.supported_formats()
//! .expect("error while querying formats");
//! let format = supported_formats_range.next().expect("no supported format?!")
//! .with_max_samples_rate();
//! ```
//!
//! Now that we have everything, we can create a voice from that event loop:
//!
//! ```no_run
//! # let endpoint = cpal::default_endpoint().unwrap();
//! # let format = endpoint.supported_formats().unwrap().next().unwrap().with_max_samples_rate();
//! # let event_loop = cpal::EventLoop::new();
//! let voice_id = event_loop.build_voice(&endpoint, &format).unwrap();
//! ```
//!
//! The value returned by `build_voice()` is of type `VoiceId` and is an identifier that will
//! allow you to control the voice.
//!
//! There is a last step to perform before going forward, which is to start the voice. This is done
//! with the `play()` method on the event loop.
//!
//! ```
//! # let event_loop: cpal::EventLoop = return;
//! # let voice_id: cpal::VoiceId = return;
//! event_loop.play(voice_id);
//! ```
//!
//! Once everything is done, you must call `run()` on the `event_loop`.
//!
//! ```no_run
//! # let event_loop = cpal::EventLoop::new();
//! event_loop.run(move |_voice_id, _buffer| {
//! // write data to `buffer` here
//! });
//! ```
//!
//! > **Note**: Calling `run()` will block the thread forever, so it's usually best done in a
//! > separate thread.
//!
//! While `run()` is running, the audio device of the user will from time to time call the callback
//! that you passed to this function. The callback gets passed the voice ID, and a struct of type
//! `UnknownTypeBuffer` that represents the buffer that must be filled with audio samples. The
//! `UnknownTypeBuffer` can be one of `I16`, `U16` or `F32` depending on the format that was passed
//! to `build_voice`.
//!
//! In this example, we simply fill the buffer with zeroes.
//!
//! ```no_run
//! use cpal::UnknownTypeBuffer;
//!
//! # let event_loop = cpal::EventLoop::new();
//! event_loop.run(move |_voice_id, mut buffer| {
//! match buffer {
//! UnknownTypeBuffer::U16(mut buffer) => {
//! for elem in buffer.iter_mut() {
//! *elem = u16::max_value() / 2;
//! }
//! },
//! UnknownTypeBuffer::I16(mut buffer) => {
//! for elem in buffer.iter_mut() {
//! *elem = 0;
//! }
//! },
//! UnknownTypeBuffer::F32(mut buffer) => {
//! for elem in buffer.iter_mut() {
//! *elem = 0.0;
//! }
//! },
//! }
//! });
//! ```
#![recursion_limit = "512"]
@ -78,6 +149,8 @@ mod cpal_impl;
mod cpal_impl;
/// An iterator for the list of formats that are supported by the backend.
///
/// See [`endpoints()`](fn.endpoints.html).
pub struct EndpointsIterator(cpal_impl::EndpointsIterator);
impl Iterator for EndpointsIterator {
@ -95,6 +168,8 @@ impl Iterator for EndpointsIterator {
}
/// Return an iterator to the list of formats that are supported by the system.
///
/// Can be empty if the system doesn't support audio in general.
#[inline]
pub fn endpoints() -> EndpointsIterator {
EndpointsIterator(Default::default())
@ -120,12 +195,18 @@ pub fn get_default_endpoint() -> Option<Endpoint> {
default_endpoint()
}
/// An opaque type that identifies an end point.
/// An opaque type that identifies an endpoint that is capable of playing audio.
///
/// Please note that endpoints may become invalid if they get disconnected. Therefore all the
/// methods that involve an endpoint return a `Result`.
#[derive(Clone, PartialEq, Eq)]
pub struct Endpoint(cpal_impl::Endpoint);
impl Endpoint {
/// Returns an iterator that produces the list of formats that are supported by the backend.
///
/// Can return an error if the endpoint is no longer valid (eg. it has been disconnected).
/// The returned iterator should never be empty.
#[inline]
pub fn supported_formats(&self) -> Result<SupportedFormatsIterator, FormatsEnumerationError> {
Ok(SupportedFormatsIterator(self.0.supported_formats()?))
@ -141,6 +222,7 @@ impl Endpoint {
}
/// Returns the name of the endpoint.
// TODO: human-readable or system name?
#[inline]
pub fn name(&self) -> String {
self.0.name()
@ -193,6 +275,8 @@ pub struct Format {
}
/// An iterator that produces a list of formats supported by the endpoint.
///
/// See [`Endpoint::supported_formats()`](struct.Endpoint.html#method.supported_formats).
pub struct SupportedFormatsIterator(cpal_impl::SupportedFormatsIterator);
impl Iterator for SupportedFormatsIterator {
@ -209,17 +293,20 @@ impl Iterator for SupportedFormatsIterator {
}
}
/// Describes a format.
/// Describes a range of supported formats.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SupportedFormat {
pub channels: Vec<ChannelPosition>,
/// Minimum value for the samples rate of the supported formats.
pub min_samples_rate: SamplesRate,
/// Maximum value for the samples rate of the supported formats.
pub max_samples_rate: SamplesRate,
/// Type of data expected by the endpoint.
pub data_type: SampleFormat,
}
impl SupportedFormat {
/// Builds a corresponding `Format` corresponding to the maximum samples rate.
/// Turns this `SupportedFormat` into a `Format` corresponding to the maximum samples rate.
#[inline]
pub fn with_max_samples_rate(self) -> Format {
Format {
@ -242,6 +329,9 @@ impl From<Format> for SupportedFormat {
}
}
/// Collection of voices managed together.
///
/// Created with the [`new`](struct.EventLoop.html#method.new) method.
pub struct EventLoop(cpal_impl::EventLoop);
impl EventLoop {
@ -254,10 +344,12 @@ impl EventLoop {
/// Creates a new voice that will play on the given endpoint and with the given format.
///
/// On success, returns an identifier for the voice.
///
/// Can return an error if the endpoint is no longer valid, or if the format is not supported
/// by the endpoint.
#[inline]
pub fn build_voice(&self, endpoint: &Endpoint, format: &Format)
-> Result<VoiceId, CreationError>
{
-> Result<VoiceId, CreationError> {
self.0.build_voice(&endpoint.0, format).map(VoiceId)
}
@ -274,9 +366,11 @@ impl EventLoop {
/// Takes control of the current thread and processes the sounds.
///
/// > **Note**: Since it takes control of the thread, this method is best called on a separate
/// > thread.
///
/// Whenever a voice needs to be fed some data, the closure passed as parameter is called.
/// **Note**: Calling other methods of the events loop from the callback will most likely
/// deadlock. Don't do that. Maybe this will change in the future.
/// You can call the other methods of `EventLoop` without getting a deadlock.
#[inline]
pub fn run<F>(&self, mut callback: F) -> !
where F: FnMut(VoiceId, UnknownTypeBuffer)
@ -284,7 +378,7 @@ impl EventLoop {
self.0.run(move |id, buf| callback(VoiceId(id), buf))
}
/// Sends a command to the audio device that it should start playing.
/// Instructs the audio device that it should start playing.
///
/// Has no effect if the voice was already playing.
///
@ -300,11 +394,11 @@ impl EventLoop {
self.0.play(voice.0)
}
/// Sends a command to the audio device that it should stop playing.
/// Instructs the audio device that it should stop playing.
///
/// Has no effect if the voice was already paused.
///
/// If you call `play` afterwards, the playback will resume exactly where it was.
/// If you call `play` afterwards, the playback will resume where it was.
///
/// # Panic
///
@ -405,8 +499,12 @@ impl Error for CreationError {
/// Represents a buffer that must be filled with audio data.
///
/// You should destroy this object as soon as possible. Data is only committed when it
/// is destroyed.
/// You should destroy this object as soon as possible. Data is only sent to the audio device when
/// this object is destroyed.
///
/// This struct implements the `Deref` and `DerefMut` traits to `[T]`. Therefore writing to this
/// buffer is done in the same way as writing to a `Vec` or any other kind of Rust array.
// TODO: explain audio stuff in general
#[must_use]
pub struct Buffer<'a, T: 'a>
where T: Sample

View File

@ -23,9 +23,7 @@ impl EventLoop {
}
#[inline]
pub fn build_voice(&self, _: &Endpoint, _: &Format)
-> Result<VoiceId, CreationError>
{
pub fn build_voice(&self, _: &Endpoint, _: &Format) -> Result<VoiceId, CreationError> {
Err(CreationError::DeviceNotAvailable)
}
@ -70,9 +68,7 @@ pub struct Endpoint;
impl Endpoint {
#[inline]
pub fn supported_formats(
&self)
-> Result<SupportedFormatsIterator, FormatsEnumerationError> {
pub fn supported_formats(&self) -> Result<SupportedFormatsIterator, FormatsEnumerationError> {
unreachable!()
}

View File

@ -110,9 +110,7 @@ impl Endpoint {
Ok(client)
}
pub fn supported_formats(
&self)
-> Result<SupportedFormatsIterator, FormatsEnumerationError> {
pub fn supported_formats(&self) -> Result<SupportedFormatsIterator, FormatsEnumerationError> {
// We always create voices in shared mode, therefore all samples go through an audio
// processor to mix them together.
// However there is no way to query the list of all formats that are supported by the

View File

@ -4,7 +4,7 @@ extern crate kernel32;
use std::io::Error as IoError;
pub use self::endpoint::{Endpoint, EndpointsIterator, default_endpoint, SupportedFormatsIterator};
pub use self::endpoint::{Endpoint, EndpointsIterator, SupportedFormatsIterator, default_endpoint};
pub use self::voice::{Buffer, EventLoop, VoiceId};
mod com;

View File

@ -1,4 +1,3 @@
use super::Endpoint;
use super::check_result;
use super::com;
@ -10,9 +9,9 @@ use std::marker::PhantomData;
use std::mem;
use std::ptr;
use std::slice;
use std::sync::Mutex;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::Mutex;
use ChannelPosition;
use CreationError;
@ -254,7 +253,10 @@ impl EventLoop {
#[inline]
pub fn destroy_voice(&self, voice_id: VoiceId) {
unsafe {
self.commands.lock().unwrap().push(Command::DestroyVoice(voice_id));
self.commands
.lock()
.unwrap()
.push(Command::DestroyVoice(voice_id));
let result = kernel32::SetEvent(self.pending_scheduled_event);
assert!(result != 0);
}