diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..34b1f40 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,8 @@ +# Unreleased (major) + +- Removed the dependency on the `futures` library. +- Removed the `Voice` and `SamplesStream` types. +- Added `EventLoop::build_voice`, `EventLoop::destroy_voice`, `EventLoop::play`, + and `EventLoop::pause` that can be used to create, destroy, play and pause voices. +- Added a `VoiceId` struct that is now used to identify a voice owned by an `EventLoop`. +- Changed `EventLoop::run()` to take a callback that is called whenever a voice requires sound data. diff --git a/Cargo.toml b/Cargo.toml index 5d661b6..c8b4ba9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,6 @@ license = "Apache-2.0" keywords = ["audio", "sound"] [dependencies] -futures = "0.1.1" libc = "0.2" lazy_static = "0.2" diff --git a/examples/beep.rs b/examples/beep.rs index 884316b..44e6f0e 100644 --- a/examples/beep.rs +++ b/examples/beep.rs @@ -1,23 +1,9 @@ extern crate cpal; -extern crate futures; - -use futures::stream::Stream; -use futures::task; -use futures::task::Executor; -use futures::task::Run; use std::sync::Arc; use std::thread; use std::time::Duration; -struct MyExecutor; - -impl Executor for MyExecutor { - fn execute(&self, r: Run) { - r.run(); - } -} - fn main() { let endpoint = cpal::default_endpoint().expect("Failed to get default endpoint"); let format = endpoint @@ -27,18 +13,15 @@ fn main() { .expect("Failed to get endpoint format"); let event_loop = cpal::EventLoop::new(); - let executor = Arc::new(MyExecutor); - - let (mut voice, stream) = cpal::Voice::new(&endpoint, &format, &event_loop) - .expect("Failed to create a voice"); + let voice_id = event_loop.build_voice(&endpoint, &format).unwrap(); + event_loop.play(voice_id); // Produce a sinusoid of maximum amplitude. let samples_rate = format.samples_rate.0 as f32; let mut data_source = (0u64..).map(move |t| t as f32 * 440.0 * 2.0 * 3.141592 / samples_rate) // 440 Hz .map(move |t| t.sin()); - voice.play(); - task::spawn(stream.for_each(move |buffer| -> Result<_, ()> { + event_loop.run(move |_, buffer| { match buffer { cpal::UnknownTypeBuffer::U16(mut buffer) => { for (sample, value) in buffer @@ -75,16 +58,5 @@ fn main() { } }, }; - - Ok(()) - })).execute(executor); - - thread::spawn(move || loop { - thread::sleep(Duration::from_millis(500)); - voice.pause(); - thread::sleep(Duration::from_millis(500)); - voice.play(); - }); - - event_loop.run(); + }); } diff --git a/src/alsa/mod.rs b/src/alsa/mod.rs index b0fb361..ec3069f 100644 --- a/src/alsa/mod.rs +++ b/src/alsa/mod.rs @@ -13,15 +13,9 @@ use UnknownTypeBuffer; use std::{cmp, ffi, iter, mem, ptr}; use std::sync::{Arc, Mutex}; -use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::vec::IntoIter as VecIntoIter; -use futures::Async; -use futures::Poll; -use futures::stream::Stream; -use futures::task; -use futures::task::Task; - pub type SupportedFormatsIterator = VecIntoIter; mod enumerate; @@ -256,205 +250,46 @@ impl Endpoint { } pub struct EventLoop { - inner: Arc, -} + // Each newly-created voice gets a new ID from this counter. The counter is then incremented. + next_voice_id: AtomicUsize, // TODO: use AtomicU64 when stable? -struct EventLoopInner { - // Descriptors that we are currently waiting upon. This member is always locked while `run()` - // is executed, ie. most of the time. 
- // - // Note that for `current_wait`, the first element of `descriptors` is always - // `pending_wait_signal`. Therefore the length of `descriptors` is always one more than - // `voices`. - current_wait: Mutex, - - // Since we can't add elements to `current_wait` (as it's locked), we add them to - // `pending_wait`. Once that's done, we signal `pending_wait_signal` so that the `run()` - // function can pause and add the content of `pending_wait` to `current_wait`. - pending_wait: Mutex, - - // A trigger that uses a `pipe` as backend. Always the first element - // of `current_wait.descriptors`. Should be notified when an element is added - // to `pending_wait` so that the current wait can stop and take the pending wait into - // account. + // A trigger that uses a `pipe()` as backend. Signalled whenever a new command is ready, so + // that `poll()` can wake up and pick the changes. pending_trigger: Trigger, + + // This field is locked by the `run()` method. + // The mutex also ensures that only one thread at a time has `run()` running. + run_context: Mutex, + + // Commands processed by the `run()` method that is currently running. + // TODO: use a lock-free container + commands: Mutex>, } -struct PollDescriptors { - // Descriptors to wait for. +unsafe impl Send for EventLoop { +} + +unsafe impl Sync for EventLoop { +} + +enum Command { + NewVoice(VoiceInner), + DestroyVoice(VoiceId), +} + +struct RunContext { + // Descriptors to wait for. Always contains `pending_trigger.read_fd()` as first element. descriptors: Vec, // List of voices that are written in `descriptors`. - voices: Vec>, -} - -unsafe impl Send for EventLoopInner { -} -unsafe impl Sync for EventLoopInner { -} - -impl EventLoop { - #[inline] - pub fn new() -> EventLoop { - let pending_trigger = Trigger::new(); - - EventLoop { - inner: Arc::new(EventLoopInner { - current_wait: Mutex::new(PollDescriptors { - descriptors: vec![ - libc::pollfd { - fd: pending_trigger.read_fd(), - events: libc::POLLIN, - revents: 0, - }, - ], - voices: Vec::new(), - }), - pending_wait: Mutex::new(PollDescriptors { - descriptors: Vec::new(), - voices: Vec::new(), - }), - pending_trigger: pending_trigger, - }), - } - } - - #[inline] - pub fn run(&self) { - unsafe { - let mut current_wait = self.inner.current_wait.lock().unwrap(); - - loop { - let ret = libc::poll(current_wait.descriptors.as_mut_ptr(), - current_wait.descriptors.len() as libc::nfds_t, - -1 /* infinite */); - assert!(ret >= 0, "poll() failed"); - - if ret == 0 { - continue; - } - - // If the `pending_wait_signal` was signaled, add the pending waits to - // the current waits. - if current_wait.descriptors[0].revents != 0 { - current_wait.descriptors[0].revents = 0; - - let mut pending = self.inner.pending_wait.lock().unwrap(); - current_wait.descriptors.append(&mut pending.descriptors); - current_wait.voices.append(&mut pending.voices); - - // Emptying the signal. - self.inner.pending_trigger.clear_pipe(); - } - - // Check each individual descriptor for events. 
- let mut i_voice = 0; - let mut i_descriptor = 1; - while i_voice < current_wait.voices.len() { - let kind = { - let scheduled = current_wait.voices[i_voice].scheduled.lock().unwrap(); - match *scheduled { - Some(ref scheduled) => scheduled.kind, - None => panic!("current wait unscheduled task"), - } - }; - - // Depending on the kind of scheduling the number of descriptors corresponding - // to the voice and the events associated are different - match kind { - ScheduledKind::WaitPCM => { - let mut revent = mem::uninitialized(); - - { - let channel = *current_wait.voices[i_voice].channel.lock().unwrap(); - let num_descriptors = - current_wait.voices[i_voice].num_descriptors as libc::c_uint; - check_errors(alsa::snd_pcm_poll_descriptors_revents(channel, current_wait.descriptors - .as_mut_ptr().offset(i_descriptor), - num_descriptors, &mut revent)).unwrap(); - } - - if (revent as libc::c_short & libc::POLLOUT) != 0 { - let scheduled = current_wait.voices[i_voice] - .scheduled - .lock() - .unwrap() - .take(); - scheduled.unwrap().task.unpark(); - - for _ in 0 .. current_wait.voices[i_voice].num_descriptors { - current_wait.descriptors.remove(i_descriptor as usize); - } - current_wait.voices.remove(i_voice); - - } else { - i_descriptor += current_wait.voices[i_voice].num_descriptors as - isize; - i_voice += 1; - } - }, - ScheduledKind::WaitResume => { - if current_wait.descriptors[i_descriptor as usize].revents != 0 { - // Unpark the task - let scheduled = current_wait.voices[i_voice] - .scheduled - .lock() - .unwrap() - .take(); - scheduled.unwrap().task.unpark(); - - // Emptying the signal. - let mut out = 0u64; - let ret = - libc::read(current_wait.descriptors[i_descriptor as usize].fd, - &mut out as *mut u64 as *mut _, - 8); - assert_eq!(ret, 8); - - // Remove from current waiting poll descriptors - current_wait.descriptors.remove(i_descriptor as usize); - current_wait.voices.remove(i_voice); - } else { - i_descriptor += 1; - i_voice += 1; - } - }, - } - } - } - } - } -} - -pub struct Voice { - inner: Arc, -} - -pub struct Buffer { - inner: Arc, - buffer: Vec, -} - -pub struct SamplesStream { - inner: Arc, -} - -pub struct Scheduled { - task: Task, - kind: ScheduledKind, -} - -#[derive(Clone, Copy)] -pub enum ScheduledKind { - WaitResume, - WaitPCM, + voices: Vec, } struct VoiceInner { - // The event loop used to create the voice. - event_loop: Arc, + // The id of the voice. + id: VoiceId, // The ALSA channel. - channel: Mutex<*mut alsa::snd_pcm_t>, + channel: *mut alsa::snd_pcm_t, // When converting between file descriptors and `snd_pcm_t`, this is the number of // file descriptors that this `snd_pcm_t` uses. @@ -472,9 +307,6 @@ struct VoiceInner { // Minimum number of samples to put in the buffer. period_len: usize, - // If `Some`, something previously called `schedule` on the stream. 
- scheduled: Mutex>, - // Wherease the sample stream is paused is_paused: AtomicBool, @@ -483,159 +315,180 @@ struct VoiceInner { resume_trigger: Trigger, } -unsafe impl Send for VoiceInner { -} -unsafe impl Sync for VoiceInner { -} +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct VoiceId(usize); -impl SamplesStream { +impl EventLoop { #[inline] - fn schedule(&mut self, kind: ScheduledKind) { + pub fn new() -> EventLoop { + let pending_trigger = Trigger::new(); + + let run_context = Mutex::new(RunContext { + descriptors: Vec::new(), // TODO: clearify in doc initial value not necessary + voices: Vec::new(), + }); + + EventLoop { + next_voice_id: AtomicUsize::new(0), + pending_trigger: pending_trigger, + run_context, + commands: Mutex::new(Vec::new()), + } + } + + #[inline] + pub fn run(&self, mut callback: F) -> ! + where F: FnMut(VoiceId, UnknownTypeBuffer) + { + self.run_inner(&mut callback) + } + + fn run_inner(&self, callback: &mut FnMut(VoiceId, UnknownTypeBuffer)) -> ! { unsafe { - let channel = self.inner.channel.lock().unwrap(); + let mut run_context = self.run_context.lock().unwrap(); + let run_context = &mut *run_context; - // We start by filling `scheduled`. - *self.inner.scheduled.lock().unwrap() = Some(Scheduled { - task: task::park(), - kind: kind, - }); + loop { + { + let mut commands_lock = self.commands.lock().unwrap(); + if !commands_lock.is_empty() { + for command in commands_lock.drain(..) { + match command { + Command::DestroyVoice(voice_id) => { + run_context.voices.retain(|v| v.id != voice_id); + }, + Command::NewVoice(voice_inner) => { + run_context.voices.push(voice_inner); + }, + } + } - let mut pending_wait = self.inner.event_loop.pending_wait.lock().unwrap(); - match kind { - ScheduledKind::WaitPCM => { - // In this function we turn the `snd_pcm_t` into a collection of file descriptors. - // And we add these descriptors to `event_loop.pending_wait.descriptors`. - pending_wait.descriptors.reserve(self.inner.num_descriptors); + run_context.descriptors = vec![ + libc::pollfd { + fd: self.pending_trigger.read_fd(), + events: libc::POLLIN, + revents: 0, + } + ]; + for voice in run_context.voices.iter() { + run_context.descriptors.reserve(voice.num_descriptors); + let len = run_context.descriptors.len(); + let filled = alsa::snd_pcm_poll_descriptors(voice.channel, + run_context.descriptors + .as_mut_ptr() + .offset(len as isize), + voice.num_descriptors as + libc::c_uint); + debug_assert_eq!(filled, voice.num_descriptors as libc::c_int); + run_context.descriptors.set_len(len + voice.num_descriptors); + } + } + } - let len = pending_wait.descriptors.len(); - let filled = alsa::snd_pcm_poll_descriptors(*channel, - pending_wait - .descriptors - .as_mut_ptr() - .offset(len as isize), - self.inner.num_descriptors as - libc::c_uint); - debug_assert_eq!(filled, self.inner.num_descriptors as libc::c_int); - pending_wait - .descriptors - .set_len(len + self.inner.num_descriptors); - }, - ScheduledKind::WaitResume => { - // And we add the descriptor corresponding to the resume signal - // to `event_loop.pending_wait.descriptors`. - pending_wait.descriptors.push(libc::pollfd { - fd: self.inner.resume_trigger.read_fd(), - events: libc::POLLIN, - revents: 0, - }); - }, + let ret = libc::poll(run_context.descriptors.as_mut_ptr(), + run_context.descriptors.len() as libc::nfds_t, + -1 /* infinite */); + assert!(ret >= 0, "poll() failed"); + + if ret == 0 { + continue; + } + + // If the `pending_trigger` was signaled, we need to process the comands. 
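+                // A command arrived while we were blocked in `poll()`; it will be picked up
+                // at the top of the next loop iteration. Here we only clear the wakeup pipe
+                // so that `poll()` does not return again immediately.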
+ if run_context.descriptors[0].revents != 0 { + run_context.descriptors[0].revents = 0; + self.pending_trigger.clear_pipe(); + } + + // Iterate over each individual voice/descriptor. + let mut i_voice = 0; + let mut i_descriptor = 1; + while (i_descriptor as usize) < run_context.descriptors.len() { + let voice_inner = run_context.voices.get_mut(i_voice).unwrap(); + + // Check whether the event is `POLLOUT`. If not, `continue`. + { + let mut revent = mem::uninitialized(); + + { + let num_descriptors = voice_inner.num_descriptors as libc::c_uint; + check_errors(alsa::snd_pcm_poll_descriptors_revents(voice_inner.channel, run_context.descriptors + .as_mut_ptr().offset(i_descriptor), + num_descriptors, &mut revent)).unwrap(); + } + + if (revent as libc::c_short & libc::POLLOUT) == 0 { + i_descriptor += voice_inner.num_descriptors as isize; + i_voice += 1; + continue; + } + } + + // Determine the number of samples that are available to write. + let available = { + let available = alsa::snd_pcm_avail(voice_inner.channel); // TODO: what about snd_pcm_avail_update? + + if available == -32 { + // buffer underrun + voice_inner.buffer_len + } else if available < 0 { + check_errors(available as libc::c_int).expect("buffer is not available"); + unreachable!() + } else { + (available * voice_inner.num_channels as alsa::snd_pcm_sframes_t) as usize + } + }; + + if available < voice_inner.period_len { + i_descriptor += voice_inner.num_descriptors as isize; + i_voice += 1; + continue; + } + + let voice_id = voice_inner.id.clone(); + + // We're now sure that we're ready to write data. + let buffer = match voice_inner.sample_format { + SampleFormat::I16 => { + let buffer = Buffer { + voice_inner: voice_inner, + buffer: iter::repeat(mem::uninitialized()) + .take(available) + .collect(), + }; + + UnknownTypeBuffer::I16(::Buffer { target: Some(buffer) }) + }, + SampleFormat::U16 => { + let buffer = Buffer { + voice_inner: voice_inner, + buffer: iter::repeat(mem::uninitialized()) + .take(available) + .collect(), + }; + + UnknownTypeBuffer::U16(::Buffer { target: Some(buffer) }) + }, + SampleFormat::F32 => { + let buffer = Buffer { + voice_inner: voice_inner, + buffer: iter::repeat(0.0) // we don't use mem::uninitialized in case of sNaN + .take(available) + .collect(), + }; + + UnknownTypeBuffer::F32(::Buffer { target: Some(buffer) }) + }, + }; + + callback(voice_id, buffer); + } } - - // We also fill `voices`. - pending_wait.voices.push(self.inner.clone()); - - // Now that `pending_wait` received additional descriptors, we signal the event - // so that our event loops can pick it up. - drop(pending_wait); - self.inner.event_loop.pending_trigger.wakeup(); } } -} -impl Stream for SamplesStream { - type Item = UnknownTypeBuffer; - type Error = (); - - fn poll(&mut self) -> Poll, Self::Error> { - // If paused then we schedule the task and return `NotReady` - if self.inner.is_paused.load(Ordering::Relaxed) { - self.schedule(ScheduledKind::WaitResume); - return Ok(Async::NotReady); - } - - // Determine the number of samples that are available to write. - let available = { - let channel = self.inner.channel.lock().expect("could not lock channel"); - let available = unsafe { alsa::snd_pcm_avail(*channel) }; // TODO: what about snd_pcm_avail_update? 
- - if available == -32 { - // buffer underrun - self.inner.buffer_len - } else if available < 0 { - check_errors(available as libc::c_int).expect("buffer is not available"); - unreachable!() - } else { - (available * self.inner.num_channels as alsa::snd_pcm_sframes_t) as usize - } - }; - - // If we don't have one period ready, schedule the task and return `NotReady`. - if available < self.inner.period_len { - self.schedule(ScheduledKind::WaitPCM); - return Ok(Async::NotReady); - } - - // We now sure that we're ready to write data. - match self.inner.sample_format { - SampleFormat::I16 => { - let buffer = Buffer { - buffer: iter::repeat(unsafe { mem::uninitialized() }) - .take(available) - .collect(), - inner: self.inner.clone(), - }; - - Ok(Async::Ready((Some(UnknownTypeBuffer::I16(::Buffer { target: Some(buffer) }))))) - }, - SampleFormat::U16 => { - let buffer = Buffer { - buffer: iter::repeat(unsafe { mem::uninitialized() }) - .take(available) - .collect(), - inner: self.inner.clone(), - }; - - Ok(Async::Ready((Some(UnknownTypeBuffer::U16(::Buffer { target: Some(buffer) }))))) - }, - SampleFormat::F32 => { - let buffer = Buffer { - buffer: iter::repeat(unsafe { mem::uninitialized() }) - .take(available) - .collect(), - inner: self.inner.clone(), - }; - - Ok(Async::Ready((Some(UnknownTypeBuffer::F32(::Buffer { target: Some(buffer) }))))) - }, - } - } -} - -/// Wrapper around `hw_params`. -struct HwParams(*mut alsa::snd_pcm_hw_params_t); - -impl HwParams { - pub fn alloc() -> HwParams { - unsafe { - let mut hw_params = mem::uninitialized(); - check_errors(alsa::snd_pcm_hw_params_malloc(&mut hw_params)) - .expect("unable to get hardware parameters"); - HwParams(hw_params) - } - } -} - -impl Drop for HwParams { - fn drop(&mut self) { - unsafe { - alsa::snd_pcm_hw_params_free(self.0); - } - } -} - -impl Voice { - pub fn new(endpoint: &Endpoint, format: &Format, event_loop: &EventLoop) - -> Result<(Voice, SamplesStream), CreationError> { + pub fn build_voice(&self, endpoint: &Endpoint, format: &Format) + -> Result { unsafe { let name = ffi::CString::new(endpoint.0.clone()).expect("unable to clone endpoint"); @@ -647,11 +500,18 @@ impl Voice { e => check_errors(e).expect("Device unavailable") } - // TODO: check endianess - let data_type = match format.data_type { - SampleFormat::I16 => alsa::SND_PCM_FORMAT_S16_LE, - SampleFormat::U16 => alsa::SND_PCM_FORMAT_U16_LE, - SampleFormat::F32 => alsa::SND_PCM_FORMAT_FLOAT_LE, + let data_type = if cfg!(target_endian = "big") { + match format.data_type { + SampleFormat::I16 => alsa::SND_PCM_FORMAT_S16_BE, + SampleFormat::U16 => alsa::SND_PCM_FORMAT_U16_BE, + SampleFormat::F32 => alsa::SND_PCM_FORMAT_FLOAT_BE, + } + } else { + match format.data_type { + SampleFormat::I16 => alsa::SND_PCM_FORMAT_S16_LE, + SampleFormat::U16 => alsa::SND_PCM_FORMAT_U16_LE, + SampleFormat::F32 => alsa::SND_PCM_FORMAT_FLOAT_LE, + } }; let hw_params = HwParams::alloc(); @@ -718,36 +578,68 @@ impl Voice { num_descriptors as usize }; - let samples_stream_inner = Arc::new(VoiceInner { - event_loop: event_loop.inner.clone(), - channel: Mutex::new(playback_handle), - sample_format: format.data_type, - num_descriptors: num_descriptors, - num_channels: format.channels.len() as u16, - buffer_len: buffer_len, - period_len: period_len, - scheduled: Mutex::new(None), - is_paused: AtomicBool::new(true), - resume_trigger: Trigger::new(), - }); + let new_voice_id = VoiceId(self.next_voice_id.fetch_add(1, Ordering::Relaxed)); + assert_ne!(new_voice_id.0, usize::max_value()); // check for 
overflows - Ok((Voice { inner: samples_stream_inner.clone() }, - SamplesStream { inner: samples_stream_inner })) + let voice_inner = VoiceInner { + id: new_voice_id.clone(), + channel: playback_handle, + sample_format: format.data_type, + num_descriptors: num_descriptors, + num_channels: format.channels.len() as u16, + buffer_len: buffer_len, + period_len: period_len, + is_paused: AtomicBool::new(true), + resume_trigger: Trigger::new(), + }; + + self.commands.lock().unwrap().push(Command::NewVoice(voice_inner)); + self.pending_trigger.wakeup(); + Ok(new_voice_id) } } #[inline] - pub fn play(&mut self) { - // If it was paused then we resume and signal - // FIXME: the signal is send even if the event loop wasn't waiting for resume, is that an issue ? - if self.inner.is_paused.swap(false, Ordering::Relaxed) { - self.inner.resume_trigger.wakeup(); - } + pub fn destroy_voice(&self, voice_id: VoiceId) { + self.commands.lock().unwrap().push(Command::DestroyVoice(voice_id)); + self.pending_trigger.wakeup(); } #[inline] - pub fn pause(&mut self) { - self.inner.is_paused.store(true, Ordering::Relaxed); + pub fn play(&self, _: VoiceId) { + //unimplemented!() + } + + #[inline] + pub fn pause(&self, _: VoiceId) { + unimplemented!() + } +} + +pub struct Buffer<'a, T: 'a> { + voice_inner: &'a mut VoiceInner, + buffer: Vec, +} + +/// Wrapper around `hw_params`. +struct HwParams(*mut alsa::snd_pcm_hw_params_t); + +impl HwParams { + pub fn alloc() -> HwParams { + unsafe { + let mut hw_params = mem::uninitialized(); + check_errors(alsa::snd_pcm_hw_params_malloc(&mut hw_params)) + .expect("unable to get hardware parameters"); + HwParams(hw_params) + } + } +} + +impl Drop for HwParams { + fn drop(&mut self) { + unsafe { + alsa::snd_pcm_hw_params_free(self.0); + } } } @@ -755,12 +647,12 @@ impl Drop for VoiceInner { #[inline] fn drop(&mut self) { unsafe { - alsa::snd_pcm_close(*self.channel.lock().expect("drop for voice")); + alsa::snd_pcm_close(self.channel); } } } -impl Buffer { +impl<'a, T> Buffer<'a, T> { #[inline] pub fn buffer(&mut self) -> &mut [T] { &mut self.buffer @@ -772,21 +664,17 @@ impl Buffer { } pub fn finish(self) { - let to_write = (self.buffer.len() / self.inner.num_channels as usize) as + let to_write = (self.buffer.len() / self.voice_inner.num_channels as usize) as alsa::snd_pcm_uframes_t; - let channel = self.inner - .channel - .lock() - .expect("Buffer channel lock failed"); unsafe { loop { let result = - alsa::snd_pcm_writei(*channel, self.buffer.as_ptr() as *const _, to_write); + alsa::snd_pcm_writei(self.voice_inner.channel, self.buffer.as_ptr() as *const _, to_write); if result == -32 { // buffer underrun - alsa::snd_pcm_prepare(*channel); + alsa::snd_pcm_prepare(self.voice_inner.channel); } else if result < 0 { check_errors(result as libc::c_int).expect("could not write pcm"); } else { diff --git a/src/coreaudio/mod.rs b/src/coreaudio/mod.rs index c06153e..22fa76e 100644 --- a/src/coreaudio/mod.rs +++ b/src/coreaudio/mod.rs @@ -10,11 +10,7 @@ use SampleFormat; use SamplesRate; use UnknownTypeBuffer; -use futures::Async; -use futures::Poll; -use futures::stream::Stream; -use futures::task; -use futures::task::Task; +use std::mem; use std::sync::{Arc, Mutex}; use std::thread; use std::time::Duration; @@ -49,27 +45,170 @@ impl Endpoint { } } -pub struct EventLoop; +// The ID of a voice is its index within the `voices` array of the events loop. 
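+// A slot is freed by `destroy_voice` (which sets it back to `None`) and may be reused by a
+// later call to `build_voice`.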
+#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct VoiceId(usize); + +pub struct EventLoop { + // This `Arc` is shared with all the callbacks of coreaudio. + active_callbacks: Arc, + voices: Mutex>>, +} + +struct ActiveCallbacks { + // Whenever the `run()` method is called with a callback, this callback is put in this list. + callbacks: Mutex>, +} + +struct VoiceInner { + playing: bool, + audio_unit: AudioUnit, +} + impl EventLoop { #[inline] pub fn new() -> EventLoop { - EventLoop + EventLoop { + active_callbacks: Arc::new(ActiveCallbacks { + callbacks: Mutex::new(Vec::new()), + }), + voices: Mutex::new(Vec::new()), + } } + #[inline] - pub fn run(&self) { + pub fn run(&self, mut callback: F) -> ! + where F: FnMut(VoiceId, UnknownTypeBuffer) + { + let callback: &mut FnMut(VoiceId, UnknownTypeBuffer) = &mut callback; + self.active_callbacks.callbacks.lock().unwrap().push(unsafe { mem::transmute(callback) }); + loop { // So the loop does not get optimised out in --release thread::sleep(Duration::new(1u64, 0u32)); } + + // Note: if we ever change this API so that `run` can return, then it is critical that + // we remove the callback from `active_callbacks`. + } + + #[inline] + pub fn build_voice(&self, endpoint: &Endpoint, format: &Format) + -> Result + { + fn convert_error(err: coreaudio::Error) -> CreationError { + match err { + coreaudio::Error::RenderCallbackBufferFormatDoesNotMatchAudioUnitStreamFormat | + coreaudio::Error::NoKnownSubtype | + coreaudio::Error::AudioUnit(coreaudio::error::AudioUnitError::FormatNotSupported) | + coreaudio::Error::AudioCodec(_) | + coreaudio::Error::AudioFormat(_) => CreationError::FormatNotSupported, + _ => CreationError::DeviceNotAvailable, + } + } + + let mut audio_unit = { + let au_type = if cfg!(target_os = "ios") { + // The DefaultOutput unit isn't available in iOS unfortunately. RemoteIO is a sensible replacement. + // See + // https://developer.apple.com/library/content/documentation/MusicAudio/Conceptual/AudioUnitHostingGuide_iOS/UsingSpecificAudioUnits/UsingSpecificAudioUnits.html + coreaudio::audio_unit::IOType::RemoteIO + } else { + coreaudio::audio_unit::IOType::DefaultOutput + }; + + AudioUnit::new(au_type).map_err(convert_error)? + }; + + // Determine the future ID of the voice. + let mut voices_lock = self.voices.lock().unwrap(); + let voice_id = voices_lock.iter().position(|n| n.is_none()).unwrap_or(voices_lock.len()); + + // TODO: iOS uses integer and fixed-point data + + // Register the callback that is being called by coreaudio whenever it needs data to be + // fed to the audio buffer. + let active_callbacks = self.active_callbacks.clone(); + audio_unit.set_render_callback(move |mut args: render_callback::Args>| { + // If `run()` is currently running, then a callback will be available from this list. + // Otherwise, we just fill the buffer with zeroes and return. + let mut callbacks = active_callbacks.callbacks.lock().unwrap(); + let callback = if let Some(cb) = callbacks.get_mut(0) { + cb + } else { + for channel in args.data.channels_mut() { + for elem in channel.iter_mut() { + *elem = 0.0; + } + } + return Ok(()); + }; + + let buffer = { + let buffer_len = args.num_frames * args.data.channels().count(); + Buffer { + args: &mut args, + buffer: vec![0.0; buffer_len], + } + }; + + callback(VoiceId(voice_id), UnknownTypeBuffer::F32(::Buffer { target: Some(buffer) })); + Ok(()) + + }).map_err(convert_error)?; + + // TODO: start playing now? is that consistent with the other backends? 
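+        // (The ALSA backend above creates its voices with `is_paused` initially `true`, so
+        // starting the unit right away means the initial state currently differs between
+        // backends.)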
+ audio_unit.start().map_err(convert_error)?; + + // Add the voice to the list of voices within `self`. + { + let inner = VoiceInner { + playing: true, + audio_unit: audio_unit, + }; + + if voice_id == voices_lock.len() { + voices_lock.push(Some(inner)); + } else { + voices_lock[voice_id] = Some(inner); + } + } + + Ok(VoiceId(voice_id)) + } + + pub fn destroy_voice(&self, voice_id: VoiceId) { + let mut voices = self.voices.lock().unwrap(); + voices[voice_id.0] = None; + } + + pub fn play(&self, voice: VoiceId) { + let mut voices = self.voices.lock().unwrap(); + let voice = voices[voice.0].as_mut().unwrap(); + + if !voice.playing { + voice.audio_unit.start().unwrap(); + voice.playing = true; + } + } + + pub fn pause(&self, voice: VoiceId) { + let mut voices = self.voices.lock().unwrap(); + let voice = voices[voice.0].as_mut().unwrap(); + + if voice.playing { + voice.audio_unit.stop().unwrap(); + voice.playing = false; + } } } -pub struct Buffer { - args: render_callback::Args>, +pub struct Buffer<'a, T: 'a> { + args: &'a mut render_callback::Args>, buffer: Vec, } -impl Buffer +impl<'a, T> Buffer<'a, T> where T: Sample { #[inline] @@ -86,158 +225,11 @@ impl Buffer pub fn finish(self) { // TODO: At the moment this assumes the Vec is a Vec. // Need to add T: Sample and use Sample::to_vec_f32. - let Buffer { mut args, buffer } = self; - - let num_channels = args.data.channels().count(); - for (i, frame) in buffer.chunks(num_channels).enumerate() { - for (channel, sample) in args.data.channels_mut().zip(frame.iter()) { + let num_channels = self.args.data.channels().count(); + for (i, frame) in self.buffer.chunks(num_channels).enumerate() { + for (channel, sample) in self.args.data.channels_mut().zip(frame.iter()) { channel[i] = *sample; } } } } - -pub struct Voice { - playing: bool, - audio_unit: Arc>, -} - -#[allow(dead_code)] // the audio_unit will be dropped if we don't hold it. -pub struct SamplesStream { - inner: Arc>, - audio_unit: Arc>, -} - - -struct SamplesStreamInner { - scheduled_task: Option, - current_callback: Option>>, -} - -impl Stream for SamplesStream { - type Item = UnknownTypeBuffer; - type Error = (); - - fn poll(&mut self) -> Poll, Self::Error> { - let mut inner = self.inner.lock().unwrap(); - - // There are two possibilites: either we're answering a callback of coreaudio and we return - // a buffer, or we're not answering a callback and we return that we're not ready. 
- - let current_callback = match inner.current_callback.take() { - Some(c) => c, - None => { - inner.scheduled_task = Some(task::park()); - return Ok(Async::NotReady); - }, - }; - - let buffer_len = current_callback.num_frames * current_callback.data.channels().count(); - - let buffer = Buffer { - args: current_callback, - buffer: vec![0.0; buffer_len], - }; - - Ok(Async::Ready(Some(UnknownTypeBuffer::F32(::Buffer { target: Some(buffer) })))) - } -} - -impl Voice { - pub fn new(_: &Endpoint, _: &Format, _: &EventLoop) - -> Result<(Voice, SamplesStream), CreationError> { - let inner = Arc::new(Mutex::new(SamplesStreamInner { - scheduled_task: None, - current_callback: None, - })); - - fn convert_error(err: coreaudio::Error) -> CreationError { - match err { - coreaudio::Error::RenderCallbackBufferFormatDoesNotMatchAudioUnitStreamFormat | - coreaudio::Error::NoKnownSubtype | - coreaudio::Error::AudioUnit(coreaudio::error::AudioUnitError::FormatNotSupported) | - coreaudio::Error::AudioCodec(_) | - coreaudio::Error::AudioFormat(_) => CreationError::FormatNotSupported, - _ => CreationError::DeviceNotAvailable, - } - } - - let au_type = if cfg!(target_os = "ios") { - // The DefaultOutput unit isn't available in iOS unfortunately. RemoteIO is a sensible replacement. - // See - // https://developer.apple.com/library/content/documentation/MusicAudio/Conceptual/AudioUnitHostingGuide_iOS/UsingSpecificAudioUnits/UsingSpecificAudioUnits.html - coreaudio::audio_unit::IOType::RemoteIO - } else { - coreaudio::audio_unit::IOType::DefaultOutput - }; - let mut audio_unit = AudioUnit::new(au_type).map_err(convert_error)?; - - // TODO: iOS uses integer and fixed-point data - - { - let inner = inner.clone(); - let result = audio_unit.set_render_callback(move |args| { - // This callback is entered whenever the coreaudio engine needs to be fed data. - - // Store the callback argument in the `SamplesStreamInner` and return the task - // that we're supposed to notify. - let scheduled = { - let mut inner = inner.lock().unwrap(); - - assert!(inner.current_callback.is_none()); - inner.current_callback = Some(args); - - inner.scheduled_task.take() - }; - - // It is important that `inner` is unlocked here. - if let Some(scheduled) = scheduled { - // Calling `unpark()` should eventually call `poll()` on the `SamplesStream`, - // which will use the data we stored in `current_callback`. - scheduled.unpark(); - } - - // TODO: what should happen if the callback wasn't processed? in other word, what - // if the user didn't register any handler or did a stupid thing in the - // handler (like mem::forgetting the buffer)? 
- - Ok(()) - }); - - result.map_err(convert_error)?; - } - - audio_unit.start().map_err(convert_error)?; - - let au_arc = Arc::new(Mutex::new(audio_unit)); - - let samples_stream = SamplesStream { - inner: inner, - audio_unit: au_arc.clone(), - }; - - Ok((Voice { - playing: true, - audio_unit: au_arc.clone(), - }, - samples_stream)) - } - - #[inline] - pub fn play(&mut self) { - if !self.playing { - let mut unit = self.audio_unit.lock().unwrap(); - unit.start().unwrap(); - self.playing = true; - } - } - - #[inline] - pub fn pause(&mut self) { - if self.playing { - let mut unit = self.audio_unit.lock().unwrap(); - unit.stop().unwrap(); - self.playing = false; - } - } -} diff --git a/src/emscripten/mod.rs b/src/emscripten/mod.rs new file mode 100644 index 0000000..0b38a47 --- /dev/null +++ b/src/emscripten/mod.rs @@ -0,0 +1,252 @@ +use std::marker::PhantomData; +use std::os::raw::c_char; +use std::os::raw::c_int; +use std::os::raw::c_void; + +use CreationError; +use Format; +use FormatsEnumerationError; +use Sample; +use UnknownTypeBuffer; + +extern { + fn emscripten_set_main_loop_arg(_: extern fn(*mut c_void), _: *mut c_void, _: c_int, _: c_int); + fn emscripten_run_script(script: *const c_char); + fn emscripten_run_script_int(script: *const c_char) -> c_int; +} + +// The emscripten backend works by having a global variable named `_cpal_audio_contexts`, which +// is an array of `AudioContext` objects. A voice ID corresponds to an entry in this array. +// +// Creating a voice creates a new `AudioContext`. Destroying a voice destroys it. + +// TODO: handle latency better ; right now we just use setInterval with the amount of sound data +// that is in each buffer ; this is obviously bad, and also the schedule is too tight and there may +// be underflows + +pub struct EventLoop; +impl EventLoop { + #[inline] + pub fn new() -> EventLoop { + EventLoop + } + + #[inline] + pub fn run(&self, mut callback: F) -> ! + where F: FnMut(VoiceId, UnknownTypeBuffer) + { + unsafe { + // The `run` function uses `emscripten_set_main_loop_arg` to invoke a Rust callback + // repeatidely. The job of this callback is to fill the content of the audio buffers. + + // The first argument of the callback function (a `void*`) is a casted pointer to the + // `callback` parameter that was passed to `run`. + + extern "C" fn callback_fn(callback_ptr: *mut c_void) + where F: FnMut(VoiceId, UnknownTypeBuffer) + { + unsafe { + let num_contexts = emscripten_run_script_int("(function() { + if (window._cpal_audio_contexts) + return window._cpal_audio_contexts.length; + else + return 0; + })()\0".as_ptr() as *const _); + + // TODO: this processes all the voices, even those from maybe other event loops + // this is not a problem yet, but may become one in the future? + for voice_id in 0 .. num_contexts { + let callback_ptr = &mut *(callback_ptr as *mut F); + + let buffer = Buffer { + temporary_buffer: vec![0.0; 44100 * 2 / 3], + voice_id: voice_id, + marker: PhantomData, + }; + + callback_ptr(VoiceId(voice_id), ::UnknownTypeBuffer::F32(::Buffer { target: Some(buffer) })); + } + } + } + + let callback_ptr = &mut callback as *mut F as *mut c_void; + emscripten_set_main_loop_arg(callback_fn::, callback_ptr, 3, 1); + + unreachable!() + } + } + + #[inline] + pub fn build_voice(&self, _: &Endpoint, format: &Format) + -> Result + { + // TODO: find an empty element in the array first, instead of pushing at the end, in case + // the user creates and destroys lots of voices? 
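+        // (Right now `destroy_voice` below only sets the corresponding entry of
+        // `_cpal_audio_contexts` to `null`, so the array keeps growing.)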
+ + let num = unsafe { + emscripten_run_script_int(concat!(r#"(function() { + if (!window._cpal_audio_contexts) + window._cpal_audio_contexts = new Array(); + window._cpal_audio_contexts.push(new AudioContext()); + return window._cpal_audio_contexts.length - 1; + })()"#, "\0").as_ptr() as *const _) + }; + + Ok(VoiceId(num)) + } + + #[inline] + pub fn destroy_voice(&self, voice_id: VoiceId) { + unsafe { + let script = format!(" + if (window._cpal_audio_contexts) + window._cpal_audio_contexts[{}] = null;\0", voice_id.0); + emscripten_run_script(script.as_ptr() as *const _) + } + } + + #[inline] + pub fn play(&self, voice_id: VoiceId) { + unsafe { + let script = format!(" + if (window._cpal_audio_contexts) + if (window._cpal_audio_contexts[{v}]) + window._cpal_audio_contexts[{v}].resume();\0", v = voice_id.0); + emscripten_run_script(script.as_ptr() as *const _) + } + } + + #[inline] + pub fn pause(&self, voice_id: VoiceId) { + unsafe { + let script = format!(" + if (window._cpal_audio_contexts) + if (window._cpal_audio_contexts[{v}]) + window._cpal_audio_contexts[{v}].suspend();\0", v = voice_id.0); + emscripten_run_script(script.as_ptr() as *const _) + } + } +} + +// Index within the `_cpal_audio_contexts` global variable in Javascript. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct VoiceId(c_int); + +// Detects whether the `AudioContext` global variable is available. +fn is_webaudio_available() -> bool { + unsafe { + emscripten_run_script_int(concat!(r#"(function() { + if (!AudioContext) { return 0; } else { return 1; } + })()"#, "\0").as_ptr() as *const _) != 0 + } +} + +// Content is false if the iterator is empty. +pub struct EndpointsIterator(bool); +impl Default for EndpointsIterator { + fn default() -> EndpointsIterator { + // We produce an empty iterator if the WebAudio API isn't available. 
+ EndpointsIterator(is_webaudio_available()) + } +} +impl Iterator for EndpointsIterator { + type Item = Endpoint; + #[inline] + fn next(&mut self) -> Option { + if self.0 { + self.0 = false; + Some(Endpoint) + } else { + None + } + } +} + +#[inline] +pub fn default_endpoint() -> Option { + if is_webaudio_available() { + Some(Endpoint) + } else { + None + } +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct Endpoint; + +impl Endpoint { + #[inline] + pub fn supported_formats( + &self) + -> Result { + // TODO: right now cpal's API doesn't allow flexibility here + // "44100" and "2" (channels) have also been hard-coded in the rest of the code ; if + // this ever becomes more flexible, don't forget to change that + Ok(vec![Format { + channels: vec![::ChannelPosition::BackLeft, ::ChannelPosition::BackRight], + samples_rate: ::SamplesRate(44100), + data_type: ::SampleFormat::F32, + }].into_iter()) + } + + #[inline] + pub fn name(&self) -> String { + "Default endpoint".to_owned() + } +} + +pub type SupportedFormatsIterator = ::std::vec::IntoIter; + +pub struct Buffer<'a, T: 'a> where T: Sample { + temporary_buffer: Vec, + voice_id: c_int, + marker: PhantomData<&'a mut T>, +} + +impl<'a, T> Buffer<'a, T> where T: Sample { + #[inline] + pub fn buffer(&mut self) -> &mut [T] { + &mut self.temporary_buffer + } + + #[inline] + pub fn len(&self) -> usize { + self.temporary_buffer.len() + } + + #[inline] + pub fn finish(self) { + unsafe { + // TODO: **very** slow + let src_data = self.temporary_buffer.iter().map(|&b| b.to_f32().to_string() + ", ").fold(String::new(), |mut a, b| { a.push_str(&b); a }); + + debug_assert_eq!(self.temporary_buffer.len() % 2, 0); // TODO: num channels + + let script = format!("(function() {{ + if (!window._cpal_audio_contexts) + return; + var context = window._cpal_audio_contexts[{voice_id}]; + if (!context) + return; + var buffer = context.createBuffer({num_channels}, {buf_len} / {num_channels}, 44100); + var src = [{src_data}]; + for (var channel = 0; channel < {num_channels}; ++channel) {{ + var buffer_content = buffer.getChannelData(channel); + for (var i = 0; i < {buf_len} / {num_channels}; ++i) {{ + buffer_content[i] = src[i * {num_channels} + channel]; + }} + }} + var node = context.createBufferSource(); + node.buffer = buffer; + node.connect(context.destination); + node.start(); + }})()\0", + num_channels = 2, + voice_id = self.voice_id, + buf_len = self.temporary_buffer.len(), + src_data = src_data); + + emscripten_run_script(script.as_ptr() as *const _) + } + } +} diff --git a/src/lib.rs b/src/lib.rs index 4128fb6..09af62f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,65 +1,33 @@ /*! # How to use cpal -In order to play a sound, first you need to create an `EventLoop` and a `Voice`. +In order to play a sound, first you need to create an `EventLoop` and a voice. 
```no_run // getting the default sound output of the system (can return `None` if nothing is supported) -let endpoint = cpal::get_default_endpoint().unwrap(); +let endpoint = cpal::default_endpoint().unwrap(); // note that the user can at any moment disconnect the device, therefore all operations return // a `Result` to handle this situation // getting a format for the PCM -let format = endpoint.get_supported_formats_list().unwrap().next().unwrap(); +let format = endpoint.supported_formats().unwrap().next().unwrap(); let event_loop = cpal::EventLoop::new(); -let (voice, mut samples_stream) = cpal::Voice::new(&endpoint, &format, &event_loop).unwrap(); +let voice_id = event_loop.build_voice(&endpoint, &format).unwrap(); +event_loop.play(voice_id); ``` -The `voice` can be used to control the play/pause of the output, while the `samples_stream` can -be used to register a callback that will be called whenever the backend is ready to get data. -See the documentation of `futures-rs` for more info about how to use streams. +`voice_id` is an identifier for the voice can be used to control the play/pause of the output. + +Once that's done, you can call `run()` on the `event_loop`. ```no_run -# extern crate futures; -# extern crate cpal; -# use std::sync::Arc; -use futures::stream::Stream; -use futures::task; -# struct MyExecutor; -# impl task::Executor for MyExecutor { -# fn execute(&self, r: task::Run) { -# r.run(); -# } -# } -# fn main() { -# let mut samples_stream: cpal::SamplesStream = unsafe { std::mem::uninitialized() }; -# let my_executor = Arc::new(MyExecutor); - -task::spawn(samples_stream.for_each(move |buffer| -> Result<_, ()> { +# let event_loop = cpal::EventLoop::new(); +event_loop.run(move |_voice_id, _buffer| { // write data to `buffer` here - - Ok(()) -})).execute(my_executor); -# } -``` - -TODO: add example - -After you have registered a callback, call `play`: - -```no_run -# let mut voice: cpal::Voice = unsafe { std::mem::uninitialized() }; -voice.play(); -``` - -And finally, run the event loop: - -```no_run -# let mut event_loop: cpal::EventLoop = unsafe { std::mem::uninitialized() }; -event_loop.run(); +}); ``` Calling `run()` will block the thread forever, so it's usually best done in a separate thread. @@ -69,7 +37,6 @@ from time to time. */ -extern crate futures; #[macro_use] extern crate lazy_static; extern crate libc; @@ -77,16 +44,13 @@ extern crate libc; pub use samples_formats::{Sample, SampleFormat}; #[cfg(all(not(windows), not(target_os = "linux"), not(target_os = "freebsd"), - not(target_os = "macos"), not(target_os = "ios")))] + not(target_os = "macos"), not(target_os = "ios"), not(target_os = "emscripten")))] use null as cpal_impl; use std::error::Error; use std::fmt; use std::ops::{Deref, DerefMut}; -use futures::Poll; -use futures::stream::Stream; - mod null; mod samples_formats; @@ -102,6 +66,10 @@ mod cpal_impl; #[path = "coreaudio/mod.rs"] mod cpal_impl; +#[cfg(target_os = "emscripten")] +#[path = "emscripten/mod.rs"] +mod cpal_impl; + /// An iterator for the list of formats that are supported by the backend. pub struct EndpointsIterator(cpal_impl::EndpointsIterator); @@ -237,42 +205,94 @@ impl Iterator for SupportedFormatsIterator { pub struct EventLoop(cpal_impl::EventLoop); impl EventLoop { + /// Initializes a new events loop. #[inline] pub fn new() -> EventLoop { EventLoop(cpal_impl::EventLoop::new()) } + /// Creates a new voice that will play on the given endpoint and with the given format. + /// + /// On success, returns an identifier for the voice. 
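+    /// The returned identifier can then be passed to `play`, `pause` and `destroy_voice`.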
#[inline] - pub fn run(&self) { - self.0.run() + pub fn build_voice(&self, endpoint: &Endpoint, format: &Format) + -> Result + { + self.0.build_voice(&endpoint.0, format).map(VoiceId) + } + + /// Destroys an existing voice. + /// + /// # Panic + /// + /// If the voice doesn't exist, this function can either panic or be a no-op. + /// + #[inline] + pub fn destroy_voice(&self, voice_id: VoiceId) { + self.0.destroy_voice(voice_id.0) + } + + /// Takes control of the current thread and processes the sounds. + /// + /// Whenever a voice needs to be fed some data, the closure passed as parameter is called. + /// **Note**: Calling other methods of the events loop from the callback will most likely + /// deadlock. Don't do that. Maybe this will change in the future. + #[inline] + pub fn run(&self, mut callback: F) -> ! + where F: FnMut(VoiceId, UnknownTypeBuffer) + { + self.0.run(move |id, buf| callback(VoiceId(id), buf)) + } + + /// Sends a command to the audio device that it should start playing. + /// + /// Has no effect is the voice was already playing. + /// + /// Only call this after you have submitted some data, otherwise you may hear + /// some glitches. + /// + /// # Panic + /// + /// If the voice doesn't exist, this function can either panic or be a no-op. + /// + #[inline] + pub fn play(&self, voice: VoiceId) { + self.0.play(voice.0) + } + + /// Sends a command to the audio device that it should stop playing. + /// + /// Has no effect is the voice was already paused. + /// + /// If you call `play` afterwards, the playback will resume exactly where it was. + /// + /// # Panic + /// + /// If the voice doesn't exist, this function can either panic or be a no-op. + /// + #[inline] + pub fn pause(&self, voice: VoiceId) { + self.0.pause(voice.0) } } -/// Represents a buffer that must be filled with audio data. -/// -/// You should destroy this object as soon as possible. Data is only committed when it -/// is destroyed. -#[must_use] -pub struct Buffer - where T: Sample -{ - // also contains something, taken by `Drop` - target: Option>, -} +/// Identifier of a voice in an events loop. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct VoiceId(cpal_impl::VoiceId); /// This is the struct that is provided to you by cpal when you want to write samples to a buffer. /// /// Since the type of data is only known at runtime, you have to fill the right buffer. -pub enum UnknownTypeBuffer { +pub enum UnknownTypeBuffer<'a> { /// Samples whose format is `u16`. - U16(Buffer), + U16(Buffer<'a, u16>), /// Samples whose format is `i16`. - I16(Buffer), + I16(Buffer<'a, i16>), /// Samples whose format is `f32`. - F32(Buffer), + F32(Buffer<'a, f32>), } -impl UnknownTypeBuffer { +impl<'a> UnknownTypeBuffer<'a> { /// Returns the length of the buffer in number of samples. #[inline] pub fn len(&self) -> usize { @@ -343,111 +363,20 @@ impl Error for CreationError { } } -/// Controls a sound output. A typical application has one `Voice` for each sound -/// it wants to output. +/// Represents a buffer that must be filled with audio data. /// -/// A voice must be periodically filled with new data by calling `append_data`, or the sound -/// will stop playing. -/// -/// Each `Voice` is bound to a specific number of channels, samples rate, and samples format, -/// which can be retreived by calling `get_channels`, `get_samples_rate` and `get_samples_format`. -/// If you call `append_data` with values different than these, then cpal will automatically -/// perform a conversion on your data. 
-/// -/// If you have the possibility, you should try to match the format of the voice. -pub struct Voice { - voice: cpal_impl::Voice, - format: Format, +/// You should destroy this object as soon as possible. Data is only committed when it +/// is destroyed. +#[must_use] +pub struct Buffer<'a, T: 'a> + where T: Sample +{ + // Always contains something, taken by `Drop` + // TODO: change that + target: Option>, } -impl Voice { - /// Builds a new channel. - #[inline] - pub fn new(endpoint: &Endpoint, format: &Format, event_loop: &EventLoop) - -> Result<(Voice, SamplesStream), CreationError> { - let (voice, stream) = cpal_impl::Voice::new(&endpoint.0, format, &event_loop.0)?; - - let voice = Voice { - voice: voice, - format: format.clone(), - }; - - let stream = SamplesStream(stream); - - Ok((voice, stream)) - } - - /// Returns the format used by the voice. - #[inline] - pub fn format(&self) -> &Format { - &self.format - } - - /// DEPRECATED: use `format` instead. Returns the number of channels. - /// - /// You can add data with any number of channels, but matching the voice's native format - /// will lead to better performances. - #[deprecated] - #[inline] - pub fn get_channels(&self) -> ChannelsCount { - self.format().channels.len() as ChannelsCount - } - - /// DEPRECATED: use `format` instead. Returns the number of samples that are played per second. - /// - /// You can add data with any samples rate, but matching the voice's native format - /// will lead to better performances. - #[deprecated] - #[inline] - pub fn get_samples_rate(&self) -> SamplesRate { - self.format().samples_rate - } - - /// DEPRECATED: use `format` instead. Returns the format of the samples that are accepted by the backend. - /// - /// You can add data of any format, but matching the voice's native format - /// will lead to better performances. - #[deprecated] - #[inline] - pub fn get_samples_format(&self) -> SampleFormat { - self.format().data_type - } - - /// Sends a command to the audio device that it should start playing. - /// - /// Has no effect is the voice was already playing. - /// - /// Only call this after you have submitted some data, otherwise you may hear - /// some glitches. - #[inline] - pub fn play(&mut self) { - self.voice.play() - } - - /// Sends a command to the audio device that it should stop playing. - /// - /// Has no effect is the voice was already paused. - /// - /// If you call `play` afterwards, the playback will resume exactly where it was. 
- #[inline] - pub fn pause(&mut self) { - self.voice.pause() - } -} - -pub struct SamplesStream(cpal_impl::SamplesStream); - -impl Stream for SamplesStream { - type Item = UnknownTypeBuffer; - type Error = (); - - #[inline] - fn poll(&mut self) -> Poll, Self::Error> { - self.0.poll() - } -} - -impl Deref for Buffer +impl<'a, T> Deref for Buffer<'a, T> where T: Sample { type Target = [T]; @@ -458,7 +387,7 @@ impl Deref for Buffer } } -impl DerefMut for Buffer +impl<'a, T> DerefMut for Buffer<'a, T> where T: Sample { #[inline] @@ -467,7 +396,7 @@ impl DerefMut for Buffer } } -impl Drop for Buffer +impl<'a, T> Drop for Buffer<'a, T> where T: Sample { #[inline] diff --git a/src/null/mod.rs b/src/null/mod.rs index bd29684..4d3fb34 100644 --- a/src/null/mod.rs +++ b/src/null/mod.rs @@ -2,10 +2,6 @@ use std::marker::PhantomData; -use futures::Async; -use futures::Poll; -use futures::stream::Stream; - use CreationError; use Format; use FormatsEnumerationError; @@ -17,12 +13,40 @@ impl EventLoop { pub fn new() -> EventLoop { EventLoop } + #[inline] - pub fn run(&self) { + pub fn run(&self, _callback: F) -> ! + where F: FnMut(VoiceId, UnknownTypeBuffer) + { loop { /* TODO: don't spin */ } } + + #[inline] + pub fn build_voice(&self, _: &Endpoint, _: &Format) + -> Result + { + Err(CreationError::DeviceNotAvailable) + } + + #[inline] + pub fn destroy_voice(&self, _: VoiceId) { + unreachable!() + } + + #[inline] + pub fn play(&self, _: VoiceId) { + panic!() + } + + #[inline] + pub fn pause(&self, _: VoiceId) { + panic!() + } } +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct VoiceId; + #[derive(Default)] pub struct EndpointsIterator; @@ -68,40 +92,11 @@ impl Iterator for SupportedFormatsIterator { } } -pub struct Voice; -pub struct SamplesStream; - -impl Voice { - #[inline] - pub fn new(_: &Endpoint, _: &Format, _: &EventLoop) - -> Result<(Voice, SamplesStream), CreationError> { - Err(CreationError::DeviceNotAvailable) - } - - #[inline] - pub fn play(&mut self) { - } - - #[inline] - pub fn pause(&mut self) { - } +pub struct Buffer<'a, T: 'a> { + marker: PhantomData<&'a mut T>, } -impl Stream for SamplesStream { - type Item = UnknownTypeBuffer; - type Error = (); - - #[inline] - fn poll(&mut self) -> Poll, Self::Error> { - Ok(Async::NotReady) - } -} - -pub struct Buffer { - marker: PhantomData, -} - -impl Buffer { +impl<'a, T> Buffer<'a, T> { #[inline] pub fn buffer(&mut self) -> &mut [T] { unreachable!() diff --git a/src/samples_formats.rs b/src/samples_formats.rs index 33e3cc2..633ff38 100644 --- a/src/samples_formats.rs +++ b/src/samples_formats.rs @@ -35,6 +35,9 @@ pub unsafe trait Sample: Copy + Clone { /// Returns the `SampleFormat` corresponding to this data type. // TODO: rename to `format()`. Requires a breaking change. fn get_format() -> SampleFormat; + + /// Turns the sample into its equivalent as a floating-point. 
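+    /// The result is expected to lie in `[-1.0, 1.0]`; for example, `0u16` maps to `-1.0`
+    /// and `u16::max_value()` maps to `1.0`.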
+ fn to_f32(&self) -> f32; } unsafe impl Sample for u16 { @@ -42,6 +45,11 @@ unsafe impl Sample for u16 { fn get_format() -> SampleFormat { SampleFormat::U16 } + + #[inline] + fn to_f32(&self) -> f32 { + ((*self as f32 / u16::max_value() as f32) - 0.5) * 2.0 // TODO: maybe wrong + } } unsafe impl Sample for i16 { @@ -49,6 +57,15 @@ unsafe impl Sample for i16 { fn get_format() -> SampleFormat { SampleFormat::I16 } + + #[inline] + fn to_f32(&self) -> f32 { + if *self < 0 { + *self as f32 / -(::std::i16::MIN as f32) + } else { + *self as f32 / ::std::i16::MAX as f32 + } + } } unsafe impl Sample for f32 { @@ -56,4 +73,9 @@ unsafe impl Sample for f32 { fn get_format() -> SampleFormat { SampleFormat::F32 } + + #[inline] + fn to_f32(&self) -> f32 { + *self + } } diff --git a/src/wasapi/endpoint.rs b/src/wasapi/endpoint.rs new file mode 100644 index 0000000..c130adc --- /dev/null +++ b/src/wasapi/endpoint.rs @@ -0,0 +1,414 @@ +use std::ffi::OsString; +use std::io::Error as IoError; +use std::mem; +use std::option::IntoIter as OptionIntoIter; +use std::os::windows::ffi::OsStringExt; +use std::ptr; +use std::slice; +use std::sync::{Arc, Mutex, MutexGuard}; + +use ChannelPosition; +use Format; +use FormatsEnumerationError; +use SampleFormat; +use SamplesRate; + +use super::check_result; +use super::com; +use super::ole32; +use super::winapi; + +pub type SupportedFormatsIterator = OptionIntoIter; + +/// Wrapper because of that stupid decision to remove `Send` and `Sync` from raw pointers. +#[derive(Copy, Clone)] +struct IAudioClientWrapper(*mut winapi::IAudioClient); +unsafe impl Send for IAudioClientWrapper { +} +unsafe impl Sync for IAudioClientWrapper { +} + +/// An opaque type that identifies an end point. +pub struct Endpoint { + device: *mut winapi::IMMDevice, + + /// We cache an uninitialized `IAudioClient` so that we can call functions from it without + /// having to create/destroy audio clients all the time. + future_audio_client: Arc>>, // TODO: add NonZero around the ptr +} + +unsafe impl Send for Endpoint { +} +unsafe impl Sync for Endpoint { +} + +impl Endpoint { + // TODO: this function returns a GUID of the endpoin + // instead it should use the property store and return the friendly name + pub fn name(&self) -> String { + unsafe { + let mut name_ptr = mem::uninitialized(); + // can only fail if wrong params or out of memory + check_result((*self.device).GetId(&mut name_ptr)).unwrap(); + + // finding the length of the name + let mut len = 0; + while *name_ptr.offset(len) != 0 { + len += 1; + } + + // building a slice containing the name + let name_slice = slice::from_raw_parts(name_ptr, len as usize); + + // and turning it into a string + let name_string: OsString = OsStringExt::from_wide(name_slice); + ole32::CoTaskMemFree(name_ptr as *mut _); + name_string.into_string().unwrap() + } + } + + #[inline] + fn from_immdevice(device: *mut winapi::IMMDevice) -> Endpoint { + Endpoint { + device: device, + future_audio_client: Arc::new(Mutex::new(None)), + } + } + + /// Ensures that `future_audio_client` contains a `Some` and returns a locked mutex to it. 
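+    /// The audio client is created lazily on the first call and cached for subsequent calls.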
+ fn ensure_future_audio_client(&self) + -> Result>, IoError> { + let mut lock = self.future_audio_client.lock().unwrap(); + if lock.is_some() { + return Ok(lock); + } + + let audio_client: *mut winapi::IAudioClient = unsafe { + let mut audio_client = mem::uninitialized(); + let hresult = (*self.device).Activate(&winapi::IID_IAudioClient, + winapi::CLSCTX_ALL, + ptr::null_mut(), + &mut audio_client); + + // can fail if the device has been disconnected since we enumerated it, or if + // the device doesn't support playback for some reason + check_result(hresult)?; + assert!(!audio_client.is_null()); + audio_client as *mut _ + }; + + *lock = Some(IAudioClientWrapper(audio_client)); + Ok(lock) + } + + /// Returns an uninitialized `IAudioClient`. + #[inline] + pub(crate) fn build_audioclient(&self) -> Result<*mut winapi::IAudioClient, IoError> { + let mut lock = self.ensure_future_audio_client()?; + let client = lock.unwrap().0; + *lock = None; + Ok(client) + } + + pub fn supported_formats( + &self) + -> Result { + // We always create voices in shared mode, therefore all samples go through an audio + // processor to mix them together. + // However there is no way to query the list of all formats that are supported by the + // audio processor, but one format is guaranteed to be supported, the one returned by + // `GetMixFormat`. + + // initializing COM because we call `CoTaskMemFree` + com::com_initialized(); + + let lock = match self.ensure_future_audio_client() { + Err(ref e) if e.raw_os_error() == Some(winapi::AUDCLNT_E_DEVICE_INVALIDATED) => + return Err(FormatsEnumerationError::DeviceNotAvailable), + e => e.unwrap(), + }; + let client = lock.unwrap().0; + + unsafe { + let mut format_ptr = mem::uninitialized(); + match check_result((*client).GetMixFormat(&mut format_ptr)) { + Err(ref e) if e.raw_os_error() == Some(winapi::AUDCLNT_E_DEVICE_INVALIDATED) => { + return Err(FormatsEnumerationError::DeviceNotAvailable); + }, + Err(e) => panic!("{:?}", e), + Ok(()) => (), + }; + + let format = { + let (channels, data_type) = match (*format_ptr).wFormatTag { + winapi::WAVE_FORMAT_PCM => { + (vec![ChannelPosition::FrontLeft, ChannelPosition::FrontRight], + SampleFormat::I16) + }, + winapi::WAVE_FORMAT_EXTENSIBLE => { + let format_ptr = format_ptr as *const winapi::WAVEFORMATEXTENSIBLE; + + let channels = { + let mut channels = Vec::new(); + + let mask = (*format_ptr).dwChannelMask; + if (mask & winapi::SPEAKER_FRONT_LEFT) != 0 { + channels.push(ChannelPosition::FrontLeft); + } + if (mask & winapi::SPEAKER_FRONT_RIGHT) != 0 { + channels.push(ChannelPosition::FrontRight); + } + if (mask & winapi::SPEAKER_FRONT_CENTER) != 0 { + channels.push(ChannelPosition::FrontCenter); + } + if (mask & winapi::SPEAKER_LOW_FREQUENCY) != 0 { + channels.push(ChannelPosition::LowFrequency); + } + if (mask & winapi::SPEAKER_BACK_LEFT) != 0 { + channels.push(ChannelPosition::BackLeft); + } + if (mask & winapi::SPEAKER_BACK_RIGHT) != 0 { + channels.push(ChannelPosition::BackRight); + } + if (mask & winapi::SPEAKER_FRONT_LEFT_OF_CENTER) != 0 { + channels.push(ChannelPosition::FrontLeftOfCenter); + } + if (mask & winapi::SPEAKER_FRONT_RIGHT_OF_CENTER) != 0 { + channels.push(ChannelPosition::FrontRightOfCenter); + } + if (mask & winapi::SPEAKER_BACK_CENTER) != 0 { + channels.push(ChannelPosition::BackCenter); + } + if (mask & winapi::SPEAKER_SIDE_LEFT) != 0 { + channels.push(ChannelPosition::SideLeft); + } + if (mask & winapi::SPEAKER_SIDE_RIGHT) != 0 { + channels.push(ChannelPosition::SideRight); + } + if (mask & 
winapi::SPEAKER_TOP_CENTER) != 0 { + channels.push(ChannelPosition::TopCenter); + } + if (mask & winapi::SPEAKER_TOP_FRONT_LEFT) != 0 { + channels.push(ChannelPosition::TopFrontLeft); + } + if (mask & winapi::SPEAKER_TOP_FRONT_CENTER) != 0 { + channels.push(ChannelPosition::TopFrontCenter); + } + if (mask & winapi::SPEAKER_TOP_FRONT_RIGHT) != 0 { + channels.push(ChannelPosition::TopFrontRight); + } + if (mask & winapi::SPEAKER_TOP_BACK_LEFT) != 0 { + channels.push(ChannelPosition::TopBackLeft); + } + if (mask & winapi::SPEAKER_TOP_BACK_CENTER) != 0 { + channels.push(ChannelPosition::TopBackCenter); + } + if (mask & winapi::SPEAKER_TOP_BACK_RIGHT) != 0 { + channels.push(ChannelPosition::TopBackRight); + } + + assert_eq!((*format_ptr).Format.nChannels as usize, channels.len()); + channels + }; + + let format = { + fn cmp_guid(a: &winapi::GUID, b: &winapi::GUID) -> bool { + a.Data1 == b.Data1 && a.Data2 == b.Data2 && a.Data3 == b.Data3 && + a.Data4 == b.Data4 + } + if cmp_guid(&(*format_ptr).SubFormat, + &winapi::KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) + { + SampleFormat::F32 + } else if cmp_guid(&(*format_ptr).SubFormat, + &winapi::KSDATAFORMAT_SUBTYPE_PCM) + { + SampleFormat::I16 + } else { + panic!("Unknown SubFormat GUID returned by GetMixFormat: {:?}", + (*format_ptr).SubFormat) + } + }; + + (channels, format) + }, + + f => panic!("Unknown data format returned by GetMixFormat: {:?}", f), + }; + + Format { + channels: channels, + samples_rate: SamplesRate((*format_ptr).nSamplesPerSec), + data_type: data_type, + } + }; + + ole32::CoTaskMemFree(format_ptr as *mut _); + + Ok(Some(format).into_iter()) + } + } +} + +impl PartialEq for Endpoint { + #[inline] + fn eq(&self, other: &Endpoint) -> bool { + self.device == other.device + } +} + +impl Eq for Endpoint { +} + +impl Clone for Endpoint { + #[inline] + fn clone(&self) -> Endpoint { + unsafe { + (*self.device).AddRef(); + } + + Endpoint { + device: self.device, + future_audio_client: self.future_audio_client.clone(), + } + } +} + +impl Drop for Endpoint { + #[inline] + fn drop(&mut self) { + unsafe { + (*self.device).Release(); + } + + if let Some(client) = self.future_audio_client.lock().unwrap().take() { + unsafe { + (*client.0).Release(); + } + } + } +} + +lazy_static! { + static ref ENUMERATOR: Enumerator = { + // COM initialization is thread local, but we only need to have COM initialized in the + // thread we create the objects in + com::com_initialized(); + + // building the devices enumerator object + unsafe { + let mut enumerator: *mut winapi::IMMDeviceEnumerator = mem::uninitialized(); + + let hresult = ole32::CoCreateInstance(&winapi::CLSID_MMDeviceEnumerator, + ptr::null_mut(), winapi::CLSCTX_ALL, + &winapi::IID_IMMDeviceEnumerator, + &mut enumerator + as *mut *mut winapi::IMMDeviceEnumerator + as *mut _); + + check_result(hresult).unwrap(); + Enumerator(enumerator) + } + }; +} + +/// RAII object around `winapi::IMMDeviceEnumerator`. +struct Enumerator(*mut winapi::IMMDeviceEnumerator); + +unsafe impl Send for Enumerator { +} +unsafe impl Sync for Enumerator { +} + +impl Drop for Enumerator { + #[inline] + fn drop(&mut self) { + unsafe { + (*self.0).Release(); + } + } +} + +/// WASAPI implementation for `EndpointsIterator`. 
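+/// Enumerates the audio output (render) endpoints that are currently active on the system.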
+pub struct EndpointsIterator { + collection: *mut winapi::IMMDeviceCollection, + total_count: u32, + next_item: u32, +} + +unsafe impl Send for EndpointsIterator { +} +unsafe impl Sync for EndpointsIterator { +} + +impl Drop for EndpointsIterator { + #[inline] + fn drop(&mut self) { + unsafe { + (*self.collection).Release(); + } + } +} + +impl Default for EndpointsIterator { + fn default() -> EndpointsIterator { + unsafe { + let mut collection: *mut winapi::IMMDeviceCollection = mem::uninitialized(); + // can fail because of wrong parameters (should never happen) or out of memory + check_result((*ENUMERATOR.0).EnumAudioEndpoints(winapi::eRender, + winapi::DEVICE_STATE_ACTIVE, + &mut collection)) + .unwrap(); + + let mut count = mem::uninitialized(); + // can fail if the parameter is null, which should never happen + check_result((*collection).GetCount(&mut count)).unwrap(); + + EndpointsIterator { + collection: collection, + total_count: count, + next_item: 0, + } + } + } +} + +impl Iterator for EndpointsIterator { + type Item = Endpoint; + + fn next(&mut self) -> Option { + if self.next_item >= self.total_count { + return None; + } + + unsafe { + let mut device = mem::uninitialized(); + // can fail if out of range, which we just checked above + check_result((*self.collection).Item(self.next_item, &mut device)).unwrap(); + + self.next_item += 1; + Some(Endpoint::from_immdevice(device)) + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let num = self.total_count - self.next_item; + let num = num as usize; + (num, Some(num)) + } +} + +pub fn default_endpoint() -> Option { + unsafe { + let mut device = mem::uninitialized(); + let hres = (*ENUMERATOR.0) + .GetDefaultAudioEndpoint(winapi::eRender, winapi::eConsole, &mut device); + + if let Err(_err) = check_result(hres) { + return None; // TODO: check specifically for `E_NOTFOUND`, and panic otherwise + } + + Some(Endpoint::from_immdevice(device)) + } +} diff --git a/src/wasapi/enumerate.rs b/src/wasapi/enumerate.rs deleted file mode 100644 index 3c4e19f..0000000 --- a/src/wasapi/enumerate.rs +++ /dev/null @@ -1,133 +0,0 @@ - -use super::Endpoint; -use super::check_result; -use super::com; -use super::ole32; -use super::winapi; - -use std::mem; -use std::ptr; - -lazy_static! { - static ref ENUMERATOR: Enumerator = { - // COM initialization is thread local, but we only need to have COM initialized in the - // thread we create the objects in - com::com_initialized(); - - // building the devices enumerator object - unsafe { - let mut enumerator: *mut winapi::IMMDeviceEnumerator = mem::uninitialized(); - - let hresult = ole32::CoCreateInstance(&winapi::CLSID_MMDeviceEnumerator, - ptr::null_mut(), winapi::CLSCTX_ALL, - &winapi::IID_IMMDeviceEnumerator, - &mut enumerator - as *mut *mut winapi::IMMDeviceEnumerator - as *mut _); - - check_result(hresult).unwrap(); - Enumerator(enumerator) - } - }; -} - -/// RAII object around `winapi::IMMDeviceEnumerator`. -struct Enumerator(*mut winapi::IMMDeviceEnumerator); - -unsafe impl Send for Enumerator { -} -unsafe impl Sync for Enumerator { -} - -impl Drop for Enumerator { - #[inline] - fn drop(&mut self) { - unsafe { - (*self.0).Release(); - } - } -} - -/// WASAPI implementation for `EndpointsIterator`. 
-pub struct EndpointsIterator { - collection: *mut winapi::IMMDeviceCollection, - total_count: u32, - next_item: u32, -} - -unsafe impl Send for EndpointsIterator { -} -unsafe impl Sync for EndpointsIterator { -} - -impl Drop for EndpointsIterator { - #[inline] - fn drop(&mut self) { - unsafe { - (*self.collection).Release(); - } - } -} - -impl Default for EndpointsIterator { - fn default() -> EndpointsIterator { - unsafe { - let mut collection: *mut winapi::IMMDeviceCollection = mem::uninitialized(); - // can fail because of wrong parameters (should never happen) or out of memory - check_result((*ENUMERATOR.0).EnumAudioEndpoints(winapi::eRender, - winapi::DEVICE_STATE_ACTIVE, - &mut collection)) - .unwrap(); - - let mut count = mem::uninitialized(); - // can fail if the parameter is null, which should never happen - check_result((*collection).GetCount(&mut count)).unwrap(); - - EndpointsIterator { - collection: collection, - total_count: count, - next_item: 0, - } - } - } -} - -impl Iterator for EndpointsIterator { - type Item = Endpoint; - - fn next(&mut self) -> Option { - if self.next_item >= self.total_count { - return None; - } - - unsafe { - let mut device = mem::uninitialized(); - // can fail if out of range, which we just checked above - check_result((*self.collection).Item(self.next_item, &mut device)).unwrap(); - - self.next_item += 1; - Some(Endpoint::from_immdevice(device)) - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let num = self.total_count - self.next_item; - let num = num as usize; - (num, Some(num)) - } -} - -pub fn default_endpoint() -> Option { - unsafe { - let mut device = mem::uninitialized(); - let hres = (*ENUMERATOR.0) - .GetDefaultAudioEndpoint(winapi::eRender, winapi::eConsole, &mut device); - - if let Err(_err) = check_result(hres) { - return None; // TODO: check specifically for `E_NOTFOUND`, and panic otherwise - } - - Some(Endpoint::from_immdevice(device)) - } -} diff --git a/src/wasapi/mod.rs b/src/wasapi/mod.rs index cc1fa5c..136708b 100644 --- a/src/wasapi/mod.rs +++ b/src/wasapi/mod.rs @@ -2,28 +2,13 @@ extern crate winapi; extern crate ole32; extern crate kernel32; -use std::ffi::OsString; use std::io::Error as IoError; -use std::mem; -use std::os::windows::ffi::OsStringExt; -use std::ptr; -use std::slice; -use std::sync::{Arc, Mutex, MutexGuard}; -use ChannelPosition; -use Format; -use FormatsEnumerationError; -use SampleFormat; -use SamplesRate; - -pub use self::enumerate::{EndpointsIterator, default_endpoint}; -pub use self::voice::{Buffer, EventLoop, SamplesStream, Voice}; -pub use std::option::IntoIter as OptionIntoIter; - -pub type SupportedFormatsIterator = OptionIntoIter; +pub use self::endpoint::{Endpoint, EndpointsIterator, default_endpoint, SupportedFormatsIterator}; +pub use self::voice::{Buffer, EventLoop, VoiceId}; mod com; -mod enumerate; +mod endpoint; mod voice; #[inline] @@ -34,272 +19,3 @@ fn check_result(result: winapi::HRESULT) -> Result<(), IoError> { Ok(()) } } - -/// Wrapper because of that stupid decision to remove `Send` and `Sync` from raw pointers. -#[derive(Copy, Clone)] -struct IAudioClientWrapper(*mut winapi::IAudioClient); -unsafe impl Send for IAudioClientWrapper { -} -unsafe impl Sync for IAudioClientWrapper { -} - -/// An opaque type that identifies an end point. -pub struct Endpoint { - device: *mut winapi::IMMDevice, - - /// We cache an uninitialized `IAudioClient` so that we can call functions from it without - /// having to create/destroy audio clients all the time. 
- future_audio_client: Arc>>, // TODO: add NonZero around the ptr -} - -unsafe impl Send for Endpoint { -} -unsafe impl Sync for Endpoint { -} - -impl Endpoint { - // TODO: this function returns a GUID of the endpoin - // instead it should use the property store and return the friendly name - pub fn name(&self) -> String { - unsafe { - let mut name_ptr = mem::uninitialized(); - // can only fail if wrong params or out of memory - check_result((*self.device).GetId(&mut name_ptr)).unwrap(); - - // finding the length of the name - let mut len = 0; - while *name_ptr.offset(len) != 0 { - len += 1; - } - - // building a slice containing the name - let name_slice = slice::from_raw_parts(name_ptr, len as usize); - - // and turning it into a string - let name_string: OsString = OsStringExt::from_wide(name_slice); - ole32::CoTaskMemFree(name_ptr as *mut _); - name_string.into_string().unwrap() - } - } - - #[inline] - fn from_immdevice(device: *mut winapi::IMMDevice) -> Endpoint { - Endpoint { - device: device, - future_audio_client: Arc::new(Mutex::new(None)), - } - } - - /// Ensures that `future_audio_client` contains a `Some` and returns a locked mutex to it. - fn ensure_future_audio_client(&self) - -> Result>, IoError> { - let mut lock = self.future_audio_client.lock().unwrap(); - if lock.is_some() { - return Ok(lock); - } - - let audio_client: *mut winapi::IAudioClient = unsafe { - let mut audio_client = mem::uninitialized(); - let hresult = (*self.device).Activate(&winapi::IID_IAudioClient, - winapi::CLSCTX_ALL, - ptr::null_mut(), - &mut audio_client); - - // can fail if the device has been disconnected since we enumerated it, or if - // the device doesn't support playback for some reason - check_result(hresult)?; - assert!(!audio_client.is_null()); - audio_client as *mut _ - }; - - *lock = Some(IAudioClientWrapper(audio_client)); - Ok(lock) - } - - /// Returns an uninitialized `IAudioClient`. - #[inline] - fn build_audioclient(&self) -> Result<*mut winapi::IAudioClient, IoError> { - let mut lock = self.ensure_future_audio_client()?; - let client = lock.unwrap().0; - *lock = None; - Ok(client) - } - - pub fn supported_formats( - &self) - -> Result { - // We always create voices in shared mode, therefore all samples go through an audio - // processor to mix them together. - // However there is no way to query the list of all formats that are supported by the - // audio processor, but one format is guaranteed to be supported, the one returned by - // `GetMixFormat`. 
- - // initializing COM because we call `CoTaskMemFree` - com::com_initialized(); - - let lock = match self.ensure_future_audio_client() { - Err(ref e) if e.raw_os_error() == Some(winapi::AUDCLNT_E_DEVICE_INVALIDATED) => - return Err(FormatsEnumerationError::DeviceNotAvailable), - e => e.unwrap(), - }; - let client = lock.unwrap().0; - - unsafe { - let mut format_ptr = mem::uninitialized(); - match check_result((*client).GetMixFormat(&mut format_ptr)) { - Err(ref e) if e.raw_os_error() == Some(winapi::AUDCLNT_E_DEVICE_INVALIDATED) => { - return Err(FormatsEnumerationError::DeviceNotAvailable); - }, - Err(e) => panic!("{:?}", e), - Ok(()) => (), - }; - - let format = { - let (channels, data_type) = match (*format_ptr).wFormatTag { - winapi::WAVE_FORMAT_PCM => { - (vec![ChannelPosition::FrontLeft, ChannelPosition::FrontRight], - SampleFormat::I16) - }, - winapi::WAVE_FORMAT_EXTENSIBLE => { - let format_ptr = format_ptr as *const winapi::WAVEFORMATEXTENSIBLE; - - let channels = { - let mut channels = Vec::new(); - - let mask = (*format_ptr).dwChannelMask; - if (mask & winapi::SPEAKER_FRONT_LEFT) != 0 { - channels.push(ChannelPosition::FrontLeft); - } - if (mask & winapi::SPEAKER_FRONT_RIGHT) != 0 { - channels.push(ChannelPosition::FrontRight); - } - if (mask & winapi::SPEAKER_FRONT_CENTER) != 0 { - channels.push(ChannelPosition::FrontCenter); - } - if (mask & winapi::SPEAKER_LOW_FREQUENCY) != 0 { - channels.push(ChannelPosition::LowFrequency); - } - if (mask & winapi::SPEAKER_BACK_LEFT) != 0 { - channels.push(ChannelPosition::BackLeft); - } - if (mask & winapi::SPEAKER_BACK_RIGHT) != 0 { - channels.push(ChannelPosition::BackRight); - } - if (mask & winapi::SPEAKER_FRONT_LEFT_OF_CENTER) != 0 { - channels.push(ChannelPosition::FrontLeftOfCenter); - } - if (mask & winapi::SPEAKER_FRONT_RIGHT_OF_CENTER) != 0 { - channels.push(ChannelPosition::FrontRightOfCenter); - } - if (mask & winapi::SPEAKER_BACK_CENTER) != 0 { - channels.push(ChannelPosition::BackCenter); - } - if (mask & winapi::SPEAKER_SIDE_LEFT) != 0 { - channels.push(ChannelPosition::SideLeft); - } - if (mask & winapi::SPEAKER_SIDE_RIGHT) != 0 { - channels.push(ChannelPosition::SideRight); - } - if (mask & winapi::SPEAKER_TOP_CENTER) != 0 { - channels.push(ChannelPosition::TopCenter); - } - if (mask & winapi::SPEAKER_TOP_FRONT_LEFT) != 0 { - channels.push(ChannelPosition::TopFrontLeft); - } - if (mask & winapi::SPEAKER_TOP_FRONT_CENTER) != 0 { - channels.push(ChannelPosition::TopFrontCenter); - } - if (mask & winapi::SPEAKER_TOP_FRONT_RIGHT) != 0 { - channels.push(ChannelPosition::TopFrontRight); - } - if (mask & winapi::SPEAKER_TOP_BACK_LEFT) != 0 { - channels.push(ChannelPosition::TopBackLeft); - } - if (mask & winapi::SPEAKER_TOP_BACK_CENTER) != 0 { - channels.push(ChannelPosition::TopBackCenter); - } - if (mask & winapi::SPEAKER_TOP_BACK_RIGHT) != 0 { - channels.push(ChannelPosition::TopBackRight); - } - - assert_eq!((*format_ptr).Format.nChannels as usize, channels.len()); - channels - }; - - let format = { - fn cmp_guid(a: &winapi::GUID, b: &winapi::GUID) -> bool { - a.Data1 == b.Data1 && a.Data2 == b.Data2 && a.Data3 == b.Data3 && - a.Data4 == b.Data4 - } - if cmp_guid(&(*format_ptr).SubFormat, - &winapi::KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) - { - SampleFormat::F32 - } else if cmp_guid(&(*format_ptr).SubFormat, - &winapi::KSDATAFORMAT_SUBTYPE_PCM) - { - SampleFormat::I16 - } else { - panic!("Unknown SubFormat GUID returned by GetMixFormat: {:?}", - (*format_ptr).SubFormat) - } - }; - - (channels, format) - }, - - f => panic!("Unknown 
data format returned by GetMixFormat: {:?}", f), - }; - - Format { - channels: channels, - samples_rate: SamplesRate((*format_ptr).nSamplesPerSec), - data_type: data_type, - } - }; - - ole32::CoTaskMemFree(format_ptr as *mut _); - - Ok(Some(format).into_iter()) - } - } -} - -impl PartialEq for Endpoint { - #[inline] - fn eq(&self, other: &Endpoint) -> bool { - self.device == other.device - } -} - -impl Eq for Endpoint { -} - -impl Clone for Endpoint { - #[inline] - fn clone(&self) -> Endpoint { - unsafe { - (*self.device).AddRef(); - } - - Endpoint { - device: self.device, - future_audio_client: self.future_audio_client.clone(), - } - } -} - -impl Drop for Endpoint { - #[inline] - fn drop(&mut self) { - unsafe { - (*self.device).Release(); - } - - if let Some(client) = self.future_audio_client.lock().unwrap().take() { - unsafe { - (*client.0).Release(); - } - } - } -} diff --git a/src/wasapi/voice.rs b/src/wasapi/voice.rs index f06961a..e972e0f 100644 --- a/src/wasapi/voice.rs +++ b/src/wasapi/voice.rs @@ -6,19 +6,15 @@ use super::kernel32; use super::ole32; use super::winapi; +use std::iter; +use std::marker::PhantomData; use std::mem; use std::ptr; use std::slice; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering; use std::sync::Arc; use std::sync::Mutex; -use std::sync::atomic::AtomicBool; -use std::sync::atomic::Ordering; - -use futures::Async; -use futures::Poll; -use futures::stream::Stream; -use futures::task; -use futures::task::Task; use ChannelPosition; use CreationError; @@ -27,45 +23,56 @@ use SampleFormat; use UnknownTypeBuffer; pub struct EventLoop { - inner: Arc, -} + // Data used by the `run()` function implementation. The mutex is kept lock permanently by + // `run()`. This ensures that two `run()` invocations can't run at the same time, and also + // means that we shouldn't try to lock this field from anywhere else but `run()`. + run_context: Mutex, -unsafe impl Send for EventLoop { -} -unsafe impl Sync for EventLoop { -} + // Identifier of the next voice to create. Each new voice increases this counter. If the + // counter overflows, there's a panic. + // TODO: use AtomicU64 instead + next_voice_id: AtomicUsize, -struct EventLoopInner { - // List of handles that are currently being polled or that are going to be polled. This mutex - // is locked for as long as the event loop is running. - // - // In the `EventLoopScheduled`, the first handle in the list of handles is always - // `pending_scheduled_event`. This means that the length of `handles` is always 1 + the length - // of `task_handles`. - // FIXME: no way to remove elements from that list? - scheduled: Mutex, + // Commands processed by the `run()` method that is currently running. + // `pending_scheduled_event` must be signalled whenever a command is added here, so that it + // will get picked up. + // TODO: use a lock-free container + commands: Mutex>, - // Since the above mutex is locked most of the time, we add new handles to this list instead. - // After a new element is added to this list, you should notify `pending_scheduled_event` - // so that they get transferred to `scheduled`. - // - // The length of `handles` and `task_handles` should always be equal. - pending_scheduled: Mutex, - - // This event is signalled after elements have been added to `pending_scheduled` in order to - // notify that they should be picked up. + // This event is signalled after a new entry is added to `commands`, so that the `run()` + // method can be notified. 
pending_scheduled_event: winapi::HANDLE, } -struct EventLoopScheduled { - // List of handles that correspond to voices. - // They are linked to `task_handles`, but we store them separately in order to easily call - // `WaitForMultipleObjectsEx` on the array without having to perform any conversion. - handles: Vec, +struct RunContext { + // Voices that have been created in this event loop. + voices: Vec, - // List of task handles corresponding to `handles`. The second element is used to signal - // the voice that it has been signaled. - task_handles: Vec<(Task, Arc)>, + // Handles corresponding to the `event` field of each element of `voices`. Must always be in + // sync with `voices`, except that the first element is always `pending_scheduled_event`. + handles: Vec, +} + +enum Command { + NewVoice(VoiceInner), + DestroyVoice(VoiceId), + Play(VoiceId), + Pause(VoiceId), +} + +struct VoiceInner { + id: VoiceId, + audio_client: *mut winapi::IAudioClient, + render_client: *mut winapi::IAudioRenderClient, + // Event that is signalled by WASAPI whenever audio data must be written. + event: winapi::HANDLE, + // True if the voice is currently playing. False if paused. + playing: bool, + + // Number of frames of audio data in the underlying buffer allocated by WASAPI. + max_frames_in_buffer: winapi::UINT32, + // Number of bytes that each frame occupies. + bytes_per_frame: winapi::WORD, } impl EventLoop { @@ -74,103 +81,18 @@ impl EventLoop { unsafe { kernel32::CreateEventA(ptr::null_mut(), 0, 0, ptr::null()) }; EventLoop { - inner: Arc::new(EventLoopInner { - pending_scheduled_event: pending_scheduled_event, - scheduled: Mutex::new(EventLoopScheduled { - handles: vec![pending_scheduled_event], - task_handles: vec![], - }), - pending_scheduled: Mutex::new(EventLoopScheduled { - handles: vec![], - task_handles: vec![], - }), - }), + pending_scheduled_event: pending_scheduled_event, + run_context: Mutex::new(RunContext { + voices: Vec::new(), + handles: vec![pending_scheduled_event], + }), + next_voice_id: AtomicUsize::new(0), + commands: Mutex::new(Vec::new()), } } - pub fn run(&self) { - unsafe { - let mut scheduled = self.inner.scheduled.lock().unwrap(); - - loop { - debug_assert!(scheduled.handles.len() == 1 + scheduled.task_handles.len()); - - // Creating a voice checks for the MAXIMUM_WAIT_OBJECTS limit. - // FIXME: this is not the case ^ - debug_assert!(scheduled.handles.len() <= winapi::MAXIMUM_WAIT_OBJECTS as usize); - - // Wait for any of the handles to be signalled, which means that the corresponding - // sound needs a buffer. - let result = kernel32::WaitForMultipleObjectsEx(scheduled.handles.len() as u32, - scheduled.handles.as_ptr(), - winapi::FALSE, - winapi::INFINITE, /* TODO: allow setting a timeout */ - winapi::FALSE /* irrelevant parameter here */); - - // Notifying the corresponding task handler. - assert!(result >= winapi::WAIT_OBJECT_0); - let handle_id = (result - winapi::WAIT_OBJECT_0) as usize; - - if handle_id == 0 { - // The `pending_scheduled_event` handle has been notified, which means that we - // should pick up the content of `pending_scheduled`. 
- let mut pending = self.inner.pending_scheduled.lock().unwrap(); - scheduled.handles.append(&mut pending.handles); - scheduled.task_handles.append(&mut pending.task_handles); - - } else { - scheduled.handles.remove(handle_id); - let (task_handle, ready) = scheduled.task_handles.remove(handle_id - 1); - ready.store(true, Ordering::Relaxed); - task_handle.unpark(); - } - } - } - } -} - -impl Drop for EventLoop { - #[inline] - fn drop(&mut self) { - unsafe { - kernel32::CloseHandle(self.inner.pending_scheduled_event); - } - } -} - -pub struct Voice { - inner: Arc>, - playing: bool, -} - -pub struct SamplesStream { - event_loop: Arc, - inner: Arc>, - // The event that is signalled whenever a buffer is ready to be submitted to the voice. - event: winapi::HANDLE, // TODO: not deleted - max_frames_in_buffer: winapi::UINT32, - bytes_per_frame: winapi::WORD, - ready: Arc, -} - -unsafe impl Send for SamplesStream { -} -unsafe impl Sync for SamplesStream { -} - -struct VoiceInner { - audio_client: *mut winapi::IAudioClient, - render_client: *mut winapi::IAudioRenderClient, -} - -unsafe impl Send for Voice { -} -unsafe impl Sync for Voice { -} - -impl Voice { - pub fn new(end_point: &Endpoint, format: &Format, event_loop: &EventLoop) - -> Result<(Voice, SamplesStream), CreationError> { + pub fn build_voice(&self, end_point: &Endpoint, format: &Format) + -> Result { unsafe { // Making sure that COM is initialized. // It's not actually sure that this is required, but when in doubt do it. @@ -305,169 +227,218 @@ impl Voice { &mut *render_client }; - // Everything went fine. - let inner = Arc::new(Mutex::new(VoiceInner { - audio_client: audio_client, - render_client: render_client, - })); + let new_voice_id = VoiceId(self.next_voice_id.fetch_add(1, Ordering::Relaxed)); + assert_ne!(new_voice_id.0, usize::max_value()); // check for overflows - let voice = Voice { - inner: inner.clone(), - playing: false, + // Once we built the `VoiceInner`, we add a command that will be picked up by the + // `run()` method and added to the `RunContext`. 
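+            // We then signal `pending_scheduled_event`, which is always the first handle that
+            // `run()` waits on, so that a `run()` blocked in `WaitForMultipleObjectsEx` wakes
+            // up and registers the new voice before waiting again.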
+ { + let inner = VoiceInner { + id: new_voice_id.clone(), + audio_client: audio_client, + render_client: render_client, + event: event, + playing: false, + max_frames_in_buffer: max_frames_in_buffer, + bytes_per_frame: format.nBlockAlign, + }; + + self.commands.lock().unwrap().push(Command::NewVoice(inner)); + + let result = kernel32::SetEvent(self.pending_scheduled_event); + assert!(result != 0); }; - let samples_stream = SamplesStream { - event_loop: event_loop.inner.clone(), - inner: inner, - event: event, - max_frames_in_buffer: max_frames_in_buffer, - bytes_per_frame: format.nBlockAlign, - ready: Arc::new(AtomicBool::new(false)), - }; - - Ok((voice, samples_stream)) + Ok(new_voice_id) } } #[inline] - pub fn play(&mut self) { - if !self.playing { - let mut inner = self.inner.lock().unwrap(); - - unsafe { - let hresult = (*inner.audio_client).Start(); - check_result(hresult).unwrap(); - } - } - - self.playing = true; - } - - #[inline] - pub fn pause(&mut self) { - if self.playing { - let mut inner = self.inner.lock().unwrap(); - - unsafe { - let hresult = (*inner.audio_client).Stop(); - check_result(hresult).unwrap(); - } - } - - self.playing = false; - } -} - -impl SamplesStream { - #[inline] - fn schedule(&mut self) { - let mut pending = self.event_loop.pending_scheduled.lock().unwrap(); - pending.handles.push(self.event); - pending - .task_handles - .push((task::park(), self.ready.clone())); - drop(pending); - - let result = unsafe { kernel32::SetEvent(self.event_loop.pending_scheduled_event) }; - assert!(result != 0); - } -} - -impl Stream for SamplesStream { - type Item = UnknownTypeBuffer; - type Error = (); - - fn poll(&mut self) -> Poll, Self::Error> { + pub fn destroy_voice(&self, voice_id: VoiceId) { unsafe { - if self.ready.swap(false, Ordering::Relaxed) == false { - // Despite its name this function does not block, because we pass `0`. - let result = kernel32::WaitForSingleObject(self.event, 0); + self.commands.lock().unwrap().push(Command::DestroyVoice(voice_id)); + let result = kernel32::SetEvent(self.pending_scheduled_event); + assert!(result != 0); + } + } - // Park the task and returning if the event is not ready. - match result { - winapi::WAIT_OBJECT_0 => (), - winapi::WAIT_TIMEOUT => { - self.schedule(); - return Ok(Async::NotReady); - }, - _ => unreachable!(), - }; - } + #[inline] + pub fn run(&self, mut callback: F) -> ! + where F: FnMut(VoiceId, UnknownTypeBuffer) + { + self.run_inner(&mut callback); + } - // If we reach here, that means we're ready to accept new samples. + fn run_inner(&self, callback: &mut FnMut(VoiceId, UnknownTypeBuffer)) -> ! { + unsafe { + // We keep `run_context` locked forever, which guarantees that two invocations of + // `run()` cannot run simultaneously. + let mut run_context = self.run_context.lock().unwrap(); - let poll = { - let mut inner = self.inner.lock().unwrap(); + loop { + // Process the pending commands. + let mut commands_lock = self.commands.lock().unwrap(); + for command in commands_lock.drain(..) 
{
+                    match command {
+                        Command::NewVoice(voice_inner) => {
+                            let event = voice_inner.event;
+                            run_context.voices.push(voice_inner);
+                            run_context.handles.push(event);
+                        },
+                        Command::DestroyVoice(voice_id) => {
+                            match run_context.voices.iter().position(|v| v.id == voice_id) {
+                                None => continue,
+                                Some(p) => {
+                                    run_context.handles.remove(p + 1);
+                                    run_context.voices.remove(p);
+                                },
+                            }
+                        },
+                        Command::Play(voice_id) => {
+                            if let Some(v) = run_context.voices.iter_mut().find(|v| v.id == voice_id) {
+                                if !v.playing {
+                                    let hresult = (*v.audio_client).Start();
+                                    check_result(hresult).unwrap();
+                                    v.playing = true;
+                                }
+                            }
+                        },
+                        Command::Pause(voice_id) => {
+                            if let Some(v) = run_context.voices.iter_mut().find(|v| v.id == voice_id) {
+                                if v.playing {
+                                    let hresult = (*v.audio_client).Stop();
+                                    check_result(hresult).unwrap();
+                                    v.playing = false;
+                                }
+                            }
+                        },
+                    }
+                }
+                drop(commands_lock);
-
-            // Obtaining the number of frames that are available to be written.
-            let frames_available = {
-                let mut padding = mem::uninitialized();
-                let hresult = (*inner.audio_client).GetCurrentPadding(&mut padding);
-                check_result(hresult).unwrap();
-                self.max_frames_in_buffer - padding
-            };
+                // Wait for any of the handles to be signalled, which means that the corresponding
+                // sound needs a buffer.
+                debug_assert!(run_context.handles.len() <= winapi::MAXIMUM_WAIT_OBJECTS as usize);
+                let result = kernel32::WaitForMultipleObjectsEx(run_context.handles.len() as u32,
+                                                                run_context.handles.as_ptr(),
+                                                                winapi::FALSE,
+                                                                winapi::INFINITE, /* TODO: allow setting a timeout */
+                                                                winapi::FALSE /* irrelevant parameter here */);
-            if frames_available == 0 {
-                Ok(Async::NotReady)
-            } else {
+                // Figuring out which handle was signalled.
+                debug_assert!(result >= winapi::WAIT_OBJECT_0);
+                let handle_id = (result - winapi::WAIT_OBJECT_0) as usize;
+
+                // If `handle_id` is 0, then it's `pending_scheduled_event` that was signalled in
+                // order for us to pick up the pending commands.
+                // Otherwise, a voice needs data.
+                if handle_id >= 1 {
+                    let voice = &mut run_context.voices[handle_id - 1];
+                    let voice_id = voice.id.clone();
+
+                    // Obtaining the number of frames that are available to be written.
+                    let frames_available = {
+                        let mut padding = mem::uninitialized();
+                        let hresult = (*voice.audio_client).GetCurrentPadding(&mut padding);
+                        check_result(hresult).unwrap();
+                        voice.max_frames_in_buffer - padding
+                    };
+
+                    if frames_available == 0 {
+                        // TODO: can this happen?
+                        continue;
+                    }
                    // Obtaining a pointer to the buffer.
let (buffer_data, buffer_len) = { let mut buffer: *mut winapi::BYTE = mem::uninitialized(); - let hresult = (*inner.render_client) + let hresult = (*voice.render_client) .GetBuffer(frames_available, &mut buffer as *mut *mut _); check_result(hresult).unwrap(); // FIXME: can return `AUDCLNT_E_DEVICE_INVALIDATED` debug_assert!(!buffer.is_null()); (buffer as *mut _, - frames_available as usize * self.bytes_per_frame as usize / - mem::size_of::()) // FIXME: correct size + frames_available as usize * voice.bytes_per_frame as usize / + mem::size_of::()) // FIXME: correct size when not f32 }; let buffer = Buffer { - voice: self.inner.clone(), + voice: voice, buffer_data: buffer_data, buffer_len: buffer_len, frames: frames_available, + marker: PhantomData, }; - Ok(Async::Ready(Some(UnknownTypeBuffer::F32(::Buffer { - target: Some(buffer), - })))) // FIXME: not necessarily F32 + let buffer = UnknownTypeBuffer::F32(::Buffer { target: Some(buffer) }); // FIXME: not always f32 + callback(voice_id, buffer); } - }; - - if let Ok(Async::NotReady) = poll { - self.schedule(); } + } + } - poll + #[inline] + pub fn play(&self, voice: VoiceId) { + unsafe { + self.commands.lock().unwrap().push(Command::Play(voice)); + let result = kernel32::SetEvent(self.pending_scheduled_event); + assert!(result != 0); + } + } + + #[inline] + pub fn pause(&self, voice: VoiceId) { + unsafe { + self.commands.lock().unwrap().push(Command::Pause(voice)); + let result = kernel32::SetEvent(self.pending_scheduled_event); + assert!(result != 0); } } } +impl Drop for EventLoop { + #[inline] + fn drop(&mut self) { + unsafe { + kernel32::CloseHandle(self.pending_scheduled_event); + } + } +} + +unsafe impl Send for EventLoop { +} +unsafe impl Sync for EventLoop { +} + +// The content of a voice ID is a number that was fetched from `next_voice_id`. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct VoiceId(usize); + impl Drop for VoiceInner { #[inline] fn drop(&mut self) { unsafe { (*self.render_client).Release(); (*self.audio_client).Release(); + kernel32::CloseHandle(self.event); } } } -pub struct Buffer { - voice: Arc>, +pub struct Buffer<'a, T: 'a> { + voice: &'a mut VoiceInner, buffer_data: *mut T, buffer_len: usize, frames: winapi::UINT32, + + marker: PhantomData<&'a mut [T]>, } -unsafe impl Send for Buffer { +unsafe impl<'a, T> Send for Buffer<'a, T> { } -impl Buffer { +impl<'a, T> Buffer<'a, T> { #[inline] pub fn buffer(&mut self) -> &mut [T] { unsafe { slice::from_raw_parts_mut(self.buffer_data, self.buffer_len) } @@ -481,10 +452,9 @@ impl Buffer { #[inline] pub fn finish(self) { unsafe { - let mut inner = self.voice.lock().unwrap(); - let hresult = (*inner.render_client).ReleaseBuffer(self.frames as u32, 0); + let hresult = (*self.voice.render_client).ReleaseBuffer(self.frames as u32, 0); match check_result(hresult) { - // ignoring the error that is produced if the device has been disconnected + // Ignoring the error that is produced if the device has been disconnected. Err(ref e) if e.raw_os_error() == Some(winapi::AUDCLNT_E_DEVICE_INVALIDATED) => (), e => e.unwrap(), }; @@ -492,6 +462,7 @@ impl Buffer { } } +// Turns a `Format` into a `WAVEFORMATEXTENSIBLE`. 
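+// Returns `CreationError::FormatNotSupported` if the format's sample type has no WASAPI
+// equivalent (currently `U16`).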
fn format_to_waveformatextensible(format: &Format) -> Result { Ok(winapi::WAVEFORMATEXTENSIBLE { @@ -505,10 +476,10 @@ fn format_to_waveformatextensible(format: &Format) nSamplesPerSec: format.samples_rate.0 as winapi::DWORD, nAvgBytesPerSec: format.channels.len() as winapi::DWORD * format.samples_rate.0 as winapi::DWORD * - format.data_type.get_sample_size() as winapi::DWORD, + format.data_type.sample_size() as winapi::DWORD, nBlockAlign: format.channels.len() as winapi::WORD * - format.data_type.get_sample_size() as winapi::WORD, - wBitsPerSample: 8 * format.data_type.get_sample_size() as winapi::WORD, + format.data_type.sample_size() as winapi::WORD, + wBitsPerSample: 8 * format.data_type.sample_size() as winapi::WORD, cbSize: match format.data_type { SampleFormat::I16 => 0, SampleFormat::F32 => (mem::size_of::() - @@ -517,7 +488,7 @@ fn format_to_waveformatextensible(format: &Format) SampleFormat::U16 => return Err(CreationError::FormatNotSupported), }, }, - Samples: 8 * format.data_type.get_sample_size() as winapi::WORD, + Samples: 8 * format.data_type.sample_size() as winapi::WORD, dwChannelMask: { let mut mask = 0; for &channel in format.channels.iter() {
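// ---------------------------------------------------------------------------
// Illustrative spot-check (not part of the patch): the `Sample::to_f32`
// formulas added to `src/samples_formats.rs` above can be verified in
// isolation. The helper names below are made up for this sketch; their bodies
// copy the formulas from the diff. The `i16` branch maps MIN/0/MAX exactly
// onto -1.0/0.0/1.0, and the `u16` formula flagged "TODO: maybe wrong" maps
// its endpoints onto -1.0/1.0 with the midpoint landing only a hair off 0.0.
// ---------------------------------------------------------------------------

fn u16_to_f32(x: u16) -> f32 {
    ((x as f32 / u16::max_value() as f32) - 0.5) * 2.0
}

fn i16_to_f32(x: i16) -> f32 {
    if x < 0 {
        x as f32 / -(::std::i16::MIN as f32)
    } else {
        x as f32 / ::std::i16::MAX as f32
    }
}

fn main() {
    // i16: exact at the extremes and at zero.
    assert_eq!(i16_to_f32(::std::i16::MIN), -1.0);
    assert_eq!(i16_to_f32(0), 0.0);
    assert_eq!(i16_to_f32(::std::i16::MAX), 1.0);

    // u16: exact at the extremes; the midpoint is only approximately zero
    // because the unsigned range contains an even number of values.
    assert_eq!(u16_to_f32(0), -1.0);
    assert_eq!(u16_to_f32(u16::max_value()), 1.0);
    assert!(u16_to_f32(1 << 15).abs() < 1.0e-4);
}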