diff --git a/examples/beep.rs b/examples/beep.rs
index e74120a..97fa384 100644
--- a/examples/beep.rs
+++ b/examples/beep.rs
@@ -18,11 +18,15 @@ fn main() {
     });
 
     loop {
-        let mut buffer = channel.append_data(1, cpal::SamplesRate(44100), 32768);
+        {
+            let mut buffer = channel.append_data(1, cpal::SamplesRate(44100), 32768);
 
-        for sample in buffer.iter_mut() {
-            let value = data_source.next().unwrap();
-            *sample = value;
+            for sample in buffer.iter_mut() {
+                let value = data_source.next().unwrap();
+                *sample = value;
+            }
         }
+
+        channel.play();
     }
 }
diff --git a/examples/music.rs b/examples/music.rs
index 34006ce..39aa106 100644
--- a/examples/music.rs
+++ b/examples/music.rs
@@ -5,6 +5,7 @@ use std::io::BufReader;
 
 fn main() {
     let mut channel = cpal::Voice::new();
+    channel.play();
 
     let mut decoder = vorbis::Decoder::new(BufReader::new(include_bin!("music.ogg")))
                                 .unwrap();
@@ -20,22 +21,27 @@ fn main() {
                 continue 'main;
             }
 
-            let mut buffer = channel.append_data(channels, cpal::SamplesRate(rate as u32), data.len());
-            let mut buffer = buffer.iter_mut();
+            {
+                let mut buffer = channel.append_data(channels, cpal::SamplesRate(rate as u32),
+                                                     data.len());
+                let mut buffer = buffer.iter_mut();
 
-            loop {
-                let next_sample = match data.get(0) {
-                    Some(s) => *s,
-                    None => continue 'main
-                };
+                loop {
+                    let next_sample = match data.get(0) {
+                        Some(s) => *s,
+                        None => continue 'main
+                    };
 
-                if let Some(output) = buffer.next() {
-                    *output = next_sample as u16;
-                    data = data.slice_from(1);
-                } else {
-                    break;
+                    if let Some(output) = buffer.next() {
+                        *output = next_sample as u16;
+                        data = data.slice_from(1);
+                    } else {
+                        break;
+                    }
                 }
             }
+
+            channel.play();
         }
     }
 }
diff --git a/src/alsa/mod.rs b/src/alsa/mod.rs
index 40450b5..9f796a9 100644
--- a/src/alsa/mod.rs
+++ b/src/alsa/mod.rs
@@ -63,6 +63,15 @@ impl Voice {
             buffer: Vec::from_elem(elements, unsafe { mem::uninitialized() })
         }
     }
+
+    pub fn play(&mut self) {
+        // already playing
+        //unimplemented!()
+    }
+
+    pub fn pause(&mut self) {
+        unimplemented!()
+    }
 }
 
 impl Drop for Voice {
diff --git a/src/lib.rs b/src/lib.rs
index b760ea6..2a03a28 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -27,8 +27,15 @@ for e in buffer.iter_mut() {
 This is the case if the device doesn't have enough space available. **It happens very often**,
 this is not some obscure situation that can be ignored.
 
+After you have submitted data for the first time, call `play`:
+
+```no_run
+# let mut voice = cpal::Voice::new();
+voice.play();
+```
+
 The audio device of the user will read the buffer that you sent, and play it. If the audio device
-reaches the end of the data, it will stop playing. You **must** continuously fill the buffer by
+reaches the end of the data, it will stop playing. You must continuously fill the buffer by
 calling `append_data` repeatedly if you don't want the audio to stop playing.
 
 # Native format
@@ -212,6 +219,25 @@ impl Voice {
             }
         }
     }
+
+    /// Sends a command to the audio device that it should start playing.
+    ///
+    /// Has no effect if the voice was already playing.
+    ///
+    /// Only call this after you have submitted some data; otherwise you may hear
+    /// some glitches.
+    pub fn play(&mut self) {
+        self.0.play()
+    }
+
+    /// Sends a command to the audio device that it should stop playing.
+    ///
+    /// Has no effect if the voice was already paused.
+    ///
+    /// If you call `play` afterwards, the playback will resume exactly where it was.
+    pub fn pause(&mut self) {
+        self.0.pause()
+    }
 }
 
 impl<'a, T> Deref<[T]> for Buffer<'a, T> {
diff --git a/src/wasapi/mod.rs b/src/wasapi/mod.rs
index f522e99..53a258d 100644
--- a/src/wasapi/mod.rs
+++ b/src/wasapi/mod.rs
@@ -13,15 +13,13 @@ pub struct Voice {
     bytes_per_frame: winapi::WORD,
     samples_per_second: winapi::DWORD,
     bits_per_sample: winapi::WORD,
-    started: bool,
+    playing: bool,
 }
 
 pub struct Buffer<'a, T> {
-    audio_client: *mut winapi::IAudioClient,
     render_client: *mut winapi::IAudioRenderClient,
     buffer: CVec<T>,
     frames: winapi::UINT32,
-    start_on_drop: bool,
 }
 
 impl Voice {
@@ -83,18 +81,39 @@ impl Voice {
                 };
 
                 let buffer = Buffer {
-                    audio_client: self.audio_client,
                     render_client: self.render_client,
                     buffer: buffer,
                     frames: frames_available,
-                    start_on_drop: !self.started,
                 };
 
-                self.started = true;
                 return buffer;
             }
         }
     }
+
+    pub fn play(&mut self) {
+        if !self.playing {
+            unsafe {
+                let f = self.audio_client.as_mut().unwrap().lpVtbl.as_ref().unwrap().Start;
+                let hresult = f(self.audio_client);
+                check_result(hresult).unwrap();
+            }
+        }
+
+        self.playing = true;
+    }
+
+    pub fn pause(&mut self) {
+        if self.playing {
+            unsafe {
+                let f = self.audio_client.as_mut().unwrap().lpVtbl.as_ref().unwrap().Stop;
+                let hresult = f(self.audio_client);
+                check_result(hresult).unwrap();
+            }
+        }
+
+        self.playing = false;
+    }
 }
 
 impl Drop for Voice {
@@ -124,12 +143,6 @@ impl<'a, T> Buffer<'a, T> {
             let f = self.render_client.as_mut().unwrap().lpVtbl.as_ref().unwrap().ReleaseBuffer;
             let hresult = f(self.render_client, self.frames as u32, 0);
             check_result(hresult).unwrap();
-
-            if self.start_on_drop {
-                let f = self.audio_client.as_mut().unwrap().lpVtbl.as_ref().unwrap().Start;
-                let hresult = f(self.audio_client);
-                check_result(hresult).unwrap();
-            }
         };
     }
 }
@@ -237,7 +250,7 @@ fn init() -> Result {
             bytes_per_frame: format.nBlockAlign,
             samples_per_second: format.nSamplesPerSec,
             bits_per_sample: format.wBitsPerSample,
-            started: false,
+            playing: false,
         })
     }
 }
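Taken together, the patch changes the calling convention: `append_data` no longer starts the device implicitly, the caller submits data and then calls `play()` explicitly. Below is a minimal sketch of the resulting flow, modelled on the `examples/beep.rs` hunk above and using the pre-1.0 `cpal` API exactly as it appears in this diff; the `data_source` iterator and its square-wave values are made-up stand-ins for the sample generator the example builds outside the hunk.

```rust
extern crate cpal;

fn main() {
    let mut voice = cpal::Voice::new();

    // Hypothetical stand-in for the example's sample generator.
    let mut data_source = (0u16..).map(|i| if (i / 100) % 2 == 0 { 10000 } else { 0 });

    loop {
        {
            // Ask the backend for room to write up to 32768 mono samples at 44100 Hz.
            let mut buffer = voice.append_data(1, cpal::SamplesRate(44100), 32768);

            // Fill whatever space the device actually handed back.
            for sample in buffer.iter_mut() {
                *sample = data_source.next().unwrap();
            }
        } // dropping the buffer releases its borrow of the voice and submits the data

        // Start playback (a no-op if the voice is already playing),
        // then loop around to keep the queue fed.
        voice.play();
    }
}
```

The extra block around `append_data` in the examples exists so the returned `Buffer`, which borrows the voice, is dropped before `play()` is called; previously the WASAPI backend started the stream from the buffer's `start_on_drop` flag, which this patch removes in favour of the explicit `play`/`pause` pair.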