Merge pull request #196 from mitchmindtree/device_enumeration

[coreaudio] Implement Endpoint and Format Enumeration
mitchmindtree 2018-02-04 15:10:39 +11:00 committed by GitHub
commit 6bf65f589d
5 changed files with 294 additions and 31 deletions

View File

@ -1,5 +1,6 @@
# Unreleased
- Implement Endpoint and Format Enumeration for macOS.
- Implement format handling for the macOS `build_voice` method.
# Version 0.6.0 (2017-12-11)

View File

@ -21,7 +21,8 @@ alsa-sys = { version = "0.1", path = "alsa-sys" }
libc = "0.2"
[target.'cfg(any(target_os = "macos", target_os = "ios"))'.dependencies]
coreaudio-rs = { version = "0.8.1", default-features = false, features = ["audio_unit", "core_audio"] }
core-foundation-sys = "0.5.1" # For linking to CoreFoundation.framework and handling device name `CFString`s.
[target.'cfg(target_os = "emscripten")'.dependencies]
stdweb = { version = "0.1.3", default-features = false }

View File

@ -1,8 +1,9 @@
extern crate cpal;
fn main() {
println!("Default Endpoint:\n {:?}", cpal::default_endpoint().map(|e| e.name()));
let endpoints = cpal::endpoints();
println!("Endpoints: ");
for (endpoint_index, endpoint) in endpoints.enumerate() {
println!("{}. Endpoint \"{}\" Audio formats: ",

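The example diff above is cut off mid-statement in this view. As a minimal sketch (not the commit's verbatim example, and assuming the `cpal::endpoints`, `cpal::default_endpoint`, `Endpoint::name` and `Endpoint::supported_formats` calls shown elsewhere in this change), the full enumeration example can look roughly like this:

extern crate cpal;

fn main() {
    // Print the default endpoint, then every endpoint together with its supported formats.
    println!("Default Endpoint:\n {:?}", cpal::default_endpoint().map(|e| e.name()));

    let endpoints = cpal::endpoints();
    println!("Endpoints: ");
    for (endpoint_index, endpoint) in endpoints.enumerate() {
        println!("{}. Endpoint \"{}\" Audio formats: ", endpoint_index, endpoint.name());
        match endpoint.supported_formats() {
            Ok(formats) => for (format_index, format) in formats.enumerate() {
                println!("{}.{}. {:?}", endpoint_index, format_index, format);
            },
            Err(err) => println!("   error while querying formats: {:?}", err),
        }
    }
}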
View File

@ -1,10 +1,76 @@
use std::mem;
use std::ptr::null;
use std::vec::IntoIter as VecIntoIter;
use super::coreaudio::sys::{
AudioDeviceID,
AudioObjectPropertyAddress,
AudioObjectGetPropertyData,
AudioObjectGetPropertyDataSize,
kAudioHardwareNoError,
kAudioHardwarePropertyDefaultOutputDevice,
kAudioHardwarePropertyDevices,
kAudioObjectPropertyElementMaster,
kAudioObjectPropertyScopeGlobal,
kAudioObjectSystemObject,
OSStatus,
};
use super::Endpoint;
use SupportedFormat;
macro_rules! try_status_or_return {
($status:expr) => {
if $status != kAudioHardwareNoError as i32 {
return Err($status);
}
};
}
unsafe fn audio_output_devices() -> Result<Vec<AudioDeviceID>, OSStatus> {
let property_address = AudioObjectPropertyAddress {
mSelector: kAudioHardwarePropertyDevices,
mScope: kAudioObjectPropertyScopeGlobal,
mElement: kAudioObjectPropertyElementMaster,
};
let data_size = 0u32;
let status = AudioObjectGetPropertyDataSize(
kAudioObjectSystemObject,
&property_address as *const _,
0,
null(),
&data_size as *const _ as *mut _,
);
try_status_or_return!(status);
let device_count = data_size / mem::size_of::<AudioDeviceID>() as u32;
let mut audio_devices = vec![];
audio_devices.reserve_exact(device_count as usize);
let status = AudioObjectGetPropertyData(
kAudioObjectSystemObject,
&property_address as *const _,
0,
null(),
&data_size as *const _ as *mut _,
audio_devices.as_mut_ptr() as *mut _,
);
try_status_or_return!(status);
audio_devices.set_len(device_count as usize);
// Only keep the devices that have some supported output format.
audio_devices.retain(|&id| {
let e = Endpoint { audio_device_id: id };
match e.supported_formats() {
Err(_) => false,
Ok(mut fmts) => fmts.next().is_some(),
}
});
Ok(audio_devices)
}
pub struct EndpointsIterator(VecIntoIter<AudioDeviceID>);
unsafe impl Send for EndpointsIterator {
}
@ -13,24 +79,47 @@ unsafe impl Sync for EndpointsIterator {
impl Default for EndpointsIterator {
fn default() -> Self {
let devices = unsafe {
audio_output_devices().expect("failed to get audio output devices")
};
EndpointsIterator(devices.into_iter())
}
}
impl Iterator for EndpointsIterator {
type Item = Endpoint;
fn next(&mut self) -> Option<Endpoint> {
self.0.next().map(|id| Endpoint { audio_device_id: id })
}
}
pub fn default_endpoint() -> Option<Endpoint> {
let property_address = AudioObjectPropertyAddress {
mSelector: kAudioHardwarePropertyDefaultOutputDevice,
mScope: kAudioObjectPropertyScopeGlobal,
mElement: kAudioObjectPropertyElementMaster,
};
let audio_device_id: AudioDeviceID = 0;
let data_size = mem::size_of::<AudioDeviceID>();
let status = unsafe {
AudioObjectGetPropertyData(
kAudioObjectSystemObject,
&property_address as *const _,
0,
null(),
&data_size as *const _ as *mut _,
&audio_device_id as *const _ as *mut _,
)
};
if status != kAudioHardwareNoError as i32 {
return None;
}
let endpoint = Endpoint {
audio_device_id: audio_device_id,
};
Some(endpoint)
}
pub type SupportedFormatsIterator = VecIntoIter<SupportedFormat>;
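// Note: `SupportedFormatsIterator` is a plain `Vec` iterator because `supported_formats`
// collects all of the device's formats eagerly before returning them.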

View File

@ -1,4 +1,5 @@
extern crate coreaudio;
extern crate core_foundation_sys;
use ChannelPosition;
use CreationError;
@ -10,7 +11,10 @@ use SamplesRate;
use SupportedFormat;
use UnknownTypeBuffer;
use std::ffi::CStr;
use std::mem;
use std::os::raw::c_char;
use std::ptr::null;
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;
@ -18,13 +22,32 @@ use std::slice;
use self::coreaudio::audio_unit::{AudioUnit, Scope, Element};
use self::coreaudio::audio_unit::render_callback::{self, data};
use self::coreaudio::sys::{
AudioBuffer,
AudioBufferList,
AudioDeviceID,
AudioObjectGetPropertyData,
AudioObjectGetPropertyDataSize,
AudioObjectPropertyAddress,
AudioStreamBasicDescription,
AudioValueRange,
kAudioDevicePropertyAvailableNominalSampleRates,
kAudioDevicePropertyDeviceNameCFString,
kAudioDevicePropertyScopeOutput,
kAudioDevicePropertyStreamConfiguration,
kAudioFormatFlagIsFloat,
kAudioFormatFlagIsPacked,
kAudioFormatLinearPCM,
kAudioHardwareNoError,
kAudioObjectPropertyElementMaster,
kAudioObjectPropertyScopeOutput,
kAudioOutputUnitProperty_CurrentDevice,
kAudioUnitProperty_StreamFormat,
kCFStringEncodingUTF8,
};
use self::core_foundation_sys::string::{
CFStringRef,
CFStringGetCStringPtr,
};
mod enumerate;
@ -32,24 +55,166 @@ mod enumerate;
pub use self::enumerate::{EndpointsIterator, SupportedFormatsIterator, default_endpoint};
#[derive(Clone, PartialEq, Eq)]
pub struct Endpoint {
audio_device_id: AudioDeviceID,
}
impl Endpoint {
pub fn supported_formats(&self) -> Result<SupportedFormatsIterator, FormatsEnumerationError> {
let mut property_address = AudioObjectPropertyAddress {
mSelector: kAudioDevicePropertyStreamConfiguration,
mScope: kAudioObjectPropertyScopeOutput,
mElement: kAudioObjectPropertyElementMaster,
};
unsafe {
// Retrieve the device's audio buffer list.
let data_size = 0u32;
let status = AudioObjectGetPropertyDataSize(
self.audio_device_id,
&property_address as *const _,
0,
null(),
&data_size as *const _ as *mut _,
);
if status != kAudioHardwareNoError as i32 {
unimplemented!();
}
let mut audio_buffer_list: Vec<u8> = vec![];
audio_buffer_list.reserve_exact(data_size as usize);
let status = AudioObjectGetPropertyData(
self.audio_device_id,
&property_address as *const _,
0,
null(),
&data_size as *const _ as *mut _,
audio_buffer_list.as_mut_ptr() as *mut _,
);
if status != kAudioHardwareNoError as i32 {
unimplemented!();
}
let audio_buffer_list = audio_buffer_list.as_mut_ptr() as *mut AudioBufferList;
// If there are no buffers, return an empty iterator of formats.
if (*audio_buffer_list).mNumberBuffers == 0 {
return Ok(vec![].into_iter());
}
// Count the number of channels as the sum of all channels in all output buffers.
let n_buffers = (*audio_buffer_list).mNumberBuffers as usize;
let first: *const AudioBuffer = (*audio_buffer_list).mBuffers.as_ptr();
let buffers: &'static [AudioBuffer] = slice::from_raw_parts(first, n_buffers);
let mut n_channels = 0;
for buffer in buffers {
n_channels += buffer.mNumberChannels as usize;
}
const CHANNEL_POSITIONS: &'static [ChannelPosition] = &[
ChannelPosition::FrontLeft,
ChannelPosition::FrontRight,
ChannelPosition::FrontCenter,
ChannelPosition::LowFrequency,
ChannelPosition::BackLeft,
ChannelPosition::BackRight,
ChannelPosition::FrontLeftOfCenter,
ChannelPosition::FrontRightOfCenter,
ChannelPosition::BackCenter,
ChannelPosition::SideLeft,
ChannelPosition::SideRight,
ChannelPosition::TopCenter,
ChannelPosition::TopFrontLeft,
ChannelPosition::TopFrontCenter,
ChannelPosition::TopFrontRight,
ChannelPosition::TopBackLeft,
ChannelPosition::TopBackCenter,
ChannelPosition::TopBackRight,
];
// As far as we know, the sample format should always be f32 on macOS and i16 on iOS. Feel
// free to fix this if more PCM formats need to be supported.
let sample_format = if cfg!(target_os = "ios") {
SampleFormat::I16
} else {
SampleFormat::F32
};
// Get available sample rate ranges.
property_address.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
let data_size = 0u32;
let status = AudioObjectGetPropertyDataSize(
self.audio_device_id,
&property_address as *const _,
0,
null(),
&data_size as *const _ as *mut _,
);
if status != kAudioHardwareNoError as i32 {
unimplemented!();
}
let n_ranges = data_size as usize / mem::size_of::<AudioValueRange>();
let mut ranges: Vec<u8> = vec![];
ranges.reserve_exact(data_size as usize);
let status = AudioObjectGetPropertyData(
self.audio_device_id,
&property_address as *const _,
0,
null(),
&data_size as *const _ as *mut _,
ranges.as_mut_ptr() as *mut _,
);
if status != kAudioHardwareNoError as i32 {
unimplemented!();
}
let ranges: *mut AudioValueRange = ranges.as_mut_ptr() as *mut _;
let ranges: &'static [AudioValueRange] = slice::from_raw_parts(ranges, n_ranges);
// Collect the supported formats for the device.
let mut fmts = vec![];
for range in ranges {
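// Channel positions are assigned positionally by cycling through `CHANNEL_POSITIONS` and
// taking the first `n_channels` entries (e.g. two channels yield FrontLeft, FrontRight);
// the device's actual channel layout is not queried here.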
let channels = CHANNEL_POSITIONS.iter()
.cloned()
.cycle()
.take(n_channels)
.collect::<Vec<_>>();
let fmt = SupportedFormat {
channels: channels.clone(),
min_samples_rate: SamplesRate(range.mMinimum as _),
max_samples_rate: SamplesRate(range.mMaximum as _),
data_type: sample_format,
};
fmts.push(fmt);
}
Ok(fmts.into_iter())
}
}
pub fn name(&self) -> String {
"Default AudioUnit Endpoint".to_string()
let property_address = AudioObjectPropertyAddress {
mSelector: kAudioDevicePropertyDeviceNameCFString,
mScope: kAudioDevicePropertyScopeOutput,
mElement: kAudioObjectPropertyElementMaster,
};
let device_name: CFStringRef = null();
let data_size = mem::size_of::<CFStringRef>();
let c_str = unsafe {
let status = AudioObjectGetPropertyData(
self.audio_device_id,
&property_address as *const _,
0,
null(),
&data_size as *const _ as *mut _,
&device_name as *const _ as *mut _,
);
if status != kAudioHardwareNoError as i32 {
return format!("<OSStatus: {:?}>", status);
}
let c_string: *const c_char = CFStringGetCStringPtr(device_name, kCFStringEncodingUTF8);
if c_string == null() {
return "<null>".into();
}
CStr::from_ptr(c_string as *mut _)
};
c_str.to_string_lossy().into_owned()
}
}
@ -117,7 +282,7 @@ impl EventLoop {
}
#[inline]
pub fn build_voice(&self, endpoint: &Endpoint, format: &Format)
-> Result<VoiceId, CreationError> {
let mut audio_unit = {
let au_type = if cfg!(target_os = "ios") {
@ -132,7 +297,13 @@ impl EventLoop {
AudioUnit::new(au_type)?
};
// TODO: iOS uses integer and fixed-point data
audio_unit.set_property(
kAudioOutputUnitProperty_CurrentDevice,
Scope::Global,
Element::Output,
Some(&endpoint.audio_device_id),
)?;
// Set the stream in interleaved mode.
let n_channels = format.channels.len();
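// Sketch only (not from this commit): with the stream in interleaved mode, the constants
// imported above (kAudioFormatLinearPCM, kAudioFormatFlagIsFloat, kAudioFormatFlagIsPacked)
// suggest an AudioStreamBasicDescription along these lines; field values are illustrative.
let bytes_per_frame = (n_channels * mem::size_of::<f32>()) as u32;
let asbd = AudioStreamBasicDescription {
    mSampleRate: format.samples_rate.0 as f64,
    mFormatID: kAudioFormatLinearPCM,
    mFormatFlags: kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked,
    mBytesPerPacket: bytes_per_frame,
    mFramesPerPacket: 1,
    mBytesPerFrame: bytes_per_frame,
    mChannelsPerFrame: n_channels as u32,
    mBitsPerChannel: 32,
    mReserved: 0,
};
// Such a description would then be applied with the same `set_property` call used above for
// `kAudioOutputUnitProperty_CurrentDevice`, this time with `kAudioUnitProperty_StreamFormat`.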