1. Overview: Audio Unit is iOS's audio processing plug-in layer. It supports mixing, equalization, format conversion, and real-time input/output, and is used for recording, playback, offline rendering, and live conversation such as VoIP (Voice over Internet Protocol). Audio units can be dynamically loaded and used from an iOS application.

Audio units usually do their work in the context of an enclosing object called an audio processing graph. Audio Unit is the lowest-level programming layer in the iOS audio stack, and using it well requires a deeper understanding of how audio units work. Unless you need real-time synchronized playback, low-latency input/output, or other specialized audio features, prefer the higher-level frameworks: Media Player, AVFoundation, OpenAL, or Audio Toolbox.

This article uses Audio Unit to capture audio on iOS directly as uncompressed PCM data. The approach: use an audio unit to capture from the hardware input, i.e. the built-in microphone or an external device with microphone capability (a headset with a mic, an external microphone, etc.).
Captured data rate: data rate (bytes/sec) = (sample rate (Hz) × bits per sample × channel count) / 8
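For example, with the 44.1 kHz / 16-bit / mono configuration used in the code below: 44100 × 16 × 1 / 8 = 88200 bytes/sec, i.e. roughly 86 KB of PCM per second of captured audio.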
#import <AVFoundation/AVFoundation.h>
#import <AudioToolbox/AudioToolbox.h>
#import <CoreMedia/CoreMedia.h>

#define INPUT_BUS 1 ///< An I/O unit's bus 1 connects to input hardware (microphone).
#define OUTPUT_BUS 0 ///< An I/O unit's bus 0 connects to output hardware (speaker).
static NSString * const kModuleName = @"KpAudioCapture";

static AudioUnit m_audioUnit;
static AudioBufferList *m_bufferList;
static AudioStreamBasicDescription m_audioDataFormat;

/// Host-clock time (in seconds) when capture started; the render callback drops frames until it is set.
Float64 g_av_base_time = 0;

@interface AudioUnitController ()
@property (nonatomic, assign, readonly) BOOL isRunning;
@end
static OSStatus AudioCaptureCallback(void *inRefCon,
                                     AudioUnitRenderActionFlags *ioActionFlags,
                                     const AudioTimeStamp *inTimeStamp,
                                     UInt32 inBusNumber,
                                     UInt32 inNumberFrames,
                                     AudioBufferList *ioData) {
    // Pull the captured samples from the input bus into our own buffer list.
    OSStatus status = AudioUnitRender(m_audioUnit, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, m_bufferList);
    if (status != noErr) {
        return status;
    }

    // Drop frames until the capture base time has been set.
    if (g_av_base_time == 0) {
        return noErr;
    }

    // Presentation timestamp in milliseconds, relative to the capture start time.
    Float64 currentTime = CMTimeGetSeconds(CMClockMakeHostTimeFromSystemUnits(inTimeStamp->mHostTime));
    int64_t pts = (int64_t)((currentTime - g_av_base_time) * 1000);

    // The captured PCM bytes; hand them off to your consumer here.
    void *bufferData = m_bufferList->mBuffers[0].mData;
    UInt32 bufferSize = m_bufferList->mBuffers[0].mDataByteSize;

    return noErr;
}
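The callback computes a millisecond pts and locates the raw PCM bytes, but the listing stops short of delivering them anywhere. A minimal hand-off sketch, assuming a hypothetical g_audioDataHandler block that you would define and assign yourself (not part of the original code):

// Hypothetical consumer hook -- define and set this elsewhere in your pipeline.
typedef void (^KpAudioDataHandler)(void *data, UInt32 size, int64_t pts);
static KpAudioDataHandler g_audioDataHandler;

// Inside AudioCaptureCallback, after bufferData/bufferSize are read:
//     if (g_audioDataHandler) g_audioDataHandler(bufferData, bufferSize, pts);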
@implementation AudioUnitController

SingletonM // singleton helper macro (assumed to declare the shared _instance)
- (void)startAudioCaptureWithAudioUnit:(AudioUnit)audioUnit isRunning:(BOOL *)isRunning {
    if (*isRunning) {
        NSLog(@"%@: %s - start recorder repeat \n", kModuleName, __func__);
        return;
    }

    OSStatus status = AudioOutputUnitStart(audioUnit);
    if (status == noErr) {
        *isRunning = YES;
        NSLog(@"%@: %s - start audio unit success \n", kModuleName, __func__);
    } else {
        *isRunning = NO;
        NSLog(@"%@: %s - start audio unit failed \n", kModuleName, __func__);
    }
}
- (void)stopAudioCaptureWithAudioUnit:(AudioUnit)audioUnit isRunning:(BOOL *)isRunning {
    if (*isRunning == NO) {
        NSLog(@"%@: %s - stop capture repeat \n", kModuleName, __func__);
        return;
    }

    *isRunning = NO;
    if (audioUnit != NULL) {
        OSStatus status = AudioOutputUnitStop(audioUnit);
        if (status != noErr) {
            NSLog(@"%@: %s - stop audio unit failed. \n", kModuleName, __func__);
        } else {
            NSLog(@"%@: %s - stop audio unit successful", kModuleName, __func__);
        }
    }
}
- (void)freeAudioUnit:(AudioUnit)audioUnit {
    if (!audioUnit) {
        NSLog(@"%@: %s - repeat call!", kModuleName, __func__);
        return;
    }

    OSStatus result = AudioOutputUnitStop(audioUnit);
    if (result != noErr) {
        NSLog(@"%@: %s - stop audio unit failed.", kModuleName, __func__);
    }

    result = AudioUnitUninitialize(audioUnit);
    if (result != noErr) {
        NSLog(@"%@: %s - uninitialize audio unit failed, status : %d", kModuleName, __func__, result);
    }

    // Disposing the unit may trigger audio route change notifications repeatedly.
    result = AudioComponentInstanceDispose(audioUnit);
    if (result != noErr) {
        NSLog(@"%@: %s - dispose audio unit failed. status : %d", kModuleName, __func__, result);
    } else {
        m_audioUnit = NULL;
    }
}
- (void)viewDidLoad {
    [super viewDidLoad];
}
+ (instancetype)getInstance {
    return [[self alloc] init];
}

- (instancetype)init {
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        _instance = [super init];
        [_instance configureAudioInfoWithDataFormat:&m_audioDataFormat
                                           formatID:kAudioFormatLinearPCM
                                         sampleRate:44100
                                       channelCount:1
                                    audioBufferSize:2048
                                        durationSec:0.02
                                           callBack:AudioCaptureCallback];
    });
    return _instance;
}
// Configure the capture format, the session's buffer duration, and the audio unit itself.
- (void)configureAudioInfoWithDataFormat:(AudioStreamBasicDescription *)dataFormat
                                formatID:(UInt32)formatID
                              sampleRate:(Float64)sampleRate
                            channelCount:(UInt32)channelCount
                         audioBufferSize:(int)audioBufferSize
                             durationSec:(float)durationSec
                                callBack:(AURenderCallback)callBack
{
    // Configure the ASBD.
    [self configureAudioToAudioFormat:dataFormat
                             formatID:formatID
                           sampleRate:sampleRate
                         channelCount:channelCount];

    // Set the preferred I/O buffer duration (i.e. how often the capture callback fires).
    [[AVAudioSession sharedInstance] setPreferredIOBufferDuration:durationSec error:NULL];

    // Configure the audio unit.
    m_audioUnit = [self configureAudioUnitWithDataFormat:*dataFormat
                                         audioBufferSize:audioBufferSize
                                                callBack:callBack];
}
// Create and fully configure the audio unit.
- (AudioUnit)configureAudioUnitWithDataFormat:(AudioStreamBasicDescription)dataFormat audioBufferSize:(int)audioBufferSize callBack:(AURenderCallback)callBack {
    AudioUnit audioUnit = [self createAudioUnitObject];
    if (!audioUnit) {
        return NULL;
    }

    [self initCaptureAudioBufferWithAudioUnit:audioUnit
                                 channelCount:dataFormat.mChannelsPerFrame
                                 dataByteSize:audioBufferSize];

    [self setAudioUnitPropertyWithAudioUnit:audioUnit dataFormat:dataFormat];

    [self initCaptureCallbackWithAudioUnit:audioUnit callBack:callBack];

    OSStatus status = AudioUnitInitialize(audioUnit);
    if (status != noErr) {
        NSLog(@"%@: %s - couldn't init audio unit instance, status : %d \n", kModuleName, __func__, status);
    }
    return audioUnit;
}
- (AudioUnit)createAudioUnitObject {
    AudioUnit audioUnit;
    AudioComponentDescription audioDesc;
    audioDesc.componentType         = kAudioUnitType_Output;
    // VoiceProcessingIO adds echo cancellation and gain control; use kAudioUnitSubType_RemoteIO for raw input.
    audioDesc.componentSubType      = kAudioUnitSubType_VoiceProcessingIO;
    audioDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
    audioDesc.componentFlags        = 0;
    audioDesc.componentFlagsMask    = 0;

    AudioComponent inputComponent = AudioComponentFindNext(NULL, &audioDesc);
    OSStatus status = AudioComponentInstanceNew(inputComponent, &audioUnit);
    if (status != noErr) {
        NSLog(@"%@: %s - create audio unit failed, status: %d \n", kModuleName, __func__, status);
        return NULL;
    } else {
        return audioUnit;
    }
}
- (void)initCaptureAudioBufferWithAudioUnit:(AudioUnit)audioUnit channelCount:(int)channelCount dataByteSize:(int)dataByteSize {
    // Disable the AU's own buffer allocation for the recorder; we allocate our own.
    UInt32 flag = 0;
    OSStatus status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_ShouldAllocateBuffer, kAudioUnitScope_Output, INPUT_BUS, &flag, sizeof(flag));
    if (status != noErr) {
        NSLog(@"%@: %s - couldn't disable buffer allocation for the callback, status :%d \n", kModuleName, __func__, status);
    }

    AudioBufferList *bufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList));
    bufferList->mNumberBuffers = 1;
    bufferList->mBuffers[0].mNumberChannels = channelCount;
    bufferList->mBuffers[0].mDataByteSize = dataByteSize;
    bufferList->mBuffers[0].mData = malloc(dataByteSize);
    m_bufferList = bufferList;
}
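A quick sanity check on the numbers used above: 2048 bytes of 16-bit mono PCM is 2048 / 2 = 1024 frames, and 1024 / 44100 ≈ 23.2 ms of audio, which lines up with the 0.02 s preferred I/O buffer duration requested in init.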
// Configure the capture stream format and enable input / disable output on the I/O unit.
- (void)setAudioUnitPropertyWithAudioUnit:(AudioUnit)audioUnit dataFormat:(AudioStreamBasicDescription)dataFormat {
    OSStatus status;

    // Apply the stream format to the output scope of the input bus (the data we read back).
    status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, INPUT_BUS, &dataFormat, sizeof(dataFormat));
    if (status != noErr) {
        NSLog(@"%@: %s - set audio unit stream format failed, status: %d\n", kModuleName, __func__, status);
    }

    UInt32 enableFlag = 1;
    status = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, INPUT_BUS, &enableFlag, sizeof(enableFlag));
    if (status != noErr) {
        NSLog(@"%@: %s - could not enable input on AURemoteIO, status: %d\n", kModuleName, __func__, status);
    }

    UInt32 disableFlag = 0;
    status = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, OUTPUT_BUS, &disableFlag, sizeof(disableFlag));
    if (status != noErr) {
        NSLog(@"%@: %s - could not disable output on AURemoteIO, status: %d\n", kModuleName, __func__, status);
    }
}
- (void)initCaptureCallbackWithAudioUnit:(AudioUnit)audioUnit callBack:(AURenderCallback)callBack {
    AURenderCallbackStruct captureCallBack;
    captureCallBack.inputProc = callBack;
    captureCallBack.inputProcRefCon = (__bridge void *)self;
    OSStatus status = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, INPUT_BUS, &captureCallBack, sizeof(captureCallBack));
    if (status != noErr) {
        NSLog(@"%@: %s - Audio Unit set capture callback failed, status : %d \n", kModuleName, __func__, status);
    }
}
// Configure the ASBD (AudioStreamBasicDescription) that describes the PCM stream format.
- (void)configureAudioToAudioFormat:(AudioStreamBasicDescription *)audioFormat
                           formatID:(UInt32)formatID
                         sampleRate:(Float64)sampleRate
                       channelCount:(UInt32)channelCount
{
    AudioStreamBasicDescription dataFormat = {0};
    UInt32 size = sizeof(dataFormat.mSampleRate);

    // Query the hardware's current sample rate (recommended as a reference).
    Float64 hardwareSampleRate = 0;
    AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate,
                            &size,
                            &hardwareSampleRate);
    // Set the sample rate manually.
    dataFormat.mSampleRate = sampleRate;

    size = sizeof(dataFormat.mChannelsPerFrame);
    // Query the hardware's input channel count (consult it before choosing your own).
    UInt32 hardwareNumberChannels = 0;
    AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareInputNumberChannels, &size, &hardwareNumberChannels);
    dataFormat.mChannelsPerFrame = channelCount;

    dataFormat.mFormatID = formatID;
    if (formatID == kAudioFormatLinearPCM) {
        dataFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
        dataFormat.mBitsPerChannel = 16;
        dataFormat.mBytesPerPacket = dataFormat.mBytesPerFrame = (dataFormat.mBitsPerChannel / 8) * dataFormat.mChannelsPerFrame;
        dataFormat.mFramesPerPacket = 1;
    }

    memcpy(audioFormat, &dataFormat, sizeof(dataFormat));
    NSLog(@"%@: %s - sample rate:%f, channel count:%u", kModuleName, __func__, sampleRate, (unsigned int)channelCount);
}
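Note that AudioSessionGetProperty belongs to the deprecated C-based Audio Session API. A sketch of querying the same hardware values through AVAudioSession instead (available since iOS 6):

AVAudioSession *session = [AVAudioSession sharedInstance];
Float64 hardwareSampleRate   = session.sampleRate;            // current hardware sample rate
NSInteger hardwareInChannels = session.inputNumberOfChannels; // current input channel count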