Recording and playback are built on Audio Unit. Playback would occasionally produce crackling noise; to handle it, two buffers are kept and the data source is switched between them (ping-pong buffering).
First, recording. The code below needs AVFoundation and AudioToolbox, and references `kInputBus`, the input element (bus 1) of the I/O unit, so define it up front:
#import <AVFoundation/AVFoundation.h>
#import <AudioToolbox/AudioToolbox.h>
#define kInputBus (1)
@interface SSLAudioRecord ()
{
AudioUnit audioUnit;
NSMutableData *buffers;
int bufferLen;
}
@end
@implementation SSLAudioRecord
- (BOOL)startPhone{
buffers = [NSMutableData data];
bufferLen = 0;
NSError *error;
AVAudioSession *session = [AVAudioSession sharedInstance];
[session setCategory:AVAudioSessionCategoryPlayAndRecord mode:AVAudioSessionModeVideoChat options:AVAudioSessionCategoryOptionDefaultToSpeaker error:&error];
[session setActive:YES error:&error];
// Initialize the Audio Unit
AudioComponentDescription inputDesc;
inputDesc.componentType = kAudioUnitType_Output;
// kAudioUnitSubType_RemoteIO is the plain I/O unit;
// kAudioUnitSubType_VoiceProcessingIO adds echo cancellation (AEC)
inputDesc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
inputDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
inputDesc.componentFlags = 0;
inputDesc.componentFlagsMask = 0;
AudioComponent inputComponent = AudioComponentFindNext(NULL, &inputDesc);
OSStatus status = AudioComponentInstanceNew(inputComponent, &audioUnit);
if (status != noErr)
{
return NO;
}
// Configure the audio format used for capture
AudioStreamBasicDescription inputStreamDesc;
inputStreamDesc.mFormatID = kAudioFormatLinearPCM;
inputStreamDesc.mFormatFlags = (kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsNonInterleaved | kAudioFormatFlagIsPacked);
inputStreamDesc.mSampleRate = 8000;
inputStreamDesc.mFramesPerPacket = 1;
inputStreamDesc.mChannelsPerFrame = 1;
inputStreamDesc.mBitsPerChannel = 16;
inputStreamDesc.mBytesPerFrame = inputStreamDesc.mBitsPerChannel * inputStreamDesc.mChannelsPerFrame / 8;
inputStreamDesc.mBytesPerPacket = inputStreamDesc.mBitsPerChannel * inputStreamDesc.mChannelsPerFrame / 8 * inputStreamDesc.mFramesPerPacket;
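// Sanity check on the numbers: 16-bit mono gives mBytesPerFrame = 16 * 1 / 8 = 2,
// so 8000 Hz yields 16000 bytes of PCM per second; the 1920-byte chunks used in
// the input callback below therefore hold 120 ms of audio.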
status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &inputStreamDesc, sizeof(inputStreamDesc));
if (status != noErr)
{
return NO;
}
// Enable capture on the input element (bus 1)
int inputEnable = 1;
status = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &inputEnable, sizeof(inputEnable));
if (status != noErr)
{
return NO;
}
AURenderCallbackStruct inputCallBackStruct;
inputCallBackStruct.inputProc = inputCallBackFun; // callback invoked when captured samples are available
inputCallBackStruct.inputProcRefCon = (__bridge void * _Nullable)(self); // context handed back to the callback, similar to a target
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Output,
kInputBus,
&inputCallBackStruct,
sizeof(inputCallBackStruct));
if (status != noErr) {
return NO;
}
// Initialize before starting (stopPhone calls AudioUnitUninitialize, so this is the symmetric setup)
status = AudioUnitInitialize(audioUnit);
if (status == noErr)
{
status = AudioOutputUnitStart(audioUnit);
}
if (status == noErr)
{
return YES;
}
if (audioUnit) {
[self stopPhone];
}
return NO;
}
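// Capture is pull-based: the system invokes this callback when microphone data
// is ready, and we call AudioUnitRender on input bus 1 to copy the samples into
// an AudioBufferList we allocate (and free) ourselves.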
static OSStatus inputCallBackFun(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * __nullable ioData)
{
AudioBufferList bufferList;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mDataByteSize = sizeof(SInt16)*inNumberFrames;
bufferList.mBuffers[0].mNumberChannels = 1;
bufferList.mBuffers[0].mData = (SInt16*) malloc(sizeof(SInt16)*inNumberFrames);
SSLAudioRecord *recorder = (__bridge SSLAudioRecord *)(inRefCon);
if (recorder->audioUnit)
{
OSStatus status = AudioUnitRender(recorder->audioUnit,
ioActionFlags,
inTimeStamp,
1,
inNumberFrames,
&bufferList);
if (status == noErr)
{
NSUInteger length = bufferList.mBuffers[0].mDataByteSize;
AudioBuffer buffer = bufferList.mBuffers[0];
NSData *tempData = [NSData dataWithBytes:buffer.mData length:length];
[recorder->buffers appendData:tempData];
recorder->bufferLen += tempData.length;
// Send in fixed-size chunks; 1920 bytes = 120 ms of 8 kHz 16-bit mono PCM.
// (With G.711A the chunk would be 800 bytes, with G.711U 960.)
int sendDataLen = 1920;
if (recorder->buffers.length > sendDataLen)
{
NSRange range = NSMakeRange(0, sendDataLen);
NSData *data = [recorder->buffers subdataWithRange:range];
// `data` is the chunk to hand to the encoder/network (sending elided here).
// Drop the consumed bytes so the buffer does not grow without bound.
[recorder->buffers replaceBytesInRange:range withBytes:NULL length:0];
}
}
}
free(bufferList.mBuffers[0].mData);
return noErr;
}
- (void)stopPhone{
if (!audioUnit)
{
return;
}
AudioOutputUnitStop(audioUnit);
AudioUnitUninitialize(audioUnit);
OSStatus status = AudioComponentInstanceDispose(audioUnit);
if (status == noErr)
{
audioUnit = NULL;
}
}
- (void)dealloc{
NSLog(@"ssl audio record dealloc\n");
}
@end
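Usage on the recording side is a simple start/stop pair (a hypothetical call site, not part of the class above):
SSLAudioRecord *record = [[SSLAudioRecord alloc] init];
if (![record startPhone]) {
NSLog(@"failed to start capture");
}
// ... capture runs; 1920-byte PCM chunks are assembled in the input callback
[record stopPhone];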
Now, playback:
#define kOutputBus (0)
typedef void (^SSLAudioPlayerInputBlock)(AudioBufferList *bufferList);
@interface SSLAudioPlayer ()
{
AudioUnit audioUnit;
}
@property (nonatomic, assign) int bit;
@property (nonatomic, assign) int rate;
@property (nonatomic, assign) int channel;
@property (nonatomic, assign) NSUInteger currentReadLength;
@property (nonatomic, copy) SSLAudioPlayerInputBlock bl_input;
@property (nonatomic, strong) NSMutableData *playerData;
@property (nonatomic, strong) NSMutableData *playerDataCache;
@property (nonatomic, assign) UInt32 readerLength;
@property (nonatomic, assign) BOOL readCache;
@end
@implementation SSLAudioPlayer
+ (instancetype)sslPlayerManager{
static SSLAudioPlayer *player;
static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^{
player = [[SSLAudioPlayer alloc]init];
});
return player;
}
- (void)startWithRate:(int)rate bit:(int)bit channel:(int)channel{
self.rate = rate;
self.bit = bit;
self.channel = channel;
self.playerData = [NSMutableData data];
self.playerDataCache = [NSMutableData data];
self.currentReadLength = 0;
AudioComponentDescription outputDesc;
outputDesc.componentType = kAudioUnitType_Output;
outputDesc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
outputDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
outputDesc.componentFlags = 0;
outputDesc.componentFlagsMask = 0;
AudioComponent outputComponent = AudioComponentFindNext(NULL, &outputDesc);
AudioComponentInstanceNew(outputComponent, &audioUnit);
int mFramesPerPacket = 1;
AudioStreamBasicDescription _outputFormat;
memset(&_outputFormat, 0, sizeof(_outputFormat));
_outputFormat.mSampleRate = rate;//8000/16000/20000/44100/96000
_outputFormat.mFormatID = kAudioFormatLinearPCM;
_outputFormat.mFormatFlags = (kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsNonInterleaved);
_outputFormat.mFramesPerPacket = mFramesPerPacket;
_outputFormat.mChannelsPerFrame = channel;
_outputFormat.mBitsPerChannel = bit;
_outputFormat.mBytesPerFrame = _outputFormat.mBitsPerChannel * _outputFormat.mChannelsPerFrame / 8;
_outputFormat.mBytesPerPacket = _outputFormat.mBitsPerChannel * _outputFormat.mChannelsPerFrame / 8 * _outputFormat.mFramesPerPacket;
OSStatus status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &_outputFormat, sizeof(_outputFormat));
AURenderCallbackStruct outputCallBackStruct;
outputCallBackStruct.inputProc = outputCallBackFun;
outputCallBackStruct.inputProcRefCon = (__bridge void * _Nullable)(self);
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
kOutputBus,
&outputCallBackStruct,
sizeof(outputCallBackStruct));
if (status == noErr) {
AudioUnitInitialize(audioUnit); // stop calls AudioUnitUninitialize, so initialize here
AudioOutputUnitStart(audioUnit);
__weak typeof(self) weakifySelf = self;
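// Read-side ping-pong: keep draining one buffer; when it runs dry, wipe it,
// flip readCache, and continue from the other buffer. Incoming data always
// lands in the buffer that is not being read (see setPlayerData:length: below).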
self.bl_input = ^(AudioBufferList *bufferList) {
AudioBuffer buffer = bufferList->mBuffers[0];
int len = buffer.mDataByteSize;
if (weakifySelf.readCache) {
int readLen = [weakifySelf readDataFrom:weakifySelf.playerDataCache len:len forData:buffer.mData];
if (readLen == 0) {
weakifySelf.readCache = NO;
if (weakifySelf.playerDataCache.length > 0) {
[weakifySelf.playerDataCache resetBytesInRange:NSMakeRange(0, weakifySelf.playerDataCache.length)];
weakifySelf.playerDataCache.length = 0;
}
if (weakifySelf.playerData.length > 0) { // use weakifySelf, not self, to avoid a retain cycle
readLen = [weakifySelf readDataFrom:weakifySelf.playerData len:len forData:buffer.mData];
}
}
buffer.mDataByteSize = readLen;
}else{
int readLen = [weakifySelf readDataFrom:weakifySelf.playerData len:len forData:buffer.mData];
if (readLen == 0) {
weakifySelf.readCache = YES;
if (weakifySelf.playerData.length > 0) {
[weakifySelf.playerData resetBytesInRange:NSMakeRange(0, weakifySelf.playerData.length)];
weakifySelf.playerData.length = 0;
}
if (weakifySelf.playerDataCache.length > 0) { // use weakifySelf, not self, to avoid a retain cycle
readLen = [weakifySelf readDataFrom:weakifySelf.playerDataCache len:len forData:buffer.mData];
}
}
buffer.mDataByteSize = readLen;
}
};
}else{
[self stop];
}
}
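// Playback is also pull-based: Core Audio calls this on the render thread with
// an ioData buffer to fill. Zeroing it first means running out of PCM produces
// silence rather than stale samples.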
static OSStatus outputCallBackFun(void * inRefCon,
AudioUnitRenderActionFlags * ioActionFlags,
const AudioTimeStamp * inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * __nullable ioData)
{
memset(ioData->mBuffers[0].mData, 0, ioData->mBuffers[0].mDataByteSize);
SSLAudioPlayer *player = (__bridge SSLAudioPlayer *)(inRefCon);
if (player.bl_input)
{
player.bl_input(ioData);
}
return noErr;
}
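// Write side of the ping-pong: append incoming PCM to whichever buffer the
// render callback is NOT currently reading, so reader and writer never touch
// the same NSMutableData at once.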
- (void)setPlayerData:(void *)szData length:(int)length
{
if (self.readCache) {
[self.playerData appendBytes:szData length:length];
}else{
[self.playerDataCache appendBytes:szData length:length];
}
}
- (int)readDataFrom:(NSMutableData *)dataStore len:(int)len forData:(void *)data
{
UInt32 currentReadLength = 0;
if (_readerLength >= dataStore.length)
{
_readerLength = 0;
return currentReadLength;
}
NSRange range;
if (_readerLength+ len <= dataStore.length)
{
currentReadLength = len;
range = NSMakeRange(_readerLength, currentReadLength);
_readerLength = _readerLength + len;
}
else
{
currentReadLength = (UInt32)(dataStore.length - _readerLength);
range = NSMakeRange(_readerLength, currentReadLength);
_readerLength = (UInt32) dataStore.length;
}
NSData *subData = [dataStore subdataWithRange:range];
Byte *tempByte = (Byte *)[subData bytes];
memcpy(data,tempByte,currentReadLength);
return currentReadLength;
}
- (void)stop
{
[self.playerDataCache resetBytesInRange:NSMakeRange(0, self.playerDataCache.length)];
self.playerDataCache.length = 0;
[self.playerData resetBytesInRange:NSMakeRange(0, self.playerData.length)];
self.playerData.length = 0;
self.readerLength = 0; // reset the read cursor so a restart begins cleanly
if (!audioUnit)
{
return;
}
AudioOutputUnitStop(audioUnit);
AudioUnitUninitialize(audioUnit);
OSStatus status = AudioComponentInstanceDispose(audioUnit);
if (status == noErr) {
audioUnit = NULL;
self.bl_input = nil;
}
}
@end
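A minimal end-to-end usage sketch, assuming the network layer delivers 16-bit mono PCM at 8 kHz; `onAudioPacket:` is a hypothetical receive callback, not part of the classes above:
// start playback for 8 kHz / 16-bit / mono PCM
SSLAudioPlayer *player = [SSLAudioPlayer sslPlayerManager];
[player startWithRate:8000 bit:16 channel:1];
// feed each received packet into the currently idle buffer
- (void)onAudioPacket:(NSData *)pcm {
[[SSLAudioPlayer sslPlayerManager] setPlayerData:(void *)pcm.bytes length:(int)pcm.length];
}
// tear down when the call ends
[player stop];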