用AudioQueue录制MP3音频

1,143 阅读2分钟

前言

众所周知,iOS系统中只有mp3解码器,没有mp3编码器,所以在iOS中录制mp3文件,需要开发者自行解决mp3编码问题,市场上一般采用lame三方库实现mp3编码。这篇文章主要记录的是用AudioQueue边录音边转码mp3的方法。

实现方案

用AudioQueue采集pcm音频数据(不落地为pcm文件),然后在回调中将AudioQueue抛出的pcm音频数据用lame转换成mp3编码的音频数据并写入文件中。

具体实现

1、设置AVAudioSession

// Configure the shared audio session for simultaneous playback and recording.
// Cocoa convention: check the BOOL return value of each call, not the NSError
// pointer — the original declared `error` but never inspected any result.
NSError *error = nil;
if (![[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayAndRecord
                                      withOptions:AVAudioSessionCategoryOptionAllowBluetooth
                                            error:&error]) {
    NSLog(@"setCategory failed: %@", error);
}
// Request 44.1 kHz to match the AudioStreamBasicDescription used for recording.
if (![[AVAudioSession sharedInstance] setPreferredSampleRate:44100 error:&error]) {
    NSLog(@"setPreferredSampleRate failed: %@", error);
}
if (![[AVAudioSession sharedInstance] setActive:YES error:&error]) {
    NSLog(@"setActive failed: %@", error);
}

2、配置DataFormat

// Builds the linear-PCM input format the recording queue captures:
// 44.1 kHz, mono, 16-bit signed, packed samples.
- (AudioStreamBasicDescription)getInputDataFormat {
    // Zero-initialize so mReserved (and any future fields) start at 0.
    AudioStreamBasicDescription format = {0};
    format.mFormatID         = kAudioFormatLinearPCM;
    format.mSampleRate       = 44100;
    format.mChannelsPerFrame = 1;                 // mono
    format.mBitsPerChannel   = 16;                // 16-bit samples
    format.mFramesPerPacket  = 1;                 // uncompressed PCM: 1 frame per packet
    format.mBytesPerFrame    = 2;                 // 16 bits * 1 channel
    format.mBytesPerPacket   = 2;
    // Non-interleaved must match how the callback feeds lame_encode_buffer
    // (which expects a single mono channel of non-interleaved samples).
    format.mFormatFlags = kAudioFormatFlagIsNonInterleaved |
                          kLinearPCMFormatFlagIsSignedInteger |
                          kLinearPCMFormatFlagIsPacked;
    return format;
}

3、初始化AudioQueue

// Creates the recording AudioQueue for the given PCM format.
// Returns YES on success. RecordBufferHandler is the C callback that receives
// each filled buffer; `self` is passed through as the callback's user data.
- (BOOL)configInputWithDataFormat:(AudioStreamBasicDescription)dataFormat {
    _mDataFormat = dataFormat;
    OSStatus status = AudioQueueNewInput(&_mDataFormat,
                                         RecordBufferHandler,
                                         (__bridge void *)(self),
                                         NULL,   // run loop: internal thread
                                         NULL,   // run loop mode
                                         0,      // reserved, must be 0
                                         &_audioQueue);
    // Fix: the original unconditionally called AudioQueueSetProperty even when
    // AudioQueueNewInput failed, passing an invalid/NULL queue handle.
    if (status != noErr) {
        return NO;
    }
    // Enable level metering so -instantaneousPeakPower can read meter values;
    // this must be set before recording starts.
    UInt32 enable = true;
    AudioQueueSetProperty(_audioQueue,
                          kAudioQueueProperty_EnableLevelMetering,
                          &enable,
                          sizeof(UInt32));
    return YES;
}

4、配置audiobuffer

// Allocates kNumberAudioQueueBuffers buffers of `frameSize` bytes each and
// enqueues them so the queue can start filling them. Returns YES on success.
- (BOOL)configAudioQueueBuffersWithFrameSize:(UInt32)frameSize {
    _frameSize = frameSize; // e.g. 1024 bytes per buffer
    for (int i = 0; i < kNumberAudioQueueBuffers; i++) { // typically 3 buffers
        OSStatus allocStatus = AudioQueueAllocateBuffer(_audioQueue, frameSize, &_mBuffers[i]);
        // Fix: the original enqueued _mBuffers[i] even when allocation failed,
        // handing AudioQueueEnqueueBuffer an uninitialized buffer pointer.
        if (allocStatus != noErr) {
            NSAssert(false, @"AudioQueueAllocateBuffer或者AudioQueueEnqueueBuffer 初始化失败");
            return NO;
        }
        OSStatus enqueueStatus = AudioQueueEnqueueBuffer(_audioQueue, _mBuffers[i], 0, NULL);
        if (enqueueStatus != noErr) {
            NSAssert(false, @"AudioQueueAllocateBuffer或者AudioQueueEnqueueBuffer 初始化失败");
            return NO;
        }
    }
    return YES;
}

5、初始化lame

// Initializes the LAME encoder to match the capture format:
// 44.1 kHz mono input, VBR, highest quality setting.
- (BOOL)setLame {
    _lame = lame_init();
    lame_set_in_samplerate(_lame, 44100);
    lame_set_num_channels(_lame, 1);
    lame_set_VBR(_lame, vbr_default);
    // MONO mode forces single-channel output regardless of channel count.
    lame_set_mode(_lame, MONO);
    // Quality scale: 0 = best (slowest), 9 = worst (fastest).
    lame_set_quality(_lame, 0);
    // lame_init_params returns -1 on failure.
    return lame_init_params(_lame) != -1;
}

6、配置录音输出文件

// Opens the destination MP3 file for binary writing.
// Returns YES when the file handle was obtained.
- (BOOL)createAudioFile {
    const char *path = [self.model.filePath UTF8String];
    _mp3File = fopen(path, "wb");
    return _mp3File != NULL;
}

7、处理音频数据回调方法

// AudioQueue input callback: encodes each filled PCM buffer to MP3 via LAME,
// writes the result to the output file, then re-enqueues the buffer while
// recording is still running.
//
// Fixes vs. the article's listing (which does not compile):
//  - used `buffer->` although the parameter is named `inBuffer`
//  - referenced ivars `_lame` / `_mp3buf` / `_mp3File` directly inside a
//    C function; they must go through `engine->`
//  - passed a `void *` to lame_encode_buffer, which takes `short int *`
//  - did not guard against a negative (error) return before fwrite
void RecordBufferHandler(void                  *inUserData,
                         AudioQueueRef          inAudioQueue,
                         AudioQueueBufferRef    inBuffer,
                         const AudioTimeStamp  *inStartTime,
                         UInt32                 inNumPackets,
                         const AudioStreamPacketDescription *inPacketDesc) {
    RecordEngine *engine = (__bridge RecordEngine *)inUserData;
    if (inNumPackets > 0) {
        // 16-bit mono PCM: reinterpret the raw bytes as signed shorts.
        short *samples = (short *)inBuffer->mAudioData;
        UInt32 byteCount = inBuffer->mAudioDataByteSize;
        int sampleCount = (int)(byteCount / sizeof(short));
        if (byteCount > 0) {
            // Non-interleaved mono: left channel only, right channel NULL.
            // This must match kAudioFormatFlagIsNonInterleaved in the ASBD.
            // MP3_SIZE (4096) is the capacity of engine->_mp3buf.
            int written = lame_encode_buffer(engine->_lame,
                                             samples,
                                             NULL,
                                             sampleCount,
                                             engine->_mp3buf,
                                             MP3_SIZE);
            if (written > 0) {
                fwrite(engine->_mp3buf, written, 1, engine->_mp3File);
            }
        }
    }
    // Hand the buffer back to the queue unless recording was stopped
    // (engine->_isRunning is cleared by the stop path).
    if (engine->_isRunning) {
        AudioQueueEnqueueBuffer(engine->_audioQueue, inBuffer, 0, NULL);
    }
}

8、开始录音

// Start recording immediately (NULL start time = as soon as possible).
AudioQueueStart(_audioQueue, NULL);

9、结束录音

// Stop synchronously (true = stop immediately, drop pending buffers).
AudioQueueStop(_audioQueue, true);
if (_mp3File != NULL) {
    // Flush LAME's internal buffer so the MP3 stream is properly terminated,
    // then close the encoder and the file. Guard fwrite against a
    // zero/negative flush result (the original wrote unconditionally).
    int written = lame_encode_flush(self->_lame, self->_mp3buf, MP3_SIZE);
    if (written > 0) {
        fwrite(self->_mp3buf, written, 1, self->_mp3File);
    }
    lame_close(self->_lame);
    self->_lame = NULL;      // avoid dangling pointer / double close
    fclose(self->_mp3File);
    self->_mp3File = NULL;
}
for (int i = 0; i < kNumberAudioQueueBuffers; i++) {
    AudioQueueFreeBuffer(self->_audioQueue, self->_mBuffers[i]);
}
// Fix: the original never disposed the queue itself, leaking it on every
// record/stop cycle. true = dispose immediately.
AudioQueueDispose(self->_audioQueue, true);
self->_audioQueue = NULL;
[[AVAudioSession sharedInstance] setActive:NO error:nil];

10、录音期间获取时间

// Returns the elapsed recording time in seconds, derived from the queue's
// sample-time position divided by the sample rate. Returns 0 on failure.
- (NSTimeInterval)currentTime {
    NSTimeInterval timeInterval = 0.0;
    AudioQueueTimelineRef timeLine;
    OSStatus status = AudioQueueCreateTimeline(_audioQueue, &timeLine);
    if (status == noErr) {
        AudioTimeStamp timeStamp;
        if (AudioQueueGetCurrentTime(_audioQueue, timeLine, &timeStamp, NULL) == noErr) {
            timeInterval = timeStamp.mSampleTime / _mDataFormat.mSampleRate;
        }
        // Fix: the original leaked the timeline object on every call;
        // a timeline created with AudioQueueCreateTimeline must be disposed.
        AudioQueueDisposeTimeline(_audioQueue, timeLine);
    }
    return timeInterval;
}

11、录音期间获取Meter

// Returns the sum of mAveragePower across channels (0 on failure).
// NOTE(review): despite the "Peak" name, this reads mAveragePower, not
// mPeakPower — preserved from the original; confirm which is intended.
// kAudioQueueProperty_EnableLevelMetering must have been enabled before
// recording started, otherwise no meter data is available.
- (float)instantaneousPeakPower {
    UInt32 channelCount = _mDataFormat.mChannelsPerFrame;
    UInt32 dataSize = sizeof(AudioQueueLevelMeterState) * channelCount;
    AudioQueueLevelMeterState *levels = (AudioQueueLevelMeterState *)malloc(dataSize);
    if (levels == NULL) {
        return 0;
    }
    float channelSum = 0;
    OSStatus rc = AudioQueueGetProperty(_audioQueue,
                                        kAudioQueueProperty_CurrentLevelMeter,
                                        levels,
                                        &dataSize);
    if (rc == noErr) {
        for (UInt32 i = 0; i < channelCount; i++) {
            channelSum += levels[i].mAveragePower;
        }
    }
    // Fix: the original leaked `levels` on the rc != noErr branch.
    free(levels);
    return channelSum; // stays 0 when the property read failed, as before
}

ok,完结。