上一篇講了如何使用 AudioUnit 進行播放音頻文件,這一篇講一下如何使用 AudioUnit 進行錄音。
這兩個過程其實很類似,只是在回調方法的使用上不同。
1.初始化
AudioUnit的初始化比較囉嗦,而且方法比較多。這裏採用一種比較簡單的。
// Describe the I/O audio unit we want: Apple's RemoteIO, which provides
// direct access to the hardware microphone and speaker.
AudioComponentDescription outputUinitDesc;
memset(&outputUinitDesc, 0, sizeof(AudioComponentDescription));
outputUinitDesc.componentType = kAudioUnitType_Output;           // I/O unit family
outputUinitDesc.componentSubType = kAudioUnitSubType_RemoteIO;   // hardware mic/speaker
outputUinitDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
outputUinitDesc.componentFlags = 0;
outputUinitDesc.componentFlagsMask = 0;
// Find the first component matching the description and instantiate it.
AudioComponent outComponent = AudioComponentFindNext(NULL, &outputUinitDesc);
OSStatus status = AudioComponentInstanceNew(outComponent, &_outAudioUinit);
if (status != noErr) {
    // OSStatus is SInt32; cast so %d matches on all ABIs.
    NSLog(@"AudioComponentInstanceNew error, ret: %d", (int)status);
}
接下來要設置一下AudioUnit的屬性,都是通過AudioUnitSetProperty這個接口來設置
// Describe the capture format: 44.1 kHz, mono, 16-bit signed-integer linear PCM.
AudioStreamBasicDescription recordFormat;
memset(&recordFormat, 0, sizeof(recordFormat));  // zero mReserved and any unset fields
recordFormat.mSampleRate = 44100;
recordFormat.mFormatID = kAudioFormatLinearPCM;
// kAudioFormatFlagIsPacked is required for 16-bit samples that fully occupy
// their 2-byte frame; without it the samples are treated as aligned-in-word.
recordFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
recordFormat.mFramesPerPacket = 1;
recordFormat.mChannelsPerFrame = 1;
recordFormat.mBitsPerChannel = 16;
recordFormat.mBytesPerFrame = recordFormat.mBytesPerPacket = 2;  // 1 ch * 16 bits
// Bus 1 is the input element; its OUTPUT scope is where captured data leaves the unit.
status = AudioUnitSetProperty(recordUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &recordFormat, sizeof(recordFormat));
if (status != noErr) {
    NSLog(@"set StreamFormat error, ret: %d", (int)status);
}
這一步設置的是 audioUnit 對應的 ASBD(AudioStreamBasicDescription)。
設置錄音對應的 recordCallback:
// Install the input callback that fires every time the mic has new samples.
AURenderCallbackStruct recordCallback;
recordCallback.inputProcRefCon = (__bridge void * _Nullable)(self);  // lets the C callback reach self
recordCallback.inputProc = RecordCallback;  // static C function defined below
status = AudioUnitSetProperty(recordUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Output, 1, &recordCallback, sizeof(recordCallback));
if (status != noErr) {
    // Cast: OSStatus is SInt32, so %d needs an explicit (int).
    NSLog(@"AURenderCallbackStruct error, ret: %d", (int)status);
}
錄製的音頻數據會從回調函數中獲得。
設置 AudioBufferList
// Allocate the AudioBufferList that AudioUnitRender will fill inside the callback.
UInt32 numberBuffers = 1;   // use UInt32 consistently with the Core Audio API
UInt32 bufferSize = 2048;   // bytes per pull; 1024 frames of 16-bit mono PCM
// AudioBufferList declares ONE inline AudioBuffer; reserve extra space so the
// allocation stays correct if numberBuffers is ever raised above 1.
bufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList) + (numberBuffers - 1) * sizeof(AudioBuffer));
bufferList->mNumberBuffers = numberBuffers;
bufferList->mBuffers[0].mData = malloc(bufferSize);
bufferList->mBuffers[0].mDataByteSize = bufferSize;
bufferList->mBuffers[0].mNumberChannels = 1;  // mono, matching recordFormat
初始化AudioUnit
// Finish configuration; after this the unit is ready to be started.
OSStatus result = AudioUnitInitialize(recordUnit);
if (result != noErr) {
    NSLog(@"AudioUnitInitialize error, ret: %d", (int)result);
}
2. 編寫錄音的回調函數
當開始錄音的時候,程序就會進入到上面設置的回調函數。在這個函數中,把音頻數據取出來,然後進行下一步處理。
可以播放,也可以進行加工、編碼等等。
/// Input render callback invoked by the RemoteIO unit whenever captured
/// samples are available. Pulls the new frames with AudioUnitRender into
/// self->bufferList and appends them to the running pcmData buffer.
/// Always returns noErr so the render loop keeps running.
static OSStatus RecordCallback(void *inRefCon,
                               AudioUnitRenderActionFlags *ioActionFlags,
                               const AudioTimeStamp *inTimeStamp,
                               UInt32 inBusNumber,
                               UInt32 inNumberFrames,
                               AudioBufferList *ioData) {
    // inRefCon carries the controller instance registered via inputProcRefCon.
    AudioUnitRecordController *self = (__bridge AudioUnitRecordController *)inRefCon;
    if (inNumberFrames > 0) {
        self->bufferList->mNumberBuffers = 1;
        // Pull the freshly captured frames from the input bus into our buffer list.
        OSStatus status = AudioUnitRender(self->recordUnit, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, self->bufferList);
        if (status != noErr) {
            NSLog(@"recordcallback error is %d", (int)status);
        } else {
            // Append only when the render succeeded; otherwise the buffer
            // holds stale/undefined bytes.
            [self->pcmData appendBytes:self->bufferList->mBuffers[0].mData
                                length:self->bufferList->mBuffers[0].mDataByteSize];
        }
    }
    return noErr;
}
在這個回調函數中,通過函數AudioUnitRender來獲得錄音的數據,這些音頻數據保存在AudioBufferList中。