PCM audio data decoded by FFmpeg is stored in one of several layouts, described by codec_ctx->sample_fmt. AudioUnit can directly play PCM data in FFmpeg's AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_FLT, and AV_SAMPLE_FMT_FLTP formats.
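Which format flags the AudioStreamBasicDescription needs (shown in full later in this section) follows directly from the sample format. As a minimal sketch, a helper like the hypothetical FormatFlagsForSampleFormat below could do the mapping for the four supported formats:
static AudioFormatFlags FormatFlagsForSampleFormat(enum AVSampleFormat sample_fmt) {
    switch (sample_fmt) {
        case AV_SAMPLE_FMT_S16:  return kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        case AV_SAMPLE_FMT_S16P: return kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsNonInterleaved;
        case AV_SAMPLE_FMT_FLT:  return kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked;
        case AV_SAMPLE_FMT_FLTP: return kAudioFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved;
        default:                 return 0; // anything else would need conversion first
    }
}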
// Create the AudioUnit through an AUGraph
- (OSStatus)setupAudioUnitWithStreamDescription:(AudioStreamBasicDescription)streamDescription {
    // On iOS, AVAudioSession must be configured first; macOS does not need this
    OSStatus status = NewAUGraph(&_graph);
    if (status != noErr) {
        NSLog(@"Can not create new graph");
        return status;
    }
    AudioComponentDescription description;
    bzero(&description, sizeof(description));
    description.componentType = kAudioUnitType_Output;
    // kAudioUnitSubType_HALOutput is the macOS subtype; on iOS use kAudioUnitSubType_RemoteIO instead
    description.componentSubType = kAudioUnitSubType_HALOutput;
    description.componentManufacturer = kAudioUnitManufacturer_Apple;
    status = AUGraphAddNode(_graph, &description, &_node);
    if (status != noErr) {
        NSLog(@"Can not add node");
        return status;
    }
    status = AUGraphOpen(_graph);
    if (status != noErr) {
        NSLog(@"Can not open graph");
        return status;
    }
    status = AUGraphNodeInfo(_graph, _node, NULL, &_unit);
    if (status != noErr) {
        NSLog(@"Can not get node info");
        return status;
    }
    // Describe the input data format with an AudioStreamBasicDescription
    status = AudioUnitSetProperty(_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamDescription, sizeof(streamDescription));
    if (status != noErr) {
        NSLog(@"Can not set stream format on unit input scope");
        return status;
    }
    // Set the callback that supplies audio data
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = &InputRenderCallback;
    callbackStruct.inputProcRefCon = (__bridge void *)self;
    status = AudioUnitSetProperty(_unit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &callbackStruct, sizeof(callbackStruct));
    if (status != noErr) {
        NSLog(@"Fail to set render callback");
        return status;
    }
    status = AUGraphInitialize(_graph);
    if (status != noErr) {
        NSLog(@"Can not initialize graph");
        return status;
    }
    return status;
}
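Initializing the graph does not start rendering; the render callback only begins firing once the graph is started. A minimal usage sketch, assuming the _graph created above:
// Start playback; the render callback set above begins firing
OSStatus status = AUGraphStart(_graph);
if (status != noErr) {
    NSLog(@"Can not start graph");
}
// When done: AUGraphStop(_graph); AUGraphUninitialize(_graph); DisposeAUGraph(_graph);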
The AudioStreamBasicDescription required for AV_SAMPLE_FMT_FLTP data:
AudioStreamBasicDescription streamDescription;
bzero(&streamDescription, sizeof(streamDescription));
streamDescription.mFormatID = kAudioFormatLinearPCM;
/*
 AV_SAMPLE_FMT_S16   kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked
 AV_SAMPLE_FMT_S16P  kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsNonInterleaved
 AV_SAMPLE_FMT_FLT   kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked
 AV_SAMPLE_FMT_FLTP  kAudioFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved
 */
streamDescription.mFormatFlags = kAudioFormatFlagIsFloat | kAudioFormatFlagIsNonInterleaved;
// Should match the sample rate of the decoded stream (codec_ctx->sample_rate); 44100 Hz here
streamDescription.mSampleRate = 44100.0;
streamDescription.mChannelsPerFrame = codec_ctx->channels;
streamDescription.mFramesPerPacket = 1;
// A float sample is 4 bytes (32 bits); a signed-integer sample is 2 bytes (16 bits)
streamDescription.mBitsPerChannel = 32;
// When the left and right channels are stored in separate planes this is the size of one sample;
// when they are interleaved it is the sample size × 2 (i.e. × the channel count for stereo)
streamDescription.mBytesPerFrame = 4;
streamDescription.mBytesPerPacket = 4;
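For contrast, the interleaved integer case: applying the comments above to AV_SAMPLE_FMT_S16 gives 16 bits per sample and frames that pack all channels together. This snippet is an illustration derived from the table above, not code from the demo:
// Interleaved 16-bit integer PCM (AV_SAMPLE_FMT_S16)
streamDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
streamDescription.mBitsPerChannel = 16;                               // 2 bytes per sample
streamDescription.mBytesPerFrame = 2 * codec_ctx->channels;           // interleaved: sample size × channel count
streamDescription.mBytesPerPacket = streamDescription.mBytesPerFrame; // mFramesPerPacket is 1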
// Decode the audio and write the PCM data to files
- (void)decodeAudioData {
    AVPacket packet;
    av_init_packet(&packet);
    while ((av_read_frame(ifmt_ctx, &packet)) >= 0) {
        if (packet.stream_index == audio_stream_index) {
            int ret = avcodec_send_packet(codec_ctx, &packet);
            while (ret >= 0) {
                AVFrame * frame = av_frame_alloc();
                ret = avcodec_receive_frame(codec_ctx, frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                    av_frame_free(&frame); // don't leak the frame when leaving the loop
                    break;
                } else if (ret < 0) {
                    NSLog(@"Error during decoding");
                    av_frame_free(&frame);
                    break;
                }
                // Size of one channel plane (nb_channels = 1) of FLTP samples
                int data_size = av_samples_get_buffer_size(NULL, 1, frame->nb_samples, AV_SAMPLE_FMT_FLTP, 0);
                fwrite(frame->data[0], 1, data_size, file1);
                fwrite(frame->data[1], 1, data_size, file2);
                av_frame_free(&frame);
            }
        }
        av_packet_unref(&packet); // release the packet's buffer each iteration
    }
    // Rewind both files so playback starts from the beginning
    if (file1) {
        fseek(file1, 0, SEEK_SET);
    }
    if (file2) {
        fseek(file2, 0, SEEK_SET);
    }
}
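The loop above stops as soon as av_read_frame runs out of packets, which can leave a few frames buffered inside the decoder. A minimal sketch of draining them (not part of the demo; file1/file2 and codec_ctx are the same variables used above):
avcodec_send_packet(codec_ctx, NULL); // a NULL packet puts the decoder into draining mode
while (1) {
    AVFrame * frame = av_frame_alloc();
    if (avcodec_receive_frame(codec_ctx, frame) < 0) { // returns AVERROR_EOF once fully drained
        av_frame_free(&frame);
        break;
    }
    int data_size = av_samples_get_buffer_size(NULL, 1, frame->nb_samples, AV_SAMPLE_FMT_FLTP, 0);
    fwrite(frame->data[0], 1, data_size, file1);
    fwrite(frame->data[1], 1, data_size, file2);
    av_frame_free(&frame);
}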
// Render callback that supplies audio data
- (OSStatus)renderData:(AudioBufferList *)ioData atTimeStamp:(const AudioTimeStamp *)timeStamp forElement:(UInt32)element numberFrames:(UInt32)numFrames flags:(AudioUnitRenderActionFlags *)flags {
    for (int iBuffer = 0; iBuffer < ioData->mNumberBuffers; iBuffer++) {
        memset(ioData->mBuffers[iBuffer].mData, 0, ioData->mBuffers[iBuffer].mDataByteSize);
    }
    FILE * files[] = {file1, file2};
    for (int iBuffer = 0; iBuffer < ioData->mNumberBuffers; iBuffer++) {
        // As a shortcut I wrote the left and right channel planes to two separate files, so here
        // we simply read mDataByteSize bytes from each file. In a real player the size of FFmpeg's
        // decoded output will not necessarily match mDataByteSize, so buffering is needed.
        fread(ioData->mBuffers[iBuffer].mData, ioData->mBuffers[iBuffer].mDataByteSize, 1, files[iBuffer]);
    }
    return noErr;
}
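Because the buffers are zero-filled first, a short read near the end of the file simply plays out as silence. If you want to detect the end and, say, loop the file, one hedged variant of the read inside the loop above is to pass a size of 1 so that fread reports the actual byte count (the looping behavior is an assumption, not something the demo does):
size_t bytesRead = fread(ioData->mBuffers[iBuffer].mData, 1,
                         ioData->mBuffers[iBuffer].mDataByteSize, files[iBuffer]);
if (bytesRead < ioData->mBuffers[iBuffer].mDataByteSize) {
    fseek(files[iBuffer], 0, SEEK_SET); // assumption: restart from the beginning; leftover bytes stay silent
}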
static OSStatus InputRenderCallback(void * inRefCon, AudioUnitRenderActionFlags * ioActionFlags, const AudioTimeStamp * inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList * ioData) {
    ViewController * viewController = (__bridge ViewController *)inRefCon;
    return [viewController renderData:ioData atTimeStamp:inTimeStamp forElement:inBusNumber numberFrames:inNumberFrames flags:ioActionFlags];
}
In the demo I used AVFilter to convert the PCM data to AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_FLT, and AV_SAMPLE_FMT_FLTP respectively, and played each with AudioUnit. The demo code is on GitHub.