一.AudioUnit
1.簡要
在iOS平臺上,所有的音頻框架底層都是基于AudioUnit實現的。較高層次的音頻框架包括:Media Player、AV Foundation、OpenAL和Audio Toolbox,這些框架都封裝了AudioUnit,然后提供了更高層次的API(功能更少,職責更單一的接口)。
2.應(yīng)用場景
1.想使用低延遲的音頻I/O(input或者output),比如說在VoIP的應用場景下。
2.多路聲音的合成并且回放,比如游戲或者音樂合成器的應用。
3.使用AudioUnit里面提供的特有功能,比如:回聲消除、Mix兩軌音頻,以及均衡器、壓縮器、混響器等效果器。
4.需要圖狀結構來處理音頻,可以將音頻處理模塊組裝到靈活的圖狀結構中,蘋果公司為音頻開發者提供了這種API。
3.創(chuàng)建簡要流程
1)在運行時,獲取對動態鏈接庫的引用,該庫定義要使用的音頻單元。
2)實例化音頻單元。
3)按照其類型的需要配置音頻單元,以適應您的應用程序的意圖。
4)初始化音頻單元以準備處理音頻。
5)開始音頻流。
6)控制音頻單元许师。
7)完成后,銷毀音頻單元
4.創(chuàng)建方式
1.裸創(chuàng)建方式
2.AUGraph方式創(chuàng)建
現在進入AudioUnit實踐開篇主題,使用第一種方式創建AudioUnit
#import "AudioUnitManger.h"
#import <AudioToolbox/AudioToolbox.h>
// Class extension: private state for the streaming AudioUnit player.
// NSURLSessionDelegate is adopted here so download callbacks land on this object.
@interface AudioUnitManger()<NSURLSessionDelegate>
{
NSInteger _readedPacketIndex;                     // index of the next buffered packet to feed the converter
UInt32 _renderBufferSize;                         // capacity in bytes of the render scratch buffer
AudioUnit _outAudioUinit;                         // RemoteIO output unit (speaker path)
AudioStreamBasicDescription _streamDescription;   // source format parsed from the byte stream
AudioFileStreamID _audioFileStreamID;             // parser that splits raw bytes into audio packets
AudioBufferList *_renderBufferList;               // scratch buffer the converter renders PCM into
AudioConverterRef _converter;                     // converts source packets -> interleaved 16-bit PCM
}
// Parsed source packets buffered for playback (filled by the parser's packets callback).
@property(nonatomic,strong) NSMutableArray<NSData*> *paketsArray;
@end
@implementation AudioUnitManger
//調(diào)用AudioConverterFillComplexBuffer傳入數(shù)據(jù)微渠,并在callBack函數(shù)調(diào)用填充buffer的方法。
OSStatus DJAURenderCallback(void *inRefCon,AudioUnitRenderActionFlags * ioActionFlags,const AudioTimeStamp *inTimeStamp,UInt32 inBusNumber,UInt32 inNumberFrames, AudioBufferList * __nullable ioData){
AudioUnitManger *self = (__bridge AudioUnitManger *)(inRefCon);
@synchronized (self) {
if (self->_readedPacketIndex < self.paketsArray.count) {
@autoreleasepool {
UInt32 packetSize = inNumberFrames;
OSStatus status = AudioConverterFillComplexBuffer(self->_converter, DJAudioConverterComplexInputDataProc, (__bridge void *)self, &packetSize, self->_renderBufferList, NULL);
if (status != noErr && status != 'DJnd') {
[self stop];
return -1;
}
else if (!packetSize) {
ioData->mNumberBuffers = 0;
}
else {
ioData->mNumberBuffers = 1;
ioData->mBuffers[0].mNumberChannels = 2;
ioData->mBuffers[0].mDataByteSize = self->_renderBufferList->mBuffers[0].mDataByteSize;
ioData->mBuffers[0].mData =self->_renderBufferList->mBuffers[0].mData;
self->_renderBufferList->mBuffers[0].mDataByteSize = self->_renderBufferSize;
}
}
}
else {
ioData->mNumberBuffers = 0;
return -1;
}
}
return noErr;
}
//歌曲信息解析回調(diào)將傳遞給回調(diào)的常量檀蹋。
//每當(dāng)在數(shù)據(jù)中分析屬性的值時云芦,都將回調(diào)
void DJAudioFileStream_PropertyListenerProc(void * inClientData,AudioFileStreamID inAudioFileStream,AudioFileStreamPropertyID inPropertyID,AudioFileStreamPropertyFlags * ioFlags)
{
if (inPropertyID == kAudioFileStreamProperty_DataFormat) {
AudioUnitManger *self = (__bridge AudioUnitManger *)(inClientData);
UInt32 dataSize = 0;
Boolean writable = false;
OSStatus status = AudioFileStreamGetPropertyInfo(inAudioFileStream, kAudioFileStreamProperty_DataFormat, &dataSize, &writable);
assert(status == noErr);
status = AudioFileStreamGetProperty(inAudioFileStream, kAudioFileStreamProperty_DataFormat, &dataSize, &self->_streamDescription);
assert(status == noErr);
AudioStreamBasicDescription destFormat = audioStreamBasicDescription();
status = AudioConverterNew(&self->_streamDescription, &destFormat, &self->_converter);
assert(status == noErr);
}
}
// Packets callback: invoked each time the parser separates audio packets out
// of the raw byte stream. Packets are copied into paketsArray; playback starts
// once roughly three seconds of audio has been buffered.
void DJAudioFileStreamPacketsProc(void *inClientData,UInt32 inNumberBytes,UInt32 inNumberPackets,const void *inInputData,AudioStreamPacketDescription *inPacketDescriptions)
{
AudioUnitManger *self = (__bridge AudioUnitManger *)(inClientData);
if (inPacketDescriptions) {
// Each packet's offset/size within inInputData is described individually.
// NOTE(review): when inPacketDescriptions is NULL (CBR-style data) the
// packets are silently dropped — confirm target streams always provide
// descriptions (e.g. MP3/AAC).
for (int i = 0; i < inNumberPackets; i++) {
SInt64 packetOffset = inPacketDescriptions[i].mStartOffset;
UInt32 packetSize = inPacketDescriptions[i].mDataByteSize;
assert(packetSize > 0);
NSData *packet = [NSData dataWithBytes:inInputData + packetOffset length:packetSize];
[self.paketsArray addObject:packet];
}
}
// Start playback once ~3 seconds' worth of packets are buffered and nothing
// has been consumed yet.
if (self->_readedPacketIndex == 0 && self.paketsArray.count > [self packetsPerSecond] * 3) {
[self play];
}
}
// Number of source packets per second of audio, derived from the parsed
// stream format. Returns 0 while the format is still unknown
// (mFramesPerPacket not yet populated), which also avoids dividing by zero.
- (double)packetsPerSecond
{
    UInt32 framesPerPacket = _streamDescription.mFramesPerPacket;
    if (framesPerPacket == 0) {
        return 0;
    }
    return _streamDescription.mSampleRate / framesPerPacket;
}
/*
 AudioFileStreamOpen (
 void * __nullable inClientData,
 AudioFileStream_PropertyListenerProc inPropertyListenerProc,
 AudioFileStream_PacketsProc inPacketsProc,
 AudioFileTypeID inFileTypeHint,
 AudioFileStreamID __nullable * __nonnull outAudioFileStream)
 Creates a parser you feed raw bytes to; it calls back whenever it finds
 content (properties and audio packets) in the data.
 - inClientData: context object passed back to both callbacks.
 - inFileTypeHint: for data whose type cannot be determined easily or uniquely
   from the bytes alone (ADTS, AC3) this hint indicates the file type;
   otherwise pass zero when the type is unknown.
 - outAudioFileStream: the new stream ID used with the other AudioFileStream APIs.
 */
// inClientData — the context object (this manager instance).
// AudioFileStream_PropertyListenerProc — invoked once per stream property parsed.
// AudioFileStream_PacketsProc — invoked each time a batch of packets is separated out.
// AudioFileTypeID — file-type hint; 0 lets the parser detect the format itself.
// Designated entry point: sets up the output unit and the parser, then starts
// downloading the URL; parsing begins as data arrives in the session delegate.
-(instancetype)initWithURL:(NSURL*)url
{
if (self = [super init]) {
_paketsArray = [NSMutableArray arrayWithCapacity:0];
[self setupOutAudioUnit];
AudioFileStreamOpen((__bridge void * _Nullable)(self), DJAudioFileStream_PropertyListenerProc, DJAudioFileStreamPacketsProc, 0, &_audioFileStreamID);
// NOTE(review): the session retains its delegate (self) until invalidated,
// and the stream/converter/buffers are never torn down in visible code —
// confirm lifetime management elsewhere.
NSURLSession *urlSession = [NSURLSession sessionWithConfiguration:[NSURLSessionConfiguration defaultSessionConfiguration] delegate:self delegateQueue:nil];
NSURLSessionDataTask *task = [urlSession dataTaskWithURL:url];
[task resume];
}
return self;
}
// Creates and configures the RemoteIO output unit: sets the PCM stream format
// on the input scope of bus 0 (the path that ends at the speaker), installs
// the render callback that supplies PCM on demand, and allocates the scratch
// AudioBufferList the converter renders into.
-(void)setupOutAudioUnit
{
    // Describe the RemoteIO audio unit.
    AudioComponentDescription ioUnitDescription;
    memset(&ioUnitDescription, 0, sizeof(AudioComponentDescription));
    ioUnitDescription.componentType = kAudioUnitType_Output;
    ioUnitDescription.componentSubType = kAudioUnitSubType_RemoteIO;
    ioUnitDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
    ioUnitDescription.componentFlags = 0;
    ioUnitDescription.componentFlagsMask = 0;

    // Locate the component matching the description, then instantiate it.
    AudioComponent outComponent = AudioComponentFindNext(NULL, &ioUnitDescription);
    assert(outComponent != NULL); // was unchecked: NULL here would crash below
    OSStatus status = AudioComponentInstanceNew(outComponent, &_outAudioUinit);
    assert(status == noErr);

    // Tell the unit what we will feed it: 44.1 kHz, 16-bit, interleaved stereo
    // PCM, set on the INPUT scope of element 0.
    AudioStreamBasicDescription pcmStreamDesc = audioStreamBasicDescription();
    OSStatus statusSetProperty = AudioUnitSetProperty(_outAudioUinit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &pcmStreamDesc, sizeof(pcmStreamDesc));
    assert(statusSetProperty == noErr); // was computed but never checked

    // Install the render callback; RemoteIO calls it whenever it needs input.
    AURenderCallbackStruct callBackStruct;
    callBackStruct.inputProc = DJAURenderCallback;
    callBackStruct.inputProcRefCon = (__bridge void * _Nullable)(self);
    statusSetProperty = AudioUnitSetProperty(_outAudioUinit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, 0, &callBackStruct, sizeof(AURenderCallbackStruct));
    assert(statusSetProperty == noErr);

    // Scratch buffer the converter fills each render cycle.
    // sizeof(AudioBufferList) already embeds one AudioBuffer, which is exactly
    // what mNumberBuffers == 1 needs. (The original calloc(4, sizeof(UInt32) +
    // sizeof(bufferSize)) computed an arbitrary 32 bytes that was only
    // accidentally large enough.)
    UInt32 bufferSize = 4096 * 4;
    _renderBufferSize = bufferSize;
    _renderBufferList = calloc(1, sizeof(AudioBufferList));
    _renderBufferList->mNumberBuffers = 1;
    _renderBufferList->mBuffers[0].mData = calloc(1, bufferSize);
    _renderBufferList->mBuffers[0].mDataByteSize = bufferSize;
    _renderBufferList->mBuffers[0].mNumberChannels = 2;
}
// Destination format shared by the converter and the RemoteIO unit:
// 44.1 kHz linear PCM, 16-bit signed integer samples, 2 interleaved channels —
// hence 4 bytes per frame and, as always for PCM, 1 frame per packet.
static AudioStreamBasicDescription audioStreamBasicDescription()
{
    AudioStreamBasicDescription description = {0};
    description.mFormatID = kAudioFormatLinearPCM;                   // general audio data format identifier
    description.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;  // format-specific detail flags
    description.mSampleRate = 44100.0;                               // sample frames per second
    description.mChannelsPerFrame = 2;                               // channels in each frame (stereo)
    description.mBitsPerChannel = 16;                                // sample bits per channel
    description.mBytesPerFrame = 4;                                  // bytes in one sample frame (2 ch x 2 bytes)
    description.mFramesPerPacket = 1;                                // sample frames per packet
    description.mBytesPerPacket = 4;                                 // bytes per packet (1 frame)
    description.mReserved = 0;                                       // pads the struct to 8-byte alignment
    return description;
}
// AudioConverter input callback: supplies one buffered source packet per call.
// Per the AudioConverterComplexInputDataProc contract, *ioNumberDataPackets is
// set to the number of packets actually provided (0 at end of data), and
// *outDataPacketDescription — when the converter asks for it — describes the
// packet handed back in ioData.
OSStatus DJAudioConverterComplexInputDataProc(AudioConverterRef inAudioConverter,UInt32 * ioNumberDataPackets,AudioBufferList * ioData,AudioStreamPacketDescription * __nullable * __nullable outDataPacketDescription,void * __nullable inUserData)
{
    AudioUnitManger *self = (__bridge AudioUnitManger *)(inUserData);
    if (self->_readedPacketIndex >= self.paketsArray.count) {
        // Out of buffered packets: report zero packets and return the private
        // end-of-data status that DJAURenderCallback explicitly tolerates.
        // (Returning -1 here, as the original did, made the render callback
        // stop playback even on a normal buffer underrun, because it only
        // whitelists noErr and 'DJnd' — a status nothing ever produced.)
        *ioNumberDataPackets = 0;
        NSLog(@"Have No Data");
        return 'DJnd';
    }
    // Hand the converter the next raw source packet to decode.
    NSData *packet = self.paketsArray[self->_readedPacketIndex];
    ioData->mNumberBuffers = 1;
    ioData->mBuffers[0].mData = (void *)packet.bytes;
    ioData->mBuffers[0].mDataByteSize = (UInt32)packet.length;
    // outDataPacketDescription is nullable — only fill it when requested.
    if (outDataPacketDescription) {
        // static: must remain valid after this function returns, until the
        // converter consumes it. Fine while a single converter pulls at a time.
        static AudioStreamPacketDescription aspdesc;
        aspdesc.mDataByteSize = (UInt32)packet.length;
        aspdesc.mStartOffset = 0;
        aspdesc.mVariableFramesInPacket = 1;
        *outDataPacketDescription = &aspdesc;
    }
    self->_readedPacketIndex++;
    *ioNumberDataPackets = 1; // exactly one packet provided this call
    return noErr;
}
#pragma mark -NSURLSessionDelegate
// Downloaded bytes arrive here (on the session's queue) and are fed straight
// to the file-stream parser, which fires the property/packets callbacks above.
- (void)URLSession:(NSURLSession *)session dataTask:(NSURLSessionDataTask *)dataTask didReceiveData:(NSData *)data
{
    OSStatus status = AudioFileStreamParseBytes(_audioFileStreamID, (UInt32)data.length, data.bytes, 0);
    if (status != noErr) {
        // The original left an empty success branch and swallowed failures;
        // surface parse errors instead.
        NSLog(@"AudioFileStreamParseBytes failed: %d", (int)status);
    }
}
// Starts the RemoteIO unit; audio is then pulled via DJAURenderCallback.
// Returns YES when the unit started successfully.
- (BOOL)play
{
    OSStatus status = AudioOutputUnitStart(_outAudioUinit);
    // Return a proper BOOL (the original fell through to a bare `return 0;`).
    return status == noErr;
}
// Stops the RemoteIO unit (also invoked by the render callback on fatal
// converter errors). Returns YES when the unit stopped successfully.
- (BOOL)stop
{
    OSStatus status = AudioOutputUnitStop(_outAudioUinit);
    // Return a proper BOOL (the original fell through to a bare `return 0;`).
    return status == noErr;
}
@end
//播放
AudioUnitManger *player = [[AudioUnitManger alloc] initWithURL:[NSURL URLWithString:@"http://www.ytmp3.cn/down/58627.mp3"]];
[player play];
Github
音頻編解碼基礎(chǔ)篇之AudioUnit-2-AUGraph方式創(chuàng)建并集成麥克風(fēng)
音頻編解碼基礎(chǔ)篇之AudioUnit-3-AUGraph方式創(chuàng)建并集成麥克風(fēng)&混音等特效