First, a look at an example of an Android application driving recording from the Java API:
public class MicRecord extends Thread {
    AudioRecord audioRecord;
    AudioTrack audioTrack;
    volatile boolean canPlay = true; // an AtomicBoolean is a better fit here than a volatile flag

    @Override
    public void run() {
        // requires the android.permission.RECORD_AUDIO permission
        final int recordBuffSize = AudioRecord.getMinBufferSize(44100,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
        audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, 44100,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, recordBuffSize);
        final int playBuffSize = AudioTrack.getMinBufferSize(44100,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
        audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, 44100,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
                playBuffSize, AudioTrack.MODE_STREAM);
        audioRecord.startRecording();
        audioTrack.play();
        byte[] recordData = new byte[recordBuffSize];
        while (canPlay) {
            int readSize = audioRecord.read(recordData, 0, recordBuffSize);
            if (readSize > 0) { // read() returns a negative error code on failure
                audioTrack.write(recordData, 0, readSize);
            }
        }
        audioRecord.stop();
        audioRecord.release();
        audioTrack.stop();
        audioTrack.release();
    }

    public void stopRecord() {
        canPlay = false;
    }
}
Below is a write-up I put together in my spare time: a brief account of AudioRecord's initialization flow, its cross-process love-hate entanglement with the audioFlinger and audioPolicy inside mediaserver, and the startRecording flow. Unless you are a low-level system engineer hooking the HAL up to an input-device driver, you may roughly understand the flow but will never actually need to modify this code. I also know the HAL conventions somewhat, but since I no longer work on a Linux OS role there is no need to dig into the details. Even so, being familiar with the underlying principles is well worth it for building good applications.
AudioRecord initialization
Any discussion of the AudioRecord.java class has to mention its corresponding registered native methods, which live in /base/core/jni/android_media_AudioRecord.cpp. So when exactly do these native methods get loaded?
int register_android_media_AudioRecord(JNIEnv *env)
This is registered through int AndroidRuntime::startReg(JNIEnv *env) by the AndroidRuntime that zygote creates at application startup. The JNI registration array is fixed in AndroidRuntime.cpp:
static const RegJNIRec gRegJNI[] = {
....
REG_JNI(register_android_media_AudioRecord),
REG_JNI(register_android_media_AudioSystem),
REG_JNI(register_android_media_AudioTrack),
....
};
So Java classes whose calls involve an object's native methods never need to load an extra .so themselves.
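For reference, each register_* function ultimately hands a JNINativeMethod table to env->RegisterNatives. Below is a minimal sketch of what register_android_media_AudioRecord boils down to; the table is cut to two entries and the native_setup signature is reconstructed from the parameter list shown further down, so treat it as illustrative rather than the verbatim AOSP table:
static const JNINativeMethod gMethods[] = {
    // Java method name, JNI signature, C++ implementation
    {"native_setup",
        "(Ljava/lang/Object;Ljava/lang/Object;IIIII[ILjava/lang/String;)I",
        (void *)android_media_AudioRecord_setup},
    {"native_start", "(II)I", (void *)android_media_AudioRecord_start},
};

int register_android_media_AudioRecord(JNIEnv *env)
{
    // registerNativeMethods() is a thin wrapper over env->RegisterNatives()
    return AndroidRuntime::registerNativeMethods(env,
            "android/media/AudioRecord", gMethods, NELEM(gMethods));
}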
Creating an AudioRecord object takes these parameters: the audio source audioSource, the sample rate sampleRateInHz, the channel configuration channelConfig, the audio format audioFormat, and the capture buffer size bufferSizeInBytes.
The AudioRecord constructor checks the parameters and then performs native_setup:
int initResult = native_setup( new WeakReference<AudioRecord>(this),
mAudioAttributes, mSampleRate, mChannelMask, mChannelIndexMask,
mAudioFormat, mNativeBufferSizeInBytes,
session, ActivityThread.currentOpPackageName());
which jumps into android_media_AudioRecord.cpp:
static jint
android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
jobject jaa, jint sampleRateInHertz, jint channelMask, jint channelIndexMask,
jint audioFormat, jint buffSizeInBytes, jintArray jSession, jstring opPackageName)
{
......
// create an uninitialized AudioRecord object
sp<AudioRecord> lpRecorder = new AudioRecord(String16(opPackageNameStr.c_str()));
......
const status_t status = lpRecorder->set(paa->source,
sampleRateInHertz,
format, // word length, PCM
channelMask,
frameCount,
recorderCallback,// callback_t
lpCallbackData,// void* user
0, // notificationFrames,
true, // threadCanCallJava
sessionId,
AudioRecord::TRANSFER_DEFAULT,
flags,
-1, -1, // default uid, pid
paa);
......
// save our newly created C++ AudioRecord in the "nativeRecorderInJavaObj" field
// of the Java object
setAudioRecord(env, thiz, lpRecorder);
// save our newly created callback information in the "nativeCallbackCookie" field
// of the Java object (in mNativeCallbackCookie) so we can free the memory in finalize()
env->SetLongField(thiz, javaAudioRecordFields.nativeCallbackCookie, (jlong)lpCallbackData);
return (jint) AUDIO_JAVA_SUCCESS;
}
static sp<AudioRecord> setAudioRecord(JNIEnv* env, jobject thiz, const sp<AudioRecord>& ar)
{
Mutex::Autolock l(sLock);
sp<AudioRecord> old =
(AudioRecord*)env->GetLongField(thiz, javaAudioRecordFields.nativeRecorderInJavaObj);
if (ar.get()) {
ar->incStrong((void*)setAudioRecord);
}
if (old != 0) {
old->decStrong((void*)setAudioRecord);
}
env->SetLongField(thiz, javaAudioRecordFields.nativeRecorderInJavaObj, (jlong)ar.get());
return old;
}
Only the important parts of AudioRecord's native_setup are kept here for analysis. sp<AudioRecord> lpRecorder is the JNI-layer AudioRecord object from /av/media/libmedia/AudioRecord.cpp. Once it is created, a slick bit of sleight-of-hand stores the object's pointer into the Java AudioRecord's (long) mNativeRecorderInJavaObj field for safekeeping; whenever lpRecorder is needed later, that long value is fetched and cast straight back into a JNI AudioRecord reference. A stock JNI move:
static sp<AudioRecord> getAudioRecord(JNIEnv* env, jobject thiz)
{
Mutex::Autolock l(sLock);
AudioRecord* const ar =
(AudioRecord*)env->GetLongField(thiz, javaAudioRecordFields.nativeRecorderInJavaObj);
return sp<AudioRecord>(ar);
}
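Stripped of the AudioRecord specifics, the idiom is this (a generic sketch with a hypothetical Foo class, not AOSP code). The manual incStrong/decStrong pairing is the whole point: a raw pointer parked in a Java long field holds no reference by itself, so the JNI layer takes one strong reference on behalf of the Java object and drops it when the field is overwritten or cleared:
// Park a refcounted native object in a Java 'long' field (generic sketch).
// nativeObjField is assumed to be the cached jfieldID of a long member.
static sp<Foo> setNativeObject(JNIEnv* env, jobject thiz, const sp<Foo>& obj)
{
    sp<Foo> old = (Foo*)env->GetLongField(thiz, nativeObjField);
    if (obj.get()) {
        obj->incStrong((void*)setNativeObject); // ref now owned by the Java field
    }
    if (old != 0) {
        old->decStrong((void*)setNativeObject); // drop the ref held for the old value
    }
    env->SetLongField(thiz, nativeObjField, (jlong)obj.get());
    return old;
}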
lpRecorder->set(xx[]) is the centerpiece of the whole recording flow: it obtains the audio_io_handle_t input handle through audioFlinger, opens the HAL device to get the audio_stream_in_t input stream, and creates and starts the recording thread RecordThread, among other things. Walked through below:
status_t AudioRecord::set(
audio_source_t inputSource,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
callback_t cbf,
void* user,
uint32_t notificationFrames,
bool threadCanCallJava,
int sessionId,
transfer_type transferType,
audio_input_flags_t flags,
int uid,
pid_t pid,
const audio_attributes_t* pAttributes)
{
......
if (!audio_is_input_channel(channelMask)) {
ALOGE("Invalid channel mask %#x", channelMask);
return BAD_VALUE;
}
mChannelMask = channelMask;
uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
mChannelCount = channelCount;
if (audio_is_linear_pcm(format)) {
mFrameSize = channelCount * audio_bytes_per_sample(format);
} else {
mFrameSize = sizeof(uint8_t);
}
// mFrameCount is initialized in openRecord_l
mReqFrameCount = frameCount;
mNotificationFramesReq = notificationFrames;
// mNotificationFramesAct is initialized in openRecord_l
/**
Session id tied to audioflinger
**/
if (sessionId == AUDIO_SESSION_ALLOCATE) {
mSessionId = AudioSystem::newAudioUniqueId();
} else {
mSessionId = sessionId;
}
ALOGV("set(): mSessionId %d", mSessionId);
int callingpid = IPCThreadState::self()->getCallingPid();
int mypid = getpid();
if (uid == -1 || (callingpid != mypid)) {
mClientUid = IPCThreadState::self()->getCallingUid();
} else {
mClientUid = uid;
}
if (pid == -1 || (callingpid != mypid)) {
mClientPid = callingpid;
} else {
mClientPid = pid;
}
mFlags = flags;
mCbf = cbf;
if (cbf != NULL) {
mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
// thread begins in paused state, and will not reference us until start()
}
// create the IAudioRecord
status_t status = openRecord_l(0 /*epoch*/, mOpPackageName);
......
AudioSystem::acquireAudioSessionId(mSessionId, -1);
......
return NO_ERROR;
}
The session id mSessionId defaults to the AUDIO_SESSION_ALLOCATE mode, in which case audioFlinger->newAudioUniqueId creates an id unique to the current object; audioFlinger->acquireAudioSessionId(int audioSession, pid_t pid) then checks for duplicate acquisition and stores an AudioSessionRef object holding those two parameters in a vector.
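For the curious, a condensed paraphrase of the AudioFlinger side of acquireAudioSessionId (some corner-case handling trimmed):
// Paraphrased sketch: AudioFlinger keeps one refcounted AudioSessionRef
// per (sessionId, pid) pair in the mAudioSessionRefs vector.
void AudioFlinger::acquireAudioSessionId(int audioSession, pid_t pid)
{
    Mutex::Autolock _l(mLock);
    pid_t caller = IPCThreadState::self()->getCallingPid();
    // (mediaserver may acquire on behalf of the real client pid here)
    size_t num = mAudioSessionRefs.size();
    for (size_t i = 0; i < num; i++) {
        AudioSessionRef *ref = mAudioSessionRefs.editItemAt(i);
        if (ref->mSessionid == audioSession && ref->mPid == caller) {
            ref->mCnt++; // duplicate acquire: just bump the count
            return;
        }
    }
    // first acquire for this (session, pid) pair: remember it in the vector
    mAudioSessionRefs.push(new AudioSessionRef(audioSession, caller));
}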
We only care about status_t status = openRecord_l(0 /*epoch*/, mOpPackageName);
status_t AudioRecord::openRecord_l(size_t epoch, const String16& opPackageName)
{
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
/* The IAudioFlinger obtained here is actually the binder service "media.audio_flinger" running inside mediaServer.
*/
if (audioFlinger == 0) {
ALOGE("Could not get audioflinger");
return NO_INIT;
}
// Fast tracks must be at the primary _output_ [sic] sampling rate,
// because there is currently no concept of a primary input sampling rate
uint32_t afSampleRate = AudioSystem::getPrimaryOutputSamplingRate();
if (afSampleRate == 0) {
ALOGW("getPrimaryOutputSamplingRate failed");
}
// Client can only express a preference for FAST. Server will perform additional tests.
if ((mFlags & AUDIO_INPUT_FLAG_FAST) && !((
// either of these use cases:
// use case 1: callback transfer mode
(mTransfer == TRANSFER_CALLBACK) ||
// use case 2: obtain/release mode
(mTransfer == TRANSFER_OBTAIN)) &&
// matching sample rate
(mSampleRate == afSampleRate))) {
ALOGW("AUDIO_INPUT_FLAG_FAST denied by client; transfer %d, track %u Hz, primary %u Hz",
mTransfer, mSampleRate, afSampleRate);
// once denied, do not request again if IAudioRecord is re-created
mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
}
IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
pid_t tid = -1;
if (mFlags & AUDIO_INPUT_FLAG_FAST) {
trackFlags |= IAudioFlinger::TRACK_FAST;
if (mAudioRecordThread != 0) {
tid = mAudioRecordThread->getTid();
}
}
if (mDeviceCallback != 0 && mInput != AUDIO_IO_HANDLE_NONE) {
AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mInput);
}
/*
This first goes through the "media.audio_policy" service, implemented by AudioPolicyService in /av/media/services/Audiopolicy/service/AudioPolicyInterfaceImpl.cpp. Its member mAudioPolicyManager (/av/media/services/Audiopolicy/managerdefault/AudioPolicyManager.cpp) picks the input audio_io_handle_t and selects the device from the parameters, then opens it through AudioFlinger's openInput, which in the end actually creates the recording thread RecordThread and the AudioStreamIn.
*/
audio_io_handle_t input;
status_t status = AudioSystem::getInputForAttr(&mAttributes, &input,
(audio_session_t)mSessionId,
IPCThreadState::self()->getCallingUid(),
mSampleRate, mFormat, mChannelMask,
mFlags, mSelectedDeviceId);
if (status != NO_ERROR) {
ALOGE("Could not get audio input for record source %d, sample rate %u, format %#x, "
"channel mask %#x, session %d, flags %#x",
mAttributes.source, mSampleRate, mFormat, mChannelMask, mSessionId, mFlags);
return BAD_VALUE;
}
{
// Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
// we must release it ourselves if anything goes wrong.
size_t frameCount = mReqFrameCount;
size_t temp = frameCount; // temp may be replaced by a revised value of frameCount,
// but we will still need the original value also
int originalSessionId = mSessionId;
// The notification frame count is the period between callbacks, as suggested by the server.
size_t notificationFrames = mNotificationFramesReq;
/* IMemory acts as shared memory here; the audio data can later be fetched through it. iMem is created and initialized in AudioFlinger, which also creates an audio_track_cblk_t object and exposes its pointer through iMem->pointer(). What audio_track_cblk_t does is detailed below. The IAudioRecord finally returned by audioFlinger->openRecord is the key object controlling the whole recording flow, e.g. starting and stopping the recording.
*/
sp<IMemory> iMem; // for cblk
sp<IMemory> bufferMem;
sp<IAudioRecord> record = audioFlinger->openRecord(input,
mSampleRate,
mFormat,
mChannelMask,
opPackageName,
&temp,
&trackFlags,
tid,
mClientUid,
&mSessionId,
&notificationFrames,
iMem,
bufferMem,
&status);
ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
"session ID changed from %d to %d", originalSessionId, mSessionId);
if (status != NO_ERROR) {
ALOGE("AudioFlinger could not create record track, status: %d", status);
goto release;
}
ALOG_ASSERT(record != 0);
// AudioFlinger now owns the reference to the I/O handle,
// so we are no longer responsible for releasing it.
if (iMem == 0) {
ALOGE("Could not get control block");
return NO_INIT;
}
void *iMemPointer = iMem->pointer();
if (iMemPointer == NULL) {
ALOGE("Could not get control block pointer");
return NO_INIT;
}
audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
// Starting address of buffers in shared memory.
// The buffers are either immediately after the control block,
// or in a separate area at discretion of server.
void *buffers;
if (bufferMem == 0) {
buffers = cblk + 1;
} else {
buffers = bufferMem->pointer();
if (buffers == NULL) {
ALOGE("Could not get buffer pointer");
return NO_INIT;
}
}
// invariant that mAudioRecord != 0 is true only after set() returns successfully
if (mAudioRecord != 0) {
IInterface::asBinder(mAudioRecord)->unlinkToDeath(mDeathNotifier, this);
mDeathNotifier.clear();
}
mAudioRecord = record;
mCblkMemory = iMem;
mBufferMemory = bufferMem;
IPCThreadState::self()->flushCommands();
mCblk = cblk;
// note that temp is the (possibly revised) value of frameCount
if (temp < frameCount || (frameCount == 0 && temp == 0)) {
ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
}
frameCount = temp;
mAwaitBoost = false;
if (mFlags & AUDIO_INPUT_FLAG_FAST) {
if (trackFlags & IAudioFlinger::TRACK_FAST) {
ALOGV("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu", frameCount);
mAwaitBoost = true;
} else {
ALOGV("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
// once denied, do not request again if IAudioRecord is re-created
mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
}
}
// Make sure that application is notified with sufficient margin before overrun
if (notificationFrames == 0 || notificationFrames > frameCount) {
ALOGW("Received notificationFrames %zu for frameCount %zu", notificationFrames, frameCount);
}
mNotificationFramesAct = notificationFrames;
// We retain a copy of the I/O handle, but don't own the reference
mInput = input;
mRefreshRemaining = true;
mFrameCount = frameCount;
// If IAudioRecord is re-created, don't let the requested frameCount
// decrease. This can confuse clients that cache frameCount().
if (frameCount > mReqFrameCount) {
mReqFrameCount = frameCount;
}
/* update proxy: the AudioRecord client-side proxy. The cblk parameter makes it plain that it proxies the audio-data-fetching functionality, e.g. obtainBuffer, which pulls recorded data out of the shared memory behind the cblk member pointer. */
mProxy = new AudioRecordClientProxy(cblk, buffers, mFrameCount, mFrameSize);
mProxy->setEpoch(epoch);
mProxy->setMinimum(mNotificationFramesAct);
mDeathNotifier = new DeathNotifier(this);
/**
Since mAudioRecord is a binder interface object obtained from AudioFlinger, we still have to register a death notification so resources get released.
*/
IInterface::asBinder(mAudioRecord)->linkToDeath(mDeathNotifier, this);
if (mDeviceCallback != 0) {
AudioSystem::addAudioDeviceCallback(mDeviceCallback, mInput);
}
return NO_ERROR;
}
release:
AudioSystem::releaseInput(input, (audio_session_t)mSessionId);
if (status == NO_ERROR) {
status = NO_INIT;
}
return status;
}
The AudioSystem::getInputForAttr interface fills in the flags, sample rate, format, channel mask, session ID mSessionId, process uid and so on. The flow then moves to the "media.audio_policy" service in /av/media/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp: audioPolicy->getInputForAttr validates parameters such as the audio_attributes_t and uid, and also runs an "android.permission.MODIFY_AUDIO_ROUTING" or "android.permission.CAPTURE_AUDIO_OUTPUT" permission check against the inputType that was selected.
How a native Linux process on Android checks an app's permission is simple in principle: it uses the "permission" binder service hosted in systemServer to check, across processes, which permissions the app holds, passing the pid, uid, and the permission string.
"permission"是在ActivityManagerService.java 里面注冊進Servicemanager里的膏蚓,實現(xiàn)類是
static class PermissionController extends IPermissionController.Stub{}
在這里class PermissionController實現(xiàn)的是相當于Bn端,而Bp端是實現(xiàn)定義在/framework/native/libs/binder/IPermissonController.cpp
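A minimal sketch of such a check as a native service would issue it, assuming libbinder's checkPermission() helper (it looks up the "permission" service and calls IPermissionController::checkPermission across binder):
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h> // declares checkPermission()

// Ask systemServer whether the current binder caller holds a permission.
static bool callerHasPermission(const char* perm)
{
    android::IPCThreadState* ipc = android::IPCThreadState::self();
    const pid_t pid = ipc->getCallingPid(); // pid/uid captured from the binder transaction
    const uid_t uid = ipc->getCallingUid();
    return android::checkPermission(android::String16(perm), pid, uid);
}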
status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *input,
audio_session_t session,
uid_t uid,
uint32_t samplingRate,
audio_format_t format,
audio_channel_mask_t channelMask,
audio_input_flags_t flags,
audio_port_handle_t selectedDeviceId)
{
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
// already checked by client, but double-check in case the client wrapper is bypassed
if (attr->source >= AUDIO_SOURCE_CNT && attr->source != AUDIO_SOURCE_HOTWORD &&
attr->source != AUDIO_SOURCE_FM_TUNER) {
return BAD_VALUE;
}
if ((attr->source == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) {
return BAD_VALUE;
}
sp<AudioPolicyEffects>audioPolicyEffects;
status_t status;
AudioPolicyInterface::input_type_t inputType;
// if the caller is us, trust the specified uid
if (IPCThreadState::self()->getCallingPid() != getpid_cached || uid == (uid_t)-1) {
uid_t newclientUid = IPCThreadState::self()->getCallingUid();
if (uid != (uid_t)-1 && uid != newclientUid) {
ALOGW("%s uid %d tried to pass itself off as %d", __FUNCTION__, newclientUid, uid);
}
uid = newclientUid;
}
{
Mutex::Autolock _l(mLock);
// the audio_in_acoustics_t parameter is ignored by get_input()
/**
The default AudioPolicyManager implementation is in /av/services/audioPolicy/managerdefault/AudioPolicyManager.cpp
*/
status = mAudioPolicyManager->getInputForAttr(attr, input, session, uid,
samplingRate, format, channelMask,
flags, selectedDeviceId,
&inputType);
audioPolicyEffects = mAudioPolicyEffects;
if (status == NO_ERROR) {
// enforce permission (if any) required for each type of input
switch (inputType) {
case AudioPolicyInterface::API_INPUT_LEGACY:
break;
case AudioPolicyInterface::API_INPUT_TELEPHONY_RX:
// FIXME: use the same permission as for remote submix for now.
case AudioPolicyInterface::API_INPUT_MIX_CAPTURE:
// permission check
if (!captureAudioOutputAllowed()) {
ALOGE("getInputForAttr() permission denied: capture not allowed");
status = PERMISSION_DENIED;
}
break;
case AudioPolicyInterface::API_INPUT_MIX_EXT_POLICY_REROUTE:
// permission check
if (!modifyAudioRoutingAllowed()) {
ALOGE("getInputForAttr() permission denied: modify audio routing not allowed");
status = PERMISSION_DENIED;
}
break;
case AudioPolicyInterface::API_INPUT_INVALID:
default:
LOG_ALWAYS_FATAL("getInputForAttr() encountered an invalid input type %d",
(int)inputType);
}
}
if (status != NO_ERROR) {
if (status == PERMISSION_DENIED) {
mAudioPolicyManager->releaseInput(*input, session);
}
return status;
}
}
if (audioPolicyEffects != 0) {
/**
Effect setup related to the recording input
*/
// create audio pre processors according to input source
status_t status = audioPolicyEffects->addInputEffects(*input, attr->source, session);
if (status != NO_ERROR && status != ALREADY_EXISTS) {
ALOGW("Failed to add effects on input %d", *input);
}
}
return NO_ERROR;
}
mAudioPolicyManager->getInputForAttr applies routing policy to come up with an audio_devices_t, which in fact is a uint32, so it can be thought of as an input device id. Once the required device, the audio_config_t configuration, and the capability flags are in hand, the flow circles back to AudioFlinger to openInput that device.
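For orientation, audio_devices_t is defined in system/audio.h as a plain uint32_t bit-field, and input devices all carry a marker bit. A couple of values from that era's header for flavor:
// audio_devices_t is a uint32_t bit-field (system/audio.h):
//   AUDIO_DEVICE_BIT_IN            0x80000000   // marks a device as an input
//   AUDIO_DEVICE_IN_BUILTIN_MIC    (AUDIO_DEVICE_BIT_IN | 0x4)
//   AUDIO_DEVICE_IN_WIRED_HEADSET  (AUDIO_DEVICE_BIT_IN | 0x10)
// audio_is_input_device() simply tests the AUDIO_DEVICE_BIT_IN bit.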
status_t AudioPolicyManager::getInputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *input,
audio_session_t session,
uid_t uid,
uint32_t samplingRate,
audio_format_t format,
audio_channel_mask_t channelMask,
audio_input_flags_t flags,
audio_port_handle_t selectedDeviceId,
input_type_t *inputType)
{
*input = AUDIO_IO_HANDLE_NONE;
*inputType = API_INPUT_INVALID;
audio_devices_t device;
// handle legacy remote submix case where the address was not always specified
String8 address = String8("");
bool isSoundTrigger = false;
audio_source_t inputSource = attr->source;
audio_source_t halInputSource;
AudioMix *policyMix = NULL;
if (inputSource == AUDIO_SOURCE_DEFAULT) {
inputSource = AUDIO_SOURCE_MIC;
}
halInputSource = inputSource;
// Explicit routing?
sp<DeviceDescriptor> deviceDesc;
for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
if (mAvailableInputDevices[i]->getId() == selectedDeviceId) {
deviceDesc = mAvailableInputDevices[i];
break;
}
}
mInputRoutes.addRoute(session, SessionRoute::STREAM_TYPE_NA, inputSource, deviceDesc, uid);
if (inputSource == AUDIO_SOURCE_REMOTE_SUBMIX &&
strncmp(attr->tags, "addr=", strlen("addr=")) == 0) {
status_t ret = mPolicyMixes.getInputMixForAttr(*attr, &policyMix);
if (ret != NO_ERROR) {
return ret;
}
*inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
address = String8(attr->tags + strlen("addr="));
} else {
device = getDeviceAndMixForInputSource(inputSource, &policyMix);
if (device == AUDIO_DEVICE_NONE) {
ALOGW("getInputForAttr() could not find device for source %d", inputSource);
return BAD_VALUE;
}
if (policyMix != NULL) {
address = policyMix->mRegistrationId;
if (policyMix->mMixType == MIX_TYPE_RECORDERS) {
// there is an external policy, but this input is attached to a mix of recorders,
// meaning it receives audio injected into the framework, so the recorder doesn't
// know about it and is therefore considered "legacy"
*inputType = API_INPUT_LEGACY;
} else {
// recording a mix of players defined by an external policy, we're rerouting for
// an external policy
*inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
}
} else if (audio_is_remote_submix_device(device)) {
address = String8("0");
*inputType = API_INPUT_MIX_CAPTURE;
} else if (device == AUDIO_DEVICE_IN_TELEPHONY_RX) {
*inputType = API_INPUT_TELEPHONY_RX;
} else {
*inputType = API_INPUT_LEGACY;
}
// adapt channel selection to input source
switch (inputSource) {
case AUDIO_SOURCE_VOICE_UPLINK:
channelMask = AUDIO_CHANNEL_IN_VOICE_UPLINK;
break;
case AUDIO_SOURCE_VOICE_DOWNLINK:
channelMask = AUDIO_CHANNEL_IN_VOICE_DNLINK;
break;
case AUDIO_SOURCE_VOICE_CALL:
channelMask = AUDIO_CHANNEL_IN_VOICE_UPLINK | AUDIO_CHANNEL_IN_VOICE_DNLINK;
break;
default:
break;
}
if (inputSource == AUDIO_SOURCE_HOTWORD) {
ssize_t index = mSoundTriggerSessions.indexOfKey(session);
if (index >= 0) {
*input = mSoundTriggerSessions.valueFor(session);
isSoundTrigger = true;
flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_HW_HOTWORD);
ALOGV("SoundTrigger capture on session %d input %d", session, *input);
} else {
halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
}
}
}
// find a compatible input profile (not necessarily identical in parameters)
sp<IOProfile> profile;
// samplingRate and flags may be updated by getInputProfile
uint32_t profileSamplingRate = samplingRate;
audio_format_t profileFormat = format;
audio_channel_mask_t profileChannelMask = channelMask;
audio_input_flags_t profileFlags = flags;
for (;;) {
/**
Probe the capabilities of the device id, address, configuration and flags settled on so far
*/
profile = getInputProfile(device, address,
profileSamplingRate, profileFormat, profileChannelMask,
profileFlags);
if (profile != 0) {
break; // success
} else if (profileFlags != AUDIO_INPUT_FLAG_NONE) {
profileFlags = AUDIO_INPUT_FLAG_NONE; // retry
} else { // fail
ALOGW("getInputForAttr() could not find profile for device 0x%X, samplingRate %u,"
"format %#x, channelMask 0x%X, flags %#x",
device, samplingRate, format, channelMask, flags);
return BAD_VALUE;
}
}
if (profile->getModuleHandle() == 0) {
ALOGE("getInputForAttr(): HW module %s not opened", profile->getModuleName());
return NO_INIT;
}
audio_config_t config = AUDIO_CONFIG_INITIALIZER;
config.sample_rate = profileSamplingRate;
config.channel_mask = profileChannelMask;
config.format = profileFormat;
/**
mpClientInterface is the client-side interface to AudioFlinger
*/
status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
input,
&config,
&device,
address,
halInputSource,
profileFlags);
// only accept input with the exact requested set of parameters
if (status != NO_ERROR || *input == AUDIO_IO_HANDLE_NONE ||
(profileSamplingRate != config.sample_rate) ||
(profileFormat != config.format) ||
(profileChannelMask != config.channel_mask)) {
ALOGW("getInputForAttr() failed opening input: samplingRate %d, format %d,"
" channelMask %x",
samplingRate, format, channelMask);
if (*input != AUDIO_IO_HANDLE_NONE) {
mpClientInterface->closeInput(*input);
}
return BAD_VALUE;
}
sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile);
inputDesc->mInputSource = inputSource;
inputDesc->mRefCount = 0;
inputDesc->mOpenRefCount = 1;
inputDesc->mSamplingRate = profileSamplingRate;
inputDesc->mFormat = profileFormat;
inputDesc->mChannelMask = profileChannelMask;
inputDesc->mDevice = device;
inputDesc->mSessions.add(session);
inputDesc->mIsSoundTrigger = isSoundTrigger;
inputDesc->mPolicyMix = policyMix;
ALOGV("getInputForAttr() returns input type = %d", *inputType);
addInput(*input, inputDesc);
mpClientInterface->onAudioPortListUpdate();
return NO_ERROR;
}
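The getInputProfile call inside the for(;;) loop above deserves a look: it walks the input profiles declared by every loaded HW module (parsed from the audio policy configuration) and returns the first one compatible with the requested parameters. A close paraphrase:
sp<IOProfile> AudioPolicyManager::getInputProfile(audio_devices_t device,
        String8 address,
        uint32_t& samplingRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        audio_input_flags_t flags)
{
    // Pick the first available profile supporting all requested parameters.
    for (size_t i = 0; i < mHwModules.size(); i++) {
        if (mHwModules[i]->mHandle == 0) {
            continue; // this HW module has not been opened
        }
        for (size_t j = 0; j < mHwModules[i]->mInputProfiles.size(); j++) {
            sp<IOProfile> profile = mHwModules[i]->mInputProfiles[j];
            if (profile->isCompatibleProfile(device, address, samplingRate,
                    &samplingRate /*updatedSamplingRate*/,
                    format, channelMask, (audio_output_flags_t)flags)) {
                return profile;
            }
        }
    }
    return NULL;
}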
Back to AudioFlinger:
status_t AudioFlinger::openInput(audio_module_handle_t module,
audio_io_handle_t *input,
audio_config_t *config,
audio_devices_t *devices,
const String8& address,
audio_source_t source,
audio_input_flags_t flags)
{
Mutex::Autolock _l(mLock);
if (*devices == AUDIO_DEVICE_NONE) {
return BAD_VALUE;
}
sp<RecordThread> thread = openInput_l(module, input, config, *devices, address, source, flags);
if (thread != 0) {
// notify client processes of the new input creation
thread->ioConfigChanged(AUDIO_INPUT_OPENED);
return NO_ERROR;
}
return NO_INIT;
}
Using the input, audio_config_t, audio_devices_t, audio_input_flags_t and audio_source_t, it opens the HAL input stream audio_stream_in_t *inStream and at the same time creates a RecordThread to manage that stream.
sp<AudioFlinger::RecordThread> AudioFlinger::openInput_l(audio_module_handle_t module,
audio_io_handle_t *input,
audio_config_t *config,
audio_devices_t devices,
const String8& address,
audio_source_t source,
audio_input_flags_t flags)
{
AudioHwDevice *inHwDev = findSuitableHwDev_l(module, devices);
.......
audio_hw_device_t *inHwHal = inHwDev->hwDevice();
audio_stream_in_t *inStream = NULL;
status_t status = inHwHal->open_input_stream(inHwHal, *input, devices, &halconfig,
&inStream, flags, address.string(), source);
......
if (status == NO_ERROR && inStream != NULL) {
/**
tee_sink is an official facility for intercepting the raw PCM data in the audio pipeline, and it can be configured to produce wav files. Look up the official usage yourself; if all you want is to grab the PCM data, it is easier to change the code in /av/services/audioflinger/Threads.cpp so that RecordThread or PlaybackThread reads the PCM out and stores it in a fixed file.
*/
#ifdef TEE_SINK
// Try to re-use most recently used Pipe to archive a copy of input for dumpsys,
// or (re-)create if current Pipe is idle and does not match the new format
......
switch (kind) {
case TEE_SINK_NEW: {
Pipe *pipe = new Pipe(mTeeSinkInputFrames, format);
size_t numCounterOffers = 0;
const NBAIO_Format offers[1] = {format};
ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
ALOG_ASSERT(index == 0);
/*
The pipe needed by the tee sink feature
*/
PipeReader *pipeReader = new PipeReader(*pipe);
numCounterOffers = 0;
index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
ALOG_ASSERT(index == 0);
mRecordTeeSink = pipe;
mRecordTeeSource = pipeReader;
teeSink = pipe;
}
break;
case TEE_SINK_OLD:
teeSink = mRecordTeeSink;
break;
case TEE_SINK_NO:
default:
break;
}
#endif
/** AudioStreamIn is simply a struct that holds inHwDev and inStream
*/
AudioStreamIn *inputStream = new AudioStreamIn(inHwDev, inStream);
// Start record thread
// RecordThread requires both input and output device indication to forward to audio
// pre processing modules
sp<RecordThread> thread = new RecordThread(this,
inputStream,
*input,
primaryOutputDevice_l(),
devices,
mSystemReady
#ifdef TEE_SINK
, teeSink
#endif
);
/* The RecordThread just obtained is stored into the KeyedVector keyed by its audio_io_handle_t input; later, when the client needs this thread for start or stop operations, it can be fetched from mRecordThreads by that same input.
*/
mRecordThreads.add(*input, thread);
return thread;
}
}
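The lookup that last comment promises, used by openRecord below, is checkRecordThread_l, a one-liner over that keyed table:
// Fetch the RecordThread previously stored under this audio_io_handle_t.
AudioFlinger::RecordThread *AudioFlinger::checkRecordThread_l(audio_io_handle_t input) const
{
    return mRecordThreads.valueFor(input).get();
}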
The RecordThread class is in /av/services/audioflinger/Threads.cpp; incidentally, the audio playback thread class PlaybackThread is in Threads.cpp as well.
With the RecordThread created and the HAL input stream opened, the AudioSystem::getInputForAttr part of initializing the JNI-layer AudioRecord object in the application process is complete. AudioRecord initialization is not over yet, though; there is still
sp<IAudioRecord> record = audioFlinger->openRecord(input,mSampleRate,mFormat, mChannelMask,opPackageName,
&temp,&trackFlags, tid,mClientUid,&mSessionId, &notificationFrames,iMem,
bufferMem,&status);
The IMemory iMem IInterface parameter is very important. IAudioRecord is the interface object permanently bound to AudioFlinger for driving the recording thread RecordThread; the flow goes to
sp<IAudioRecord> AudioFlinger::openRecord(
audio_io_handle_t input,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
const String16& opPackageName,
size_t *frameCount,
IAudioFlinger::track_flags_t *flags,
pid_t tid,
int clientUid,
int *sessionId,
size_t *notificationFrames,
sp<IMemory>& cblk,
sp<IMemory>& buffers,
status_t *status)
{
sp<RecordThread::RecordTrack> recordTrack;
sp<RecordHandle> recordHandle;
sp<Client> client;
status_t lStatus;
int lSessionId;
cblk.clear();
buffers.clear();
// check the "android.permission.RECORD_AUDIO" permission against the package name
if (!recordingAllowed(opPackageName)) {
ALOGE("openRecord() permission denied: recording not allowed");
lStatus = PERMISSION_DENIED;
goto Exit;
}
......
Mutex::Autolock _l(mLock);
RecordThread *thread = checkRecordThread_l(input);
......
pid_t pid = IPCThreadState::self()->getCallingPid();
client = registerPid(pid);
......
/*
recordTrack is the object inside the recording thread that does the core work
*/
// TODO: the uid should be passed in as a parameter to openRecord
recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask,
frameCount, lSessionId, notificationFrames,
clientUid, flags, tid, &lStatus);
......
/*
cblk is the reference tied to the shared memory
*/
cblk = recordTrack->getCblk();
buffers = recordTrack->getBuffers();
......
/**
recordHandle extends IAudioRecord; it amounts to handing the calling client a binder interface for controlling the RecordTrack
*/
// return handle to client
recordHandle = new RecordHandle(recordTrack);
return recordHandle;
}
The code above keeps the important parts and omits the rest. The checkRecordThread_l function, as described earlier, pulls the matching recording thread RecordThread out of the keyed table by audio_io_handle_t input. The RecordThread then has to createRecordTrack_l to create a recordTrack. Inside, the RecordTrack in turn creates three key objects: RecordBufferConverter, which converts data to the target format; AudioRecordServerProxy, the recording server-side proxy used by AudioFlinger; and ResamplerBufferProvider, which, as the name suggests, provides the data for resampling.
RecordTrack initialization:
mRecordBufferConverter = new RecordBufferConverter(
thread->mChannelMask, thread->mFormat, thread->mSampleRate,
channelMask, format, sampleRate);
mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
mFrameSize, !isExternalTrack());
mResamplerBufferProvider = new ResamplerBufferProvider(this);
Why call them key? mRecordBufferConverter has a conversion interface:
convert(void *dst, AudioBufferProvider *provider, size_t frames)
Inside RecordThread's threadLoop, after the raw data has been read from the HAL stream, mRecordBufferConverter uses the provider's getNextBuffer to take the raw data obtained indirectly from the RecordThread, converts it into the target format and copies it into the memory that void *dst points at; a condensed sketch of threadLoop, from starting the recording through reading raw data to stopping, follows below. mServerProxy, for its part, exists solely to provide a circularly reused memory buffer for reading and writing the recorded data.
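Here is that sketch: a heavily condensed paraphrase of the capture side of RecordThread::threadLoop (resampling, overrun handling and the fast-capture path are all omitted), showing where convert() sits:
// Condensed paraphrase of the core of RecordThread::threadLoop():
bool AudioFlinger::RecordThread::threadLoop()
{
    for (;;) {
        // 1. sleep until start() has made at least one RecordTrack active
        // 2. pull one chunk of raw PCM from the HAL input stream
        ssize_t bytesRead = mInput->stream->read(mInput->stream,
                mRsmpInBuffer, mBufferSize);
        // 3. for each active track: its ResamplerBufferProvider hands out
        //    slices of that raw data through getNextBuffer(), and
        //    mRecordBufferConverter->convert(dst, provider, frames) writes
        //    the format-converted frames into the track's shared-memory
        //    buffer by way of the AudioRecordServerProxy
        // 4. the client-side AudioRecord later consumes those frames from
        //    the same shared memory
    }
    // on exit: put the HAL stream into standby and clear the active tracks
    return false;
}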
In addition, when RecordTrack is initialized it also allocates the shared-memory heap behind the cblk and buffers that the app-side AudioRecord client uses for memory sharing.
cblk = recordTrack->getCblk();
buffers = recordTrack->getBuffers();
These are the IMemory references returned once the RecordTrack has finished its allocations; the client relies on this IMemory to read the recorded data. The returned RecordHandle object wraps recordTrack; no surprises here, it is just a facade that concentrates the recording-control requests coming from the client.
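To close the loop from the client side, this is roughly how the Java-level read() lands on that shared memory, condensed from AudioRecord::read/obtainBuffer in libmedia (error paths trimmed):
// Condensed paraphrase of the client-side consumption path:
ssize_t AudioRecord::read(void* buffer, size_t userSize)
{
    ssize_t read = 0;
    Buffer audioBuffer;
    while (userSize >= mFrameSize) {
        audioBuffer.frameCount = userSize / mFrameSize;
        // obtainBuffer() goes through mProxy (the AudioRecordClientProxy),
        // which uses the audio_track_cblk_t counters to locate readable
        // frames in the shared memory that RecordThread keeps filling
        status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
        if (err < 0) {
            break;
        }
        size_t bytesRead = audioBuffer.size;
        memcpy(buffer, audioBuffer.i8, bytesRead); // copy out of shared memory
        buffer = (char *)buffer + bytesRead;
        userSize -= bytesRead;
        read += bytesRead;
        releaseBuffer(&audioBuffer); // advance the read position in the cblk
    }
    return read;
}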