上一篇對(duì)MediaPlayer底層播放器的創(chuàng)建做了具體的分析。我們知道,Binder是C/S結(jié)構(gòu),MediaPlayer.cpp相當(dāng)于C端,而MediaPlayerService則是S端。MediaPlayerService運(yùn)行在MediaServer的子進(jìn)程中。我們先看它的啟動(dòng)過(guò)程,MediaServer定義在main_mediaserver.cpp文件中。
// mediaserver entry point; the process is started by init.
int main(int argc __unused, char** argv)
{
signal(SIGPIPE, SIG_IGN);
char value[PROPERTY_VALUE_MAX];
bool doLog = (property_get("ro.test_harness", value, "0") > 0) && (atoi(value) == 1);
pid_t childPid;
// Parent process path: taken only when media.log is enabled
// (ro.test_harness == 1) and fork() returned the child's pid.
if (doLog && (childPid = fork()) != 0) {
strcpy(argv[0], "media.log");
// Singleton: creates the ProcessState instance and opens the Binder driver.
sp<ProcessState> proc(ProcessState::self());
MediaLogService::instantiate();
ProcessState::self()->startThreadPool();
for (;;) {
siginfo_t info;
// Wait for the child (the real media server) to change state.
int ret = waitid(P_PID, childPid, &info, WEXITED | WSTOPPED | WCONTINUED);
if (ret == EINTR) {
continue;
}
if (ret < 0) {
break;
}
......
}
} else {
// Child process (or the only process when media.log is disabled).
if (doLog) {
// Die with the parent; detach into our own process group.
prctl(PR_SET_PDEATHSIG, SIGKILL);
setpgid(0, 0);
}
// Singleton: creates the ProcessState instance and opens the Binder driver.
sp<ProcessState> proc(ProcessState::self());
// Obtain the ServiceManager proxy.
sp<IServiceManager> sm = defaultServiceManager();
ALOGI("ServiceManager: %p", sm.get());
// Register the various media services with ServiceManager.
AudioFlinger::instantiate();
// Registers MediaPlayerService under "media.player".
MediaPlayerService::instantiate();
CameraService::instantiate();
AudioPolicyService::instantiate();
SoundTriggerHwService::instantiate();
registerExtensions();
// Spawn a pool thread that reads/writes the Binder driver.
ProcessState::self()->startThreadPool();
// The main thread also joins the Binder read/dispatch loop.
IPCThreadState::self()->joinThreadPool();
}
}
MediaServer是由init進(jìn)程啟動(dòng)的。MediaServer啟動(dòng)后fork了一個(gè)子進(jìn)程,在子進(jìn)程中將各種多媒體服務(wù)進(jìn)行注冊(cè),其中就包括MediaPlayerService。
每個(gè)進(jìn)程只有一個(gè)ProcessState,顯然self()是一個(gè)單例函數(shù);ProcessState構(gòu)建的同時(shí)打開(kāi)了Binder驅(qū)動(dòng)。
// Per-process singleton accessor: the first call constructs the global
// ProcessState (which opens the Binder driver); later calls return it.
sp<ProcessState> ProcessState::self()
{
    Mutex::Autolock _l(gProcessMutex);
    if (gProcess == NULL) {
        // First call in this process: create and cache the instance.
        gProcess = new ProcessState;
    }
    return gProcess;
}
#define BINDER_VM_SIZE ((1*1024*1024) - (4096 *2))// Default mapping size: 1MB - 8KB
// Constructor: opens the Binder driver (in the initializer list), then mmaps
// a region of driver memory used to receive transaction payloads.
ProcessState::ProcessState()
: mDriverFD(open_driver())// Open the Binder driver; keep its fd in mDriverFD
, mVMStart(MAP_FAILED)// Mapping address initialized to MAP_FAILED
, mManagesContexts(false)
, mBinderContextCheckFunc(NULL)
, mBinderContextUserData(NULL)
, mThreadPoolStarted(false)
, mThreadPoolSeq(1)
{
// Proceed only if the Binder driver was opened successfully.
if (mDriverFD >= 0) {
#if !defined(HAVE_WIN32_IPC)
// Map BINDER_VM_SIZE bytes of the driver into this process; the returned
// address is where incoming transaction data will appear.
mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
// On mapping failure, close the driver fd.
if (mVMStart == MAP_FAILED) {
ALOGE("Using /dev/binder failed: unable to mmap transaction memory.\n");
close(mDriverFD);
mDriverFD = -1;
}
#else
mDriverFD = -1;
#endif
}
LOG_ALWAYS_FATAL_IF(mDriverFD < 0, "Binder driver could not be opened. Terminating.");
}
ProcessState構(gòu)造函數(shù)調(diào)用了open_driver()打開(kāi)/dev/binder這個(gè)設(shè)備,并通過(guò)mmap進(jìn)行內(nèi)存映射,映射成功將返回內(nèi)存首地址。那么,從這塊內(nèi)存讀/寫數(shù)據(jù),也就是在Binder驅(qū)動(dòng)上進(jìn)行數(shù)據(jù)的讀/寫,無(wú)需進(jìn)行用戶態(tài)和內(nèi)核態(tài)的切換。對(duì)于這個(gè)知識(shí)點(diǎn)的理解,可以看《Binder簡(jiǎn)介》中的示例。
static int open_driver()
{
//打開(kāi)binder驅(qū)動(dòng)節(jié)點(diǎn),返回文件描述符
int fd = open("/dev/binder", O_RDWR);
if (fd >= 0) {
fcntl(fd, F_SETFD, FD_CLOEXEC);
int vers = 0;
status_t result = ioctl(fd, BINDER_VERSION, &vers);
if (result == -1) {
ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
close(fd);
fd = -1;
}
if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
ALOGE("Binder driver protocol does not match user space protocol!");
close(fd);
fd = -1;
}
//默認(rèn)最大線程數(shù)為15
size_t maxThreads = 15;
result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
if (result == -1) {
ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
}
} else {
ALOGW("Opening '/dev/binder' failed: %s\n", strerror(errno));
}
return fd;
}
ioctl向Binder驅(qū)動(dòng)指定了當(dāng)前fd的最大線程數(shù)為15,并返回fd。到此,Binder驅(qū)動(dòng)已經(jīng)被打開(kāi),并分配了一塊內(nèi)存來(lái)做進(jìn)程和驅(qū)動(dòng)之間數(shù)據(jù)的交換。由于ProcessState是進(jìn)程唯一的,因此同一個(gè)進(jìn)程中Binder驅(qū)動(dòng)只會(huì)被打開(kāi)一次、映射一次。
我們接著看defaultServiceManager函數(shù),它返回IServiceManager對(duì)象。我們知道,像MediaPlayerService等服務(wù)都是注冊(cè)在它里面的,它的實(shí)現(xiàn)在IServiceManager.cpp中。
// Returns the process-wide IServiceManager proxy, creating it on first use.
sp<IServiceManager> defaultServiceManager()
{
// gDefaultServiceManager caches the ServiceManager proxy globally.
if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
{
AutoMutex _l(gDefaultServiceManagerLock);
while (gDefaultServiceManager == NULL) {
// ProcessState::getContextObject yields the handle-0 proxy (ServiceManager).
gDefaultServiceManager = interface_cast<IServiceManager>(ProcessState::self()->getContextObject(NULL));
// ServiceManager may not have started yet; retry once a second.
if (gDefaultServiceManager == NULL)
sleep(1);
}
}
return gDefaultServiceManager;
}
IServiceManager也是一個(gè)單例,通過(guò)ProcessState的getContextObject來(lái)獲取。
class ProcessState : public virtual RefBase
{
......
private:
......
// One cache slot per Binder handle: the proxy object plus its weak-ref
// bookkeeping.
struct handle_entry {
IBinder* binder;
RefBase::weakref_type* refs;
};
// Looks up (growing the table if needed) the entry for a handle.
handle_entry* lookupHandleLocked(int32_t handle);
// Returns the proxy for the context manager (ServiceManager); the caller
// argument is ignored.
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
// Handle 0 is reserved for ServiceManager.
return getStrongProxyForHandle(0);
}
// Returns (creating and caching if necessary) the BpBinder proxy for a
// driver handle.
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
sp<IBinder> result;
AutoMutex _l(mLock);
// Look up the cached entry for this handle.
handle_entry* e = lookupHandleLocked(handle);
if (e != NULL) {
IBinder* b = e->binder;
// For a freshly created entry b is NULL; also rebuild the proxy when the
// old one can no longer be weak-referenced.
if (b == NULL || !e->refs->attemptIncWeak(this)) {
if (handle == 0) {
Parcel data;
// Ping the context manager first to make sure it is alive.
status_t status = IPCThreadState::self()->transact(
0, IBinder::PING_TRANSACTION, data, NULL, 0);
if (status == DEAD_OBJECT)
return NULL;
}
// Create the BpBinder proxy (handle is 0 for ServiceManager).
b = new BpBinder(handle);
// Cache it in the handle_entry.
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
// Reuse the cached proxy; drop the extra weak ref taken above.
result.force_set(b);
e->refs->decWeak(this);
}
}
// For handle 0 this is effectively new BpBinder(0).
return result;
}
傳入索引為0的handle,返回的是handle_entry結(jié)構(gòu)體,它持有IBinder的地址。使用0索引,表示要獲取ServiceManager在客戶端代理的IBinder。如果IBinder為空,則新建一個(gè)BpBinder對(duì)象。與之對(duì)應(yīng)的還有一個(gè)BBinder類,它們都從IBinder派生而來(lái)。
BpBinder是客戶端用來(lái)與Server交互的代理類,p即Proxy。此時(shí)的MediaServer是相對(duì)ServiceManager的客戶端,BpBinder使用transact向服務(wù)端發(fā)起請(qǐng)求。
BBinder是服務(wù)端,它被子類BnInterface繼承,onTransact()用來(lái)響應(yīng)客戶端的請(qǐng)求。
我們先看BpBinder的構(gòu)建。
// Proxy-side Binder object; 'handle' identifies the remote in the driver.
BpBinder::BpBinder(int32_t handle)
: mHandle(handle)// 0 here, i.e. the ServiceManager handle
, mAlive(1)
, mObitsSent(0)
, mObituaries(NULL)
{
ALOGV("Creating BpBinder %p handle %d\n", this, mHandle);
extendObjectLifetime(OBJECT_LIFETIME_WEAK);
// Tell the driver this thread now holds a weak reference on the handle.
IPCThreadState::self()->incWeakHandle(handle);
}
此時(shí)已經(jīng)獲取到BpBinder對(duì)象,也就是說(shuō),
gDefaultServiceManager = interface_cast<IServiceManager>(ProcessState::self()->getContextObject(NULL));
相當(dāng)于
gDefaultServiceManager = interface_cast<IServiceManager>(new BpBinder(0));
interface_cast將BpBinder轉(zhuǎn)換成IServiceManager對(duì)象,interface_cast實(shí)現(xiàn)在IInterface.h中。
// Converts an IBinder (e.g. BpBinder) into a concrete interface proxy by
// delegating to INTERFACE::asInterface(), which the META_INTERFACE macros
// generate for each interface.
template<typename INTERFACE>
inline sp<INTERFACE> interface_cast(const sp<IBinder>& obj)
{
return INTERFACE::asInterface(obj);
}
這是一個(gè)模板函數(shù),即INTERFACE::asInterface(obj)等價(jià)于IServiceManager::asInterface(obj)。我們先看IServiceManager.h。
// Client-visible interface of ServiceManager.
class IServiceManager : public IInterface
{
public:
// Declares descriptor/asInterface boilerplate (see IInterface.h).
DECLARE_META_INTERFACE(ServiceManager);
// Retrieve a service, blocking briefly if it is not yet registered.
virtual sp<IBinder> getService( const String16& name) const = 0;
// Retrieve a service without blocking.
virtual sp<IBinder> checkService( const String16& name) const = 0;
// Register a service under 'name'.
virtual status_t addService( const String16& name,
const sp<IBinder>& service,
bool allowIsolated = false) = 0;
virtual Vector<String16> listServices() = 0;
// Transaction codes shared between the proxy and service sides.
enum {
GET_SERVICE_TRANSACTION = IBinder::FIRST_CALL_TRANSACTION,
CHECK_SERVICE_TRANSACTION,
ADD_SERVICE_TRANSACTION,
LIST_SERVICES_TRANSACTION,
};
};
DECLARE_META_INTERFACE宏只是做了聲明,它在IServiceManager.cpp中實(shí)現(xiàn):
IMPLEMENT_META_INTERFACE(ServiceManager, "android.os.IServiceManager");
那么DECLARE_META_INTERFACE和IMPLEMENT_META_INTERFACE這兩個(gè)宏出自哪里呢?它們定義在IInterface.h中。
// Declares the boilerplate every IInterface subclass needs: the interface
// descriptor string, the asInterface() factory, and ctor/dtor declarations.
// NOTE: the final line must NOT end with '\', otherwise the macro would
// splice the next source line into itself.
#define DECLARE_META_INTERFACE(INTERFACE)                               \
    static const android::String16 descriptor;                          \
    static android::sp<I##INTERFACE> asInterface(                       \
            const android::sp<android::IBinder>& obj);                  \
    virtual const android::String16& getInterfaceDescriptor() const;    \
    I##INTERFACE();                                                     \
    virtual ~I##INTERFACE();
// Defines, for interface I<INTERFACE>, the descriptor named NAME plus the
// asInterface() factory declared by DECLARE_META_INTERFACE. asInterface()
// first asks the IBinder for a local (in-process) implementation via
// queryLocalInterface(); if there is none, it wraps the binder in a new
// Bp<INTERFACE> remote proxy (e.g. BpServiceManager).
// NOTE: a '//' comment on its own line inside a multi-line macro breaks the
// '\' continuation chain (and '//' would swallow a trailing '\'), so only
// /* ... */ comments may appear inside the macro body.
#define IMPLEMENT_META_INTERFACE(INTERFACE, NAME)                       \
    const android::String16 I##INTERFACE::descriptor(NAME);             \
    const android::String16&                                            \
            I##INTERFACE::getInterfaceDescriptor() const {              \
        return I##INTERFACE::descriptor;                                \
    }                                                                   \
    android::sp<I##INTERFACE> I##INTERFACE::asInterface(                \
            const android::sp<android::IBinder>& obj)                   \
    {                                                                   \
        android::sp<I##INTERFACE> intr;                                 \
        if (obj != NULL) {                                              \
            intr = static_cast<I##INTERFACE*>(                          \
                obj->queryLocalInterface(                               \
                        I##INTERFACE::descriptor).get());               \
            if (intr == NULL) {                                         \
                /* no local side: return a Bp##INTERFACE proxy */       \
                intr = new Bp##INTERFACE(obj);                          \
            }                                                           \
        }                                                               \
        return intr;                                                    \
    }                                                                   \
    I##INTERFACE::I##INTERFACE() { }                                    \
    I##INTERFACE::~I##INTERFACE() { }
根據(jù)宏替換規(guī)則,只需將INTERFACE替換為ServiceManager,NAME替換為"android.os.IServiceManager",由此可知,通過(guò)interface_cast強(qiáng)轉(zhuǎn)后將獲得BpServiceManager對(duì)象。
這里再次出現(xiàn)BpXXX形式的類型,我們通過(guò)UML圖來(lái)展示IServiceManager相關(guān)類之間完整的繼承關(guān)系。
在創(chuàng)建BpServiceManager對(duì)象時(shí)傳入了一個(gè)obj,它就是BpBinder(0),它將保存在父類BpRefBase的mRemote變量中。
// Client-side proxy for IServiceManager; talks to ServiceManager over Binder.
class BpServiceManager : public BpInterface<IServiceManager>
{
public:
// Passes BpBinder(0) up to BpInterface.
BpServiceManager(const sp<IBinder>& impl)
: BpInterface<IServiceManager>(impl)
{
}
// Passes BpBinder(0) up to BpRefBase.
template<typename INTERFACE>
inline BpInterface<INTERFACE>::BpInterface(const sp<IBinder>& remote)
: BpRefBase(remote)
{
}
// Stores BpBinder(0) in mRemote and takes strong/weak references on it.
BpRefBase::BpRefBase(const sp<IBinder>& o)
: mRemote(o.get()), mRefs(NULL), mState(0)
{
extendObjectLifetime(OBJECT_LIFETIME_WEAK);
if (mRemote) {
mRemote->incStrong(this);
mRefs = mRemote->createWeak(this);
}
}
到此,MediaServer的main函數(shù)已將Binder驅(qū)動(dòng)打開(kāi),并且通過(guò)defaultServiceManager函數(shù)獲得了一個(gè)BpServiceManager對(duì)象,它的成員變量mRemote指向BpBinder(0)對(duì)象。
我們回到MediaServer的main函數(shù)中,接著分析MediaPlayerService的注冊(cè)過(guò)程,看內(nèi)部函數(shù)是如何工作的。
// Registers MediaPlayerService with ServiceManager.
void MediaPlayerService::instantiate() {
// Fetch the ServiceManager proxy and add the service under "media.player".
defaultServiceManager()->addService(String16("media.player"), new MediaPlayerService());
}
由上面的分析可知,addService實(shí)現(xiàn)在BpServiceManager中。
// BpServiceManager::addService — marshals the request into a Parcel and
// sends it to ServiceManager through the Binder driver.
virtual status_t addService(const String16& name, const sp<IBinder>& service,
bool allowIsolated)
{
Parcel data, reply;// Parcels carry the RPC payload and the response.
data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());// "android.os.IServiceManager"
data.writeString16(name);// "media.player"
data.writeStrongBinder(service);// The MediaPlayerService object is flattened here.
data.writeInt32(allowIsolated ? 1 : 0);// Defaults to 0.
// Issue the ADD_SERVICE_TRANSACTION request over BpBinder(0).
status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);// Adds the service to ServiceManager's list.
// Return the exception code from the reply, or the transport error.
return err == NO_ERROR ? reply.readExceptionCode() : err;
}
remote()返回mRemote,即BpBinder(0)對(duì)象。
class BpRefBase : public virtual RefBase
{
protected:
......
// Accessors for the remote IBinder (here the BpBinder(0) proxy).
inline IBinder* remote() { return mRemote; }
inline IBinder* remote() const { return mRemote; }
也就是說(shuō),BpServiceManager只是將數(shù)據(jù)序列化成Parcel類型的RPC數(shù)據(jù),與Binder驅(qū)動(dòng)進(jìn)行通訊的是BpBinder,但它還要借助另外一個(gè)類,我們接著看transact函數(shù)。
// Forwards a transaction for this proxy's handle to the Binder driver via
// the calling thread's IPCThreadState. Once the remote side is known to be
// dead, all further calls fail immediately with DEAD_OBJECT.
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    if (!mAlive) {
        return DEAD_OBJECT;
    }
    // The per-thread IPCThreadState performs the actual driver I/O.
    const status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
    if (status == DEAD_OBJECT) {
        // Remember the death so later calls can short-circuit.
        mAlive = 0;
    }
    return status;
}
IPCThreadState在前面也出現(xiàn)過(guò)許多次:ProcessState是進(jìn)程獨(dú)有的,IPCThreadState則是線程獨(dú)有的。前面分析過(guò),ProcessState用來(lái)打開(kāi)Binder驅(qū)動(dòng)、建立線程池;IPCThreadState在構(gòu)造時(shí)就取得了ProcessState的引用,并保存在mProcess中。
// Returns the calling thread's IPCThreadState, creating the TLS key and the
// per-thread instance on first use.
IPCThreadState* IPCThreadState::self()
{
// gHaveTLS is false on the very first call; TLS = Thread Local Storage.
if (gHaveTLS) {
restart:
const pthread_key_t k = gTLS;
// Fetch this thread's cached IPCThreadState, if any.
IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
if (st) return st;
// None yet: the constructor stores itself into the TLS slot.
return new IPCThreadState;
}
if (gShutdown) return NULL;
// Create the process-wide TLS key (exactly once).
pthread_mutex_lock(&gTLSMutex);
if (!gHaveTLS) {
// pthread_key_create hands the key back in gTLS.
if (pthread_key_create(&gTLS, threadDestructor) != 0) {
pthread_mutex_unlock(&gTLSMutex);
return NULL;
}
// Mark the key as created.
gHaveTLS = true;
}
pthread_mutex_unlock(&gTLSMutex);
// Retry now that the TLS key exists.
goto restart;
}
IPCThreadState的self()會(huì)先分配一塊線程本地存儲(chǔ)(TLS),再構(gòu)建IPCThreadState,并將其保存到該存儲(chǔ)中。
// Per-thread Binder state: holds the ProcessState and the in/out Parcels.
IPCThreadState::IPCThreadState()
: mProcess(ProcessState::self()),// Grab the process-wide ProcessState.
mMyThreadId(androidGetTid()),
mStrictModePolicy(0),
mLastTransactionBinderFlags(0)
{
// Cache this instance in the thread's TLS slot.
pthread_setspecific(gTLS, this);
clearCaller();
// Parcel buffer for commands received from the Binder driver.
mIn.setDataCapacity(256);
// Parcel buffer for commands to be sent to the Binder driver.
mOut.setDataCapacity(256);
}
IPCThreadState構(gòu)造函數(shù)保存了ProcessState引用,將自己緩存到線程本地存儲(chǔ)中,并初始化了mIn和mOut的大小:前者用來(lái)接收Binder驅(qū)動(dòng)傳過(guò)來(lái)的數(shù)據(jù),后者則往Binder驅(qū)動(dòng)發(fā)數(shù)據(jù)。
// Queues a transaction for the driver and waits for the reply.
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
......
if (err == NO_ERROR) {
LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
(flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
// Stage the data into mOut: handle is 0, code is ADD_SERVICE_TRANSACTION,
// and data carries the flattened MediaPlayerService.
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
}
......
if (reply) {
// Block until the response arrives.
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
......
return err;
}
writeTransactionData的第一個(gè)參數(shù)BC_TRANSACTION是Binder定義的協(xié)議命令,以BC_開(kāi)頭表示IPC層向Binder驅(qū)動(dòng)發(fā)送數(shù)據(jù),BR_開(kāi)頭則相反。
// Packs a binder_transaction_data and appends cmd + payload to mOut; the
// actual driver I/O happens later in talkWithDriver().
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
// The structure the Binder driver understands.
binder_transaction_data tr;
tr.target.ptr = 0;
tr.target.handle = handle; // 0 here: the ServiceManager handle.
tr.code = code; // ADD_SERVICE_TRANSACTION
tr.flags = binderFlags;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
const status_t err = data.errorCheck();
if (err == NO_ERROR) {
// Point the driver at the Parcel's payload and object offsets.
tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
tr.data.ptr.offsets = data.ipcObjects();
} else if (statusBuffer) {
// Marshalling failed: send the error status instead of a payload.
tr.flags |= TF_STATUS_CODE;
*statusBuffer = err;
tr.data_size = sizeof(status_t);
tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
tr.offsets_size = 0;
tr.data.ptr.offsets = 0;
} else {
return (mLastError = err);
}
// Queue the BC_TRANSACTION command word and its payload into mOut.
mOut.writeInt32(cmd);
mOut.write(&tr, sizeof(tr));
return NO_ERROR;
}
此函數(shù)只是將數(shù)據(jù)寫入線程IPCThreadState的mOut緩沖區(qū)中,并沒(méi)有傳給Binder驅(qū)動(dòng)。我們接著看waitForResponse函數(shù)。
// Pumps talkWithDriver() until the reply (or an error) for the pending
// transaction arrives, dispatching intermediate commands as they come in.
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
int32_t cmd;
int32_t err;
while (1) {
// Exchange data with the driver (send mOut, fill mIn).
if ((err=talkWithDriver()) < NO_ERROR) break;
err = mIn.errorCheck();
if (err < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;
// Read the next response command word and dispatch on it.
cmd = mIn.readInt32();
switch (cmd) {
// The driver has accepted/consumed our transaction.
case BR_TRANSACTION_COMPLETE:
if (!reply && !acquireResult) goto finish;
break;
......
// The reply payload is available in mIn.
case BR_REPLY:
{
binder_transaction_data tr;
err = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
if (err != NO_ERROR) goto finish;
if (reply) {
if ((tr.flags & TF_STATUS_CODE) == 0) {
// Hand the driver buffer to the caller's Parcel (freed via freeBuffer).
reply->ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t),
freeBuffer, this);
} else {
// Status-only reply: extract it, then release the buffer.
err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
freeBuffer(NULL,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
}
} else {
// Caller does not want the reply; just release the buffer.
freeBuffer(NULL,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
continue;
}
}
goto finish;
default:
// Anything else (BR_TRANSACTION, BR_SPAWN_LOOPER, ...) goes here.
err = executeCommand(cmd);
if (err != NO_ERROR) goto finish;
break;
}
}
finish:
if (err != NO_ERROR) {
if (acquireResult) *acquireResult = err;
if (reply) reply->setError(err);
mLastError = err;
}
return err;
}
接下來(lái)看talkWithDriver,看名字就知道這是與驅(qū)動(dòng)交互的關(guān)鍵函數(shù)。
// Performs the actual ioctl(BINDER_WRITE_READ) exchange with the Binder
// driver: sends whatever is queued in mOut and, when doReceive allows it,
// reads the driver's reply into mIn.
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    // Bail out if the Binder driver was never opened (or already closed).
    if (mProcess->mDriverFD <= 0) {
        return -EBADF;
    }
    // Carries both the outgoing (write) and incoming (read) buffers.
    // BUG FIX: the transcription ended this declaration with ':' instead
    // of ';', which does not compile.
    binder_write_read bwr;
    // Only read when all previously received data has been consumed.
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();
    // Size of the outgoing payload (suppressed while unread data remains).
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
    bwr.write_size = outAvail;
    // Point the driver at the mOut buffer.
    bwr.write_buffer = (uintptr_t)mOut.data();
    if (doReceive && needRead) {
        // Let the driver fill mIn up to its capacity.
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }
    // Nothing to send and nothing to receive: done.
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    // Retry the ioctl for as long as it is interrupted by signals (EINTR).
    do {
#if defined(HAVE_ANDROID_OS)
        // One syscall both writes mOut to the driver and reads replies into mIn.
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
#else
        err = INVALID_OPERATION;
#endif
        if (mProcess->mDriverFD <= 0) {
            err = -EBADF;
        }
        IF_LOG_COMMANDS() {
            alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
        }
    } while (err == -EINTR);
    .....
    return err;
}
talkWithDriver才是真正向Binder驅(qū)動(dòng)寫數(shù)據(jù),同時(shí)將響應(yīng)數(shù)據(jù)讀到mIn緩沖區(qū)中的函數(shù)。那么讀取到數(shù)據(jù)后將如何處理?看executeCommand。
// Dispatches a single BR_* command received from the Binder driver.
// Returns NO_ERROR on success, an error status from the command, or
// UNKNOWN_ERROR for unrecognized commands.
status_t IPCThreadState::executeCommand(int32_t cmd)
{
BBinder* obj;
RefBase::weakref_type* refs;
status_t result = NO_ERROR;
switch (cmd) {
case BR_ERROR:
result = mIn.readInt32();
break;
......
// An incoming transaction targeting a local (Bn-side) BBinder.
case BR_TRANSACTION:
{
binder_transaction_data tr;
// Read the transaction description.
result = mIn.read(&tr, sizeof(tr));
if (result != NO_ERROR) break;
// Wrap the driver-owned buffer in a Parcel (released via freeBuffer).
Parcel buffer;
buffer.ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);
// Save caller identity/flags so they can be restored after the call.
const pid_t origPid = mCallingPid;
const uid_t origUid = mCallingUid;
const int32_t origStrictModePolicy = mStrictModePolicy;
const int32_t origTransactionBinderFlags = mLastTransactionBinderFlags;
mCallingPid = tr.sender_pid;
mCallingUid = tr.sender_euid;
mLastTransactionBinderFlags = tr.flags;
int curPrio = getpriority(PRIO_PROCESS, mMyThreadId);
if (gDisableBackgroundScheduling) {
// Clamp to the default (ANDROID_PRIORITY_NORMAL) priority.
if (curPrio > ANDROID_PRIORITY_NORMAL) {
setpriority(PRIO_PROCESS, mMyThreadId, ANDROID_PRIORITY_NORMAL);
}
} else {
// Background callers get the background scheduling group.
if (curPrio >= ANDROID_PRIORITY_BACKGROUND) {
set_sched_policy(mMyThreadId, SP_BACKGROUND);
}
}
// Dispatch to the target Binder object.
Parcel reply;
status_t error;
if (tr.target.ptr) {
// The cookie is the local BBinder (the Bn-side service object).
sp<BBinder> b((BBinder*)tr.cookie);
error = b->transact(tr.code, buffer, &reply, tr.flags);
} else {
error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
}
if ((tr.flags & TF_ONE_WAY) == 0) {
LOG_ONEWAY("Sending reply to %d!", mCallingPid);
if (error < NO_ERROR) reply.setError(error);
sendReply(reply, 0);
} else {
LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
}
// Restore the saved caller identity.
mCallingPid = origPid;
mCallingUid = origUid;
mStrictModePolicy = origStrictModePolicy;
mLastTransactionBinderFlags = origTransactionBinderFlags;
}
break;
// The driver reports that a remote Binder (service) has died.
case BR_DEAD_BINDER:
{
BpBinder *proxy = (BpBinder*)mIn.readPointer();
proxy->sendObituary();
mOut.writeInt32(BC_DEAD_BINDER_DONE);
mOut.writePointer((uintptr_t)proxy);
} break;
......
// The driver asks us to spawn another Binder pool thread.
case BR_SPAWN_LOOPER:
mProcess->spawnPooledThread(false);
break;
default:
printf("*** BAD COMMAND %d received from Binder driver\n", cmd);
result = UNKNOWN_ERROR;
break;
}
// BUG FIX: the transcription dropped this return; a status_t function
// falling off the end is undefined behavior. Matches the AOSP source.
return result;
}
到此,MediaPlayerService就被添加到ServiceManager中。再回到MediaServer的main函數(shù),在最后啟動(dòng)線程循環(huán)talkWithDriver,等待客戶端的請(qǐng)求并處理Binder回傳的數(shù)據(jù)。
// Spawns the first (main) pooled Binder thread. Safe to call repeatedly:
// only the first call has any effect.
void ProcessState::startThreadPool()
{
    AutoMutex _l(mLock);
    if (mThreadPoolStarted) {
        return;
    }
    mThreadPoolStarted = true;
    // 'true' marks the spawned thread as the pool's main thread.
    spawnPooledThread(true);
}
void ProcessState::spawnPooledThread(bool isMain)
{
if (mThreadPoolStarted) {
String8 name = makeBinderThreadName();
//創(chuàng)建線程
sp<Thread> t = new PoolThread(isMain);
//啟動(dòng)
t->run(name.string());
}
}
PoolThread實(shí)際只是一個(gè)Thread敬鬓,定義在ProcessState中淹朋。
// Minimal worker thread: enters the Binder loop once and never iterates.
class PoolThread : public Thread
{
public:
PoolThread(bool isMain)
: mIsMain(isMain)
{
}
protected:
virtual bool threadLoop()
{
// Blocks inside the Binder read/dispatch loop.
IPCThreadState::self()->joinThreadPool(mIsMain);
// Returning false stops Thread from calling threadLoop() again.
return false;
}
const bool mIsMain;
};
run方法被執(zhí)行后,會(huì)進(jìn)入while循環(huán),回調(diào)threadLoop函數(shù),返回false即跳出循環(huán)。threadLoop中調(diào)用了IPCThreadState的joinThreadPool。
// Fetches the next command from the Binder driver and dispatches it.
status_t IPCThreadState::getAndExecuteCommand()
{
    // Blocks in the driver until a command (or an error) arrives.
    status_t result = talkWithDriver();
    if (result < NO_ERROR) {
        return result;
    }
    const size_t avail = mIn.dataAvail();
    if (avail < sizeof(int32_t)) {
        // Not even a full command word available; nothing to dispatch.
        return result;
    }
    const int32_t cmd = mIn.readInt32();
    // Handle the command (BR_TRANSACTION, BR_SPAWN_LOOPER, ...).
    result = executeCommand(cmd);
    // Restore foreground scheduling after servicing the command.
    set_sched_policy(mMyThreadId, SP_FOREGROUND);
    return result;
}
void IPCThreadState::processPendingDerefs()
{
if (mIn.dataPosition() >= mIn.dataSize()) {
size_t numPending = mPendingWeakDerefs.size();
if (numPending > 0) {
for (size_t i = 0; i < numPending; i++) {
RefBase::weakref_type* refs = mPendingWeakDerefs[i];
refs->decWeak(mProcess.get());
}
mPendingWeakDerefs.clear();
}
//處理已經(jīng)死亡的BBinder對(duì)象
numPending = mPendingStrongDerefs.size();
if (numPending > 0) {
for (size_t i = 0; i < numPending; i++) {
BBinder* obj = mPendingStrongDerefs[i];
obj->decStrong(mProcess.get());
}
mPendingStrongDerefs.clear();
}
}
}
// Turns the calling thread into a Binder pool thread: announce ourselves to
// the driver, then loop fetching and executing commands. A main thread
// never retires on timeout; secondary threads may exit on TIMED_OUT.
void IPCThreadState::joinThreadPool(bool isMain)
{
    // Tell the driver whether this is the main looper or an extra one.
    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
    set_sched_policy(mMyThreadId, SP_FOREGROUND);
    status_t result;
    do {
        // Flush deferred refcount drops before blocking again.
        processPendingDerefs();
        // Block for the next driver command and dispatch it.
        result = getAndExecuteCommand();
        const bool fatal = result < NO_ERROR
                && result != TIMED_OUT
                && result != -ECONNREFUSED
                && result != -EBADF;
        if (fatal) {
            // An unexpected driver error is unrecoverable.
            abort();
        }
        // Non-main threads are allowed to retire when idle.
        if (result == TIMED_OUT && !isMain) {
            break;
        }
    } while (result != -ECONNREFUSED && result != -EBADF);
    // Leaving the pool: notify the driver, then flush without reading.
    mOut.writeInt32(BC_EXIT_LOOPER);
    talkWithDriver(false);
}
MediaServer的子進(jìn)程同時(shí)運(yùn)行了5個(gè)Service,但只啟動(dòng)了2個(gè)線程來(lái)讀寫B(tài)inder驅(qū)動(dòng):一個(gè)是通過(guò)startThreadPool新建的線程,另一個(gè)是在主線程中直接執(zhí)行joinThreadPool進(jìn)行讀寫。
那么诊杆,BpServiceManager通過(guò)Binder驅(qū)動(dòng)歼捐,申請(qǐng)將MediaPlayerService注冊(cè)在ServiceManager中的消息,將由誰(shuí)來(lái)處理呢刽辙?源碼中并沒(méi)有BnServiceManager這個(gè)類窥岩,它是由service_manager.c來(lái)實(shí)現(xiàn)的。
init進(jìn)程是系統(tǒng)啟動(dòng)的第一個(gè)用戶級(jí)進(jìn)程。init進(jìn)程啟動(dòng)后就會(huì)啟動(dòng)ServiceManager和MediaServer,以下為init.rc腳本片段。
//啟動(dòng)ServiceManager
service servicemanager /system/bin/servicemanager
user system
critical
//啟動(dòng)zygote
onrestart restart zygote
//啟動(dòng)MediaServer
onrestart restart media
也就是說(shuō),init進(jìn)程會(huì)啟動(dòng)servicemanager,進(jìn)而執(zhí)行ServiceManager的main函數(shù)。
// Entry point of the servicemanager process (service_manager.c).
int main(int argc, char **argv)
{
// Holds the driver fd, mapping address and mapping size.
struct binder_state *bs;
// Open the Binder driver, mapping 128KB.
bs = binder_open(128*1024);
if (!bs) {
ALOGE("failed to open binder driver\n");
return -1;
}
// Declare this process as the Binder context manager (ServiceManager).
if (binder_become_context_manager(bs)) {
ALOGE("cannot become context manager (%s)\n", strerror(errno));
return -1;
}
// Check whether SELinux is enabled and set up its handle/context.
selinux_enabled = is_selinux_enabled();
sehandle = selinux_android_service_context_handle();
if (selinux_enabled > 0) {
if (sehandle == NULL) {
ALOGE("SELinux: Failed to acquire sehandle. Aborting.\n");
abort();
}
if (getcon(&service_manager_context) != 0) {
ALOGE("SELinux: Failed to acquire service_manager context. Aborting.\n");
abort();
}
}
union selinux_callback cb;
cb.func_audit = audit_callback;
selinux_set_callback(SELINUX_CB_AUDIT, cb);
cb.func_log = selinux_log_callback;
selinux_set_callback(SELINUX_CB_LOG, cb);
// svcmgr_handle (a variable) is set to BINDER_SERVICE_MANAGER, i.e. 0.
svcmgr_handle = BINDER_SERVICE_MANAGER;
// svcmgr_handler (note the trailing 'r': a function pointer) processes
// client requests inside the endless binder_loop.
binder_loop(bs, svcmgr_handler);
return 0;
}
Binder 驅(qū)動(dòng)的打開(kāi)函數(shù)實(shí)現(xiàn)在binder.c中。
// Opens /dev/binder, checks the protocol version, and mmaps 'mapsize' bytes
// of driver memory. Returns the state struct, or NULL on failure.
struct binder_state *binder_open(size_t mapsize)
{
// State to return: fd, mapping address, mapping size.
struct binder_state *bs;
struct binder_version vers;
// Allocate the state struct.
bs = malloc(sizeof(*bs));
if (!bs) {
errno = ENOMEM;
return NULL;
}
// Open the driver node and keep the fd in bs.
bs->fd = open("/dev/binder", O_RDWR);
if (bs->fd < 0) {
fprintf(stderr,"binder: cannot open device (%s)\n",
strerror(errno));
goto fail_open;
}
// Verify the driver's protocol version matches user space.
if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
(vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
fprintf(stderr, "binder: driver version differs from user space\n");
goto fail_open;
}
// mapsize is 128*1024, as passed in by main().
bs->mapsize = mapsize;
// Map the driver memory read-only into this process.
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
if (bs->mapped == MAP_FAILED) {
fprintf(stderr,"binder: cannot map device (%s)\n",
strerror(errno));
goto fail_map;
}
// Success: hand the state back to the caller.
return bs;
// Error cleanup paths.
fail_map:
close(bs->fd);
fail_open:
free(bs);
return NULL;
}
打開(kāi)驅(qū)動(dòng)的流程和ProcessState中的一樣,這里不再贅述。接著看binder_become_context_manager是如何讓當(dāng)前進(jìn)程成為獨(dú)一無(wú)二的manager的。
// Registers the current process as the Binder context manager; from then on
// handle 0 refers to this process.
int binder_become_context_manager(struct binder_state *bs)
{
// The 0 argument is the context-manager identifier/handle.
return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
BINDER_SET_CONTEXT_MGR指令告知Binder驅(qū)動(dòng):當(dāng)前進(jìn)程為ServiceManager,并且通過(guò)傳遞0來(lái)做標(biāo)識(shí)。由于其他Binder實(shí)體在Binder驅(qū)動(dòng)中對(duì)應(yīng)的handle都是大于0的,以此保證ServiceManager唯一;而其它用戶進(jìn)程只需使用handle為0的索引,便可以通過(guò)Binder驅(qū)動(dòng)訪問(wèn)ServiceManager。
// ServiceManager's main loop: announce this thread as a looper, then block
// in ioctl reading driver commands and dispatch each batch through 'func'.
void binder_loop(struct binder_state *bs, binder_handler func)
{
int res;
struct binder_write_read bwr;
uint32_t readbuf[32];
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
// Announce this thread to the driver with BC_ENTER_LOOPER.
readbuf[0] = BC_ENTER_LOOPER;
binder_write(bs, readbuf, sizeof(uint32_t));
// Read commands from the driver forever.
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
// Block until the driver delivers commands into readbuf.
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
if (res < 0) {
ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
break;
}
// Parse the batch; client requests end up in 'func' (svcmgr_handler).
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
if (res == 0) {
ALOGE("binder_loop: unexpected reply?!\n");
break;
}
if (res < 0) {
ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
break;
}
}
}
循環(huán)讀取發(fā)送過(guò)來(lái)的請(qǐng)求,并通過(guò)func來(lái)處理,這個(gè)函數(shù)指針就是上面?zhèn)鬟f進(jìn)來(lái)的svcmgr_handler。
// Handles one transaction addressed to ServiceManager: service lookup,
// registration and listing. Returns 0 on success, -1 on failure.
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)
{
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t strict_policy;
int allow_isolated;
// Only accept transactions targeting BINDER_SERVICE_MANAGER, i.e. 0.
if (txn->target.handle != svcmgr_handle)
return -1;
if (txn->code == PING_TRANSACTION)
return 0;
// Payload starts with the strict-mode policy and the interface token.
strict_policy = bio_get_uint32(msg);
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
// The token must be "android.os.IServiceManager".
if ((len != (sizeof(svcmgr_id) / 2)) ||
memcmp(svcmgr_id, s, sizeof(svcmgr_id))) {
fprintf(stderr,"invalid id %s\n", str8(s, len));
return -1;
}
// Refresh the SELinux label handle if policy has been reloaded.
if (sehandle && selinux_status_updated() > 0) {
struct selabel_handle *tmp_sehandle = selinux_android_service_context_handle();
if (tmp_sehandle) {
selabel_close(sehandle);
sehandle = tmp_sehandle;
}
}
switch(txn->code) {
// Look up a service by the name carried in msg.
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
// s is the requested service name.
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
if (!handle)
break;
bio_put_ref(reply, handle);
return 0;
// Corresponds to an addService() request.
case SVC_MGR_ADD_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
// The flattened binder arrives as a driver handle.
handle = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
// Record the service in svclist.
if (do_add_service(bs, s, len, handle, txn->sender_euid,
allow_isolated, txn->sender_pid))
return -1;
break;
// Walk the registered services, returning the n-th name.
case SVC_MGR_LIST_SERVICES: {
uint32_t n = bio_get_uint32(msg);
if (!svc_can_list(txn->sender_pid)) {
ALOGE("list_service() uid=%d - PERMISSION DENIED\n",
txn->sender_euid);
return -1;
}
si = svclist;
while ((n-- > 0) && si)
si = si->next;
if (si) {
bio_put_string16(reply, si->name);
return 0;
}
return -1;
}
default:
ALOGE("unknown code %d\n", txn->code);
return -1;
}
bio_put_uint32(reply, 0);
return 0;
}
也就是說(shuō),defaultServiceManager()->addService(String16("media.player"), new MediaPlayerService());的調(diào)用,最終會(huì)進(jìn)入到SVC_MGR_ADD_SERVICE這個(gè)case,接著分析do_add_service看具體注冊(cè)流程。
// Registers (or re-registers) the service named s/len under 'handle'.
// Returns 0 on success, -1 on rejection or allocation failure.
int do_add_service(struct binder_state *bs,
const uint16_t *s, size_t len,
uint32_t handle, uid_t uid, int allow_isolated,
pid_t spid)
{
// Node in the singly linked service list.
struct svcinfo *si;
if (!handle || (len == 0) || (len > 127))
return -1;
// Check the caller is allowed to register this name.
if (!svc_can_register(s, len, spid)) {
return -1;
}
// Service already registered: replace its handle.
si = find_svc(s, len);
if (si) {
if (si->handle) {
svcinfo_death(bs, si);
}
si->handle = handle;
} else {
// Not registered yet: allocate a svcinfo node for it.
si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
if (!si) {
return -1;
}
si->handle = handle;
// Copy the service name.
si->len = len;
memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
si->name[len] = '\0';
// Callback invoked when the service dies.
si->death.func = (void*) svcinfo_death;
si->death.ptr = si;
si->allow_isolated = allow_isolated;
// Insert at the head of svclist.
si->next = svclist;
svclist = si;
}
binder_acquire(bs, handle);
// Ask the driver to notify us when this service dies, for cleanup.
binder_link_to_death(bs, handle, &si->death);
return 0;
}
ServiceManager中維護(hù)了一條單鏈表,每個(gè)服務(wù)都被封裝成一個(gè)svcinfo節(jié)點(diǎn),并使用頭插法添加到現(xiàn)有的鏈表中。
到此烫葬,MediaServer怎樣通過(guò)Binder驅(qū)動(dòng),將MediaPlayerService注冊(cè)到ServiceManager中的整個(gè)流程就講解完畢。
現(xiàn)在ServiceManager中已經(jīng)注冊(cè)了我們需要的MediaPlayerService服務(wù)。而MediaPlayer是怎樣從ServiceManager中獲取MediaPlayerService并開(kāi)啟播放的?將留到下一篇分析。