Binder is Android's cross-process communication (IPC) mechanism. Its main advantages:
1. Efficiency
A Binder transaction copies data only once, while pipes, message queues, and sockets all require two copies.
The copy is performed by the driver in kernel space, with no extra synchronization required.
2. Security
Binder assigns each process a UID/PID as its identity, and validates that UID/PID on every Binder transaction.
3. Ease of use
It adopts a Client/Server architecture.
Calls are object-oriented: using a Binder feels just like invoking a method on a local object instance.
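For a feel of that last point, here is a minimal, hedged sketch of the client side; IRemoteService and its greet method are hypothetical AIDL-generated names for illustration, not framework classes:
// IRemoteService is a hypothetical AIDL interface; aidl generates its Stub and Proxy.
// Given an IBinder for the remote service (e.g. delivered to onServiceConnected):
IRemoteService service = IRemoteService.Stub.asInterface(binder);
String reply = service.greet("binder"); // reads like a local call, but crosses processes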
Let's trace Binder's implementation through the code. The earlier analysis of Handler mentioned ActivityThread's main method; one important step in main is attaching the application process to AMS. Part of the attach code:
RuntimeInit.setApplicationObject(mAppThread.asBinder());
final IActivityManager mgr = ActivityManagerNative.getDefault();
try {
mgr.attachApplication(mAppThread);
} catch (RemoteException ex) {
throw ex.rethrowFromSystemServer();
}
final ApplicationThread mAppThread = new ApplicationThread();
private class ApplicationThread extends ApplicationThreadNative {}
public abstract class ApplicationThreadNative extends Binder
implements IApplicationThread {}
IApplicationThread defines the capabilities of the Application. Partial code:
void bindApplication(String packageName, ApplicationInfo info, List<ProviderInfo> providers,
ComponentName testName, ProfilerInfo profilerInfo, Bundle testArguments,
IInstrumentationWatcher testWatcher, IUiAutomationConnection uiAutomationConnection,
int debugMode, boolean enableBinderTracking, boolean trackAllocation,
boolean restrictedBackupMode, boolean persistent, Configuration config,
CompatibilityInfo compatInfo, Map<String, IBinder> services, Bundle coreSettings)
throws RemoteException;
void scheduleLaunchActivity(Intent intent, IBinder token, int ident,
ActivityInfo info, Configuration curConfig, Configuration overrideConfig,
CompatibilityInfo compatInfo, String referrer, IVoiceInteractor voiceInteractor,
int procState, Bundle state, PersistableBundle persistentState,
List<ResultInfo> pendingResults, List<ReferrerIntent> pendingNewIntents,
boolean notResumed, boolean isForward, ProfilerInfo profilerInfo) throws RemoteException;
When the application process and the AMS process interact over Binder, AMS invokes methods on ApplicationThread through Binder; in that direction ApplicationThread acts as the Server and AMS as the Client. Conversely, when the application process calls AMS's methods through Binder, the application process is the Client and AMS is the Server.
ActivityManagerNative.getDefault() returns a singleton proxy for ActivityManager.
IBinder b = ServiceManager.getService("activity");//the application process first talks to the ServiceManager server here, obtaining a proxy for ActivityManager's Binder
IActivityManager am = asInterface(b);
return am;
Let's first look at IBinder b = ServiceManager.getService("activity");
This is essentially equivalent to:
ServiceManagerNative.asInterface(BinderInternal.getContextObject()).getService("activity");
BinderInternal.java
public static final native IBinder getContextObject();//a native method
android_util_Binder.cpp
static jobject android_os_BinderInternal_getContextObject(JNIEnv* env, jobject clazz){
sp<IBinder> b = ProcessState::self()->getContextObject(NULL);
return javaObjectForIBinder(env, b);
}
ProcessState.cpp
sp<ProcessState> ProcessState::self(){
Mutex::Autolock _l(gProcessMutex);
if (gProcess != NULL) {
return gProcess;
}
gProcess = new ProcessState;
return gProcess;
}//singleton pattern
ProcessState::ProcessState()
: mDriverFD(open_driver())//calls the open_driver() function
, mVMStart(MAP_FAILED)
, mThreadCountLock(PTHREAD_MUTEX_INITIALIZER)
, mThreadCountDecrement(PTHREAD_COND_INITIALIZER)
, mExecutingThreadsCount(0)
, mMaxThreads(DEFAULT_MAX_BINDER_THREADS)
, mStarvationStartTimeMs(0)
, mManagesContexts(false)
, mBinderContextCheckFunc(NULL)
, mBinderContextUserData(NULL)
, mThreadPoolStarted(false)
, mThreadPoolSeq(1)
{
if (mDriverFD >= 0) {
// mmap the binder, providing a chunk of virtual address space to receive transactions.
mVMStart = mmap(0, BINDER_VM_SIZE, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, mDriverFD, 0);
if (mVMStart == MAP_FAILED) {
// *sigh*
ALOGE("Using /dev/binder failed: unable to mmap transaction memory.\n");
close(mDriverFD);
mDriverFD = -1;
}
}//mmap maps the binder device node into this process's address space
LOG_ALWAYS_FATAL_IF(mDriverFD < 0, "Binder driver could not be opened. Terminating.");
}
static int open_driver()
{
int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);//打開(kāi)binder娘纷,類似于打開(kāi)文件
if (fd >= 0) {
int vers = 0;
status_t result = ioctl(fd, BINDER_VERSION, &vers);
if (result == -1) {
ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
close(fd);
fd = -1;
}
if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
ALOGE("Binder driver protocol does not match user space protocol!");
close(fd);
fd = -1;
}
size_t maxThreads = DEFAULT_MAX_BINDER_THREADS;//set the maximum number of binder threads: 15
result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
if (result == -1) {
ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
}
} else {
ALOGW("Opening '/dev/binder' failed: %s\n", strerror(errno));
}
return fd;
}
Interaction with the binder driver device is done through the ioctl function. For now, all we need to know is that ioctl can send a command to the binder driver, along with arguments.
Back to sp<IBinder> b = ProcessState::self()->getContextObject(NULL);
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)//NULL is passed in here
{
return getStrongProxyForHandle(0);
}
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)//0 is passed in here
{
sp<IBinder> result;
AutoMutex _l(mLock);
handle_entry* e = lookupHandleLocked(handle);//look up the handle_entry in mHandleToObject
if (e != NULL) {
IBinder* b = e->binder;
if (b == NULL || !e->refs->attemptIncWeak(this)) {
if (handle == 0) {
//if handle is 0, first ping to make sure the remote end is alive
Parcel data;
status_t status = IPCThreadState::self()->transact(
0, IBinder::PING_TRANSACTION, data, NULL, 0);
if (status == DEAD_OBJECT)
return NULL;
}
b = new BpBinder(handle);//construct a BpBinder here, passing it the handle
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
// This little bit of nastyness is to allow us to add a primary
// reference to the remote proxy when this team doesn't have one
// but another team is sending the handle to us.
result.force_set(b);
e->refs->decWeak(this);
}
}
return result;
}
Vector<handle_entry> mHandleToObject;
ProcessState::handle_entry* ProcessState::lookupHandleLocked(int32_t handle)
{
const size_t N=mHandleToObject.size();
if (N <= (size_t)handle) {
handle_entry e;
e.binder = NULL;
e.refs = NULL;
status_t err = mHandleToObject.insertAt(e, N, handle+1-N);
if (err < NO_ERROR) return NULL;
}
return &mHandleToObject.editItemAt(handle);
}
So sp<IBinder> b = ProcessState::self()->getContextObject(NULL);
is equivalent to sp<IBinder> b = new BpBinder(0);
BpBinder.cpp
BpBinder::BpBinder(int32_t handle)
: mHandle(handle)//the member mHandle stores the handle
, mAlive(1)
, mObitsSent(0)
, mObituaries(NULL)
{
ALOGV("Creating BpBinder %p handle %d\n", this, mHandle);
extendObjectLifetime(OBJECT_LIFETIME_WEAK);
IPCThreadState::self()->incWeakHandle(handle);//IPCThreadState comes in here
}
IPCThreadState::self() likewise hands back a single instance, one per thread, kept in thread-local storage (note pthread_setspecific(gTLS, this) below). Let's go straight to its constructor.
IPCThreadState.cpp
IPCThreadState::IPCThreadState()
: mProcess(ProcessState::self()),
mMyThreadId(gettid()),
mStrictModePolicy(0),
mLastTransactionBinderFlags(0)
{
pthread_setspecific(gTLS, this);
clearCaller();
mIn.setDataCapacity(256);
mOut.setDataCapacity(256);
}
Parcel mIn;
Parcel mOut;
void IPCThreadState::incWeakHandle(int32_t handle)//0 is passed in
{
LOG_REMOTEREFS("IPCThreadState::incWeakHandle(%d)\n", handle);
mOut.writeInt32(BC_INCREFS);
mOut.writeInt32(handle);
}
A Parcel can be understood as a data packet.
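As a quick illustration, here is a minimal sketch of the Java-side Parcel API (Parcel is backed by native code, so this only runs on Android):
import android.os.Parcel;

// Minimal sketch: write values into a Parcel, rewind, and read them back.
Parcel p = Parcel.obtain();   // take a Parcel from the pool
p.writeInt(42);
p.writeString("activity");
p.setDataPosition(0);         // rewind to the beginning before reading
int number = p.readInt();     // 42
String name = p.readString(); // "activity"
p.recycle();                  // return the Parcel to the pool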
Let's return to android_util_Binder.cpp.
static jobject android_os_BinderInternal_getContextObject(JNIEnv* env, jobject clazz){
sp<IBinder> b = ProcessState::self()->getContextObject(NULL);//b is a BpBinder with mHandle 0
return javaObjectForIBinder(env, b);
}
jobject javaObjectForIBinder(JNIEnv* env, const sp<IBinder>& val)
{
if (val == NULL) return NULL;
AutoMutex _l(mProxyLock);
//some code omitted
jobject object = (jobject)val->findObject(&gBinderProxyOffsets);//not found on the first call
if (object != NULL) {
jobject res = jniGetReferent(env, object);
if (res != NULL) {
ALOGV("objectForBinder %p: found existing %p!\n", val.get(), res);
return res;
}
LOGDEATH("Proxy object %p of IBinder %p no longer in working set!!!", object, val.get());
android_atomic_dec(&gNumProxyRefs);
val->detachObject(&gBinderProxyOffsets);
env->DeleteGlobalRef(object);
}
object = env->NewObject(gBinderProxyOffsets.mClass, gBinderProxyOffsets.mConstructor);
//construct a new BinderProxy here, a Java object
if (object != NULL) {
LOGDEATH("objectForBinder %p: created new proxy %p !\n", val.get(), object);
// The proxy holds a reference to the native object.
env->SetLongField(object, gBinderProxyOffsets.mObject, (jlong)val.get());
//store the BpBinder pointer in the BinderProxy as a Java long
val->incStrong((void*)javaObjectForIBinder);
// The native object needs to hold a weak reference back to the
// proxy, so we can retrieve the same proxy if it is still active.
jobject refObject = env->NewGlobalRef(
env->GetObjectField(object, gBinderProxyOffsets.mSelf));
// BinderProxy() {
// mSelf = new WeakReference(this);
// }
val->attachObject(&gBinderProxyOffsets, refObject,
jnienv_to_javavm(env), proxy_cleanup);
// Also remember the death recipients registered on this proxy
sp<DeathRecipientList> drl = new DeathRecipientList;
drl->incStrong((void*)javaObjectForIBinder);
env->SetLongField(object, gBinderProxyOffsets.mOrgue, reinterpret_cast<jlong>(drl.get()));
// Note that a new object reference has been created.
android_atomic_inc(&gNumProxyRefs);
incRefsCreated(env);
}
return object;
}
const char* const kBinderProxyPathName = "android/os/BinderProxy";
static int int_register_android_os_BinderProxy(JNIEnv* env)
{
jclass clazz = FindClassOrDie(env, "java/lang/Error");
gErrorOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
clazz = FindClassOrDie(env, kBinderProxyPathName);
gBinderProxyOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
gBinderProxyOffsets.mConstructor = GetMethodIDOrDie(env, clazz, "<init>", "()V");
gBinderProxyOffsets.mSendDeathNotice = GetStaticMethodIDOrDie(env, clazz, "sendDeathNotice",
"(Landroid/os/IBinder$DeathRecipient;)V");
gBinderProxyOffsets.mObject = GetFieldIDOrDie(env, clazz, "mObject", "J");
gBinderProxyOffsets.mSelf = GetFieldIDOrDie(env, clazz, "mSelf",
"Ljava/lang/ref/WeakReference;");
gBinderProxyOffsets.mOrgue = GetFieldIDOrDie(env, clazz, "mOrgue", "J");
clazz = FindClassOrDie(env, "java/lang/Class");
gClassOffsets.mGetName = GetMethodIDOrDie(env, clazz, "getName", "()Ljava/lang/String;");
return RegisterMethodsOrDie(
env, kBinderProxyPathName,
gBinderProxyMethods, NELEM(gBinderProxyMethods));
}
Let's return to
ServiceManagerNative.asInterface(BinderInternal.getContextObject()).getService("activity");
BinderInternal.getContextObject() returns a BinderProxy object.
static public IServiceManager asInterface(IBinder obj)//a BinderProxy is passed in
{
if (obj == null) {
return null;
}
IServiceManager in =
(IServiceManager)obj.queryLocalInterface(descriptor);//returns null here; a BinderProxy has no local interface
if (in != null) {
return in;
}
return new ServiceManagerProxy(obj);//construct a new ServiceManagerProxy
}
public ServiceManagerProxy(IBinder remote) {
mRemote = remote;//the BinderProxy is stored here
}
Next, ServiceManagerProxy's getService method is called.
public IBinder getService(String name) throws RemoteException {
Parcel data = Parcel.obtain();
Parcel reply = Parcel.obtain();
data.writeInterfaceToken(IServiceManager.descriptor);
//static final String descriptor = "android.os.IServiceManager";
data.writeString(name);
mRemote.transact(GET_SERVICE_TRANSACTION, data, reply, 0);
//invokes BinderProxy's transact method
IBinder binder = reply.readStrongBinder();
reply.recycle();
data.recycle();
return binder;
}
Up to this point we have not crossed any process boundary: the BinderProxy we obtained corresponds, at the native layer, to a BpBinder whose mHandle is 0, and it was produced within our own process through IPCThreadState. ServiceManagerProxy is the client-side proxy for interacting with ServiceManager: the client calls ServiceManagerProxy's methods, and the Binder mechanism invokes the corresponding methods on the server side to do the actual work. To the client process the server is transparent; the client is not even aware that a server exists.
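This proxy pattern is the same one AIDL generates. Below is a hedged, hand-written sketch of both ends; IEcho, EchoStub, EchoProxy, and the descriptor string are hypothetical names for illustration, not framework classes:
import android.os.Binder;
import android.os.IBinder;
import android.os.Parcel;
import android.os.RemoteException;

interface IEcho { String echo(String msg) throws RemoteException; }

// Server side: a Binder subclass unpacks the Parcel and runs the real logic.
class EchoStub extends Binder implements IEcho {
    static final String DESCRIPTOR = "com.example.IEcho"; // hypothetical descriptor
    static final int TRANSACTION_echo = IBinder.FIRST_CALL_TRANSACTION;

    public String echo(String msg) { return msg; }

    @Override
    protected boolean onTransact(int code, Parcel data, Parcel reply, int flags)
            throws RemoteException {
        if (code == TRANSACTION_echo) {
            data.enforceInterface(DESCRIPTOR); // matches the client's interface token
            reply.writeString(echo(data.readString()));
            return true;
        }
        return super.onTransact(code, data, reply, flags);
    }
}

// Client side: the proxy packs arguments and drives IBinder.transact(),
// exactly the shape of ServiceManagerProxy.getService() above.
class EchoProxy implements IEcho {
    private final IBinder mRemote; // a BinderProxy in the client process
    EchoProxy(IBinder remote) { mRemote = remote; }

    public String echo(String msg) throws RemoteException {
        Parcel data = Parcel.obtain();
        Parcel reply = Parcel.obtain();
        try {
            data.writeInterfaceToken(EchoStub.DESCRIPTOR);
            data.writeString(msg);
            mRemote.transact(EchoStub.TRANSACTION_echo, data, reply, 0);
            return reply.readString();
        } finally {
            reply.recycle();
            data.recycle();
        }
    }
}
If client and server happen to live in the same process, queryLocalInterface returns the stub itself and the Parcel round trip is skipped entirely; that is exactly the check asInterface performed above.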
BinderProxy.java
public boolean transact(int code, Parcel data, Parcel reply, int flags) throws RemoteException {
Binder.checkParcel(this, code, data, "Unreasonably large binder buffer");
return transactNative(code, data, reply, flags);
}
android_util_Binder.cpp
static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj,
jint code, jobject dataObj, jobject replyObj, jint flags) // throws RemoteException
{
Parcel* data = parcelForJavaObject(env, dataObj);
if (data == NULL) {
return JNI_FALSE;
}
Parcel* reply = parcelForJavaObject(env, replyObj);
if (reply == NULL && replyObj != NULL) {
return JNI_FALSE;
}
IBinder* target = (IBinder*)
env->GetLongField(obj, gBinderProxyOffsets.mObject);
//this retrieves the BpBinder, whose mHandle is 0
if (target == NULL) {
jniThrowException(env, "java/lang/IllegalStateException", "Binder has been finalized!");
return JNI_FALSE;
}
status_t err = target->transact(code, *data, reply, flags);
//invokes BpBinder's transact method
if (err == NO_ERROR) {
return JNI_TRUE;
} else if (err == UNKNOWN_TRANSACTION) {
return JNI_FALSE;
}
signalExceptionForError(env, obj, err, true /*canThrowRemoteException*/, data->dataSize());
return JNI_FALSE;
}
BpBinder.cpp
status_t BpBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
// Once a binder has died, it will never come back to life.
if (mAlive) {
status_t status = IPCThreadState::self()->transact(
mHandle, code, data, reply, flags);
//invokes IPCThreadState's transact; mHandle is 0, flags is 0, code is GET_SERVICE_TRANSACTION (0x00000001)
if (status == DEAD_OBJECT) mAlive = 0;
return status;
}
return DEAD_OBJECT;
}
IPCThreadState.cpp
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
flags |= TF_ACCEPT_FDS;
if (err == NO_ERROR) {
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
}
if ((flags & TF_ONE_WAY) == 0) {//if TF_ONE_WAY is not set
if (reply) {
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
} else {//TF_ONE_WAY is set, no reply is needed
err = waitForResponse(NULL, NULL);
}
return err;
}
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
binder_transaction_data tr;
tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
tr.target.handle = handle;
tr.code = code;
tr.flags = binderFlags;
tr.cookie = 0;
tr.sender_pid = 0;
tr.sender_euid = 0;
const status_t err = data.errorCheck();
if (err == NO_ERROR) {
tr.data_size = data.ipcDataSize();
tr.data.ptr.buffer = data.ipcData();//points at the actual payload
tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
tr.data.ptr.offsets = data.ipcObjects();
}
mOut.writeInt32(cmd);
mOut.write(&tr, sizeof(tr));
return NO_ERROR;
}
struct binder_transaction_data {
union {
__u32 handle;
binder_uintptr_t ptr;
} target;//target records the handle
binder_uintptr_t cookie;
__u32 code;
__u32 flags;
pid_t sender_pid;
uid_t sender_euid;
binder_size_t data_size;
binder_size_t offsets_size;
union {
struct {
binder_uintptr_t buffer;
binder_uintptr_t offsets;
} ptr;
__u8 buf[8];
} data;
};
writeTransactionData simply packs the data into mOut; next, waitForResponse is called.
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)//acquireResult defaults to NULL
{
uint32_t cmd;
int32_t err;
while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;
err = mIn.errorCheck();
if (err < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;
cmd = (uint32_t)mIn.readInt32();//read the command the server replied with from mIn
switch (cmd) {
case BR_TRANSACTION_COMPLETE:
if (!reply && !acquireResult) goto finish;
break;
case BR_DEAD_REPLY:
err = DEAD_OBJECT;
goto finish;
case BR_FAILED_REPLY:
err = FAILED_TRANSACTION;
goto finish;
case BR_ACQUIRE_RESULT:
{
ALOG_ASSERT(acquireResult != NULL, "Unexpected brACQUIRE_RESULT");
const int32_t result = mIn.readInt32();
if (!acquireResult) continue;
*acquireResult = result ? NO_ERROR : INVALID_OPERATION;
}
goto finish;
case BR_REPLY:
{
binder_transaction_data tr;
err = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
if (err != NO_ERROR) goto finish;
if (reply) {
if ((tr.flags & TF_STATUS_CODE) == 0) {
reply->ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t),
freeBuffer, this);
} else {
err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
freeBuffer(NULL,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
}
} else {
freeBuffer(NULL,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
continue;
}
}
goto finish;
default:
err = executeCommand(cmd);
if (err != NO_ERROR) goto finish;
break;
}
}
finish:
if (err != NO_ERROR) {
if (acquireResult) *acquireResult = err;
if (reply) reply->setError(err);
mLastError = err;
}
return err;
}
struct binder_write_read {
binder_size_t write_size;
binder_size_t write_consumed;
binder_uintptr_t write_buffer;
binder_size_t read_size;
binder_size_t read_consumed;
binder_uintptr_t read_buffer;
};
status_t IPCThreadState::talkWithDriver(bool doReceive)//doReceive defaults to true
{
binder_write_read bwr;
// Is the read buffer empty?
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
// We don't want to write anything if we are still reading
// from data left in the input buffer and the caller
// has requested to read the next data.
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data();
// This is what we'll read.
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
// Return immediately if there is nothing to do.
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
#if defined(__ANDROID__)
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
//talk to the binder driver via ioctl, reading and writing data
err = NO_ERROR;
else
err = -errno;
#else
err = INVALID_OPERATION;
#endif
if (mProcess->mDriverFD <= 0) {
err = -EBADF;
}
} while (err == -EINTR);
if (err >= NO_ERROR) {
if (bwr.write_consumed > 0) {
if (bwr.write_consumed < mOut.dataSize())
mOut.remove(0, bwr.write_consumed);
else
mOut.setDataSize(0);
}
if (bwr.read_consumed > 0) {
mIn.setDataSize(bwr.read_consumed);
mIn.setDataPosition(0);
}
return NO_ERROR;
}
return err;
}
Here talkWithDriver exchanges data with the binder driver via ioctl, and this is where the cross-process hop actually happens: the client process writes its data into the driver via ioctl and then blocks. In the server process, the binder loop reads that data, parses the command, executes the corresponding logic, and writes its reply back to the driver. The client then resumes and reads the reply, which ends up in mIn.
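Note that this blocking round trip only applies to two-way transactions; the TF_ONE_WAY flag we saw in IPCThreadState::transact surfaces at the Java layer as IBinder.FLAG_ONEWAY. A minimal sketch (HYPOTHETICAL_CODE is a made-up transaction code for illustration):
import android.os.IBinder;
import android.os.Parcel;
import android.os.RemoteException;

static void transactBothWays(IBinder remote) throws RemoteException {
    final int HYPOTHETICAL_CODE = IBinder.FIRST_CALL_TRANSACTION; // made-up code
    Parcel data = Parcel.obtain();
    Parcel reply = Parcel.obtain();
    try {
        // Two-way: the caller blocks in waitForResponse() until BR_REPLY arrives.
        remote.transact(HYPOTHETICAL_CODE, data, reply, 0);
        // One-way: TF_ONE_WAY is set, so the driver queues the transaction and
        // transact() returns without waiting for the server's reply.
        remote.transact(HYPOTHETICAL_CODE, data, null, IBinder.FLAG_ONEWAY);
    } finally {
        reply.recycle();
        data.recycle();
    }
}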
Let's first look at ServiceManager's server side.
service_manager.c
int main()
{
struct binder_state *bs;
bs = binder_open(128*1024);
if (!bs) {
ALOGE("failed to open binder driver\n");
return -1;
}
if (binder_become_context_manager(bs)) {
ALOGE("cannot become context manager (%s)\n", strerror(errno));
return -1;
}
//忽略部分代碼
binder_loop(bs, svcmgr_handler);
return 0;
}
binder.c
struct binder_state
{
int fd;
void *mapped;
size_t mapsize;
};
struct binder_state *binder_open(size_t mapsize)
{
struct binder_state *bs;
struct binder_version vers;
bs = malloc(sizeof(*bs));
if (!bs) {
errno = ENOMEM;
return NULL;
}
bs->fd = open("/dev/binder", O_RDWR | O_CLOEXEC);//打開(kāi)binder設(shè)備
if (bs->fd < 0) {
goto fail_open;
}
if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
(vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
goto fail_open;
}
bs->mapsize = mapsize;
bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);//mmap
if (bs->mapped == MAP_FAILED) {
goto fail_map;
}
return bs;
fail_map:
close(bs->fd);
fail_open:
free(bs);
return NULL;
}
int binder_become_context_manager(struct binder_state *bs)
{
return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
void binder_loop(struct binder_state *bs, binder_handler func)
{
int res;
struct binder_write_read bwr;
uint32_t readbuf[32];
bwr.write_size = 0;
bwr.write_consumed = 0;
bwr.write_buffer = 0;
readbuf[0] = BC_ENTER_LOOPER;
binder_write(bs, readbuf, sizeof(uint32_t));
for (;;) {
bwr.read_size = sizeof(readbuf);
bwr.read_consumed = 0;
bwr.read_buffer = (uintptr_t) readbuf;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);//read/write the binder device via ioctl
if (res < 0) {
ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
break;
}
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);//parse and execute the commands
if (res == 0) {
ALOGE("binder_loop: unexpected reply?!\n");
break;
}
if (res < 0) {
ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
break;
}
}
}
int binder_write(struct binder_state *bs, void *data, size_t len)
{
struct binder_write_read bwr;
int res;
bwr.write_size = len;
bwr.write_consumed = 0;
bwr.write_buffer = (uintptr_t) data;
bwr.read_size = 0;
bwr.read_consumed = 0;
bwr.read_buffer = 0;
res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);//talk to the binder driver via ioctl
if (res < 0) {
fprintf(stderr,"binder_write: ioctl failed (%s)\n",
strerror(errno));
}
return res;
}
struct binder_io
{
char *data; /* pointer to read/write from */
binder_size_t *offs; /* array of offsets */
size_t data_avail; /* bytes available in data buffer */
size_t offs_avail; /* entries available in offsets array */
char *data0; /* start of data buffer */
binder_size_t *offs0; /* start of offsets buffer */
uint32_t flags;
uint32_t unused;
};
int binder_parse(struct binder_state *bs, struct binder_io *bio,
uintptr_t ptr, size_t size, binder_handler func)
{
int r = 1;
uintptr_t end = ptr + (uintptr_t) size;
while (ptr < end) {//as long as data was read
uint32_t cmd = *(uint32_t *) ptr;
ptr += sizeof(uint32_t);
switch(cmd) {
case BR_NOOP:
break;
case BR_TRANSACTION_COMPLETE:
break;
case BR_INCREFS:
case BR_ACQUIRE:
case BR_RELEASE:
case BR_DECREFS:
ptr += sizeof(struct binder_ptr_cookie);
break;
case BR_TRANSACTION: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
//this is the very packet the client packed in writeTransactionData
if ((end - ptr) < sizeof(*txn)) {
ALOGE("parse: txn too small!\n");
return -1;
}
binder_dump_txn(txn);
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
res = func(bs, txn, &msg, &reply);//run the handler logic
if (txn->flags & TF_ONE_WAY) {//no reply needed; free the buffer
binder_free_buffer(bs, txn->data.ptr.buffer);
} else {//send the reply
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
}
ptr += sizeof(*txn);
break;
}
case BR_REPLY: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
ALOGE("parse: reply too small!\n");
return -1;
}
binder_dump_txn(txn);
if (bio) {
bio_init_from_txn(bio, txn);
bio = 0;
} else {
/* todo FREE BUFFER */
}
ptr += sizeof(*txn);
r = 0;
break;
}
case BR_DEAD_BINDER: {
struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr;
ptr += sizeof(binder_uintptr_t);
death->func(bs, death->ptr);
break;
}
case BR_FAILED_REPLY:
r = -1;
break;
case BR_DEAD_REPLY:
r = -1;
break;
default:
ALOGE("parse: OOPS %d\n", cmd);
return -1;
}
}
return r;
}
int svcmgr_handler(struct binder_state *bs,
struct binder_transaction_data *txn,
struct binder_io *msg,
struct binder_io *reply)//the handler function
{
struct svcinfo *si;
uint16_t *s;
size_t len;
uint32_t handle;
uint32_t strict_policy;
int allow_isolated;
if (txn->target.ptr != BINDER_SERVICE_MANAGER)//target.ptr = 0
return -1;
if (txn->code == PING_TRANSACTION)
return 0;
strict_policy = bio_get_uint32(msg);
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
switch(txn->code) {
case SVC_MGR_GET_SERVICE:
case SVC_MGR_CHECK_SERVICE:
s = bio_get_string16(msg, &len);//get the name of the requested service
if (s == NULL) {
return -1;
}
handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid);//look up the svcinfo and return its handle
if (!handle)
break;
bio_put_ref(reply, handle);
return 0;
case SVC_MGR_ADD_SERVICE:
//add service: save the service name and handle in an svcinfo and append it to the list
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
if (do_add_service(bs, s, len, handle, txn->sender_euid,
allow_isolated, txn->sender_pid))
return -1;
break;
case SVC_MGR_LIST_SERVICES: {
uint32_t n = bio_get_uint32(msg);
if (!svc_can_list(txn->sender_pid, txn->sender_euid)) {
ALOGE("list_service() uid=%d - PERMISSION DENIED\n",
txn->sender_euid);
return -1;
}
si = svclist;
while ((n-- > 0) && si)
si = si->next;
if (si) {
bio_put_string16(reply, si->name);
return 0;
}
return -1;
}
default:
ALOGE("unknown code %d\n", txn->code);
return -1;
}
bio_put_uint32(reply, 0);
return 0;
}
We have now traced the get-service path. The service we want to get is "activity"; so when was it added?
SystemServer.java
public static void main(String[] args) {
new SystemServer().run();
}
private void run() {
//some code omitted
// Increase the number of binder threads in system_server
BinderInternal.setMaxThreads(sMaxBinderThreads);//31; ordinary processes use 15
Looper.prepareMainLooper();//prepare the Looper
System.loadLibrary("android_servers");//load the native library
createSystemContext();
mSystemServiceManager = new SystemServiceManager(mSystemContext);
LocalServices.addService(SystemServiceManager.class, mSystemServiceManager);//register in LocalServices
startBootstrapServices();
startCoreServices();
startOtherServices();
}
private void createSystemContext() {
ActivityThread activityThread = ActivityThread.systemMain();
mSystemContext = activityThread.getSystemContext();
mSystemContext.setTheme(DEFAULT_SYSTEM_THEME);
}
public static ActivityThread systemMain() {
ActivityThread thread = new ActivityThread();
thread.attach(true);
return thread;
}
Part of the attach code:
try {
mInstrumentation = new Instrumentation();
ContextImpl context = ContextImpl.createAppContext(
this, getSystemContext().mPackageInfo);
mInitialApplication = context.mPackageInfo.makeApplication(true, null);
mInitialApplication.onCreate();
} catch (Exception e) {
throw new RuntimeException(
"Unable to instantiate Application():" + e.toString(), e);
}
public ContextImpl getSystemContext() {
synchronized (this) {
if (mSystemContext == null) {
mSystemContext = ContextImpl.createSystemContext(this);
}
return mSystemContext;
}
}
public SystemServiceManager(Context context) {
mContext = context;
}
private void startBootstrapServices() {
//some code omitted
Installer installer = mSystemServiceManager.startService(Installer.class);
mActivityManagerService = mSystemServiceManager.startService(
ActivityManagerService.Lifecycle.class).getService();
mActivityManagerService.setSystemServiceManager(mSystemServiceManager);
mActivityManagerService.setInstaller(installer);
mActivityManagerService.setSystemProcess();
}
public SystemService startService(String className) {
final Class<SystemService> serviceClass;
try {
serviceClass = (Class<SystemService>)Class.forName(className);
} catch (ClassNotFoundException ex) {
Slog.i(TAG, "Starting " + className);
throw new RuntimeException("Failed to create service " + className
+ ": service class not found, usually indicates that the caller should "
+ "have called PackageManager.hasSystemFeature() to check whether the "
+ "feature is available on this device before trying to start the "
+ "services that implement it", ex);
}
return startService(serviceClass);
}
public <T extends SystemService> T startService(Class<T> serviceClass) {
try {
final String name = serviceClass.getName();
// Create the service.
if (!SystemService.class.isAssignableFrom(serviceClass)) {
throw new RuntimeException("Failed to create " + name
+ ": service must extend " + SystemService.class.getName());
}
final T service;
try {
Constructor<T> constructor = serviceClass.getConstructor(Context.class);
service = constructor.newInstance(mContext);
} catch (InstantiationException ex) {
throw new RuntimeException("Failed to create service " + name
+ ": service could not be instantiated", ex);
} catch (IllegalAccessException ex) {
throw new RuntimeException("Failed to create service " + name
+ ": service must have a public constructor with a Context argument", ex);
} catch (NoSuchMethodException ex) {
throw new RuntimeException("Failed to create service " + name
+ ": service must have a public constructor with a Context argument", ex);
} catch (InvocationTargetException ex) {
throw new RuntimeException("Failed to create service " + name
+ ": service constructor threw an exception", ex);
}
// Register it.
mServices.add(service);
//private final ArrayList<SystemService> mServices = new ArrayList<SystemService>();
// Start it.
try {
service.onStart();
} catch (RuntimeException ex) {
throw new RuntimeException("Failed to start service " + name
+ ": onStart threw an exception", ex);
}
return service;
} finally {
Trace.traceEnd(Trace.TRACE_TAG_SYSTEM_SERVER);
}
}
//ActivityManagerService.Lifecycle
public static final class Lifecycle extends SystemService {
private final ActivityManagerService mService;
public Lifecycle(Context context) {
super(context);
mService = new ActivityManagerService(context);//a real AMS instance is constructed here
//AMS's constructor is fairly involved; we skip it for now
}
@Override
public void onStart() {
mService.start();
}
public ActivityManagerService getService() {
return mService;
}
}
mActivityManagerService = mSystemServiceManager.startService(
ActivityManagerService.Lifecycle.class).getService();
//all we need to know here: an AMS instance was created and its start method was called
public void setSystemProcess() {
try {
ServiceManager.addService(Context.ACTIVITY_SERVICE, this, true);
//public static final String ACTIVITY_SERVICE = "activity";
//as analyzed above, this invokes ServiceManagerProxy's addService, but this time from the SystemServer process rather than an application process
//the rest is omitted
} catch (PackageManager.NameNotFoundException e) {
throw new RuntimeException(
"Unable to find android system package", e);
}
}
public void addService(String name, IBinder service, boolean allowIsolated)
throws RemoteException {
Parcel data = Parcel.obtain();
Parcel reply = Parcel.obtain();
data.writeInterfaceToken(IServiceManager.descriptor);
data.writeString(name);
data.writeStrongBinder(service);
data.writeInt(allowIsolated ? 1 : 0);
mRemote.transact(ADD_SERVICE_TRANSACTION, data, reply, 0);
reply.recycle();
data.recycle();
}
This path was analyzed above, so we jump straight to service_manager.c's svcmgr_handler function:
case SVC_MGR_ADD_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = bio_get_ref(msg);
allow_isolated = bio_get_uint32(msg) ? 1 : 0;
if (do_add_service(bs, s, len, handle, txn->sender_euid,
allow_isolated, txn->sender_pid))
return -1;
break;
struct flat_binder_object {
__u32 type;
__u32 flags;
union {
binder_uintptr_t binder;
__u32 handle;
};
binder_uintptr_t cookie;
};
uint32_t bio_get_ref(struct binder_io *bio)
{
struct flat_binder_object *obj;
obj = _bio_get_obj(bio);
if (!obj)
return 0;
if (obj->type == BINDER_TYPE_HANDLE)
return obj->handle;
return 0;
}
static struct flat_binder_object *_bio_get_obj(struct binder_io *bio)
{
size_t n;
size_t off = bio->data - bio->data0;
/* TODO: be smarter about this? */
for (n = 0; n < bio->offs_avail; n++) {
if (bio->offs[n] == off)
return bio_get(bio, sizeof(struct flat_binder_object));
}
bio->data_avail = 0;
bio->flags |= BIO_F_OVERFLOW;
return NULL;
}
static void *bio_get(struct binder_io *bio, size_t size)
{
size = (size + 3) & (~3);
if (bio->data_avail < size){
bio->data_avail = 0;
bio->flags |= BIO_F_OVERFLOW;
return NULL;
} else {
void *ptr = bio->data;
bio->data += size;
bio->data_avail -= size;
return ptr;
}
}
bio->data is exactly the buffer that writeTransactionData packed in IPCThreadState: tr.data.ptr.buffer = data.ipcData();
data is the Parcel object, so the struct flat_binder_object *obj corresponds to a slice of data.ipcData(). You can treat
handle = bio_get_ref(msg); as returning a handle that stands for a particular Service.
The service is then appended to the list, ready for clients to look up.
Back to the earlier lookup of AMS:
case SVC_MGR_CHECK_SERVICE:
s = bio_get_string16(msg, &len);
if (s == NULL) {
return -1;
}
handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid);
//look up by name, finding the corresponding handle
if (!handle)
break;
bio_put_ref(reply, handle);//store the handle in reply
return 0;
Back to binder_parse:
case BR_TRANSACTION: {
struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
if ((end - ptr) < sizeof(*txn)) {
ALOGE("parse: txn too small!\n");
return -1;
}
binder_dump_txn(txn);
if (func) {
unsigned rdata[256/4];
struct binder_io msg;
struct binder_io reply;
int res;
bio_init(&reply, rdata, sizeof(rdata), 4);
bio_init_from_txn(&msg, txn);
res = func(bs, txn, &msg, &reply);
//we have reached this point; below, a reply is sent or the buffer freed depending on TF_ONE_WAY
if (txn->flags & TF_ONE_WAY) {
binder_free_buffer(bs, txn->data.ptr.buffer);
} else {
binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
}
}
ptr += sizeof(*txn);
break;
}
void binder_send_reply(struct binder_state *bs,
struct binder_io *reply,
binder_uintptr_t buffer_to_free,
int status)//status=0
{
struct {
uint32_t cmd_free;
binder_uintptr_t buffer;
uint32_t cmd_reply;
struct binder_transaction_data txn;
} __attribute__((packed)) data;
data.cmd_free = BC_FREE_BUFFER;
data.buffer = buffer_to_free;
data.cmd_reply = BC_REPLY;//the reply command is BC_REPLY
data.txn.target.ptr = 0;
data.txn.cookie = 0;
data.txn.code = 0;
if (status) {
data.txn.flags = TF_STATUS_CODE;
data.txn.data_size = sizeof(int);
data.txn.offsets_size = 0;
data.txn.data.ptr.buffer = (uintptr_t)&status;
data.txn.data.ptr.offsets = 0;
} else {
data.txn.flags = 0;
data.txn.data_size = reply->data - reply->data0;
data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
}
binder_write(bs, &data, sizeof(data));//write into binder via ioctl
}
Now the client process resumes and reads the data the server sent back; it ends up in the Parcel mIn.
cmd = (uint32_t)mIn.readInt32();//BR_REPLY
case BR_REPLY:
{
binder_transaction_data tr;
err = mIn.read(&tr, sizeof(tr));//unpack the data
ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
if (err != NO_ERROR) goto finish;
if (reply) {
if ((tr.flags & TF_STATUS_CODE) == 0) {//flags = 0
reply->ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t),
freeBuffer, this);//move the data into reply; this reply is the native counterpart of the Java-layer reply
} else {
err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
freeBuffer(NULL,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
}
} else {
freeBuffer(NULL,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
continue;
}
}
goto finish;
Back to ServiceManagerNative:
public IBinder getService(String name) throws RemoteException {
Parcel data = Parcel.obtain();
Parcel reply = Parcel.obtain();
data.writeInterfaceToken(IServiceManager.descriptor);
data.writeString(name);
mRemote.transact(GET_SERVICE_TRANSACTION, data, reply, 0);
//transact leaves the requested Service's "binder" in reply
IBinder binder = reply.readStrongBinder();
reply.recycle();
data.recycle();
return binder;
}
public final IBinder readStrongBinder() {
return nativeReadStrongBinder(mNativePtr);
}
static jobject android_os_Parcel_readStrongBinder(JNIEnv* env, jclass clazz, jlong nativePtr)
{
Parcel* parcel = reinterpret_cast<Parcel*>(nativePtr);
if (parcel != NULL) {
return javaObjectForIBinder(env, parcel->readStrongBinder());
}
return NULL;
}
sp<IBinder> Parcel::readStrongBinder() const
{
sp<IBinder> val;
readStrongBinder(&val);
return val;
}
status_t Parcel::readStrongBinder(sp<IBinder>* val) const
{
return unflatten_binder(ProcessState::self(), *this, val);
}
status_t unflatten_binder(const sp<ProcessState>& proc,
const Parcel& in, sp<IBinder>* out)
{
const flat_binder_object* flat = in.readObject(false);//the same flat_binder_object structure as in service_manager.c; readObject reads back the obj that service_manager wrote
if (flat) {
switch (flat->type) {
case BINDER_TYPE_BINDER:
*out = reinterpret_cast<IBinder*>(flat->cookie);
return finish_unflatten_binder(NULL, *flat, in);
case BINDER_TYPE_HANDLE://this branch is taken; addService set this type
*out = proc->getStrongProxyForHandle(flat->handle);//call getStrongProxyForHandle in the client process, just like obtaining ServiceManager's binder at the start: there the handle was 0, standing for ServiceManager; here it is the handle standing for AMS, so a new BpBinder is created with that handle
return finish_unflatten_binder(
static_cast<BpBinder*>(out->get()), *flat, in);
}
}
return BAD_TYPE;
}
Back to IBinder binder = reply.readStrongBinder(); binder is a BinderProxy object whose mObject field holds the corresponding native BpBinder, and that BpBinder stores the handle standing for AMS.
Back to ActivityManagerNative:
private static final Singleton<IActivityManager> gDefault = new Singleton<IActivityManager>() {
protected IActivityManager create() {
IBinder b = ServiceManager.getService("activity");
//we have arrived here: b is a BinderProxy object representing AMS
IActivityManager am = asInterface(b);
//asInterface amounts to new ActivityManagerProxy(b);
return am;
}
};
Back to ActivityThread:
final IActivityManager mgr = ActivityManagerNative.getDefault();
//mgr is an ActivityManagerProxy object
try {
mgr.attachApplication(mAppThread);
//this proxy is how the app process initiates interaction with the AMS process
} catch (RemoteException ex) {
throw ex.rethrowFromSystemServer();
}
To summarize: we have traced the Binder interaction between the SystemServer process and the ServiceManager process, as well as between an application process and the ServiceManager process. ServiceManager's handle is fixed at 0, so any process can directly obtain its BinderProxy and start calling it. Through that proxy (wrapped as ServiceManagerProxy), SystemServer registers ActivityManagerService's handle into the list that ServiceManager maintains. When an application process needs to interact with ActivityManagerService, it first goes through the same ServiceManagerProxy to ask ServiceManager for ActivityManagerService's BinderProxy (wrapped as ActivityManagerProxy); from then on, the application process can interact with ActivityManagerService through that proxy.