Reference links:
Understanding Binder through a Simple AIDL Implementation (1): A simple AIDL implementation
Understanding Binder through a Simple AIDL Implementation (2): The bindService call flow
Understanding Binder through a Simple AIDL Implementation (3): Passing the Binder during bindService
Understanding Binder through a Simple AIDL Implementation (4): Writing the Binder during bindService
In the previous post we traced how, during Binder IPC on Android, a Binder object is written step by step into memory in preparation for transfer. In this post we continue with how the Binder actually crosses the process boundary.
As we all know, memory on Android is split into user space and kernel space. User-space memory is private to each process, so the only way to make a cross-process call is to trap into kernel mode via a Linux system call and complete the hand-off there.
Binder exists precisely to carry a call along this user space -> kernel space -> user space path.
The Binder driver is specific to Android, but its underlying driver architecture is the same as any Linux driver. It registers itself as a misc device, a virtual character device: it never touches hardware directly and only manages device memory. Its main entry points are driver initialization (binder_init), open (binder_open), memory mapping (binder_mmap), and data operations (binder_ioctl).
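For reference, the registration looks roughly like this in the kernel's binder.c (abridged; the exact field list varies across kernel versions):

// drivers/android/binder.c (abridged): the driver registers /dev/binder
// as a misc device and wires its file operations to the binder_* entry
// points named above.
static const struct file_operations binder_fops = {
    .owner          = THIS_MODULE,
    .poll           = binder_poll,
    .unlocked_ioctl = binder_ioctl,
    .mmap           = binder_mmap,
    .open           = binder_open,
    .flush          = binder_flush,
    .release        = binder_release,
};

static struct miscdevice binder_miscdev = {
    .minor = MISC_DYNAMIC_MINOR,
    .name  = "binder",
    .fops  = &binder_fops,
};

static int __init binder_init(void)
{
    ...
    return misc_register(&binder_miscdev);
}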
A user-space program that calls into the kernel driver must trap into kernel mode through a system call (syscall). For example, the call chain for opening the Binder driver is open() -> __open() -> binder_open(): open() is the user-space function, __open() is the matching handler on the syscall path, and the lookup dispatches it to the driver's binder_open(). Every other transition from user mode into kernel mode follows essentially the same pattern.
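As a minimal sketch of that first trap, here is roughly what ProcessState does when a process starts using Binder. This is not the real ProcessState code: error handling is omitted, the header path may vary by platform, and the mapping size mirrors ProcessState's default of 1MB minus two pages.

// Minimal sketch: open the Binder driver, query its version, and map
// the receive buffer. Each call traps into the matching binder_*
// function in the driver.
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/android/binder.h>

int open_binder() {
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);   // -> binder_open()
    struct binder_version vers;
    ioctl(fd, BINDER_VERSION, &vers);                   // -> binder_ioctl()
    size_t size = 1024 * 1024 - sysconf(_SC_PAGE_SIZE) * 2;
    mmap(nullptr, size, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, fd, 0); // -> binder_mmap()
    return fd;
}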
Let us continue from where the previous post left off, at IPCThreadState.cpp -> talkWithDriver:
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    ...
    binder_write_read bwr;
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data();
    if (doReceive && needRead) {
        // Fill in the receive buffer info; any data we get back later
        // lands directly in mIn.
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }
    // If both the read buffer and the write buffer are empty, return immediately
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
        // Talk to the Binder driver through repeated ioctl read/write calls
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        ...
    } while (err == -EINTR); // if interrupted, try again
    ...
    return err;
}
Here, the mOut object we built up in the previous post is repackaged as a bwr object and handed to the kernel through ioctl(). The ioctl argument mProcess->mDriverFD is the file descriptor of the Binder driver; at this point we formally trap into the Binder driver in kernel mode.
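For reference, bwr here is the kernel's binder_write_read struct; one control block carries both directions of the conversation, as defined in the binder UAPI header:

// include/uapi/linux/android/binder.h
struct binder_write_read {
    binder_size_t    write_size;     // bytes available in write_buffer
    binder_size_t    write_consumed; // bytes the driver consumed
    binder_uintptr_t write_buffer;   // our commands to the driver (BC_*)
    binder_size_t    read_size;      // bytes available in read_buffer
    binder_size_t    read_consumed;  // bytes the driver filled in
    binder_uintptr_t read_buffer;    // the driver's replies to us (BR_*)
};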
The call chain is: ioctl -> binder_ioctl -> binder_ioctl_write_read:
// binder.c
static int binder_ioctl_write_read(struct file *filp,
            unsigned int cmd, unsigned long arg,
            struct binder_thread *thread)
{
    struct binder_proc *proc = filp->private_data;
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;
    // Copy the bwr struct from user space into kernel space
    copy_from_user(&bwr, ubuf, sizeof(bwr));
    ...
    if (bwr.write_size > 0) {
        // Deliver the data toward the target process
        ret = binder_thread_write(proc, thread,
                                  bwr.write_buffer,
                                  bwr.write_size,
                                  &bwr.write_consumed);
        ...
    }
    if (bwr.read_size > 0) {
        // Read the data queued for this thread
        ret = binder_thread_read(proc, thread, bwr.read_buffer,
                                 bwr.read_size,
                                 &bwr.read_consumed,
                                 filp->f_flags & O_NONBLOCK);
        if (!list_empty(&proc->todo))
            wake_up_interruptible(&proc->wait);
        ...
    }
    // Copy the bwr struct from kernel space back to user space
    copy_to_user(ubuf, &bwr, sizeof(bwr));
    ...
}
binder_thread_write:
static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
{
    uint32_t cmd;
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;
    while (ptr < end && thread->return_error == BR_OK) {
        // Fetch the command from user space; at this point it is BC_TRANSACTION
        if (get_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        switch (cmd) {
        case BC_TRANSACTION:
        case BC_REPLY: {
            struct binder_transaction_data tr;
            // Copy the binder_transaction_data from user space
            if (copy_from_user(&tr, ptr, sizeof(tr)))
                return -EFAULT;
            ptr += sizeof(tr);
            binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
            break;
        }
        ...
        }
        *consumed = ptr - buffer;
    }
    return 0;
}
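The buffer that binder_thread_write walks is nothing more than a stream of (command, payload) pairs. As a reminder of how it was composed, this is roughly what IPCThreadState::writeTransactionData from the previous post does (a sketch with abridged names such as handle, code, and data, not the verbatim source):

// Rough sketch of IPCThreadState::writeTransactionData: a 32-bit BC_*
// command followed by a binder_transaction_data payload, exactly the
// layout that get_user()/copy_from_user() above pick apart.
binder_transaction_data tr{};
tr.target.handle = handle;                  // which remote binder to call
tr.code = code;                             // method code for onTransact
tr.flags = binderFlags;
tr.data_size = data.ipcDataSize();          // the flattened Parcel
tr.data.ptr.buffer = data.ipcData();
tr.offsets_size = data.ipcObjectsCount() * sizeof(binder_size_t);
tr.data.ptr.offsets = data.ipcObjects();    // where the binder objects sit
mOut.writeInt32(BC_TRANSACTION);            // the cmd read by get_user()
mOut.write(&tr, sizeof(tr));                // the struct read by copy_from_user()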
The crucial step here is binder_transaction:
static void binder_transaction(struct binder_proc *proc,
            struct binder_thread *thread,
            struct binder_transaction_data *tr, int reply)
{
    // Resolved through a series of checks:
    struct binder_thread *target_thread;  // target thread
    struct binder_proc *target_proc;      // target process
    struct binder_node *target_node;      // target binder node
    struct list_head *target_list;        // target todo queue
    wait_queue_head_t *target_wait;       // target wait queue
    ...
    // Allocate the two bookkeeping structs
    struct binder_transaction *t = kzalloc(sizeof(*t), GFP_KERNEL);
    struct binder_work *tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
    // Allocate a buffer out of target_proc's mapped area
    t->buffer = binder_alloc_buf(target_proc, tr->data_size,
        tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
    ...
    // Walk every flat_binder_object embedded in the payload
    for (; offp < off_end; offp++) {
        struct flat_binder_object *fp;
        fp = (struct flat_binder_object *)(t->buffer->data + *offp);
        switch (fp->type) {
        case BINDER_TYPE_BINDER: ...
        case BINDER_TYPE_WEAK_BINDER: ...
        case BINDER_TYPE_HANDLE:
        case BINDER_TYPE_WEAK_HANDLE: {
            struct binder_ref *ref = binder_get_ref(proc, fp->handle,
                fp->type == BINDER_TYPE_HANDLE);
            ...
            // We are running in the service's process here, so ref->node
            // points to the binder node living in the service's process,
            // while target_proc is the process that requested the service;
            // in this scenario the two are not equal.
            if (ref->node->proc == target_proc) {
                if (fp->type == BINDER_TYPE_HANDLE)
                    fp->type = BINDER_TYPE_BINDER;
                else
                    fp->type = BINDER_TYPE_WEAK_BINDER;
                fp->binder = ref->node->ptr;
                fp->cookie = ref->node->cookie;  // address of the BBinder service
                binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
            } else {
                struct binder_ref *new_ref;
                // The requesting process is not the service's process, so
                // create a binder_ref for the requesting process
                new_ref = binder_get_ref_for_node(target_proc, ref->node);
                fp->binder = 0;
                fp->handle = new_ref->desc;  // hand out a fresh handle value
                fp->cookie = 0;
                binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
            }
        } break;
        case BINDER_TYPE_FD: ...
        }
    }
    // Queue the transaction on target_list, and a completion entry on the
    // current thread's todo queue
    t->work.type = BINDER_WORK_TRANSACTION;
    list_add_tail(&t->work.entry, target_list);
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    list_add_tail(&tcomplete->entry, &thread->todo);
    if (target_wait)
        wake_up_interruptible(target_wait);
    return;
}
This step is extremely important, and it splits into two cases (see the conceptual sketch below):
If the requesting process and the service live in different processes, a binder_ref object is created for the requesting process, pointing at the binder_node in the service's process;
If the requesting process and the service live in the same process, no new object is created: the reference count is simply incremented, and the type is rewritten to BINDER_TYPE_BINDER or BINDER_TYPE_WEAK_BINDER.
So when we pass RemoteService's Binder object to ActivityManagerService, a binder_ref is created in the AMS process, pointing at the binder_node in the RemoteService process.
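To make the two cases concrete, here is a purely conceptual sketch of the bookkeeping. These are hypothetical types, not kernel code: the real driver keeps binder_node and binder_ref entries in red-black trees hanging off binder_proc. Each process owns nodes for its local binders and handle-to-node mappings for remote ones, and the driver rewrites one form into the other as an object crosses a process boundary:

// Conceptual model only. Node stands in for binder_node, Proc for
// binder_proc; the handle map stands in for the binder_ref tree.
#include <cstdint>
#include <map>

struct Node { int owner_pid; uintptr_t cookie; };   // one per local BBinder

struct Proc {
    int pid;
    std::map<uint32_t, Node*> refs;                 // handle -> node (binder_ref)
    uint32_t next_desc = 1;

    // Find or create a ref, like binder_get_ref_for_node()
    uint32_t ref_for_node(Node* n) {
        for (auto& [desc, node] : refs)
            if (node == n) return desc;             // existing ref: reuse it
        uint32_t desc = next_desc++;
        refs[desc] = n;                             // fresh ref for this process
        return desc;
    }
};

// What binder_transaction does to each flattened binder object:
// same process, hand back the real object; different process, a handle.
struct Flat { bool is_local; uintptr_t cookie; uint32_t handle; };

Flat translate(Proc& target, Node* n) {
    if (n->owner_pid == target.pid)
        return {true, n->cookie, 0};                // HANDLE rewritten to BINDER
    return {false, 0, target.ref_for_node(n)};      // BINDER rewritten to HANDLE
}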
After binder_transaction completes, the target process's binder_thread_read is triggered; the target process here is the ActivityManagerService process.
The implementation of binder_thread_read:
binder_thread_read(...)
{
    ...
    // If the thread's todo queue has work, continue below; otherwise the
    // thread goes to sleep and waits
    ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
    ...
    while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w;
        struct binder_transaction *t = NULL;
        // First try to take work from the thread's own todo queue
        if (!list_empty(&thread->todo)) {
            w = list_first_entry(&thread->todo, struct binder_work, entry);
        // The thread's queue is empty, so take work from the process's todo queue
        } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
            ...
        }
        switch (w->type) {
        case BINDER_WORK_TRANSACTION:
            // Extract the enclosing transaction
            t = container_of(w, struct binder_transaction, work);
            break;
        ...  // other binder_work types elided
        }
        // Only BINDER_WORK_TRANSACTION lets us continue past this point
        if (!t) continue;
        if (t->buffer->target_node) {
            ...
        } else {
            tr.target.ptr = NULL;
            tr.cookie = NULL;
            cmd = BR_REPLY;  // set the command to BR_REPLY
        }
        tr.code = t->code;
        tr.flags = t->flags;
        tr.sender_euid = t->sender_euid;
        if (t->from) {
            struct task_struct *sender = t->from->proc->tsk;
            // For non-oneway calls, record the caller's pid in sender_pid
            tr.sender_pid = task_tgid_nr_ns(sender, current->nsproxy->pid_ns);
        } else {
            ...
        }
        tr.data_size = t->buffer->data_size;
        tr.offsets_size = t->buffer->offsets_size;
        tr.data.ptr.buffer = (void *)t->buffer->data +
                    proc->user_buffer_offset;
        tr.data.ptr.offsets = tr.data.ptr.buffer +
                    ALIGN(t->buffer->data_size,
                          sizeof(void *));
        // Write cmd and the transaction data back to user space
        put_user(cmd, (uint32_t __user *)ptr);
        ptr += sizeof(uint32_t);
        copy_to_user(ptr, &tr, sizeof(tr));
        ptr += sizeof(tr);
        list_del(&t->work.entry);
        t->buffer->allow_user_free = 1;
        if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
            ...
        } else {
            t->buffer->transaction = NULL;
            kfree(t);  // the exchange is complete, so free the transaction
        }
        break;
    }
done:
    *consumed = ptr - buffer;
    if (proc->requested_threads + proc->ready_threads == 0 &&
        proc->requested_threads_started < proc->max_threads &&
        (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
                           BINDER_LOOPER_STATE_ENTERED))) {
        proc->requested_threads++;
        // Emit a BR_SPAWN_LOOPER command, asking user space to spawn a new
        // binder thread
        put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer);
    }
    return 0;
}
When the data is delivered, a BR_TRANSACTION command is generated, which triggers IPCThreadState.cpp's executeCommand method in the server process:
status_t IPCThreadState::executeCommand(int32_t cmd)
{
    BBinder* obj;
    RefBase::weakref_type* refs;
    status_t result = NO_ERROR;
    switch ((uint32_t)cmd) {
    case BR_TRANSACTION:
        {
            binder_transaction_data tr;
            result = mIn.read(&tr, sizeof(tr));
            ALOG_ASSERT(result == NO_ERROR,
                "Not enough command data for brTRANSACTION");
            if (result != NO_ERROR) break;
            Parcel buffer;
            buffer.ipcSetDataReference(
                reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                tr.data_size,
                reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);
            const pid_t origPid = mCallingPid;
            const uid_t origUid = mCallingUid;
            const int32_t origStrictModePolicy = mStrictModePolicy;
            const int32_t origTransactionBinderFlags = mLastTransactionBinderFlags;
            mCallingPid = tr.sender_pid;
            mCallingUid = tr.sender_euid;
            mLastTransactionBinderFlags = tr.flags;
            //ALOGI(">>>> TRANSACT from pid %d uid %d\n", mCallingPid, mCallingUid);
            Parcel reply;
            status_t error;
            IF_LOG_TRANSACTIONS() {
                TextOutput::Bundle _b(alog);
                alog << "BR_TRANSACTION thr " << (void*)pthread_self()
                    << " / obj " << tr.target.ptr << " / code "
                    << TypeCode(tr.code) << ": " << indent << buffer
                    << dedent << endl
                    << "Data addr = "
                    << reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer)
                    << ", offsets addr="
                    << reinterpret_cast<const size_t*>(tr.data.ptr.offsets) << endl;
            }
            if (tr.target.ptr) {
                // We only have a weak reference on the target object, so we must first try to
                // safely acquire a strong reference before doing anything else with it.
                if (reinterpret_cast<RefBase::weakref_type*>(
                        tr.target.ptr)->attemptIncStrong(this)) {
                    error = reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer,
                            &reply, tr.flags);
                    reinterpret_cast<BBinder*>(tr.cookie)->decStrong(this);
                } else {
                    error = UNKNOWN_TRANSACTION;
                }
            } else {
                error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
            }
            //ALOGI("<<<< TRANSACT from pid %d restore pid %d uid %d\n",
            //     mCallingPid, origPid, origUid);
            if ((tr.flags & TF_ONE_WAY) == 0) {
                LOG_ONEWAY("Sending reply to %d!", mCallingPid);
                if (error < NO_ERROR) reply.setError(error);
                sendReply(reply, 0);
            } else {
                LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
            }
            mCallingPid = origPid;
            mCallingUid = origUid;
            mStrictModePolicy = origStrictModePolicy;
            mLastTransactionBinderFlags = origTransactionBinderFlags;
            IF_LOG_TRANSACTIONS() {
                TextOutput::Bundle _b(alog);
                alog << "BC_REPLY thr " << (void*)pthread_self() << " / obj "
                    << tr.target.ptr << ": " << indent << reply << dedent << endl;
            }
        }
        break;
    ...
    }
    ...
    return result;
}
Here the binder_transaction_data is parsed, the target BBinder is located, and its transact() method is invoked, which ultimately lands in the Java-layer onTransact method (in our bindService example, that onTransact is ActivityManagerService's onTransact).
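For the native side, here is a minimal sketch of such an endpoint. The EchoService class and its transaction protocol are illustrative, not framework code; only the BBinder base class and the onTransact signature come from libbinder:

// Minimal sketch of a native Binder endpoint: executeCommand() above
// ends up calling transact(), whose base implementation dispatches to
// this onTransact(). EchoService and its protocol are hypothetical.
#include <binder/Binder.h>
#include <binder/Parcel.h>

using namespace android;

class EchoService : public BBinder {
protected:
    status_t onTransact(uint32_t code, const Parcel& data,
                        Parcel* reply, uint32_t flags = 0) override {
        switch (code) {
        case IBinder::FIRST_CALL_TRANSACTION: {     // illustrative code value
            int32_t v = data.readInt32();           // unmarshal the request
            reply->writeInt32(v);                   // marshal the reply
            return NO_ERROR;
        }
        default:
            return BBinder::onTransact(code, data, reply, flags);
        }
    }
};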
From this point on, it is essentially the Binder write path run in reverse.
The key methods:
// Parcel.cpp
sp<IBinder> Parcel::readStrongBinder() const
{
    sp<IBinder> val;
    unflatten_binder(ProcessState::self(), *this, &val);
    return val;
}

status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);
    if (flat) {
        switch (flat->type) {
        case BINDER_TYPE_BINDER:
            *out = reinterpret_cast<IBinder*>(flat->cookie);
            return finish_unflatten_binder(NULL, *flat, in);
        case BINDER_TYPE_HANDLE:
            // Builds a BpBinder proxy for this handle
            *out = proc->getStrongProxyForHandle(flat->handle);
            return finish_unflatten_binder(
                static_cast<BpBinder*>(out->get()), *flat, in);
        }
    }
    return BAD_TYPE;
}
// ProcessState.cpp
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;
    AutoMutex _l(mLock);
    // Look up the entry recorded for this handle
    handle_entry* e = lookupHandleLocked(handle);
    if (e != NULL) {
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            ...
            // No IBinder exists for this handle yet (or its weak reference
            // is stale), so create a new BpBinder
            b = new BpBinder(handle);
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            result.force_set(b);
            e->refs->decWeak(this);
        }
    }
    return result;
}
This call ultimately produces a BpBinder proxy object pointing at the Binder server. javaObjectForIBinder then converts the native BpBinder into a Java-layer BinderProxy object. In other words, after the RemoteService process sends out a Binder object, it is converted and relayed layer by layer until it ends up inside ActivityManagerService as a BinderProxy object.
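At the native layer, the receiving side boils down to something like this hedged sketch. IRemoteService stands in for the AIDL-style interface used in this series, and doSomething() is an illustrative method name; readStrongBinder() and interface_cast<> are the real libbinder calls:

// Hedged sketch of consuming the received binder: readStrongBinder()
// yields the BpBinder created above, and interface_cast<> wraps it in a
// typed proxy. IRemoteService / doSomething() are hypothetical names.
sp<IBinder> binder = data.readStrongBinder();            // BpBinder(handle) underneath
sp<IRemoteService> service = interface_cast<IRemoteService>(binder);
service->doSomething();   // each call marshals a new BC_TRANSACTION through the proxy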
Of course, once RemoteService's object has reached AMS, AMS still needs one more IPC round to deliver it to the Activity's process. But because what AMS sends this time is the BinderProxy it holds, no Binder -> BinderProxy conversion is triggered; in the end ServiceConnection#onServiceConnected is called, and the proxy for RemoteService's Binder arrives at the client (the Activity's process).
That, in full, is how a Binder is passed and converted across processes.