1 The client initiates the request
Taking MediaPlayerService as the example, the overall flow is largely the same as in binder-5.
From the client's point of view, what it holds is only the server-side proxy object BpXXX.
IMediaPlayerService.cpp
class BpMediaPlayerService: public BpInterface<IMediaPlayerService>
{
virtual sp<IMediaPlayer> create(
const sp<IMediaPlayerClient>& client, audio_session_t audioSessionId,
const AttributionSourceState& attributionSource) {
Parcel data, reply;
data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
data.writeStrongBinder(IInterface::asBinder(client));
data.writeInt32(audioSessionId);
data.writeParcelable(attributionSource);
remote()->transact(CREATE, data, &reply);
return interface_cast<IMediaPlayer>(reply.readStrongBinder());
}
};
First two Parcel objects, data and reply, are constructed. writeInterfaceToken writes the interface descriptor into data; asBinder converts the IMediaPlayerClient into an IBinder object, which is also written into data, followed by audioSessionId and attributionSource. Once the data is assembled, remote()->transact is executed to transfer it, with code set to CREATE.
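Before following transact downward, it helps to picture what data now holds. The sketch below is a minimal, self-contained stand-in (MockParcel and FlatBinderObject are hypothetical names, not the real android::Parcel) illustrating the two things the driver relies on later: a flat byte buffer, plus an offsets array recording where each flattened binder object sits inside that buffer.
#include <cstdint>
#include <string>
#include <vector>

// Hypothetical mock mirroring the shape of the kernel's flat_binder_object.
struct FlatBinderObject {
    uint32_t  type;    // e.g. BINDER_TYPE_BINDER for a local object
    uint32_t  flags;
    uintptr_t binder;  // weak-ref pointer for local objects
    uintptr_t cookie;  // BBinder address for local objects
};

class MockParcel {
public:
    void writeInterfaceToken(const std::string& iface) {
        writeInt32(static_cast<int32_t>(iface.size()));
        append(iface.data(), iface.size());
    }
    void writeInt32(int32_t v) { append(&v, sizeof(v)); }
    // writeStrongBinder flattens the IBinder into one of these records and,
    // crucially, records its offset so the driver can find and translate it.
    void writeFlatObject(const FlatBinderObject& fbo) {
        mObjectOffsets.push_back(mData.size());
        append(&fbo, sizeof(fbo));
    }
    size_t dataSize() const { return mData.size(); }
    size_t objectCount() const { return mObjectOffsets.size(); }

private:
    void append(const void* p, size_t n) {
        const uint8_t* b = static_cast<const uint8_t*>(p);
        mData.insert(mData.end(), b, b + n);
    }
    std::vector<uint8_t> mData;          // what tr.data.ptr.buffer will point at
    std::vector<size_t>  mObjectOffsets; // what tr.data.ptr.offsets will point at
};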
status_t BpBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
status = IPCThreadState::self()->transact(binderHandle(), code, data, reply, flags);
}
Here the binderHandle() call obtains the handle of mediaplayerservice, and the data transfer is carried out through IPCThreadState:
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
//write the data into IPCThreadState's mOut
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
if ((flags & TF_ONE_WAY) == 0) {
//flags defaults to 0 and TF_ONE_WAY is 1, so this is a synchronous (two-way) call
#if 0
if (code == 4) { // relayout
ALOGI(">>>>>> CALLING transaction 4");
} else {
ALOGI(">>>>>> CALLING transaction %d", code);
}
#endif
if (reply) {
//reply is non-null
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
}
Here the data is first wrapped into a binder_transaction_data and written into mOut; then the flags are checked to confirm this is a synchronous call, and since reply is non-null, waitForResponse is executed:
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
uint32_t cmd;
int32_t err;
//infinite loop
while (1) {
//interact with the driver
if ((err=talkWithDriver()) < NO_ERROR) break;
}
talkWithDriver is executed to interact with the driver:
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
//check that the binder driver fd is valid
if (mProcess->mDriverFD <= 0) {
return -EBADF;
}
binder_write_read bwr;
// Is the read buffer empty?
//check whether the data in the read buffer has been fully consumed
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
// We don't want to write anything if we are still reading
// from data left in the input buffer and the caller
// has requested to read the next data.
//once the read buffer has been drained, writing is allowed
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
//if there is still data to read, write_size is set to 0; otherwise it is the length of the data held in mOut
bwr.write_size = outAvail;
//bwr.write_buffer points at the data held in mOut
bwr.write_buffer = (uintptr_t)mOut.data();
// This is what we'll read.
if (doReceive && needRead) {
//when a read is requested, size bwr.read_size from mIn's capacity
bwr.read_size = mIn.dataCapacity();
//bwr.read_buffer points at the space in mIn that receives the data
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
//nothing to read from the driver, so set both to 0
bwr.read_size = 0;
bwr.read_buffer = 0;
}
//nothing to read from or write to the driver: return immediately
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
IF_LOG_COMMANDS() {
alog << "About to read/write, write size = " << mOut.dataSize() << endl;
}
#if defined(__ANDROID__)
//cross into the driver side for the actual interaction
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
#else
err = INVALID_OPERATION;
#endif
//after each interaction with the driver, re-check the driver fd
if (mProcess->mDriverFD <= 0) {
err = -EBADF;
}
IF_LOG_COMMANDS() {
alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
}
} while (err == -EINTR);
if (err >= NO_ERROR) {
//write_consumed is the amount of data successfully written to the driver
if (bwr.write_consumed > 0) {
if (bwr.write_consumed < mOut.dataSize())
//remove from mOut the data that was written to the driver
mOut.remove(0, bwr.write_consumed);
else {
mOut.setDataSize(0);
processPostWriteDerefs();
}
}
//read_consumed is the amount of data successfully read back from the driver
if (bwr.read_consumed > 0) {
//set mIn's data size and reset its read position
mIn.setDataSize(bwr.read_consumed);
mIn.setDataPosition(0);
}
Here a binder_write_read struct is created and bwr.read_size, bwr.read_buffer, bwr.write_size and bwr.write_buffer are set up. Because this call is a request for the remote service to execute the create method, write_size is non-zero, so a write is performed: ioctl is executed to interact with the driver.
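For reference, the contract of the struct handed to ioctl is worth spelling out. This sketch mirrors the shape of the uapi binder_write_read (the _sketch suffix marks it as an illustration): each direction has a size, a user-space buffer address, and a consumed counter the driver fills in.
#include <cstdint>

struct binder_write_read_sketch {
    uint64_t write_size;      // bytes the caller placed in write_buffer (mOut)
    uint64_t write_consumed;  // filled by the driver: how much it parsed
    uint64_t write_buffer;    // user-space address of the command stream
    uint64_t read_size;       // capacity of read_buffer (mIn)
    uint64_t read_consumed;   // filled by the driver: how much it wrote back
    uint64_t read_buffer;     // user-space address receiving return commands
};

// After the ioctl returns, the caller-side bookkeeping seen above is just:
inline bool fullyWritten(const binder_write_read_sketch& bwr) {
    return bwr.write_consumed == bwr.write_size;  // else mOut.remove(0, consumed)
}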
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
thread = binder_get_thread(proc);
switch (cmd) {
case BINDER_WRITE_READ:
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
if (ret)
goto err;
break;
}
}
The case of interest here is cmd == BINDER_WRITE_READ, which executes binder_ioctl_write_read:
static int binder_ioctl_write_read(struct file *filp,
unsigned int cmd, unsigned long arg,
struct binder_thread *thread)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
//copy the user-space bwr descriptor into the kernel (elided in the original excerpt)
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
if (bwr.write_size > 0) {
ret = binder_thread_write(proc, thread,
bwr.write_buffer,
bwr.write_size,
&bwr.write_consumed);
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, bwr.read_buffer,
bwr.read_size,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
}
Since bwr.write_size > 0 here, binder_thread_write is executed:
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
switch (cmd) {
case BC_TRANSACTION:
case BC_REPLY: {
//build a binder_transaction_data struct in the kernel
struct binder_transaction_data tr;
//copy the user-space data into kernel space
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
//on this request path cmd == BC_TRANSACTION, so (cmd == BC_REPLY) is false
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
break;
}
}
}
Here the cmd == BC_TRANSACTION case is handled: a binder_transaction_data struct is created in the kernel, the user-space tr is copied into it, and binder_transaction is executed; on this path cmd == BC_REPLY evaluates to false.
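The framing that binder_thread_write walks is simple enough to replay in user-space terms. A minimal sketch (parseCommandStream and PayloadSketch are hypothetical names): each iteration pulls a 32-bit command, then the fixed-size payload that follows it, mirroring the get_user/copy_from_user pair above.
#include <cstdint>
#include <cstring>
#include <vector>

// Stand-in for binder_transaction_data; only the field we dispatch on.
struct PayloadSketch { uint32_t code; };

// [u32 cmd][payload][u32 cmd][payload]... until the whole buffer is consumed.
void parseCommandStream(const std::vector<uint8_t>& buf) {
    size_t consumed = 0;
    while (consumed + sizeof(uint32_t) <= buf.size()) {
        uint32_t cmd;
        std::memcpy(&cmd, buf.data() + consumed, sizeof(cmd));  // get_user()
        consumed += sizeof(cmd);
        if (consumed + sizeof(PayloadSketch) > buf.size()) break;
        PayloadSketch tr;
        std::memcpy(&tr, buf.data() + consumed, sizeof(tr));    // copy_from_user()
        consumed += sizeof(tr);
        // switch (cmd): BC_TRANSACTION / BC_REPLY -> binder_transaction(...)
    }
}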
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end;
binder_size_t off_min;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error;
if (reply) {
//the reply branch is elided here; reply is false on this path
} else {
if (tr->target.handle) {
struct binder_ref *ref;
ref = binder_get_ref(proc, tr->target.handle);
if (ref == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_invalid_target_handle;
}
target_node = ref->node;
} else {
target_node = binder_context_mgr_node;
if (target_node == NULL) {
return_error = BR_DEAD_REPLY;
goto err_no_context_mgr_node;
}
}
e->to_node = target_node->debug_id;
target_proc = target_node->proc;
if (target_proc == NULL) {
return_error = BR_DEAD_REPLY;
goto err_dead_binder;
}
if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
goto err_invalid_target_handle;
}
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, tmp->debug_id,
tmp->to_proc ? tmp->to_proc->pid : 0,
tmp->to_thread ?
tmp->to_thread->pid : 0);
return_error = BR_FAILED_REPLY;
goto err_bad_call_stack;
}
while (tmp) {
if (tmp->from && tmp->from->proc == target_proc)
target_thread = tmp->from;
tmp = tmp->from_parent;
}
}
}
if (target_thread) {
//the target thread exists, so use the target thread's todo and wait queues
e->to_thread = target_thread->pid;
target_list = &target_thread->todo;
target_wait = &target_thread->wait;
} else {
target_list = &target_proc->todo;
target_wait = &target_proc->wait;
}
//allocate memory for the new transaction
t = kzalloc(sizeof(*t), GFP_KERNEL);
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
trace_binder_transaction(reply, t, target_node);
//carve out an area in the target process's kernel space; the user-space data is copied into this target-side kernel buffer
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
if (t->buffer == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
trace_binder_transaction_alloc_buf(t->buffer);
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
offp = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
//copy the user-space data buffer and the offsets array into the target process's kernel space
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
if (copy_from_user(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
off_end = (void *)offp + tr->offsets_size;
off_min = 0;
//walk the offsets array
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
if (*offp > t->buffer->data_size - sizeof(*fp) ||
*offp < off_min ||
t->buffer->data_size < sizeof(*fp) ||
!IS_ALIGNED(*offp, sizeof(u32))) {
binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
proc->pid, thread->pid, (u64)*offp,
(u64)off_min,
(u64)(t->buffer->data_size -
sizeof(*fp)));
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
off_min = *offp + sizeof(struct flat_binder_object);
switch (fp->type) {
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
//as noted earlier, the flat_binder_object carried here has type BINDER_TYPE_HANDLE;
//use the handle to find the binder_ref created when the service was registered
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (ref == NULL) {
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
proc->pid,
thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_failed;
}
if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_failed;
}
if (ref->node->proc == target_proc) {
//case 1: the process owning the binder argument is the same as the target process
if (fp->type == BINDER_TYPE_HANDLE)
fp->type = BINDER_TYPE_BINDER;
else
fp->type = BINDER_TYPE_WEAK_BINDER;
fp->binder = ref->node->ptr;
fp->cookie = ref->node->cookie;
binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
trace_binder_transaction_ref_to_node(t, ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> node %d u%016llx\n",
ref->debug_id, ref->desc, ref->node->debug_id,
(u64)ref->node->ptr);
} else {
//case 2: not in the same process
struct binder_ref *new_ref;
//a new binder_ref node has to be created in the target process
new_ref = binder_get_ref_for_node(target_proc, ref->node);
if (new_ref == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
//the desc value stored in the newly created binder_ref is assigned to handle
fp->handle = new_ref->desc;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
trace_binder_transaction_ref_to_ref(t, ref,
new_ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> ref %d desc %d (node %d)\n",
ref->debug_id, ref->desc, new_ref->debug_id,
new_ref->desc, ref->node->debug_id);
}
} break;
}
if (reply) {
//reply handling elided (reply is false here)
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
}
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)
wake_up_interruptible(target_wait);
}
Because reply is false, the else branch runs. Since tr->target.handle (the handle of the target service) is non-zero, the handle is used to find the corresponding binder_ref among the source process's binder_refs; the binder_ref yields the binder_node, and the binder_node yields the binder_proc:
ref = binder_get_ref(proc, tr->target.handle);
target_node = ref->node;
target_proc = target_node->proc;
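Condensed to its essence, this routing step is a table lookup chain. A hedged sketch with mock types (BinderProcS, BinderRefS, BinderNodeS are illustrative stand-ins; the kernel uses rb-trees rather than std::map):
#include <cstdint>
#include <map>

struct BinderProcS;                        // mock of binder_proc
struct BinderNodeS { BinderProcS* proc; }; // mock of binder_node
struct BinderRefS  { BinderNodeS* node; uint32_t desc; };

struct BinderProcS {
    std::map<uint32_t, BinderRefS*> refs_by_desc;  // an rb-tree in the kernel
};

// Returns the process that owns the node behind 'handle', or nullptr.
BinderProcS* resolveTarget(BinderProcS* caller, uint32_t handle) {
    auto it = caller->refs_by_desc.find(handle);   // binder_get_ref()
    if (it == caller->refs_by_desc.end()) return nullptr;
    return it->second->node->proc;                 // target_node->proc
}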
When the transfer is synchronous and the source thread has a transaction stack, the stack is used to try to locate a waiting thread in the target process (target_thread).
A new transaction t and a binder_work tcomplete are created; physical memory is allocated in the target process's kernel space to hold the data, and the source process's user-space data buffer and offsets array are copied into it.
Next the offsets array is walked to obtain the flat_binder_object entries; since this call passes an IBinder argument, there is one to find.
The handle stored in the flat_binder_object is then used to obtain the binder_ref, from that the binder_node, and finally the binder_proc, which is compared with target_proc to see whether they are the same process. Here we assume the process owning the argument and the requested service's process are the same, so fp->type is rewritten to BINDER_TYPE_BINDER and the BBinder's address is filled in.
Because this call is synchronous, the new transaction's need_reply flag is set to 1 and the transaction is linked to the requesting side's transaction stack:
else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
}
Finally the new transaction t is given type BINDER_WORK_TRANSACTION and appended to the target's todo queue, tcomplete is given type BINDER_WORK_TRANSACTION_COMPLETE and appended to the source thread's todo queue, and then the target process is woken up.
The handling of tcomplete is the same as in binder-5 and is not repeated; next we analyze how the target process handles the new transaction.
2 How the target process reads and handles the transaction
First, let's see how the target process reads transactions.
Again taking mediaplayerservice as the example:
int main(int argc __unused, char **argv __unused)
{
signal(SIGPIPE, SIG_IGN);
sp<ProcessState> proc(ProcessState::self());
sp<IServiceManager> sm(defaultServiceManager());
ALOGI("ServiceManager: %p", sm.get());
InitializeIcuOrDie();
MediaPlayerService::instantiate();
ResourceManagerService::instantiate();
registerExtensions();
ProcessState::self()->startThreadPool();
IPCThreadState::self()->joinThreadPool();
}
As shown, during process initialization ProcessState::self()->startThreadPool() is executed:
void ProcessState::startThreadPool()
{
AutoMutex _l(mLock);
if (!mThreadPoolStarted) {
mThreadPoolStarted = true;
spawnPooledThread(true);
}
}
which executes spawnPooledThread:
void ProcessState::spawnPooledThread(bool isMain)
{
if (mThreadPoolStarted) {
String8 name = makeBinderThreadName();
ALOGV("Spawning new pooled thread, name=%s\n", name.string());
sp<Thread> t = new PoolThread(isMain);
t->run(name.string());
}
}
Here a new thread is created and run:
class PoolThread : public Thread
{
public:
explicit PoolThread(bool isMain)
: mIsMain(isMain)
{
}
protected:
virtual bool threadLoop()
{
IPCThreadState::self()->joinThreadPool(mIsMain);
return false;
}
const bool mIsMain;
};
When the thread runs, threadLoop is executed:
void IPCThreadState::joinThreadPool(bool isMain)
{
LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n", (void*)pthread_self(), getpid());
//write BC_ENTER_LOOPER into mOut to tell the driver this thread enters the loop
mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
status_t result;
do {
processPendingDerefs();
// now get the next command to be processed, waiting if necessary
//process the next command read back from the driver
result = getAndExecuteCommand();
//on an unexpected error, exit via abort()
if (result < NO_ERROR && result != TIMED_OUT && result != -ECONNREFUSED && result != -EBADF) {
ALOGE("getAndExecuteCommand(fd=%d) returned unexpected error %d, aborting",
mProcess->mDriverFD, result);
abort();
}
// Let this thread exit the thread pool if it is no longer
// needed and it is not the main process thread.
if(result == TIMED_OUT && !isMain) {
break;
}
} while (result != -ECONNREFUSED && result != -EBADF);
LOG_THREADPOOL("**** THREAD %p (PID %d) IS LEAVING THE THREAD POOL err=%d\n",
(void*)pthread_self(), getpid(), result);
//after leaving the loop, write BC_EXIT_LOOPER into mOut to tell the driver the thread exits the loop
mOut.writeInt32(BC_EXIT_LOOPER);
talkWithDriver(false);
}
First BC_ENTER_LOOPER is written into mOut to tell the driver the thread has entered the looping state; the thread then loops executing getAndExecuteCommand, and when an error occurs it writes BC_EXIT_LOOPER into mOut to tell the driver it is leaving the loop.
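Stripped of logging and error handling, the looper protocol is a three-step contract. A miniature sketch (the _S command values are placeholders, not the real constants):
#include <cstdint>
#include <functional>
#include <vector>

enum : uint32_t { BC_ENTER_LOOPER_S, BC_REGISTER_LOOPER_S, BC_EXIT_LOOPER_S };

// Announce entry, loop on getAndExecuteCommand (which blocks in the driver
// when idle), and announce exit when the loop ends.
void joinThreadPoolSketch(std::vector<uint32_t>& mOut, bool isMain,
                          const std::function<bool()>& getAndExecuteCommand) {
    // Main threads announce ENTER_LOOPER; driver-spawned ones REGISTER_LOOPER.
    mOut.push_back(isMain ? BC_ENTER_LOOPER_S : BC_REGISTER_LOOPER_S);
    while (getAndExecuteCommand()) {
        // each iteration: talkWithDriver() then executeCommand(cmd)
    }
    mOut.push_back(BC_EXIT_LOOPER_S);  // tell the driver this thread is leaving
}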
status_t IPCThreadState::getAndExecuteCommand()
{
status_t result;
int32_t cmd;
//interact with the driver and read data
result = talkWithDriver();
if (result >= NO_ERROR) {
//no error
//how much data was read into mIn
size_t IN = mIn.dataAvail();
//if too little data was read, return right away
if (IN < sizeof(int32_t)) return result;
//read the command
cmd = mIn.readInt32();
IF_LOG_COMMANDS() {
alog << "Processing top-level Command: "
<< getReturnString(cmd) << endl;
}
pthread_mutex_lock(&mProcess->mThreadCountLock);
mProcess->mExecutingThreadsCount++;
if (mProcess->mExecutingThreadsCount >= mProcess->mMaxThreads &&
mProcess->mStarvationStartTimeMs == 0) {
mProcess->mStarvationStartTimeMs = uptimeMillis();
}
pthread_mutex_unlock(&mProcess->mThreadCountLock);
//parse and handle the command
result = executeCommand(cmd);
pthread_mutex_lock(&mProcess->mThreadCountLock);
mProcess->mExecutingThreadsCount--;
if (mProcess->mExecutingThreadsCount < mProcess->mMaxThreads &&
mProcess->mStarvationStartTimeMs != 0) {
int64_t starvationTimeMs = uptimeMillis() - mProcess->mStarvationStartTimeMs;
if (starvationTimeMs > 100) {
ALOGE("binder thread pool (%zu threads) starved for %" PRId64 " ms",
mProcess->mMaxThreads, starvationTimeMs);
}
mProcess->mStarvationStartTimeMs = 0;
}
pthread_cond_broadcast(&mProcess->mThreadCountDecrement);
pthread_mutex_unlock(&mProcess->mThreadCountLock);
}
return result;
}
talkWithDriver is executed to interact with the driver:
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
//this is the same function already quoted in full in section 1, so only the
//part relevant here is repeated: an idle binder thread has nothing in mOut,
//so bwr.write_size ends up 0, while doReceive && needRead holds, so:
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
//...
//ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) then enters the driver
}
Here bwr.read_size > 0 is established, and ioctl is executed to interact with the driver and read data:
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
switch (cmd) {
case BINDER_WRITE_READ:
ret = binder_ioctl_write_read(filp, cmd, arg, thread);
if (ret)
goto err;
break;
}
The BINDER_WRITE_READ command is parsed and binder_ioctl_write_read is executed:
static int binder_ioctl_write_read(struct file *filp,
unsigned int cmd, unsigned long arg,
struct binder_thread *thread)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
//bwr is filled from user space via copy_from_user, as shown earlier
if (bwr.read_size > 0) {
ret = binder_thread_read(proc, thread, bwr.read_buffer,
bwr.read_size,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
if (!list_empty(&proc->todo))
wake_up_interruptible(&proc->wait);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
}
With bwr.read_size > 0, binder_thread_read is executed:
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
int ret = 0;
int wait_for_proc_work;
if (*consumed == 0) {
if (put_user(BR_NOOP, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
}
retry:
wait_for_proc_work = thread->transaction_stack == NULL &&
list_empty(&thread->todo);
if (wait_for_proc_work) {
if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))) {
binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
proc->pid, thread->pid, thread->looper);
wait_event_interruptible(binder_user_error_wait,
binder_stop_on_user_error < 2);
}
When the remote service has just started, the thread's todo queue holds no binder_work, so the thread blocks here.
Now that we know how the remote service process reads driver data, the next step is how the remote process is woken to handle it.
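The blocking behavior of wait_event_interruptible plus wake_up_interruptible maps closely onto a condition variable guarding the todo list. An analogy sketch in user-space C++ (TodoQueue is an illustrative stand-in, not driver code):
#include <condition_variable>
#include <deque>
#include <mutex>

struct TodoQueue {
    std::mutex m;
    std::condition_variable wait;  // plays the role of thread->wait
    std::deque<int> todo;          // plays the role of thread->todo

    // core of binder_thread_read: block until work arrives, then take it
    int take() {
        std::unique_lock<std::mutex> lk(m);
        wait.wait(lk, [this] { return !todo.empty(); });  // wait_event_interruptible
        int work = todo.front();
        todo.pop_front();
        return work;
    }

    // what binder_transaction does at the end: queue work, then wake the target
    void post(int work) {
        {
            std::lock_guard<std::mutex> lk(m);
            todo.push_back(work);  // list_add_tail(&t->work.entry, target_list)
        }
        wait.notify_one();         // wake_up_interruptible(target_wait)
    }
};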
3 Waking the remote process to handle driver data
if (target_wait)
wake_up_interruptible(target_wait);
}
When the remote service process ran binder_thread_read, its thread->todo queue was empty, so wait_event_interruptible blocked the thread. After the source process creates the new transaction, it appends it to the target thread's todo queue and executes wake_up_interruptible to wake the target thread:
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
//check the thread's and the process's todo queues and take out a binder_work
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;
if (!list_empty(&thread->todo)) {
w = list_first_entry(&thread->todo, struct binder_work,
entry);
} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
w = list_first_entry(&proc->todo, struct binder_work,
entry);
} else {
/* no data added */
if (ptr - buffer == 4 &&
!(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
goto retry;
break;
}
if (end - ptr < sizeof(tr) + 4)
break;
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
t = container_of(w, struct binder_transaction, work);
} break;
//(the real code sets cmd = BR_TRANSACTION here when t->buffer->target_node is set; elided)
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (binder_uintptr_t)(
(uintptr_t)t->buffer->data +
proc->user_buffer_offset);
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
list_del(&t->work.entry);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
}
}
Here BR_TRANSACTION and the buffer data are copied up to user space and the transaction is deleted from the thread's todo queue; because this call is synchronous, the transaction is pushed onto the handling thread's transaction stack with its target thread recorded.
After binder_thread_read has read the driver data, control returns all the way back to IPCThreadState::talkWithDriver:
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
//read_consumed is the amount of data successfully read back from the driver
if (bwr.read_consumed > 0) {
//set mIn's data size and reset its read position
mIn.setDataSize(bwr.read_consumed);
mIn.setDataPosition(0);
}
}
mIn's data size is set according to the amount actually read.
Control then returns to IPCThreadState::getAndExecuteCommand:
status_t IPCThreadState::getAndExecuteCommand()
{
if (result >= NO_ERROR) {
//no error
//how much data was read into mIn
size_t IN = mIn.dataAvail();
//if too little data was read, return right away
if (IN < sizeof(int32_t)) return result;
//read the command
cmd = mIn.readInt32();
//parse and handle the command
result = executeCommand(cmd);
}
The driver command BR_TRANSACTION is read from mIn, and executeCommand is executed to handle it:
status_t IPCThreadState::executeCommand(int32_t cmd)
{
BBinder* obj;
RefBase::weakref_type* refs;
status_t result = NO_ERROR;
switch ((uint32_t)cmd) {
case BR_TRANSACTION:
{
binder_transaction_data tr;
//read the data from mIn
result = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(result == NO_ERROR,
"Not enough command data for brTRANSACTION");
if (result != NO_ERROR) break;
Parcel buffer;
buffer.ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);
const pid_t origPid = mCallingPid;
const uid_t origUid = mCallingUid;
const int32_t origStrictModePolicy = mStrictModePolicy;
const int32_t origTransactionBinderFlags = mLastTransactionBinderFlags;
mCallingPid = tr.sender_pid;
mCallingUid = tr.sender_euid;
mLastTransactionBinderFlags = tr.flags;
if (tr.target.ptr) {
// We only have a weak reference on the target object, so we must first try to
// safely acquire a strong reference before doing anything else with it.
if (reinterpret_cast<RefBase::weakref_type*>(
tr.target.ptr)->attemptIncStrong(this)) {
error = reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer,
&reply, tr.flags);
reinterpret_cast<BBinder*>(tr.cookie)->decStrong(this);
} else {
error = UNKNOWN_TRANSACTION;
}
}
if ((tr.flags & TF_ONE_WAY) == 0) {
LOG_ONEWAY("Sending reply to %d!", mCallingPid);
if (error < NO_ERROR) reply.setError(error);
sendReply(reply, 0);
}
break;
}
First the binder_transaction_data copied up by the driver is read from mIn, and a Parcel named buffer is pointed at the binder_transaction_data's data area; tr.cookie yields the address of the remote service's BBinder object, whose transact is executed to process the data.
After processing completes, if the call is synchronous, sendReply is executed:
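The interesting property of ipcSetDataReference is that it is zero-copy: the Parcel does not duplicate the payload, it merely points into the buffer the driver mapped into this process and remembers a release callback (freeBuffer, which ultimately returns the buffer to the driver). A minimal sketch of that ownership pattern (ParcelView is a hypothetical name):
#include <cstddef>
#include <cstdint>
#include <functional>
#include <utility>

class ParcelView {
public:
    using ReleaseFn = std::function<void(const uint8_t*, size_t)>;
    // Adopt a view over driver-owned memory instead of copying it.
    void ipcSetDataReference(const uint8_t* data, size_t size, ReleaseFn release) {
        mData = data;
        mSize = size;
        mRelease = std::move(release);
    }
    // On destruction the callback runs, mirroring freeBuffer/BC_FREE_BUFFER.
    ~ParcelView() { if (mRelease) mRelease(mData, mSize); }

private:
    const uint8_t* mData = nullptr;
    size_t mSize = 0;
    ReleaseFn mRelease;
};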
status_t BBinder::transact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
data.setDataPosition(0);
if (reply != nullptr && (flags & FLAG_CLEAR_BUF)) {
reply->markSensitive();
}
status_t err = NO_ERROR;
switch (code) {
case PING_TRANSACTION:
err = pingBinder();
break;
case EXTENSION_TRANSACTION:
CHECK(reply != nullptr);
err = reply->writeStrongBinder(getExtension());
break;
case DEBUG_PID_TRANSACTION:
CHECK(reply != nullptr);
err = reply->writeInt32(getDebugPid());
break;
case SET_RPC_CLIENT_TRANSACTION: {
err = setRpcClientDebug(data);
break;
}
default:
err = onTransact(code, data, reply, flags);
break;
}
// In case this is being transacted on in the same process.
if (reply != nullptr) {
reply->setDataPosition(0);
if (reply->dataSize() > LOG_REPLIES_OVER_SIZE) {
ALOGW("Large reply transaction of %zu bytes, interface descriptor %s, code %d",
reply->dataSize(), String8(getInterfaceDescriptor()).c_str(), code);
}
}
return err;
}
Here the code command is handled. As seen above, the code sent by the proxy side is CREATE:
remote()->transact(CREATE, data, &reply);
BBinder::transact has no case for this code, so it falls through to default and executes onTransact.
BBinder does not implement onTransact itself; the implementation lives in the process's Bn class:
class BnMediaPlayerService: public BnInterface<IMediaPlayerService>
{
public:
virtual status_t onTransact( uint32_t code,
const Parcel& data,
Parcel* reply,
uint32_t flags = 0);
};
} // namespace android
status_t BnMediaPlayerService::onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
switch (code) {
case CREATE: {
CHECK_INTERFACE(IMediaPlayerService, data, reply);
sp<IMediaPlayerClient> client =
interface_cast<IMediaPlayerClient>(data.readStrongBinder());
audio_session_t audioSessionId = (audio_session_t) data.readInt32();
AttributionSourceState attributionSource;
status_t status = data.readParcelable(&attributionSource);
if (status != NO_ERROR) {
return status;
}
sp<IMediaPlayer> player = create(client, audioSessionId, attributionSource);
reply->writeStrongBinder(IInterface::asBinder(player));
return NO_ERROR;
} break;
}
Here the parameters passed by the sender are read out and create is executed to build the IMediaPlayer object; IInterface::asBinder(player) then obtains the binder object, which is written into reply for the return transfer.
At this point we have seen ordinary inter-process communication: how the requesting side initiates it, and how the receiving side obtains, parses and handles the data.
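For completeness, this is roughly how a client reaches the path above in the first place: look up the service by name, then wrap the returned IBinder in the proxy class. A sketch assuming the standard libbinder entry points (error handling omitted; "media.player" is the name MediaPlayerService registers under):
#include <binder/IServiceManager.h>
#include <media/IMediaPlayerService.h>
#include <utils/String16.h>

using namespace android;

sp<IMediaPlayerService> getMediaPlayerService() {
    sp<IServiceManager> sm = defaultServiceManager();
    // blocks/retries until the service has been registered
    sp<IBinder> binder = sm->getService(String16("media.player"));
    // wraps the BpBinder handle in a BpMediaPlayerService proxy
    return interface_cast<IMediaPlayerService>(binder);
}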
4 How the receiving side returns the result to the requester
The handling here matches binder-5.
Continuing the analysis above: after executeCommand finishes processing the data sent by the requester, and because this call is synchronous, sendReply is executed:
status_t IPCThreadState::sendReply(const Parcel& reply, uint32_t flags)
{
status_t err;
status_t statusBuffer;
err = writeTransactionData(BC_REPLY, flags, -1, 0, reply, &statusBuffer);
if (err < NO_ERROR) return err;
return waitForResponse(NULL, NULL);
}
The reply data is first wrapped into a binder_transaction_data, then waitForResponse is executed:
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
uint32_t cmd;
int32_t err;
//infinite loop
while (1) {
//interact with the driver
if ((err=talkWithDriver()) < NO_ERROR) break;
}
}
talkWithDriver is executed here to start interacting with the driver:
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
//same function as quoted in full in section 1; this time mOut holds the
//BC_REPLY command written by sendReply, so:
bwr.write_size = mOut.dataSize();
bwr.write_buffer = (uintptr_t)mOut.data();
//...
//ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) then enters the driver
}
Because the result has to be returned to the requester, bwr.write_size > 0 and ioctl is executed to interact with the driver.
Next the BINDER_WRITE_READ command is parsed and binder_ioctl_write_read is executed; since bwr.write_size > 0, binder_thread_write runs:
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
switch (cmd) {
case BC_TRANSACTION:
case BC_REPLY: {
//build a binder_transaction_data struct in the kernel
struct binder_transaction_data tr;
//copy the user-space data into kernel space
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
//this time cmd == BC_REPLY, so (cmd == BC_REPLY) is true
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
break;
}
}
}
With cmd == BC_REPLY, the result is copied into the kernel and binder_transaction is executed:
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end;
binder_size_t off_min;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
struct list_head *target_list;
wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error;
e = binder_transaction_log_add(&binder_transaction_log);
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
e->from_proc = proc->pid;
e->from_thread = thread->pid;
e->target_handle = tr->target.handle;
e->data_size = tr->data_size;
e->offsets_size = tr->offsets_size;
if (reply) {
in_reply_to = thread->transaction_stack;//first take the transaction at the top of the server thread's transaction stack
if (in_reply_to == NULL) {
binder_user_error("%d:%d got reply transaction with no transaction stack\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_empty_call_stack;
}
//restore the priority saved when the request arrived
binder_set_nice(in_reply_to->saved_priority);
if (in_reply_to->to_thread != thread) {
binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, in_reply_to->debug_id,
in_reply_to->to_proc ?
in_reply_to->to_proc->pid : 0,
in_reply_to->to_thread ?
in_reply_to->to_thread->pid : 0);
return_error = BR_FAILED_REPLY;
in_reply_to = NULL;
goto err_bad_call_stack;
}
//pop the server thread's transaction stack back to the parent entry
thread->transaction_stack = in_reply_to->to_parent;
//from the popped transaction, get the target thread (the source thread that issued the request)
target_thread = in_reply_to->from;
if (target_thread == NULL) {
return_error = BR_DEAD_REPLY;
goto err_dead_binder;
}
if (target_thread->transaction_stack != in_reply_to) {
binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
proc->pid, thread->pid,
target_thread->transaction_stack ?
target_thread->transaction_stack->debug_id : 0,
in_reply_to->debug_id);
return_error = BR_FAILED_REPLY;
in_reply_to = NULL;
target_thread = NULL;
goto err_dead_binder;
}
//from target_thread, get the corresponding target process
target_proc = target_thread->proc;
}
if (target_thread) {
//the target thread exists, so use the target thread's todo and wait queues
e->to_thread = target_thread->pid;
target_list = &target_thread->todo;
target_wait = &target_thread->wait;
}
t = kzalloc(sizeof(*t), GFP_KERNEL);
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
t->priority = task_nice(current);
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
offp = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
//copy the user-space data buffer and the offsets array into the target process's kernel space
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
if (copy_from_user(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
goto err_copy_data_failed;
}
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
off_min = *offp + sizeof(struct flat_binder_object);
switch (fp->type) {
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
//the flat_binder_object carried in the reply has type BINDER_TYPE_HANDLE;
//use the handle to find the binder_ref created in the replying process
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (ref->node->proc == target_proc) {
//case 1 elided: the object and the target live in the same process
} else {
//case 2: not in the same process
struct binder_ref *new_ref;
//a new binder_ref node has to be created in the target process (the requesting process)
new_ref = binder_get_ref_for_node(target_proc, ref->node);
if (new_ref == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
//the desc value stored in the newly created binder_ref is assigned to handle
fp->handle = new_ref->desc;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
trace_binder_transaction_ref_to_ref(t, ref,
new_ref);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> ref %d desc %d (node %d)\n",
ref->debug_id, ref->desc, new_ref->debug_id,
new_ref->desc, ref->node->debug_id);
}
if (reply) {
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction(target_thread, in_reply_to);
}
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
if (target_wait)
wake_up_interruptible(target_wait);
return;
First the transaction that issued the original request is retrieved from the handling thread's transaction_stack and the saved priority is restored. From that request transaction the requesting thread is obtained, and from it the requesting process and binder_node. A new transaction t and a tcomplete are created, t is filled in, and the user-space data of the replying process is copied into the kernel-space buffer allocated for the transaction in the target process. Walking the offsets array yields the flat_binder_object carried in the transfer, and its handle is used to look up a binder_ref node for the target process; because the process returning the result and the target process are not the same process, the driver creates a new binder_ref in the target process for this object and assigns the new ref's desc to fp->handle. Since this is a reply, the earlier binder transaction is popped off the requesting thread's stack, the new transaction is appended to the requesting thread's todo queue, and tcomplete is appended to the receiving process's thread todo queue. We will not track tcomplete further and instead follow the new transaction: finally wake_up_interruptible wakes the requesting thread.
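The transaction-stack bookkeeping that makes this reply routing work is compact enough to isolate. A hedged sketch with mock types (TransactionS, ThreadS are illustrative): the request pushes onto the caller thread's stack via the from_parent link, and the reply reads from to find the thread to wake, then pops.
// Illustrative mock types; not driver code.
struct ThreadS;

struct TransactionS {
    TransactionS* from_parent = nullptr;  // stack link on the caller side
    ThreadS* from = nullptr;              // thread that issued the request
};

struct ThreadS {
    TransactionS* transaction_stack = nullptr;
};

// BC_TRANSACTION side: remember who asked and push onto the caller's stack.
void pushRequest(ThreadS* caller, TransactionS* t) {
    t->from = caller;
    t->from_parent = caller->transaction_stack;
    caller->transaction_stack = t;
}

// BC_REPLY side: the top of the stack is the request being answered; its
// 'from' is the thread to wake, and the entry is then popped (this mirrors
// what binder_pop_transaction does).
ThreadS* popForReply(TransactionS* in_reply_to) {
    ThreadS* target = in_reply_to->from;
    if (target != nullptr) {
        target->transaction_stack = in_reply_to->from_parent;
    }
    return target;
}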
5 The requester handles the reply data
Earlier, the requesting thread had read the tcomplete data from the driver via talkWithDriver; after handling it, since bwr.read_size > 0, it blocked inside the driver in binder_thread_read. Now that the requesting thread has a new transaction to handle, binder_thread_read unblocks and continues:
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
struct binder_transaction *t = NULL;
if (!list_empty(&thread->todo)) {
w = list_first_entry(&thread->todo, struct binder_work,
entry);
} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
w = list_first_entry(&proc->todo, struct binder_work,
entry);
} else {
/* no data added */
if (ptr - buffer == 4 &&
!(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
goto retry;
break;
}
if (end - ptr < sizeof(tr) + 4)
break;
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
t = container_of(w, struct binder_transaction, work);
} break;
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (binder_uintptr_t)(
(uintptr_t)t->buffer->data +
proc->user_buffer_offset);
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
//(the real code sets cmd = BR_REPLY here, since a reply buffer has no target_node; elided)
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (copy_to_user(ptr, &tr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
//not taken for BR_REPLY (elided)
} else {
t->buffer->transaction = NULL;
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
break;
}
Here a binder_work is taken from the target thread's todo queue and converted back into the transaction; a binder_transaction_data is created in the kernel to wrap the transferred data, and the cmd BR_REPLY together with tr is copied to user space. Because this is BR_REPLY, the transaction is freed in the kernel and removed from the thread's todo queue, meaning the driver's part of this round trip is complete and the data has arrived in the requesting process.
Control then returns to IPCThreadState::waitForResponse:
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
cmd = (uint32_t)mIn.readInt32();
switch (cmd) {
case BR_REPLY:
{
binder_transaction_data tr;
err = mIn.read(&tr, sizeof(tr));
ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
if (err != NO_ERROR) goto finish;
if (reply) {
if ((tr.flags & TF_STATUS_CODE) == 0) {
reply->ipcSetDataReference(
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t),
freeBuffer, this);
} else {
err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
freeBuffer(NULL,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
}
} else {
freeBuffer(NULL,
reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
tr.data_size,
reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
tr.offsets_size/sizeof(binder_size_t), this);
continue;
}
}
goto finish;
}
finish:
if (err != NO_ERROR) {
if (acquireResult) *acquireResult = err;
if (reply) reply->setError(err);
mLastError = err;
}
return err;
}
Here cmd is read from mIn and is BR_REPLY; since reply is non-null, reply is pointed at the data carried in tr, and control returns to the sp<IMediaPlayer> create(...) function:
virtual sp<IMediaPlayer> create(
const sp<IMediaPlayerClient>& client, audio_session_t audioSessionId,
const AttributionSourceState& attributionSource) {
Parcel data, reply;
data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
data.writeStrongBinder(IInterface::asBinder(client));
data.writeInt32(audioSessionId);
data.writeParcelable(attributionSource);
remote()->transact(CREATE, data, &reply);
return interface_cast<IMediaPlayer>(reply.readStrongBinder());
}
Finally reply.readStrongBinder() builds the BpBinder object, and interface_cast produces the BpMediaPlayer object.
This concludes communication between ordinary processes. Most of it matches the process-to-servicemanager interaction in binder-5; the differences lie in how an ordinary process reads and handles driver data, and in how binder_transaction locates the target process.
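As a closing note on that last line: interface_cast is a thin template whose real work is done by the asInterface generated by IMPLEMENT_META_INTERFACE. A sketch of the mechanism (interface_cast_sketch is an illustrative rename of the template in IInterface.h):
#include <binder/IInterface.h>

// For a remote BpBinder, INTERFACE::asInterface finds no local implementation
// via queryLocalInterface, so it constructs the Bp proxy (here, BpMediaPlayer)
// wrapping the handle-bearing IBinder.
template <typename INTERFACE>
android::sp<INTERFACE> interface_cast_sketch(const android::sp<android::IBinder>& obj) {
    return INTERFACE::asInterface(obj);
}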