Why inter-process communication is needed: the kernel address space is shared by all processes, but each process has its own user-space virtual address space (4GB each on 32-bit), which cannot be shared directly. A driver such as binder is therefore needed so that data can take a detour through the kernel.
The Binder framework defines four roles: Server, Client, ServiceManager (SMgr for short) and the Binder driver. Server, Client and SMgr run in user space, while the driver runs in kernel space. The relationship between the four is similar to the Internet: the Server is a web server, the Client is a client terminal, SMgr is the domain name server (DNS), and the driver is the router.
Advantages of Binder
Single copy: one copy_from_user copies the client's data into a buffer that was mmap'd in advance and is also mapped into the server, so the server can read the data directly and the copy_to_user step is avoided.
Security: with traditional IPC the sender fills UID/PID into the packet itself, whereas binder's UID is filled in automatically by the driver; it also supports both named and anonymous Binders, so it is more secure.
The driver only exposes an ioctl interface, not write/read; a single ioctl can write and then read in one operation (see the sketch below).
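To make the last two points concrete, here is a minimal sketch, for illustration only, of how user space talks to the driver (the header path and helper name are assumptions, not code from this article's sources): everything goes through one BINDER_WRITE_READ ioctl on /dev/binder, whose binder_write_read argument carries both an outgoing command buffer and an incoming return buffer, so a single syscall can write and then read.
// Sketch: one ioctl() both sends BC_* commands and receives BR_* returns.
// binder_write_read comes from the binder UAPI header; the path may differ by platform.
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int binder_round_trip(int fd,
                             void *out, size_t out_size,  /* BC_* commands for the driver */
                             void *in,  size_t in_size)   /* space for BR_* returns */
{
    struct binder_write_read bwr;

    bwr.write_buffer   = (uintptr_t)out;
    bwr.write_size     = out_size;
    bwr.write_consumed = 0;
    bwr.read_buffer    = (uintptr_t)in;
    bwr.read_size      = in_size;
    bwr.read_consumed  = 0;

    // The driver first drains write_buffer (binder_thread_write), then fills
    // read_buffer (binder_thread_read): a write followed by a read in one call.
    return ioctl(fd, BINDER_WRITE_READ, &bwr);
}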
ServiceManager(DNS)
ServiceManager startup
// service_manager.c
int main()
{
    struct binder_state *bs;

    bs = binder_open(128*1024); // open the binder driver and mmap a 128KB region
    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    selinux_enabled = is_selinux_enabled();
    sehandle = selinux_android_service_context_handle();
    selinux_status_open(true);

    union selinux_callback cb;
    cb.func_audit = audit_callback;
    selinux_set_callback(SELINUX_CB_AUDIT, cb);
    cb.func_log = selinux_log_callback;
    selinux_set_callback(SELINUX_CB_LOG, cb);

    binder_loop(bs, svcmgr_handler); // enter the loop and wait for other processes to addService / findService ...

    return 0;
}
binder_open opens the /dev/binder driver and maps it into the ServiceManager process:
// binder.c
struct binder_state *binder_open(size_t mapsize) {
    struct binder_state *bs;
    struct binder_version vers; // (the BINDER_VERSION ioctl check is elided in this excerpt)

    bs = malloc(sizeof(*bs));
    bs->fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
    bs->mapsize = mapsize;
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0); // maps only the 128KB requested above, read-only
    return bs;
}
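For completeness, this is roughly what the binder_loop call in main() does, as an abridged sketch of frameworks/native/cmds/servicemanager/binder.c with error handling removed: it tells the driver that this thread enters the looper, then blocks in BINDER_WRITE_READ reads and hands each incoming command to binder_parse, which eventually calls svcmgr_handler.
// Abridged sketch of binder_loop (binder.c)
void binder_loop(struct binder_state *bs, binder_handler func)
{
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;                // register this thread as a looper
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        ioctl(bs->fd, BINDER_WRITE_READ, &bwr);  // blocks until a command arrives

        // dispatch BR_TRANSACTION and friends; func is svcmgr_handler here
        binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
    }
}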
addService
Called when a service is registered; on the client side this is BpServiceManager::addService:
// IServiceManager.cpp
virtual status_t addService(const String16& name, const sp<IBinder>& service, bool allowIsolated)
{
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);
    data.writeStrongBinder(service);
    data.writeInt32(allowIsolated ? 1 : 0);
    status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
    return err == NO_ERROR ? reply.readExceptionCode() : err;
}
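As a usage sketch, a native server process registers itself roughly as follows. The service name and class here are made up for illustration; they are not real AOSP names.
// Hypothetical registration in a server process's main()
#include <binder/Binder.h>
#include <binder/IServiceManager.h>
#include <binder/IPCThreadState.h>
#include <binder/ProcessState.h>

using namespace android;

// Placeholder service: a BBinder with no methods of its own.
class MyDemoService : public BBinder {};

int main()
{
    // "my.demo.service" and MyDemoService are placeholders.
    defaultServiceManager()->addService(String16("my.demo.service"),
                                        new MyDemoService());
    ProcessState::self()->startThreadPool();  // spawn binder threads
    IPCThreadState::self()->joinThreadPool(); // serve incoming transactions
    return 0;
}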
transact on the BpBinder is then invoked:
// BpBinder.cpp
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags); // mHandle is 0 here, i.e. ServiceManager's binder handle; this BpBinder was created in defaultServiceManager() with handle 0
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }
    return DEAD_OBJECT;
}
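Why is mHandle 0 here? A simplified sketch of defaultServiceManager() (abridged; the real function also caches the result and retries while ServiceManager is starting up): it wraps handle 0, which the driver reserves for the context manager, in a BpBinder.
// Abridged sketch of defaultServiceManager() (IServiceManager.cpp)
sp<IServiceManager> defaultServiceManager()
{
    // getContextObject(NULL) boils down to getStrongProxyForHandle(0):
    // a BpBinder whose mHandle is 0, i.e. the ServiceManager.
    return interface_cast<IServiceManager>(
            ProcessState::self()->getContextObject(NULL));
}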
Execution then enters IPCThreadState::transact:
status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    status_t err = data.errorCheck();
    flags |= TF_ACCEPT_FDS;
    if (err == NO_ERROR) {
        // Stage the data in mOut as a binder_transaction_data; it is only sent to the
        // driver later, by talkWithDriver() inside waitForResponse(). Note the
        // BC_TRANSACTION command word: the binder driver parses it to decide what to do.
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }
    if ((flags & TF_ONE_WAY) == 0) {
        if (reply) {
            err = waitForResponse(reply); // write to the driver and block until the reply arrives (a non-oneway binder call is synchronous)
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
    } else {
        err = waitForResponse(NULL, NULL);
    }
    return err;
}
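For contrast, the TF_ONE_WAY branch is reached when a proxy passes IBinder::FLAG_ONEWAY to transact; such a call returns as soon as the driver has queued the command and never waits for a reply. A hypothetical fragment inside some Bp proxy method (the interface token and transaction code are made up for illustration):
// Fire-and-forget: no reply Parcel, FLAG_ONEWAY set.
Parcel data;
data.writeInterfaceToken(String16("my.demo.IDemo"));
data.writeInt32(42);
remote()->transact(NOTIFY_TRANSACTION /* hypothetical code */, data,
                   NULL, IBinder::FLAG_ONEWAY);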
writeTransactionData stages the user data in mOut:
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;

    tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
    tr.target.handle = handle;
    tr.code = code;
    tr.flags = binderFlags;
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;

    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    }

    // Important: the command word goes in first, followed by the payload descriptor.
    mOut.writeInt32(cmd);        // cmd tells the driver what to do; here it is BC_TRANSACTION
    mOut.write(&tr, sizeof(tr)); // this is what the driver later reads from bwr.write_buffer
    return NO_ERROR;
}
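For reference, this is the (abridged) layout of binder_transaction_data from the binder UAPI header; it is exactly this struct, preceded by the 32-bit cmd word, that ends up in mOut:
// Abridged from the binder UAPI header (linux/android/binder.h)
struct binder_transaction_data {
    union {
        __u32            handle;   // target: handle to a remote binder (used here)
        binder_uintptr_t ptr;      // target: local binder object
    } target;
    binder_uintptr_t     cookie;   // target-object cookie
    __u32                code;     // transaction code, e.g. ADD_SERVICE_TRANSACTION
    __u32                flags;    // TF_ONE_WAY, TF_ACCEPT_FDS, ...

    pid_t                sender_pid;   // filled in by the driver, not the sender
    uid_t                sender_euid;  // filled in by the driver, not the sender

    binder_size_t        data_size;    // size of the Parcel data
    binder_size_t        offsets_size; // size of the object-offset array
    union {
        struct {
            binder_uintptr_t buffer;   // pointer to the Parcel data
            binder_uintptr_t offsets;  // offsets of flat_binder_object entries
        } ptr;
        __u8 buf[8];
    } data;
};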
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult) {
    uint32_t cmd;
    int32_t err;

    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break; // write the data and read the reply via binder's ioctl
        err = mIn.errorCheck();
        if (mIn.dataAvail() == 0) continue;

        cmd = (uint32_t)mIn.readInt32();
        switch (cmd) { // dispatch on the reply type
        case BR_TRANSACTION_COMPLETE:
            if (!reply && !acquireResult) goto finish;
            break;
        case BR_DEAD_REPLY:
            err = DEAD_OBJECT;
            goto finish;
        case BR_FAILED_REPLY:
            err = FAILED_TRANSACTION;
            goto finish;
        case BR_ACQUIRE_RESULT:
            {
                ALOG_ASSERT(acquireResult != NULL, "Unexpected brACQUIRE_RESULT");
                const int32_t result = mIn.readInt32();
                if (!acquireResult) continue;
                *acquireResult = result ? NO_ERROR : INVALID_OPERATION;
            }
            goto finish;
        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;

                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t),
                            freeBuffer, this);
                    } else {
                        err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
                        freeBuffer(NULL,
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t), this);
                    }
                } else {
                    freeBuffer(NULL,
                        reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                        tr.data_size,
                        reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                        tr.offsets_size/sizeof(binder_size_t), this);
                    continue;
                }
            }
            goto finish;
        default:
            err = executeCommand(cmd);
            if (err != NO_ERROR) goto finish;
            break;
        }
    }

finish:
    return err;
}
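Putting the two halves together: for a synchronous (non-oneway) transaction, the command stream this thread sees is typically the following.
    mOut (written to the driver):  BC_TRANSACTION            + binder_transaction_data
    mIn  (read back):              BR_TRANSACTION_COMPLETE                         // write side accepted
                                   BR_REPLY                  + binder_transaction_data  // server's reply, handled above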
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    binder_write_read bwr;

    // Is the read buffer empty?
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;

    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data(); // this is the bwr.write_buffer the driver sees

    // This is what we'll read.
    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }

    // Return immediately if there is nothing to do.
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;

    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
#if defined(__ANDROID__)
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
#else
        err = INVALID_OPERATION;
#endif
    } while (err == -EINTR);
    // (the consumed-byte bookkeeping and the return value are sketched below)
}
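After the ioctl returns, the part elided above uses the two *_consumed fields for bookkeeping. A simplified sketch, assuming the driver consumed the whole write buffer (the normal case; the real code handles partial consumption too):
    // Simplified sketch of the tail of talkWithDriver()
    if (err >= NO_ERROR) {
        if (bwr.write_consumed > 0)
            mOut.setDataSize(0);                 // commands were taken by the driver
        if (bwr.read_consumed > 0) {
            mIn.setDataSize(bwr.read_consumed);  // expose the BR_* stream to waitForResponse
            mIn.setDataPosition(0);
        }
        return NO_ERROR;
    }
    return err;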
Control now passes into the driver. Because there is data to write, binder_thread_write runs (binder_thread_read handles the read half):
static int binder_thread_write(struct binder_proc *proc,
                               struct binder_thread *thread,
                               binder_uintptr_t binder_buffer, size_t size,
                               binder_size_t *consumed) {
    uint32_t cmd;
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    while (ptr < end && thread->return_error == BR_OK) {
        if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr)) // read the cmd that user space put into mOut, here BC_TRANSACTION
            return -EFAULT;
        ptr += sizeof(uint32_t);
        trace_binder_command(cmd);
        if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
            binder_stats.bc[_IOC_NR(cmd)]++;
            proc->stats.bc[_IOC_NR(cmd)]++;
            thread->stats.bc[_IOC_NR(cmd)]++;
        }
        switch (cmd) {
        /* ... other commands elided ... */
        case BC_TRANSACTION: // matches the cmd written by writeTransactionData in user space
        case BC_REPLY: {
            struct binder_transaction_data tr;

            // Copies only the binder_transaction_data descriptor that was staged in mOut.
            // The actual "one copy" of the Parcel payload into the target's mmap'd buffer
            // happens later, inside binder_transaction().
            if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr)))
                return -EFAULT;
            ptr += sizeof(tr);
            binder_transaction(proc, thread, &tr, cmd == BC_REPLY); // start the transaction
            break;
        }
        }
        *consumed = ptr - buffer;
    }
    return 0;
}
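The payload copy itself, the single copy that the Advantages section credits binder with, happens in binder_transaction(), which this excerpt stops short of. An abridged sketch of the relevant lines, based on older kernel sources (helper and field names vary across versions, so treat this as an assumption-laden illustration):
// Abridged sketch from binder_transaction() (drivers/android/binder.c, older versions)
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
                             tr->offsets_size,
                             !reply && (t->flags & TF_ONE_WAY));
/* ... */
// The single copy: user-space Parcel data into a buffer mapped into the target process.
if (copy_from_user(t->buffer->data,
                   (const void __user *)(uintptr_t)tr->data.ptr.buffer,
                   tr->data_size))
    return_error = BR_FAILED_REPLY;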
To be continued.
References
Android Binder 設計與實現 - 設計篇 (Android Binder: Design and Implementation, design part; a very thorough explanation)