Binder 作爲 Android 系統的主要進程間通信方式,在驅動層一般不會有實際的開發需求,我們主要是瞭解 binder 在驅動層的運行原理。正如要掌握 Linux 驅動開發就必須理解 Linux 驅動模型一樣,要想理解 Android 系統,就要深入瞭解 binder 機制;這樣以後學習 Android 纔會胸有成竹。
Binder驅動註冊
binder代碼文件:drivers/android/binder.c
/*
 * Driver entry point (excerpt): registers binder as a misc character
 * device.  The "......" lines mark code elided from the original source;
 * only the misc_register() call is shown here.
 */
static int __init binder_init(void)
{
......
/* register /dev/binder via the misc-device framework (annotation: "binder registration") */
ret = misc_register(&binder_miscdev);--------binder註冊
......
}
/*
 * File operations exposed to user space through /dev/binder.
 * User processes drive binder with open()/mmap()/ioctl() on this device.
 */
static const struct file_operations binder_fops = {
.owner = THIS_MODULE,
.poll = binder_poll,
/* ioctl is binder's main command/transaction interface (annotation: "main interaction interface") */
.unlocked_ioctl = binder_ioctl,
.compat_ioctl = binder_ioctl,------------------binder主要的交互接口
/* mmap sets up the shared user/kernel buffer mapping (annotation: "user/kernel memory-mapping interface") */
.mmap = binder_mmap,------------------------用戶層和內核層內存映射接口
.open = binder_open,
.flush = binder_flush,
.release = binder_release,
};
/*
 * Misc-device descriptor: creates the "binder" device node with a
 * dynamically assigned minor number and wires it to binder_fops.
 */
static struct miscdevice binder_miscdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "binder",
.fops = &binder_fops
};
binder註冊成功後,用戶層就可以通過open/mmap/ioctl進行操作,接下來的內容圍繞這幾個問題展開:
- binder如何進行進程間通信
- binder的數據是如何封裝的
- binder內存映射
Binder通信流程(binder_ioctl)
/*
 * ioctl entry point (excerpt): looks up the binder_thread for the calling
 * task, then dispatches on the ioctl command.  Only the BINDER_WRITE_READ
 * case — the read/write data path — is shown; "..." marks elided code.
 */
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
...
/* find (or create — can't tell from this excerpt) the per-thread state for the caller */
thread = binder_get_thread(proc);----得到當前線程
...
switch (cmd) {
case BINDER_WRITE_READ:
/* main data path: copy user's write buffer in and/or fill its read buffer */
ret = binder_ioctl_write_read(filp, cmd, arg, thread);-----讀寫數據
...
}
...
}
/*
 * BINDER_WRITE_READ handler (excerpt): copies a struct binder_write_read
 * descriptor from user space, runs the write half and/or the read half
 * depending on which sizes are non-zero, then copies the updated
 * descriptor (consumed counts) back to user space.
 * "..." marks elided code, including the error paths after each
 * copy_from_user()/copy_to_user() check.
 */
static int binder_ioctl_write_read(struct file *filp,
unsigned int cmd, unsigned long arg,
struct binder_thread *thread)
{
...
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
/* pull the binder_write_read descriptor in from user space */
if (copy_from_user(&bwr, ubuf, sizeof(bwr)))---------從用戶層拷貝binder_write_read結構體
if (bwr.write_size > 0) {
/* caller has commands to send: process its write buffer */
ret = binder_thread_write(proc, thread,
bwr.write_buffer,
bwr.write_size,
&bwr.write_consumed);------------------寫數據
...
}
if (bwr.read_size > 0) {
/* caller wants replies/work: fill its read buffer (may block unless O_NONBLOCK) */
ret = binder_thread_read(proc, thread, bwr.read_buffer,
bwr.read_size,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);-----------讀數據
...
}
/* push the updated descriptor (consumed counters) back to user space */
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))--------------向用戶層拷貝binder_write_read結構體
...
out:
return ret;
}
- binder_thread_write
我們只關心數據傳輸的部分
/*
 * Consumes commands from the user-space write buffer (excerpt).
 * Each command is a u32 opcode followed by an opcode-specific payload;
 * the loop reads opcodes until the buffer is exhausted or the thread has
 * a pending error.  Only the BC_TRANSACTION/BC_REPLY case — the data
 * transmission path — is shown; other cases are elided ("...").
 * NOTE(review): the excerpt omits the function's return statement.
 */
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
/* resume where a previous partial pass left off */
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
/* each command starts with a 32-bit opcode */
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
...
switch (cmd) {
...
case BC_TRANSACTION:
case BC_REPLY: {
/* payload is a binder_transaction_data describing the user's request */
struct binder_transaction_data tr;
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
/* core transmission routine (annotation: "key entry point for data transfer") */
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);-----數據傳輸重要的函數入口
break;
}
...
}
}
}
- binder_thread_read
binder_transaction
下面來分析一個典型的數據傳輸流程
/*
 * Core transaction routine (heavily elided excerpt).  Overall flow shown:
 *  1. Resolve the target: a non-zero handle is looked up as a binder_ref
 *     whose node gives the target; handle 0 means the context manager
 *     (service manager) node.  The target process comes from the node.
 *  2. Pick the destination work list: the target thread's, or the target
 *     process's todo list when no specific thread applies.
 *  3. Allocate a binder_transaction (t) and a completion work item
 *     (tcomplete), fill in sender/target/code/flags/priority.
 *  4. Allocate a buffer from the TARGET process's binder space and copy
 *     the user payload and its offsets array into it.
 *  5. Walk the offsets: each entry names a flat_binder_object embedded in
 *     the payload that needs kernel translation (binder nodes, handles,
 *     file descriptors).  The FD case shown dups the sender's file into
 *     an unused fd of the target process.
 *  6. Queue t on the target list and tcomplete on the sending thread's
 *     todo list.
 * "..." marks elided code, including error handling and the reply path.
 */
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
...
if (reply) {
...
} else {
if (tr->target.handle) {
/* non-zero handle: resolve through the sender's ref table */
struct binder_ref *ref;
ref = binder_get_ref(proc, tr->target.handle);
...
target_node = ref->node;--------------------非service manager
} else {
/* handle 0 is reserved for the context manager (service manager) */
target_node = binder_context_mgr_node;------service manager
...
}
/* the node's owning process is where the data must be delivered */
target_proc = target_node->proc;-----------獲得傳輸的目標進程
...
}
if (target_thread) {
...
} else {
/* no specific thread: queue on the process-wide todo list and wait queue */
target_list = &target_proc->todo;---------獲得目標進程的todo鏈表
target_wait = &target_proc->wait;
}
...
/* allocate the transaction object; the rest of the function fills its fields */
t = kzalloc(sizeof(*t), GFP_KERNEL);-------------下面就是爲了把t各個字段賦值
...
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
...
/* synchronous request: remember the sender so the reply can be routed back */
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;----------------------源線程
else
t->from = NULL;
t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc;------------目標進程
t->to_thread = target_thread;--------目標線程
t->code = tr->code;------------------用戶數據請求碼
t->flags = tr->flags;----------------用戶數據flags
t->priority = task_nice(current);----當前線程優先級
/* buffer comes from the TARGET process's binder allocator, sized for data + offsets */
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));-----根據用戶數據請求相應buffer
if (t->buffer == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
...
/* offsets array lives right after the (pointer-aligned) payload data */
offp = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {---------copy用戶的數據到t->buffer->data
...
}
if (copy_from_user(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size)) {-----copy用戶offsets的數據到offp,指示偏移數據
...
}
off_end = (void *)offp + tr->offsets_size;
off_min = 0;
//Translate the special objects (binders/handles/fds) embedded in the payload
for (; offp < off_end; offp++) {
struct flat_binder_object *fp;
/* validate each offset: in range, non-overlapping, and u32-aligned */
if (*offp > t->buffer->data_size - sizeof(*fp) ||
*offp < off_min ||
t->buffer->data_size < sizeof(*fp) ||
!IS_ALIGNED(*offp, sizeof(u32))) {
...
goto err_bad_offset;
}
fp = (struct flat_binder_object *)(t->buffer->data + *offp);
off_min = *offp + sizeof(struct flat_binder_object);
switch (fp->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
...
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
...
} break;
case BINDER_TYPE_FD: {
/* fd passing: dup the sender's file into the target process's fd table */
int target_fd;
struct file *file;
...
file = fget(fp->handle);
...
/* LSM hook: may veto the file transfer */
if (security_binder_transfer_file(proc->tsk,
target_proc->tsk,
file) < 0) {
...
}
target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
...
task_fd_install(target_proc, target_fd, file);
...
/* rewrite the object in place so the receiver sees ITS fd number */
fp->handle = target_fd;
} break;
...
}
}
...
/* hand the transaction to the target, then tell our own thread it completed */
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);-------------加入目標進程鏈表
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);--------加入本線程鏈表,表示數據傳輸完成
....
}
binder 中進程、線程和 transaction 各結構體之間的關係如下圖所示(附圖見原文,本文未收錄)。
binder數據封裝描述
總結下用戶層封裝的數據格式