This analysis is based on the kernel 6.1 source code.
When a client process initiates a binder request to pass data to a server process, it obtains a BpBinder and starts the cross-process call. The call chain is as follows:
BpBinder::transact
IPCThreadState::transact
IPCThreadState::waitForResponse
IPCThreadState::talkWithDriver
ioctl
The cmd passed to ioctl is BINDER_WRITE_READ.
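To make this hand-off concrete, here is a minimal user-space sketch (not the actual IPCThreadState code; the fd and buffer handling are simplified assumptions) of the single BINDER_WRITE_READ ioctl that talkWithDriver ultimately issues:
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>
//Hedged sketch: not the framework implementation, just the shape of the call.
static int talk_with_driver(int binder_fd,
                            const void *write_buf, size_t write_size,
                            void *read_buf, size_t read_size)
{
    struct binder_write_read bwr = {
        //outgoing commands: BC_TRANSACTION followed by a binder_transaction_data
        .write_size = write_size,
        .write_buffer = (binder_uintptr_t)(uintptr_t)write_buf,
        //the driver fills this with BR_* return commands for the caller
        .read_size = read_size,
        .read_buffer = (binder_uintptr_t)(uintptr_t)read_buf,
    };
    //traps into binder_ioctl() -> binder_ioctl_write_read() in the driver
    return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}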
After ioctl traps into the kernel, the call chain is:
//kernel-6.1/drivers/android/binder.c
binder_ioctl
binder_ioctl_write_read
binder_thread_write
binder_transaction
The one-copy logic in the binder driver works like this:
binder_mmap reserves a virtual address range in the receiving process's user space and records it in that process's binder_alloc (a user-space sketch of this setup follows below).
When binder_transaction copies data, it calls binder_alloc_new_buf to carve a binder_buffer out of that range and to allocate physical page frames for it.
Those pages are mapped into the receiving process's user address space, and the client's user-space data is copied into them through a temporary kernel mapping.
Because the receiving process's user space is mapped onto the same physical pages, the server can read the data directly and no copy_to_user is needed. This is the core of the cross-process transfer, and it is implemented in binder_transaction.
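Before moving into the kernel, a minimal sketch of how the receiving process sets up the area that binder_mmap backs. In the framework this is done by ProcessState when it opens /dev/binder; the size and flags below are illustrative assumptions rather than the framework's exact values:
#include <stddef.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
//Hedged sketch of the mmap that lands in the driver's binder_mmap().
int open_binder(void **map_out, size_t map_size)
{
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
    if (fd < 0)
        return -1;
    //binder_mmap only reserves the user-space VMA; physical pages are
    //allocated later, per transaction, by binder_alloc_new_buf /
    //binder_update_page_range.
    void *vm = mmap(NULL, map_size, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, fd, 0);
    if (vm == MAP_FAILED) {
        close(fd);
        return -1;
    }
    *map_out = vm;
    return fd;
}
With that mapping in place, here is the relevant part of binder_transaction: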
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
binder_size_t extra_buffers_size)
{
struct binder_transaction *t;
...
//Allocate a binder_buffer, backed by physical pages, from the target process's binder_alloc
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
tr->offsets_size, extra_buffers_size,
!reply && (t->flags & TF_ONE_WAY), current->tgid);
...
//Copy the offsets array from the sender's user space into the kernel-managed buffer (the payload data is copied with the same helper)
if (binder_alloc_copy_user_to_buffer(
&target_proc->alloc,
t->buffer,
ALIGN(tr->data_size, sizeof(void *)),
(const void __user *)
(uintptr_t)tr->data.ptr.offsets,
tr->offsets_size)) {...}
...
}
binder_alloc_new_buf simply takes the alloc mutex and calls binder_alloc_new_buf_locked:
static struct binder_buffer *binder_alloc_new_buf_locked(
struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
size_t extra_buffers_size,
int is_async,
int pid)
{
struct rb_node *n = alloc->free_buffers.rb_node;
struct binder_buffer *buffer;
size_t buffer_size;
...
//1. Align the sizes and compute the total size to allocate
data_offsets_size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *));
size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
/* Pad 0-size buffers so they get assigned unique addresses */
size = max(size, sizeof(void *));
//2
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(!buffer->free);
buffer_size = binder_alloc_buffer_size(alloc, buffer);
if (size < buffer_size) {
best_fit = n;
n = n->rb_left;
} else if (size > buffer_size)
n = n->rb_right;
else {
best_fit = n;
break;
}
}
...
//3
has_page_addr = (void __user *)
(((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
WARN_ON(n && buffer_size != size);
end_page_addr =
(void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
//4
ret = binder_update_page_range(alloc, 1, (void __user *)
PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
buffer->free = 0;
buffer->allow_user_free = 0;
//5
binder_insert_allocated_buffer_locked(alloc, buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd got %pK\n",
alloc->pid, size, buffer);
buffer->data_size = data_size;
buffer->offsets_size = offsets_size;
buffer->async_transaction = is_async;
buffer->extra_buffers_size = extra_buffers_size;
buffer->pid = pid;
buffer->oneway_spam_suspect = false;
...
return buffer;
}
1. Align data_size and offsets_size and compute the total size to allocate (a worked example follows below).
2. Search the alloc->free_buffers red-black tree for a free buffer that is large enough, keeping the best fit.
3. Work out the range of pages that this buffer occupies.
4. Call binder_update_page_range for that page range.
5. Insert the buffer, now in use, into allocated_buffers.
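As a quick illustration of step 1, a small stand-alone sketch (hypothetical sizes, assuming a 64-bit kernel where sizeof(void *) is 8) of the alignment arithmetic:
#include <stdio.h>
#include <stddef.h>
//Same rounding as the kernel's ALIGN() macro, reproduced here for the sketch.
#define MY_ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))
int main(void)
{
    size_t data_size = 100, offsets_size = 24, extra_buffers_size = 0;
    size_t data_offsets_size = MY_ALIGN(data_size, sizeof(void *)) +
                               MY_ALIGN(offsets_size, sizeof(void *));
    size_t size = data_offsets_size + MY_ALIGN(extra_buffers_size, sizeof(void *));
    //pad 0-size buffers so they get assigned unique addresses
    if (size < sizeof(void *))
        size = sizeof(void *);
    printf("best-fit search asks the free_buffers tree for %zu bytes\n", size); //prints 128
    return 0;
}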
We have not yet seen the function that actually allocates memory, so let's continue with binder_update_page_range:
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
void __user *start, void __user *end)
{
void __user *page_addr;
unsigned long user_page_addr;
struct binder_lru_page *page;
struct vm_area_struct *vma = NULL; //set from alloc's recorded VMA under the mmap lock (elided here)
bool need_mm = false;
// 1
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
if (!page->page_ptr) {
need_mm = true;
break;
}
}
// 2
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;
bool on_lru;
size_t index;
index = (page_addr - alloc->buffer) / PAGE_SIZE;
page = &alloc->pages[index];
//3
if (page->page_ptr) {
continue;
}
//4
page->page_ptr = alloc_page(GFP_KERNEL |
__GFP_HIGHMEM |
__GFP_ZERO);
page->alloc = alloc;
INIT_LIST_HEAD(&page->lru);
user_page_addr = (uintptr_t)page_addr;
//5
ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
}
}
1. Check whether any page in the requested range still needs a physical page.
2. Walk the range page by page (the sketch below mirrors the index arithmetic).
3. If the physical page already exists, skip it.
4. Otherwise allocate a zeroed physical page with alloc_page.
5. Map the new page into the receiving process's VMA with vm_insert_page.
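The loop locates the backing page for each address with plain index arithmetic. The following stand-alone sketch (hypothetical addresses) mirrors the index = (page_addr - alloc->buffer) / PAGE_SIZE calculation from step 2:
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#define MY_PAGE_SIZE 4096UL
int main(void)
{
    //hypothetical values: alloc_buffer is the start of the mmap'ed VMA,
    //page_addr is the page-aligned address being populated for a new buffer
    uintptr_t alloc_buffer = 0x7f0000000000UL;
    uintptr_t page_addr = alloc_buffer + 3 * MY_PAGE_SIZE;
    //same arithmetic as index = (page_addr - alloc->buffer) / PAGE_SIZE;
    //the result selects the entry in alloc->pages[] that backs this page
    size_t index = (page_addr - alloc_buffer) / MY_PAGE_SIZE;
    printf("page_addr maps to alloc->pages[%zu]\n", index); //prints 3
    return 0;
}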
binder_alloc holds everything related to buffer management: the target process's VMA, the free and in-use binder_buffer trees, and the array of physical page descriptors. binder_buffer describes one transaction's data: its user-space address and the sizes involved.
The two essential jobs of binder_alloc_new_buf_locked are therefore allocating a binder_buffer and allocating the physical pages behind it.
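For reference, an abridged view of the two structures, trimmed to the fields that the excerpts above actually touch (the full definitions in kernel-6.1/drivers/android/binder_alloc.h contain more members):
struct binder_alloc {
    ...
    void __user *buffer;              //start of the mmap'ed user-space area
    struct rb_root free_buffers;      //free binder_buffers, ordered by size
    struct rb_root allocated_buffers; //binder_buffers currently in use
    struct binder_lru_page *pages;    //one descriptor per page of the area
    size_t buffer_size;               //total size of the mmap'ed area
    int pid;
    ...
};
struct binder_buffer {
    ...
    struct rb_node rb_node;           //node in free_buffers or allocated_buffers
    unsigned free:1;                  //set while the buffer sits on the free tree
    unsigned async_transaction:1;
    unsigned oneway_spam_suspect:1;
    ...
    size_t data_size;
    size_t offsets_size;
    size_t extra_buffers_size;
    void __user *user_data;           //user-space address of the payload
    int pid;
};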
Copying the data
binder_alloc_copy_user_to_buffer copies the data into the physical pages allocated above:
unsigned long binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
struct binder_buffer *buffer,
binder_size_t buffer_offset,
const void __user *from,
size_t bytes)
{
if (!check_buffer(alloc, buffer, buffer_offset, bytes))
return bytes;
while (bytes) {
unsigned long size;
unsigned long ret;
struct page *page;
pgoff_t pgoff;
void *kptr;
//1
page = binder_alloc_get_page(alloc, buffer,
buffer_offset, &pgoff);
size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
//2
kptr = kmap_local_page(page) + pgoff;
//3
ret = copy_from_user(kptr, from, size);
//4
kunmap_local(kptr);
if (ret)
return bytes - size + ret;
bytes -= size;
from += size;
buffer_offset += size;
}
return 0;
}
1. Get the physical page that was allocated earlier, and the offset pgoff within it (the sketch below shows how a copy that crosses a page boundary is split).
2. Get a temporary kernel virtual address, kptr, that points at that physical page via kmap_local_page.
3. Copy the data from the sender's user space into it with copy_from_user.
4. Release the temporary mapping with kunmap_local.
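Because each pass can only run to the end of the current page, a payload that straddles a page boundary is copied in several chunks. A small stand-alone sketch (hypothetical sizes) of the size = min_t(size_t, bytes, PAGE_SIZE - pgoff) splitting:
#include <stdio.h>
#include <stddef.h>
#define MY_PAGE_SIZE 4096UL
int main(void)
{
    size_t bytes = 5000; //total payload still to copy
    size_t pgoff = 3000; //offset of the destination within its first page
    while (bytes) {
        //mirrors size = min_t(size_t, bytes, PAGE_SIZE - pgoff)
        size_t size = bytes < MY_PAGE_SIZE - pgoff ? bytes : MY_PAGE_SIZE - pgoff;
        printf("copy %zu bytes starting at page offset %zu\n", size, pgoff);
        bytes -= size;
        pgoff = 0; //later chunks start at a page boundary
    }
    //prints: copy 1096 bytes at offset 3000, then copy 3904 bytes at offset 0
    return 0;
}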
Reference
Binder驱动 binder_transaction一次拷贝的核心实现: https://juejin.cn/post/7266417942182821947