Binder Series --- Service Addition Process (3)


binder_ioctl_write_read

static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;

    // Copy the user-space bwr struct into kernel space
    if (copy_from_user(&bwr, ubuf, sizeof(bwr)))
        return -EFAULT;

    if (bwr.write_size > 0) {
        // Process the write buffer and deliver the data to the target process
        ret = binder_thread_write(proc, thread,
                      bwr.write_buffer,
                      bwr.write_size,
                      &bwr.write_consumed);
    }
    if (bwr.read_size > 0) {
        // Read the work queued on this process/thread's own todo queue
        ret = binder_thread_read(proc, thread, bwr.read_buffer,
             bwr.read_size,
             &bwr.read_consumed,
             filp->f_flags & O_NONBLOCK);
        if (!list_empty(&proc->todo))
            wake_up_interruptible(&proc->wait);
    }

    // Copy the kernel-space bwr struct back to user space
    if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
        return -EFAULT;
    return ret;
}
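
For context, the bwr structure consumed here is filled on the user-space side and handed to the driver in a single ioctl; the two halves of the struct map onto the two branches above. A minimal sketch, assuming a hypothetical helper name talk_with_driver (the uapi header path can differ across kernel versions):

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   // uapi header; may be a local binder.h on older trees

static int talk_with_driver(int binder_fd,
                            const void *out_buf, size_t out_size,
                            void *in_buf, size_t in_size)
{
    struct binder_write_read bwr;

    bwr.write_buffer   = (binder_uintptr_t)(uintptr_t)out_buf; // BC_* commands to send
    bwr.write_size     = out_size;
    bwr.write_consumed = 0;
    bwr.read_buffer    = (binder_uintptr_t)(uintptr_t)in_buf;  // BR_* returns land here
    bwr.read_size      = in_size;
    bwr.read_consumed  = 0;

    // A single syscall drives both binder_thread_write() and binder_thread_read()
    return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}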

binder_thread_write

static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
{
    uint32_t cmd;
    void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
    void __user *ptr = buffer + *consumed;
    void __user *end = buffer + size;

    while (ptr < end && thread->return_error == BR_OK) {
        // Copy the command word from user space; here it is BC_TRANSACTION
        if (get_user(cmd, (uint32_t __user *)ptr))
            return -EFAULT;
        ptr += sizeof(uint32_t);
        switch (cmd) {
        case BC_TRANSACTION:
        case BC_REPLY: {
            struct binder_transaction_data tr;
            // Copy the binder_transaction_data that follows the command
            if (copy_from_user(&tr, ptr, sizeof(tr)))
                return -EFAULT;
            ptr += sizeof(tr);
            binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
            break;
        }
        }
        *consumed = ptr - buffer;
    }
    return 0;
}
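
The write buffer walked above is a stream of 32-bit BC_* command words, each followed by its payload; for addService() it carries a single BC_TRANSACTION plus a binder_transaction_data. A minimal sketch of that layout, assuming a hypothetical helper fill_add_service_cmd (the framework's IPCThreadState packs the equivalent data):

#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>   // uapi header; may be a local binder.h on older trees

enum { SVC_MGR_ADD_SERVICE = 3 };   // same value as ADD_SERVICE_TRANSACTION on the framework side

struct binder_write_cmd {
    uint32_t cmd;                        // BC_TRANSACTION
    struct binder_transaction_data tr;   // payload read by copy_from_user() above
} __attribute__((packed));

static void fill_add_service_cmd(struct binder_write_cmd *w,
                                 const void *data, size_t data_size,
                                 const binder_size_t *offsets, size_t offsets_size)
{
    memset(w, 0, sizeof(*w));
    w->cmd = BC_TRANSACTION;
    w->tr.target.handle = 0;              // handle 0 addresses servicemanager
    w->tr.code  = SVC_MGR_ADD_SERVICE;
    w->tr.flags = 0;                      // not TF_ONE_WAY, so a reply is expected
    w->tr.data_size    = data_size;       // flattened Parcel payload
    w->tr.offsets_size = offsets_size;    // offsets of the flat_binder_object entries
    w->tr.data.ptr.buffer  = (binder_uintptr_t)(uintptr_t)data;
    w->tr.data.ptr.offsets = (binder_uintptr_t)(uintptr_t)offsets;
}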

binder_transaction

static void binder_transaction(struct binder_proc *proc,
               struct binder_thread *thread,
               struct binder_transaction_data *tr, int reply)
{
    struct binder_transaction *t;
    struct binder_work *tcomplete;

    if (reply) {
        ...
    } else {
        if (tr->target.handle) {
            ...
        } else {
            // handle == 0, so the target is the servicemanager node
            target_node = binder_context_mgr_node;
        }
        // target_proc is the servicemanager process
        target_proc = target_node->proc;
    }
    if (target_thread) {
        ...
    } else {
        // Use the servicemanager process's todo queue and wait queue
        target_list = &target_proc->todo;
        target_wait = &target_proc->wait;
    }

    t = kzalloc(sizeof(*t), GFP_KERNEL);
    tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);

    // For a non-oneway call, record the current thread in the transaction's from field
    if (!reply && !(tr->flags & TF_ONE_WAY))
        t->from = thread;
    else
        t->from = NULL;

    t->to_proc = target_proc; // the target of this call is the servicemanager process
    t->code = tr->code;       // here code == ADD_SERVICE_TRANSACTION
    t->flags = tr->flags;     // here flags == 0

    // Allocate the transaction buffer from the servicemanager process
    t->buffer = binder_alloc_buf(target_proc, tr->data_size,
        tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));

    t->buffer->allow_user_free = 0;
    t->buffer->transaction = t;
    t->buffer->target_node = target_node;

    if (target_node)
        binder_inc_node(target_node, 1, 0, NULL); // bump the node's reference count

    // Copy the user-space ptr.buffer and ptr.offsets of binder_transaction_data into the kernel
    copy_from_user(t->buffer->data,
        (const void __user *)(uintptr_t)tr->data.ptr.buffer, tr->data_size);
    copy_from_user(offp,
        (const void __user *)(uintptr_t)tr->data.ptr.offsets, tr->offsets_size);

    off_end = (void *)offp + tr->offsets_size;

    for (; offp < off_end; offp++) {
        struct flat_binder_object *fp;
        fp = (struct flat_binder_object *)(t->buffer->data + *offp);
        off_min = *offp + sizeof(struct flat_binder_object);
        switch (fp->type) {
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER: {
            struct binder_ref *ref;
            // Look up the binder_node in this binder_proc by the binder pointer (ptr) value
            struct binder_node *node = binder_get_node(proc, fp->binder);
            if (node == NULL) {
                // Create the binder_node in the service's own process:
                // 1. find the spot in the red-black tree; 2. allocate kernel memory for
                // the new binder_node; 3. insert the new node into proc's red-black tree.
                node = binder_new_node(proc, fp->binder, fp->cookie);
            }
            // Get (or create) the binder_ref in the servicemanager process
            ref = binder_get_ref_for_node(target_proc, node);
            ...
            // Rewrite the type to a HANDLE type
            if (fp->type == BINDER_TYPE_BINDER)
                fp->type = BINDER_TYPE_HANDLE;
            else
                fp->type = BINDER_TYPE_WEAK_HANDLE;
            fp->binder = 0;
            fp->handle = ref->desc; // the handle value servicemanager will see
            fp->cookie = 0;
            binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
                     &thread->todo);
        } break;
        default:
            ...
        }
    }

    if (reply) {
        ...
    } else if (!(t->flags & TF_ONE_WAY)) {
        // BC_TRANSACTION and not oneway: record the transaction stack
        t->need_reply = 1;
        t->from_parent = thread->transaction_stack;
        thread->transaction_stack = t;
    }

    // Queue BINDER_WORK_TRANSACTION on the target list, here target_proc->todo
    t->work.type = BINDER_WORK_TRANSACTION;
    list_add_tail(&t->work.entry, target_list);

    // Queue BINDER_WORK_TRANSACTION_COMPLETE on the current thread's todo queue
    tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
    list_add_tail(&tcomplete->entry, &thread->todo);

    // Wake up the target wait queue, here target_proc->wait
    if (target_wait)
        wake_up_interruptible(target_wait);
    return;
}

binder_get_node looks up the corresponding binder_node in a binder_proc by the binder pointer (ptr) value. The handle values assigned in binder_get_ref_for_node follow these rules (see the sketch after this list):

  • Within each process's binder_proc, the handle values of its binder_ref entries increase starting from 1;
  • In every process's binder_proc, the binder_ref with handle = 0 points to the service manager;
  • For the same service's binder_node, the handle values of its binder_ref in different processes may differ.
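
These rules fall out of how binder_get_ref_for_node() picks a desc (the handle) for a newly created ref; the relevant part of the kernel function looks roughly like this (abridged):

// Abridged from binder_get_ref_for_node(): the desc (handle) starts at 0 only for
// the servicemanager node, otherwise at 1, and is then bumped past every desc
// already recorded in this process's refs_by_desc red-black tree.
new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
    ref = rb_entry(n, struct binder_ref, rb_node_desc);
    if (ref->desc > new_ref->desc)
        break;
    new_ref->desc = ref->desc + 1; // smallest unused desc in this process
}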

When registering a service, a BBinder object is passed, so localBinder is non-null during writeStrongBinder() and flat_binder_object.type is therefore BINDER_TYPE_BINDER.
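
For illustration, the flat_binder_object that the Parcel carries for such a local service looks roughly like the sketch below; service_ptr and service_weakref_ptr are stand-ins for the BBinder addresses that the framework's flatten_binder() writes:

struct flat_binder_object obj = {
    .type   = BINDER_TYPE_BINDER,  // a local binder entity, not a handle
    .binder = service_weakref_ptr, // weak-reference pointer of the BBinder
    .cookie = service_ptr,         // the BBinder object itself
};
// The BINDER_TYPE_BINDER branch in binder_transaction() above rewrites this into
// BINDER_TYPE_HANDLE with handle = ref->desc before servicemanager ever sees it.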

Service registration creates the binder_node in the service's own process and the binder_ref in the servicemanager process. For a given binder_node, each process creates at most one binder_ref object.

With the BINDER_WORK_TRANSACTION work item queued on servicemanager's binder_proc->todo, execution now moves into the ServiceManager process.

4. ServiceManager

As already covered in the article on starting ServiceManager, servicemanager spins in binder_loop(), which calls binder_parse() for each batch of commands read back from the driver.
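
For reference, binder_loop() in servicemanager's binder.c looks roughly like this (abridged): it tells the driver the thread has entered the loop, then blocks in BINDER_WRITE_READ reads and feeds whatever comes back to binder_parse().

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t)); // tell the driver this thread is looping

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        // Block in binder_thread_read() until work arrives on the todo queue
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0)
            break;

        // Hand the BR_* commands that came back to binder_parse()
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res <= 0)
            break;
    }
}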

binder_parse

int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;
        ptr += sizeof(uint32_t);
        switch(cmd) {
        case BR_TRANSACTION: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn); // unpack the binder_io info from txn
                // Handle the received Binder transaction, i.e. call svcmgr_handler
                res = func(bs, txn, &msg, &reply);
                // Send the reply: binder_send_reply() enters the binder driver via binder_write(),
                // issuing the BC_FREE_BUFFER and BC_REPLY commands that deliver the reply to the client
                binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
            }
            ptr += sizeof(*txn);
            break;
        }
        ...
        }
    }
    return r;
}
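
binder_send_reply() packs two commands into a single binder_write() call: BC_FREE_BUFFER to release the transaction buffer that binder_alloc_buf() handed out from servicemanager's space, and BC_REPLY carrying the reply data back to the client. Abridged, it looks roughly like this:

void binder_send_reply(struct binder_state *bs, struct binder_io *reply,
                       binder_uintptr_t buffer_to_free, int status)
{
    struct {
        uint32_t cmd_free;                 // BC_FREE_BUFFER
        binder_uintptr_t buffer;
        uint32_t cmd_reply;                // BC_REPLY
        struct binder_transaction_data txn;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER;
    data.buffer = buffer_to_free;          // the buffer allocated in binder_transaction()
    data.cmd_reply = BC_REPLY;
    data.txn.target.ptr = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) {
        data.txn.flags = TF_STATUS_CODE;   // report an error status instead of data
        data.txn.data_size = sizeof(int);
        data.txn.offsets_size = 0;
        data.txn.data.ptr.buffer = (uintptr_t)&status;
        data.txn.data.ptr.offsets = 0;
    } else {
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;
        data.txn.offsets_size = ((char *) reply->offs) - ((char *) reply->offs0);
        data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
        data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
    }
    binder_write(bs, &data, sizeof(data)); // one write carries both commands
}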

svcmgr_handler

int svcmgr_handler(struct binder_state *bs, struct binder_transaction_data *txn,
                   struct binder_io *msg, struct binder_io *reply)
{
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t allow_isolated;

    switch(txn->code) {
    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len);   // the service name
        handle = bio_get_ref(msg);         // get the handle assigned by the driver
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        // Register the specified service
        if (do_add_service(bs, s, len, handle, txn->sender_euid,
            allow_isolated, txn->sender_pid))
            return -1;
        break;
    }
    return 0;
}
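
do_add_service() is where the registration is actually recorded: servicemanager keeps a simple singly linked svclist of svcinfo entries mapping a UTF-16 service name to the handle the driver assigned. Abridged, it looks roughly like this:

int do_add_service(struct binder_state *bs, const uint16_t *s, size_t len,
                   uint32_t handle, uid_t uid, int allow_isolated, pid_t spid)
{
    struct svcinfo *si;

    if (!handle || (len == 0) || (len > 127))
        return -1;

    if (!svc_can_register(s, len, spid, uid))   // permission / SELinux check
        return -1;

    si = find_svc(s, len);
    if (si) {
        if (si->handle)
            svcinfo_death(bs, si);  // re-registration: drop the old handle first
        si->handle = handle;
    } else {
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        si->handle = handle;
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->death.func = (void *)svcinfo_death;
        si->death.ptr = si;
        si->allow_isolated = allow_isolated;
        si->next = svclist;         // push onto the global service list
        svclist = si;
    }

    binder_acquire(bs, handle);                   // BC_ACQUIRE on the new ref
    binder_link_to_death(bs, handle, &si->death); // be notified if the service dies
    return 0;
}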

Articles in this series:

  • Binder Series --- Service Addition Process (1)
  • Binder Series --- Service Addition Process (2)
  • Binder Series --- Service Addition Process (3)
  • Binder Series --- Service Addition Process (4)