Crystal clear! So this is how the Android Binder mechanism actually works


Before the article gets going, let's look at a legendary diagram:

(Figure: the classic Binder memory-mapping diagram: sending process, kernel buffer inside the Binder driver, data-receive buffer, receiving process)

When I first saw this diagram I was puzzled: why are there two buffers in kernel space, isn't that a bit extravagant? Here is the reason: to keep the system safe and stable, Android never lets the sending process in the diagram map the data-receive buffer directly. Instead, the sender hands its data to the Binder driver, which then deals with the receiving process. So the kernel buffer that would otherwise be mapped into the sending process's user space lives inside Binder, in kernel space, instead.

You might then ask: why not simply map the receiving process's memory straight onto the kernel buffer? Well, the kernel buffer is a virtual address and the user-space buffer is a virtual address too; virtual addresses can only be mapped onto physical memory. That is why there is a physical region, the data-receive buffer, for the two of them to map onto. Makes sense now, right?
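If the "two virtual addresses, one physical buffer" idea still feels abstract, here is a tiny demo in plain POSIX C (nothing Binder-specific; the names and the 4096-byte size are made up) that uses the same mmap trick: parent and child each have their own address space, yet both address the very same physical pages, so neither ever copies data to the other.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    /* One physical buffer, visible from two address spaces after fork(). */
    char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED)
        return 1;

    if (fork() == 0) {          /* "sender": writes into the shared mapping */
        strcpy(buf, "hello through a shared mapping");
        _exit(0);
    }
    wait(NULL);                 /* "receiver": sees the data without a copy */
    printf("receiver reads: %s\n", buf);
    munmap(buf, 4096);
    return 0;
}

Binder applies the same idea one level down: the receiver's user-space buffer and the driver's kernel buffer are two views of one physical region, so a single copy from the sender is enough.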

To explain the Binder mechanism clearly I'll take VibratorService as the example and break it into three steps: Part 1, ServiceManager the housekeeper; Part 2, registering VibratorService; Part 3, obtaining VibratorService.

Note: all source code below is based on Android 8.0.

Before we start, let's take a quick look at the Android boot flow:

(Figure: Android boot flow)

Part 1: ServiceManager, the housekeeper

The init process starts the ServiceManager process according to servicemanager.rc:

service servicemanager /system/bin/servicemanager
    class core animation
    user system
    group system readproc
    critical
    ...

Let's go to the main function of service_manager.c:

frameworks/native/cmds/servicemanager/service_manager.c

#include "binder.h"

int main(int argc, char** argv)
{
    struct binder_state *bs;
    union selinux_callback cb;
    char *driver;

    if (argc > 1) {
        driver = argv[1];
    } else {
        driver = "/dev/binder";  // binder驱动文件目录
    }

    // step 1: open the binder driver; bs is a binder_state
    bs = binder_open(driver, 128*1024);
    
    ...

    // step 2: register as the binder driver's context manager (the housekeeper)
    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    ...
    
    // step 3: enter the loop and handle requests from clients (ServiceManager is the server)
    binder_loop(bs, svcmgr_handler);

    return 0;
}

step 1: we enter binder_open (this binder_open lives in binder.c; note that this binder.c is not the binder driver itself. What, what is the binder driver then? We'll get to it below):

frameworks/native/cmds/servicemanager/binder.c

struct binder_state *binder_open(const char* driver, size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return NULL;
    }

    // system call: open the binder driver, driver = "/dev/binder"; bs is the binder_state
    bs->fd = open(driver, O_RDWR | O_CLOEXEC);
    if (bs->fd < 0) {
        fprintf(stderr,"binder: cannot open %s (%s)\n",
                driver, strerror(errno));
        goto fail_open;
    }

    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        fprintf(stderr,
                "binder: kernel driver version (%d) differs from user space version (%d)\n",
                vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
        goto fail_open;
    }

    bs->mapsize = mapsize;
    
    // mmap: allocate the buffer and establish the mapping here
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr,"binder: cannot map device (%s)\n",
                strerror(errno));
        goto fail_map;
    }

    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return NULL;
}

The binder driver is a file; in Linux everything is a file, after all. Its path sits under the dev directory on the phone:

(Screenshot: the binder device node under /dev on a device)

Unlike an ordinary file, certain operations on this driver file call straight into the corresponding functions of the driver; the mapping is as follows:

static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};
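This file_operations table gets wired up when the driver registers its device node at boot. A simplified sketch of that registration (the real 8.0-era driver builds one such device per name listed in a module parameter, typically binder, hwbinder and vndbinder, which is glossed over here):

#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

/* Simplified: register a single misc device backed by the binder_fops above. */
static struct miscdevice binder_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,   /* let the kernel pick the minor number */
	.name  = "binder",             /* shows up as /dev/binder              */
	.fops  = &binder_fops,         /* the operation table shown above      */
};

static int __init binder_init(void)
{
	return misc_register(&binder_miscdev);
}
device_initcall(binder_init);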

So when we call open(driver, O_RDWR | O_CLOEXEC), we end up in the binder driver's binder_open. Let's enter binder_open in the driver's binder.c (link: android.googlesource.com/kernel/msm/…

static int binder_open(struct inode *nodp, struct file *filp)
{
    // a binder_proc is created for the opening process
    struct binder_proc *proc;
    struct binder_device *binder_dev;
    binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
                 current->group_leader->pid, current->pid);
    // kzalloc: allocate kernel memory for the binder_proc
    proc = kzalloc(sizeof(*proc), GFP_KERNEL);
    if (proc == NULL)
            return -ENOMEM;
    get_task_struct(current->group_leader);
    proc->tsk = current->group_leader;
    
    // initialize the binder_proc's todo list
    INIT_LIST_HEAD(&proc->todo);
    init_waitqueue_head(&proc->wait);
    proc->default_priority = task_nice(current);
    binder_dev = container_of(filp->private_data, struct binder_device,
                              miscdev);
    proc->context = &binder_dev->context;
    
    // take the global binder lock
    binder_lock(__func__);
    binder_stats_created(BINDER_STAT_PROC);
    
    // add this binder_proc to the driver's global binder_procs list
    hlist_add_head(&proc->proc_node, &binder_procs);
    proc->pid = current->group_leader->pid;
    INIT_LIST_HEAD(&proc->delivered_death);
    filp->private_data = proc;
    binder_unlock(__func__);
    
    ...
    
    return 0;
}

The main things that happen here: a binder_proc is created, its todo list is initialized, and the binder_proc is stored in filp->private_data.
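Since binder_proc will keep coming up, here is a trimmed sketch of it for orientation (field list abridged from the 8.0-era driver; the comments are mine):

struct binder_proc {
	struct hlist_node proc_node;     /* entry in the global binder_procs list   */
	struct rb_root threads;          /* one binder_thread per calling thread    */
	struct rb_root nodes;            /* a binder_node for every binder we host  */
	struct rb_root refs_by_desc;     /* refs to remote nodes, indexed by handle */
	struct rb_root refs_by_node;     /* the same refs, indexed by node address  */
	int pid;
	struct vm_area_struct *vma;      /* the user-space mapping (set in mmap)    */
	struct task_struct *tsk;
	void *buffer;                    /* kernel address of the shared buffer     */
	ptrdiff_t user_buffer_offset;    /* user address = kernel address + offset  */
	size_t buffer_size;
	struct page **pages;             /* physical pages backing the buffer       */
	struct list_head todo;           /* work queued for this process            */
	wait_queue_head_t wait;          /* where its idle binder threads sleep     */
	struct binder_context *context;
	/* ... many more fields ... */
};

With the binder_proc created and stashed in filp->private_data, we return to the user-space binder_open: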

frameworks/native/cmds/servicemanager/binder.c

struct binder_state *binder_open(const char* driver, size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;

    bs = malloc(sizeof(*bs));
    ...

    // open the binder driver, driver = "/dev/binder"; bs is the binder_state
    bs->fd = open(driver, O_RDWR | O_CLOEXEC);
    ...

    bs->mapsize = mapsize;
    
    // mmap: allocate the buffer and establish the mapping here
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    ...
}

After open we reach the mmap call; following the binder_fops table above, we land in the driver's binder_mmap:

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
        
        // get the binder_proc created back in binder_open
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
	struct binder_buffer *buffer;
	if (proc->tsk != current->group_leader)
		return -EINVAL;
                
        // cap the mapping at 4 MB, i.e. a binder transaction can never carry more than 4 MB
	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;
	...
        
	mutex_lock(&binder_mmap_lock);
	if (proc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}
        
        // allocate a kernel virtual area the same size as the user-space vma
	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
        // record this area in binder_proc->buffer
	proc->buffer = area->addr;
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
	mutex_unlock(&binder_mmap_lock);
        ...

	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
	if (proc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	proc->buffer_size = vma->vm_end - vma->vm_start;
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;
	/* binder_update_page_range assumes preemption is disabled */
	preempt_disable();
        
        // allocate physical pages (just the first one here) and map them for both the user-space vma and the kernel buffer
	ret = binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma);
	...
        
	proc->free_async_space = proc->buffer_size / 2;
	barrier();
	proc->files = get_files_struct(current);
        
        // save the vma into binder_proc->vma
	proc->vma = vma;
	proc->vma_vm_mm = vma->vm_mm;
	/*pr_info("binder_mmap: %d %lx-%lx maps %pK\n",
		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
	return 0;
        ...
}

In binder_mmap the driver fetches the binder_proc created earlier in binder_open, allocates a kernel virtual area the same size as the user-space vma, maps both onto real physical pages (initially just one page; more are allocated on demand), and records everything in the binder_proc. Clearly binder_proc matters a lot: it is the current process's (here ServiceManager's) representative inside the binder driver.
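The payoff of user_buffer_offset is that the driver never needs a second copy toward the receiver: it writes a transaction into proc->buffer and simply hands the receiving process the matching user-space address. A hypothetical helper (not in the real driver, which does this arithmetic inline when filling the read buffer) makes the relationship explicit:

/* Hypothetical helper: turn an address inside proc's kernel-side buffer into
 * the address at which the receiving process sees the very same bytes. */
static inline binder_uintptr_t kernel_buffer_to_user(struct binder_proc *proc,
                                                     void *kaddr)
{
	return (binder_uintptr_t)((uintptr_t)kaddr + proc->user_buffer_offset);
}

Back to the main flow of service_manager.c: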

int main(int argc, char** argv)
{

    // step 1: open the binder driver; bs is a binder_state
    bs = binder_open(driver, 128*1024);
    ...

    // step 2: register as the binder driver's context manager
    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }
    ...
    
    // step 3: enter the loop and handle requests from clients (ServiceManager is the server)
    binder_loop(bs, svcmgr_handler);
    
    return 0;
}

step 2: binder_become_context_manager registers the caller as the binder driver's housekeeper (context manager); we'll skip the details here.
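For the curious, in the 8.0 sources this call boils down to a single ioctl that asks the driver to make the caller the context manager, roughly:

int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}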

step 3: binder_loop enters a loop and handles requests from clients (ServiceManager acts as the server).

frameworks/native/cmds/servicemanager/binder.c

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        // ioctl ends up in the binder driver's binder_ioctl
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);

        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }
        
        // process the data fetched from the binder driver
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

binder_loop starts an endless loop that keeps pulling data from the binder driver via ioctl and hands whatever it gets to binder_parse.
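The bwr packed in that loop is struct binder_write_read, the contract both sides of the BINDER_WRITE_READ ioctl agree on. From the UAPI binder header (comments added by me):

struct binder_write_read {
	binder_size_t    write_size;     /* bytes of commands handed to the driver */
	binder_size_t    write_consumed; /* how much of that the driver consumed   */
	binder_uintptr_t write_buffer;
	binder_size_t    read_size;      /* room offered for the driver's replies  */
	binder_size_t    read_consumed;  /* how much the driver actually filled in */
	binder_uintptr_t read_buffer;
};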

Alright, that's enough about ServiceManager for now.

Part 2: Registering VibratorService

Before this part starts, take a look at the overall flow of a Binder call:

(Figure: overall Binder communication flow)

The init process starts not only the ServiceManager process but also the Zygote process, and Zygote in turn forks the SystemServer process. Our VibratorService is one of SystemServer's services, alongside the famous WindowManagerService, ActivityManagerService and friends.

frameworks/base/services/java/com/android/server/SystemServer.java

private void startOtherServices() {
    ...
    vibrator = new VibratorService(context); 
    ServiceManager.addService("vibrator", vibrator);
    ...
}

Here a VibratorService is created and registered with ServiceManager. The ServiceManager class used here runs inside SystemServer and plays the Client role of this transaction. Let's enter ServiceManager.addService:

public static void addService(String name, IBinder service) {
    try {
        getIServiceManager().addService(name, service, false);
    } catch (RemoteException e) {
        Log.e(TAG, "error in addService", e);
    }
}

 private static IServiceManager getIServiceManager() {
    if (sServiceManager != null) {
        return sServiceManager;
    }

    // Find the service manager
    sServiceManager = ServiceManagerNative
            .asInterface(Binder.allowBlocking(BinderInternal.getContextObject()));
    return sServiceManager;
}


frameworks/base/core/java/android/os/ServiceManagerNative.java

static public IServiceManager asInterface(IBinder obj) {
    if (obj == null) {
        return null;
    }
    IServiceManager in =
        (IServiceManager)obj.queryLocalInterface(descriptor);
    if (in != null) {
        return in;
    }
    
    // create the ServiceManagerProxy object
    return new ServiceManagerProxy(obj);
}

This ServiceManagerProxy is our BinderProxy. When the ServiceManagerProxy is created it is handed BinderInternal.getContextObject(); let's look inside:

public class BinderInternal {
    ...
    public static final native IBinder getContextObject();
    ...
}

frameworks/base/core/jni/android_util_Binder.cpp

static jobject android_os_BinderInternal_getContextObject(JNIEnv* env, jobject clazz)
{
    // ProcessState is a singleton: one per process
    sp<IBinder> b = ProcessState::self()->getContextObject(NULL);
    return javaObjectForIBinder(env, b);
}


frameworks/native/libs/binder/ProcessState.cpp

sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/) { 
    // the handle passed in is 0
    return getStrongProxyForHandle(0); 
}

sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;

    AutoMutex _l(mLock);

    handle_entry* e = lookupHandleLocked(handle);

    if (e != NULL) {
        // We need to create a new BpBinder if there isn't currently one, OR we
        // are unable to acquire a weak reference on this current one.  See comment
        // in getWeakProxyForHandle() for more info about this.
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            if (handle == 0) {
                ...

                Parcel data;
                status_t status = IPCThreadState::self()->transact(
                        0, IBinder::PING_TRANSACTION, data, NULL, 0);
                if (status == DEAD_OBJECT)
                   return NULL;
            }
            
            // at the native layer this is a BpBinder
            b = new BpBinder(handle); 
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            ...
        }
    }

    return result;
}

What finally comes back is new BpBinder(handle) with handle 0. getContextObject, as the name says, obtains the context object: when the ServiceManager process started, it made itself the binder driver's context manager via binder_become_context_manager.

So this is effectively new ServiceManagerProxy(BpBinder(handle = 0)) (I say "effectively" because ServiceManagerProxy lives in the Java layer while BpBinder lives in the native layer).

class ServiceManagerProxy implements IServiceManager {
    public ServiceManagerProxy(IBinder remote) {
        mRemote = remote;
    }
    ...
}

The BpBinder is assigned to mRemote. Good: the Client, the BinderProxy and the BpBinder are all in place. Next let's see what addService does:

public static void addService(String name, IBinder service) {
    try {
        // getIServiceManager() is the ServiceManagerProxy
        getIServiceManager().addService(name, service, false);
    } catch (RemoteException e) {
        Log.e(TAG, "error in addService", e);
    }
}

class ServiceManagerProxy implements IServiceManager {
    ...
    public void addService(String name, IBinder service, boolean allowIsolated)
            throws RemoteException {
        Parcel data = Parcel.obtain();
        Parcel reply = Parcel.obtain();
        data.writeInterfaceToken(IServiceManager.descriptor);
        data.writeString(name);
        data.writeStrongBinder(service);
        data.writeInt(allowIsolated ? 1 : 0);
        // mRemote is the BpBinder
        mRemote.transact(ADD_SERVICE_TRANSACTION, data, reply, 0);
        reply.recycle();
        data.recycle();
    }
    ...
}

getIServiceManager() is the ServiceManagerProxy. Its addService packs the data into a Parcel and then calls mRemote.transact; mRemote is our BpBinder, so let's step into BpBinder::transact:

status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }

    return DEAD_OBJECT;
}

Another very important class appears here: IPCThreadState. It is thread-private: the instance is created lazily and kept in thread-local storage, so it is a per-thread singleton and each thread has exactly one IPCThreadState. If you're curious, take a look at its self() method.
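A minimal C sketch of that per-thread-singleton pattern, with hypothetical names (the real logic lives in IPCThreadState.cpp, which uses a pthread key in essentially this way):

#include <pthread.h>
#include <stdlib.h>

/* Stand-in for IPCThreadState; the real class carries mIn/mOut Parcels etc. */
struct ipc_thread_state { int placeholder; };

static pthread_key_t  gTLS;
static pthread_once_t gTLSOnce = PTHREAD_ONCE_INIT;

static void make_key(void) { pthread_key_create(&gTLS, free); }

/* One instance per thread: the first call on a thread allocates it,
 * every later call on the same thread returns the same pointer. */
struct ipc_thread_state *ipc_thread_state_self(void)
{
    struct ipc_thread_state *st;
    pthread_once(&gTLSOnce, make_key);
    st = pthread_getspecific(gTLS);
    if (!st) {
        st = calloc(1, sizeof(*st));
        pthread_setspecific(gTLS, st);
    }
    return st;
}

Now let's step into IPCThreadState's transact method: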

status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    status_t err = data.errorCheck();
    ...

    if (err == NO_ERROR) {
        LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
            (flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
        // go into writeTransactionData
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }

    if (err != NO_ERROR) {
        if (reply) reply->setError(err);
        return (mLastError = err);
    }

    if ((flags & TF_ONE_WAY) == 0) { // not one-way
        ...
        // wait for the reply
        if (reply) {
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
        ...
    } else {
        err = waitForResponse(NULL, NULL);
    }

    return err;
}

one_way means, as the name suggests, fire-and-forget: no reply is needed. Our call here is not one-way, so a reply is expected. Into writeTransactionData:

status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    // wrap the data further into a binder_transaction_data (literally: data for a binder transaction)
    binder_transaction_data tr;

    tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
    tr.target.handle = handle;
    tr.code = code;
    tr.flags = binderFlags;
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;

    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();
        tr.data.ptr.buffer = data.ipcData();
        tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t);
        tr.data.ptr.offsets = data.ipcObjects();
    } else if (statusBuffer) {
        tr.flags |= TF_STATUS_CODE;
        *statusBuffer = err;
        tr.data_size = sizeof(status_t);
        tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
        tr.offsets_size = 0;
        tr.data.ptr.offsets = 0;
    } else {
        return (mLastError = err);
    }

    // written into the Parcel together with cmd (BC_TRANSACTION); mOut is a member of IPCThreadState, paired with mIn
    mOut.writeInt32(cmd);
    mOut.write(&tr, sizeof(tr));

    return NO_ERROR;
}

writeTransactionData wraps the data further into a binder_transaction_data and writes it into mOut together with the cmd (BC_TRANSACTION). Next, waitForResponse:

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;

    while (1) {
        // talkWithDriver: talk to the binder driver
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;
        ...
    }
    ...
}

talkWithDriver means talking to the binder driver: waitForResponse spins in a while loop, repeatedly calling it to exchange data with the driver. Here is talkWithDriver:

status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    if (mProcess->mDriverFD <= 0) {
        return -EBADF;
    }

    binder_write_read bwr;

    // Is the read buffer empty?
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();

    // We don't want to write anything if we are still reading
    // from data left in the input buffer and the caller
    // has requested to read the next data.
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;

    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data();

    // This is what we'll read.
    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }
    ...

    // Return immediately if there is nothing to do.
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;

    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
        IF_LOG_COMMANDS() {
            alog << "About to read/write, write size = " << mOut.dataSize() << endl;
        }
#if defined(__ANDROID__)
        // ioctl: talk to the binder driver
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
#else
        err = INVALID_OPERATION;
#endif
        if (mProcess->mDriverFD <= 0) {
            err = -EBADF;
        }
        IF_LOG_COMMANDS() {
            alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
        }
    } while (err == -EINTR);

    IF_LOG_COMMANDS() {
        alog << "Our err: " << (void*)(intptr_t)err << ", write consumed: "
            << bwr.write_consumed << " (of " << mOut.dataSize()
                        << "), read consumed: " << bwr.read_consumed << endl;
    }

    if (err >= NO_ERROR) {
        if (bwr.write_consumed > 0) {
            if (bwr.write_consumed < mOut.dataSize())
                mOut.remove(0, bwr.write_consumed);
            else
                mOut.setDataSize(0);
        }
        if (bwr.read_consumed > 0) {
            mIn.setDataSize(bwr.read_consumed);
            mIn.setDataPosition(0);
        }
        IF_LOG_COMMANDS() {
            TextOutput::Bundle _b(alog);
            alog << "Remaining data size: " << mOut.dataSize() << endl;
            alog << "Received commands from driver: " << indent;
            const void* cmds = mIn.data();
            const void* end = mIn.data() + mIn.dataSize();
            alog << HexDump(cmds, mIn.dataSize()) << endl;
            while (cmds < end) cmds = printReturnCommand(alog, cmds);
            alog << dedent;
        }
        return NO_ERROR;
    }

    return err;
}

mIn and mOut are packed into a binder_write_read and sent to the binder driver through the ioctl system call. By the binder_fops mapping shown earlier, an ioctl on the binder fd ends up in the driver's binder_ioctl:

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    int ret;
    // get binder_proc, the current process's representative inside the binder driver
    struct binder_proc *proc = filp->private_data;
    struct binder_context *context = proc->context;
    struct binder_thread *thread;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;
    /*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/
    trace_binder_ioctl(cmd, arg);
    ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret)
            goto err_unlocked;
    binder_lock(__func__);
    thread = binder_get_thread(proc);
    if (thread == NULL) {
            ret = -ENOMEM;
            goto err;
    }
    switch (cmd) {
    case BINDER_WRITE_READ: {
            struct binder_write_read bwr;
            if (size != sizeof(struct binder_write_read)) {
                    ret = -EINVAL;
                    goto err;
            }
            if (copy_from_user_preempt_disabled(&bwr, ubuf, sizeof(bwr))) {
                    ret = -EFAULT;
                    goto err;
            }
            binder_debug(BINDER_DEBUG_READ_WRITE,
                         "%d:%d write %lld at %016llx, read %lld at %016llx\n",
                         proc->pid, thread->pid,
                         (u64)bwr.write_size, (u64)bwr.write_buffer,
                         (u64)bwr.read_size, (u64)bwr.read_buffer);
            if (bwr.write_size > 0) {
                    // enter binder_thread_write
                    ret = binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
                    trace_binder_write_done(ret);
                    if (ret < 0) {
                            bwr.read_consumed = 0;
                            if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
                                    ret = -EFAULT;
                            goto err;
                    }
            }
            if (bwr.read_size > 0) {
                    ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
                    trace_binder_read_done(ret);
                    if (!list_empty(&proc->todo))
                            wake_up_interruptible(&proc->wait);
                    if (ret < 0) {
                            if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
                                    ret = -EFAULT;
                            goto err;
                    }
            }
            binder_debug(BINDER_DEBUG_READ_WRITE,
                         "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
                         proc->pid, thread->pid,
                         (u64)bwr.write_consumed, (u64)bwr.write_size,
                         (u64)bwr.read_consumed, (u64)bwr.read_size);
            if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) {
                    ret = -EFAULT;
                    goto err;
            }
            break;
            ...
    }
    ret = 0;
err:
    if (thread)
            thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
    binder_unlock(__func__);
    wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
    if (ret && ret != -ERESTARTSYS)
            pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
    trace_binder_ioctl_done(ret);
    return ret;
}

The current cmd is BINDER_WRITE_READ and the binder_write_read's write_size is greater than 0, so we enter binder_thread_write:

static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;
	while (ptr < end && thread->return_error == BR_OK) {
		if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			binder_stats.bc[_IOC_NR(cmd)]++;
			proc->stats.bc[_IOC_NR(cmd)]++;
			thread->stats.bc[_IOC_NR(cmd)]++;
		}
		switch (cmd) {
		...
		
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;
                    // copy_from_user here copies only the binder_transaction_data struct itself, not the payload
			if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}
		...
		
	}
	return 0;
}

cmd is BC_TRANSACTION, so we enter binder_transaction:

static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	e = binder_transaction_log_add(&binder_transaction_log);
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;
	if (reply) {
		...
	} else {
		if (tr->target.handle) {
			...
		} else {
			target_node = context->binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				goto err_no_context_mgr_node;
			}
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			goto err_invalid_target_handle;
		}
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			...
		}
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;
	/* TODO: reuse incoming transaction for reply */
	t = kzalloc_preempt_disabled(sizeof(*t));
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);
	tcomplete = kzalloc_preempt_disabled(sizeof(*tcomplete));
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
	t->debug_id = ++binder_last_id;
	e->debug_id = t->debug_id;
	...
	
	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = proc->tsk->cred->euid;
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);
	trace_binder_transaction(reply, t, target_node);
	// allocate space for the client's data in the area shared between the target process and the binder driver
	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);
	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;
	// copy the data from the client's user space into t->buffer via copy_from_user (the one copy)
	if (copy_from_user_preempt_disabled(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user_preempt_disabled(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	...
	
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;
	off_min = 0;
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);
		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			goto err_bad_offset;
		}
		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			fp = to_flat_binder_object(hdr);
			// go into binder_translate_binder
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
		} break;
		...
	}
	if (reply) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction(target_thread, in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) { // not one-way
		BUG_ON(t->buffer->async_transaction != 0);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		// push t onto thread->transaction_stack
		thread->transaction_stack = t;
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
	}
	t->work.type = BINDER_WORK_TRANSACTION;
	// add to ServiceManager's binder_proc->todo queue
	list_add_tail(&t->work.entry, target_list);
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	// add the completion entry to the calling (service-registering) thread's todo queue
	list_add_tail(&tcomplete->entry, &thread->todo);
	if (target_wait) {
		if (reply || !(t->flags & TF_ONE_WAY)) {
			wake_up_interruptible_sync(target_wait);
		}
		else {
			wake_up_interruptible(target_wait);
		}
	}
	return;
	...
}

Here is binder_translate_binder:

static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_ref *ref;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp->binder, fp->cookie);
		if (!node)
			return -ENOMEM;
		node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
		node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
		return -EPERM;
        // get (or create) a ref from ServiceManager's refs_by_node red-black tree
	ref = binder_get_ref_for_node(target_proc, node);
	if (!ref)
		return -EINVAL;
	if (fp->hdr.type == BINDER_TYPE_BINDER)
            // change the type to BINDER_TYPE_HANDLE
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = ref->desc;
	fp->cookie = 0;
	binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
	trace_binder_transaction_node_to_ref(t, node, ref);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     ref->debug_id, ref->desc);
	return 0;
}

Here binder_get_node looks up the matching node in the client side's binder_proc->nodes red-black tree, and binder_new_node creates one if it doesn't exist yet. If binder_proc stands for the process, then a binder_node stands for one Binder entity (one service) inside that process: SystemServer, for example, hosts VibratorService, WMS, AMS and so on.

Once the client-side node exists, binder_get_ref_for_node fetches a ref from ServiceManager's binder_proc->refs_by_node red-black tree. If none is found, a new ref is created and inserted into both the refs_by_desc and refs_by_node trees; that step assigns a new handle (desc), allocated incrementally (0 is reserved for the context manager), and this handle is exactly what ends up stored in ServiceManager! In this way the client side's node gets referenced from the server side's binder_proc red-black trees. We'll step into binder_get_ref_for_node later.
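Two driver structures carry all of this; trimmed sketches for orientation (field lists abridged from the 8.0-era driver, comments mine):

struct binder_node {                     /* one per Binder entity (service)        */
	struct rb_node rb_node;          /* entry in the owning proc->nodes tree   */
	struct binder_proc *proc;        /* the process that hosts the entity      */
	struct hlist_head refs;          /* every binder_ref pointing at this node */
	binder_uintptr_t ptr;            /* user-space object this node stands for */
	binder_uintptr_t cookie;
	/* ... */
};

struct binder_ref {                      /* one per (process, remote node) pair    */
	struct rb_node rb_node_desc;     /* in proc->refs_by_desc, keyed by desc   */
	struct rb_node rb_node_node;     /* in proc->refs_by_node, keyed by node   */
	struct binder_proc *proc;        /* the process holding the reference      */
	struct binder_node *node;        /* the remote entity it refers to         */
	uint32_t desc;                   /* the handle handed back to user space   */
	/* ... */
};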

Back in binder_transaction, list_add_tail puts the binder_transaction onto ServiceManager's binder_proc->todo queue; note that t->work.type = BINDER_WORK_TRANSACTION here. So when is this queue drained? We go back to Part 1, the ServiceManager housekeeper: its binder_loop likewise reaches the driver's binder_ioctl through ioctl:

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	...
	switch (cmd) {
	case BINDER_WRITE_READ: {
		...
		if (bwr.write_size > 0) {
			ret = binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
			...
		}
		if (bwr.read_size > 0) { // a read buffer was supplied
			ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
			...
                        
		}
		...
		break;
	}
	...
}

After binder_thread_write finishes, we arrive at binder_thread_read:

static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;
	int ret = 0;
	int wait_for_proc_work;
	if (*consumed == 0) {
		if (put_user_preempt_disabled(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}
retry:
	...
	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;
		if (!list_empty(&thread->todo))
			w = list_first_entry(&thread->todo, struct binder_work, entry);
		else if (!list_empty(&proc->todo) && wait_for_proc_work)
                    // take work from the todo list
			w = list_first_entry(&proc->todo, struct binder_work, entry);
		else {
			if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
				goto retry;
			break;
		}
		if (end - ptr < sizeof(tr) + 4)
			break;
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
                    // recover the enclosing binder_transaction
			t = container_of(w, struct binder_transaction, work);
		} break;
		...
	}
done:
	*consumed = ptr - buffer;
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user_preempt_disabled(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	}
	return 0;
}

list_first_entry takes the work item off the binder_proc's todo list, and container_of recovers the binder_transaction that our client created in binder_transaction. It is then copied out into the caller's read buffer (for an ordinary process that would be IPCThreadState's mIn; for ServiceManager it is the readbuf passed in binder_loop). With the data in hand, let's see how ServiceManager's binder_loop handles it:

void binder_loop(struct binder_state *bs, binder_handler func)
{
	...
	for (;;) {
		...
		res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
		...
		// process the data fetched from the binder driver
		res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
		...
	}
}
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;
        ptr += sizeof(uint32_t);
#if TRACE
        fprintf(stderr,"%s:\n", cmd_name(cmd));
#endif
        switch(cmd) {
        ...
        case BR_TRANSACTION: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            if ((end - ptr) < sizeof(*txn)) {
                ALOGE("parse: txn too small!\n");
                return -1;
            }
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn);
                // func is svcmgr_handler
                res = func(bs, txn, &msg, &reply);
                if (txn->flags & TF_ONE_WAY) {
                    binder_free_buffer(bs, txn->data.ptr.buffer);
                } else {
                    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
                }
            }
            ptr += sizeof(*txn);
            break;
        }
        ...
        }
    }

    return r;
}

This func is the handler that servicemanager's main passed in when it called binder_loop:

int main(int argc, char** argv)
    {

        // step 1: open the binder driver; bs is a binder_state
        bs = binder_open(driver, 128*1024);
        ...

        // step 2: register as the binder driver's context manager
        if (binder_become_context_manager(bs)) {
            ALOGE("cannot become context manager (%s)\n", strerror(errno));
            return -1;
        }
        ...
        
        // step 3: enter the loop and handle requests from clients (ServiceManager is the server)
        binder_loop(bs, svcmgr_handler);
        
        return 0;
    }

Let's enter svcmgr_handler:

int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
    int allow_isolated;
    ...
    switch(txn->code) {
    ...
    case SVC_MGR_ADD_SERVICE:
        // get the service name passed in through the binder driver
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        // get the handle passed in through the binder driver
        handle = bio_get_ref(msg);
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        // call do_add_service
        if (do_add_service(bs, s, len, handle, txn->sender_euid,
            allow_isolated, txn->sender_pid))
            return -1;
        break;
        ...
        
    bio_put_uint32(reply, 0);
    return 0;
}

Having got the service name and the handle passed in through the binder driver, we enter do_add_service:

int do_add_service(struct binder_state *bs,
                   const uint16_t *s, size_t len,
                   uint32_t handle, uid_t uid, int allow_isolated,
                   pid_t spid)
{
    struct svcinfo *si;

    //ALOGI("add_service('%s',%x,%s) uid=%d\n", str8(s, len), handle,
    //        allow_isolated ? "allow_isolated" : "!allow_isolated", uid);

    if (!handle || (len == 0) || (len > 127))
        return -1;

    if (!svc_can_register(s, len, spid, uid)) {
        ALOGE("add_service('%s',%x) uid=%d - PERMISSION DENIED\n",
             str8(s, len), handle, uid);
        return -1;
    }

    si = find_svc(s, len);
    if (si) {
        if (si->handle) {
            ALOGE("add_service('%s',%x) uid=%d - ALREADY REGISTERED, OVERRIDE\n",
                 str8(s, len), handle, uid);
            svcinfo_death(bs, si);
        }
        si->handle = handle;
    } else {
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        if (!si) {
            ALOGE("add_service('%s',%x) uid=%d - OUT OF MEMORY\n",
                 str8(s, len), handle, uid);
            return -1;
        }
        si->handle = handle;
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->death.func = (void*) svcinfo_death;
        si->death.ptr = si;
        si->allow_isolated = allow_isolated;
        si->next = svclist;
        svclist = si;
    }

    binder_acquire(bs, handle);
    binder_link_to_death(bs, handle, &si->death);
    return 0;
}

The service is stored into svclist. We won't follow the reply for addService here.
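As an aside, svclist is nothing magical: it is a plain singly linked list of svcinfo records kept in ServiceManager's own user space. From service_manager.c, roughly (comments added):

struct svcinfo
{
    struct svcinfo *next;
    uint32_t handle;          /* the handle the driver assigned to this service */
    struct binder_death death;
    int allow_isolated;
    size_t len;
    uint16_t name[0];         /* UTF-16 service name, e.g. "vibrator"           */
};

struct svcinfo *svclist = NULL;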

Part 3: Obtaining VibratorService

Let's open with a picture (image from the web):

(Figure: Binder client / ServiceManager / service overview)

Is the handle we finally get back the same one ServiceManager stores? The answer is no! Although the values may happen to coincide. Where there is an addService there is also a getService:

// name = vibrator
public static IBinder getService(String name) {
    try {
        IBinder service = sCache.get(name);
        if (service != null) {
            return service;
        } else {
            return Binder.allowBlocking(getIServiceManager().getService(name));
        }
    } catch (RemoteException e) {
        Log.e(TAG, "error in getService", e);
    }
    return null;
}

Here is our familiar getIServiceManager again; let's jump straight into ServiceManagerProxy's getService:

public IBinder getService(String name) throws RemoteException {
    Parcel data = Parcel.obtain();
    Parcel reply = Parcel.obtain();
    data.writeInterfaceToken(IServiceManager.descriptor);
    data.writeString(name);
    mRemote.transact(GET_SERVICE_TRANSACTION, data, reply, 0);
    IBinder binder = reply.readStrongBinder();
    reply.recycle();
    data.recycle();
    return binder;
}

Still mRemote.transact, and mRemote is still the BpBinder, only this time the cmd is GET_SERVICE_TRANSACTION. Let's go straight to how ServiceManager handles it:

int svcmgr_handler(struct binder_state *bs,
                       struct binder_transaction_data *txn,
                       struct binder_io *msg,
                       struct binder_io *reply)
    {
        ...
        switch(txn->code) {
        case SVC_MGR_GET_SERVICE:
        case SVC_MGR_CHECK_SERVICE:
            s = bio_get_string16(msg, &len);
            if (s == NULL) {
                return -1;
            }
            handle = do_find_service(s, len, txn->sender_euid, txn->sender_pid);
            if (!handle)
                break;
            bio_put_ref(reply, handle);
            return 0;
        ...
        }

        bio_put_uint32(reply, 0);
        return 0;
    }


    uint32_t do_find_service(const uint16_t *s, size_t len, uid_t uid, pid_t spid)
    {
        struct svcinfo *si = find_svc(s, len);

        if (!si || !si->handle) {
            return 0;
        }

        if (!si->allow_isolated) {
            // If this service doesn't allow access from isolated processes,
            // then check the uid to see if it is isolated.
            uid_t appid = uid % AID_USER;
            if (appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END) {
                return 0;
            }
        }

        if (!svc_can_find(s, len, spid, uid)) {
            return 0;
        }

        return si->handle;
    }

    struct svcinfo *find_svc(const uint16_t *s16, size_t len)
    {
        struct svcinfo *si;

        for (si = svclist; si; si = si->next) {
            if ((len == si->len) &&
                !memcmp(s16, si->name, len * sizeof(uint16_t))) {
                return si;
            }
        }
        return NULL;
    }

The matching handle is looked up in svclist by service name and handed back through the binder driver:

int main(int argc, char** argv)
{

    // step 1: open the binder driver; bs is a binder_state
    bs = binder_open(driver, 128*1024);
    ...

    // step 2: register as the binder driver's context manager
    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }
    ...
    
    // step 3: enter the loop and handle requests from clients (ServiceManager is the server)
    binder_loop(bs, svcmgr_handler);
    
    return 0;
}

void binder_loop(struct binder_state *bs, binder_handler func)
{
    ...
    for (;;) {
        ...
        // process the data fetched from the binder driver
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        ...
    }
}

int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    ...
    while (ptr < end) {
        ...
        switch(cmd) {
        ...
        case BR_TRANSACTION: {
            ...
            if (func) {
                ...
                // func is svcmgr_handler
                res = func(bs, txn, &msg, &reply);
                if (txn->flags & TF_ONE_WAY) {
                    binder_free_buffer(bs, txn->data.ptr.buffer);
                } else { // not one-way
                    binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
                }
            }
            ...
        }
        ...
        }
    }

    return r;
}

After svcmgr_handler returns we enter binder_send_reply:

void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status)
{
    struct {
        uint32_t cmd_free;
        binder_uintptr_t buffer;
        uint32_t cmd_reply;
        struct binder_transaction_data txn;
    } __attribute__((packed)) data;

    data.cmd_free = BC_FREE_BUFFER;
    data.buffer = buffer_to_free;
    // the command is BC_REPLY
    data.cmd_reply = BC_REPLY;
    data.txn.target.ptr = 0;
    data.txn.cookie = 0;
    data.txn.code = 0;
    if (status) {
        data.txn.flags = TF_STATUS_CODE;
        data.txn.data_size = sizeof(int);
        data.txn.offsets_size = 0;
        data.txn.data.ptr.buffer = (uintptr_t)&status;
        data.txn.data.ptr.offsets = 0;
    } else {
        data.txn.flags = 0;
        data.txn.data_size = reply->data - reply->data0;
        data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
        data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
        data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
    }
    binder_write(bs, &data, sizeof(data));
}

This in turn calls binder_write:

int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    // the familiar ioctl
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr,"binder_write: ioctl failed (%s)\n",
                strerror(errno));
    }
    return res;
}

There is our familiar ioctl again. Note that the command this time is BC_REPLY; let's go straight to how BC_REPLY is handled:

static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	...
	while (ptr < end && thread->return_error == BR_OK) {
		...
		switch (cmd) {
		...
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;
			if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}
		...
	}
	return 0;
}

We are back in binder_transaction, only this time as a reply:

static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	...
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	...
	if (reply) {
		in_reply_to = thread->transaction_stack;
		...
		binder_set_nice(in_reply_to->saved_priority);
		...
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = in_reply_to->from;
		...
		target_proc = target_thread->proc;
	} else {
		...
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		...
	}
	...
	for (; offp < off_end; offp++) {
		...
		switch (hdr->type) {
		...
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				goto err_translate_failed;
			}
		} break;
		...
		}
	}
	...
	t->work.type = BINDER_WORK_TRANSACTION;
	// here target_list is the client thread's todo queue
	list_add_tail(&t->work.entry, target_list);
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);
	...
}

Once again a target_list is picked and the binder_transaction is queued onto it, but this time target_list is target_thread->todo, i.e. our Client side's todo queue. Since binder_translate_binder earlier changed fp's type to BINDER_TYPE_HANDLE, we now go into binder_translate_handle:

static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_ref *ref;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
    
    // look up the ref by handle in ServiceManager's binder_proc refs_by_desc red-black tree
	ref = binder_get_ref(proc, fp->handle,
			     fp->hdr.type == BINDER_TYPE_HANDLE);
	if (!ref) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
		return -EPERM;
	if (ref->node->proc == target_proc) { // the node lives in the target process itself
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = ref->node->ptr;
		fp->cookie = ref->node->cookie;
		binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
				0, NULL);
		trace_binder_transaction_ref_to_node(t, ref);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     ref->debug_id, ref->desc, ref->node->debug_id,
			     (u64)ref->node->ptr);
	} else { // a different process
		struct binder_ref *new_ref;
            // create a new binder_ref in target_proc and store it in both red-black trees
		new_ref = binder_get_ref_for_node(target_proc, ref->node);
		if (!new_ref)
			return -EINVAL;
		fp->binder = 0;
		fp->handle = new_ref->desc;
		fp->cookie = 0;
		binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
			       NULL);
		trace_binder_transaction_ref_to_ref(t, ref, new_ref);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     ref->debug_id, ref->desc, new_ref->debug_id,
			     new_ref->desc, ref->node->debug_id);
	}
	return 0;
}

This function takes the binder_node of VibratorService that ServiceManager references and creates a reference to it in our current process's binder_proc, so the Client process's binder_proc now holds a binder_ref (handle) to VibratorService's binder_node! Let's see how binder_get_ref_for_node performs the insertion:

static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;
	struct binder_context *context = proc->context;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);
		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
        // if none exists yet, create a new ref
	new_ref = kzalloc_preempt_disabled(sizeof(*ref));
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
    
        // insert it into binder_proc's refs_by_node red-black tree
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
	new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);
		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
    
      // insert it into binder_proc's refs_by_desc red-black tree
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for node %d\n",
			      proc->pid, new_ref->debug_id, new_ref->desc,
			      node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for dead node\n",
			      proc->pid, new_ref->debug_id, new_ref->desc);
	}
	return new_ref;
}

In every process's binder_proc, handle 0 is ServiceManager; the other handles are allocated incrementally, and a service is only added when that process actually needs it. So VibratorService's handle will most likely differ from one process's binder_proc to another's, and it doesn't need to be the same.

References:

gityuan.com/2014/01/03/…