Notes on Binder Death Notifications

Binder death notifications are the mechanism that informs a Client after a Service has died. This article is a set of research notes on how the mechanism works.

linkToDeath and unlinkToDeath are declared on the BinderProxy class, but their logic is mostly delegated to the native layer:

class BinderProxy {

    private long mObject; // the long actually stores a native pointer (void*); the two are cast back and forth
    private long mOrgue;
    private WeakReference mSelf;

    public native void linkToDeath(DeathRecipient recipient, int flags) throws RemoteException;
    public native boolean unlinkToDeath(DeathRecipient recipient, int flags);
}
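
Before digging into the internals, here is a minimal usage sketch of the same mechanism from the native side (the Java IBinder#linkToDeath call lands in the same BpBinder::linkToDeath analyzed in 1.2 below). It assumes an Android native build with libbinder; MyDeathRecipient, watchService, and "some.service" are illustrative names, not framework API:

#include <binder/IBinder.h>
#include <binder/IServiceManager.h>
#include <utils/String16.h>

using namespace android;

// Invoked on a binder thread once the remote (Service) process dies.
class MyDeathRecipient : public IBinder::DeathRecipient {
public:
    void binderDied(const wp<IBinder>& /*who*/) override {
        // Typically: clean up, then try to re-acquire the service.
    }
};

void watchService() {
    // getService returns a BpBinder-backed IBinder for remote services.
    sp<IBinder> binder = defaultServiceManager()->getService(String16("some.service"));
    if (binder == nullptr) return;

    sp<MyDeathRecipient> recipient = new MyDeathRecipient();
    binder->linkToDeath(recipient);   // registration path: section 1
    // ... use the service ...
    binder->unlinkToDeath(recipient); // clearing path: section 3
}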

On the JNI side it corresponds to the binderproxy_offsets_t struct:

static struct binderproxy_offsets_t
{
    // Class state.
    jclass mClass;
    jmethodID mConstructor;
    jmethodID mSendDeathNotice;

    // Object state.
    jfieldID mObject;
    jfieldID mSelf;
    jfieldID mOrgue;

} gBinderProxyOffsets;

Here is how gBinderProxyOffsets is mapped onto a BinderProxy object:

static int int_register_android_os_BinderProxy(JNIEnv* env)
{
    jclass clazz = FindClassOrDie(env, "java/lang/Error");
    gErrorOffsets.mClass = MakeGlobalRefOrDie(env, clazz);

    clazz = FindClassOrDie(env, kBinderProxyPathName);
    gBinderProxyOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    gBinderProxyOffsets.mConstructor = GetMethodIDOrDie(env, clazz, "<init>", "()V");
    gBinderProxyOffsets.mSendDeathNotice = GetStaticMethodIDOrDie(env, clazz, "sendDeathNotice",
            "(Landroid/os/IBinder$DeathRecipient;)V");

    gBinderProxyOffsets.mObject = GetFieldIDOrDie(env, clazz, "mObject", "J");
    gBinderProxyOffsets.mSelf = GetFieldIDOrDie(env, clazz, "mSelf",
                                                "Ljava/lang/ref/WeakReference;");
    gBinderProxyOffsets.mOrgue = GetFieldIDOrDie(env, clazz, "mOrgue", "J");

    clazz = FindClassOrDie(env, "java/lang/Class");
    gClassOffsets.mGetName = GetMethodIDOrDie(env, clazz, "getName", "()Ljava/lang/String;");

    return RegisterMethodsOrDie(
        env, kBinderProxyPathName,
        gBinderProxyMethods, NELEM(gBinderProxyMethods));
}

1. Registering a Death Notification

1.1 BinderProxy#linkToDeath
static void android_os_BinderProxy_linkToDeath(JNIEnv* env, jobject obj,
        jobject recipient, jint flags) // throws RemoteException
{
    if (recipient == NULL) {
        jniThrowNullPointerException(env, NULL);
        return;
    }

    IBinder* target = (IBinder*) // target is actually a BpBinder*
        env->GetLongField(obj, gBinderProxyOffsets.mObject);
    if (target == NULL) {
        ALOGW("Binder has been finalized when calling linkToDeath() with recip=%p)\n", recipient);
        assert(false);
    }

    if (!target->localBinder()) { // a remote BpBinder enters this block
        DeathRecipientList* list = (DeathRecipientList*)
                env->GetLongField(obj, gBinderProxyOffsets.mOrgue);
                
        sp<JavaDeathRecipient> jdr = new JavaDeathRecipient(env, recipient, list);
        
        status_t err = target->linkToDeath(jdr, NULL, flags); // call BpBinder::linkToDeath
        
        if (err != NO_ERROR) {
            jdr->clearReference();
            signalExceptionForError(env, obj, err, true /*canThrowRemoteException*/);
        }
    }
}

As a quick aside, here is where the field behind gBinderProxyOffsets.mObject gets assigned:

static jobject android_os_BinderInternal_getContextObject(JNIEnv* env, jobject clazz)
{
    sp<IBinder> b = ProcessState::self()->getContextObject(NULL); // returns a pointer to a BpBinder
    return javaObjectForIBinder(env, b);
}
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
{
    return getStrongProxyForHandle(0);
}
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
    sp<IBinder> result;

    AutoMutex _l(mLock);

    handle_entry* e = lookupHandleLocked(handle);

    if (e != NULL) {
       
        IBinder* b = e->binder;
        if (b == NULL || !e->refs->attemptIncWeak(this)) {
            if (handle == 0) {
                // Special case for context manager... handle == 0 refers to the service manager
                ...
            }

            b = new BpBinder(handle); // construct a new BpBinder
            e->binder = b;
            if (b) e->refs = b->getWeakRefs();
            result = b;
        } else {
            ...
        }
    }

    return result;
}

So ProcessState::getContextObject(NULL) returns a BpBinder*.

jobject javaObjectForIBinder(JNIEnv* env, const sp<IBinder>& val) // val is the sp<IBinder> wrapping the BpBinder
{
    ...
    object = env->NewObject(gBinderProxyOffsets.mClass, gBinderProxyOffsets.mConstructor);
    if (object != NULL) {
        ...
        // here the BpBinder* is stored into BinderProxy.mObject
        env->SetLongField(object, gBinderProxyOffsets.mObject, (jlong)val.get());
        ...
    }
    ...
}
1.2 BpBinder#linkToDeath
status_t BpBinder::linkToDeath(
    const sp<DeathRecipient>& recipient, void* cookie, uint32_t flags)
{
    Obituary ob;
    ob.recipient = recipient; // the recipient is stored in ob
    ob.cookie = cookie;       // cookie is NULL here
    ob.flags = flags;

    {
        AutoMutex _l(mLock);

        if (!mObitsSent) {      // mObitsSent is initially 0
            if (!mObituaries) { // lazily create the vector on first registration
                mObituaries = new Vector<Obituary>;
                if (!mObituaries) {
                    return NO_MEMORY;
                }
                
                getWeakRefs()->incWeak(this);
                IPCThreadState* self = IPCThreadState::self();
                self->requestDeathNotification(mHandle, this); // queue the command data into the parcel (mOut)
                self->flushCommands();                         // push the packet to the binder driver
            }
            ssize_t res = mObituaries->add(ob); // add ob to the mObituaries vector
            return res >= (ssize_t)NO_ERROR ? (status_t)NO_ERROR : res;
        }
    }

    return DEAD_OBJECT;
}
1.3 IPCThreadState#requestDeathNotification
status_t IPCThreadState::requestDeathNotification(int32_t handle, BpBinder* proxy)
{
    mOut.writeInt32(BC_REQUEST_DEATH_NOTIFICATION);
    mOut.writeInt32((int32_t)handle);
    mOut.writePointer((uintptr_t)proxy);
    return NO_ERROR;
}

As you can see, requestDeathNotification packs up the data needed to register the death notification, namely:

  • BC_REQUEST_DEATH_NOTIFICATION — the binder protocol command to perform;
  • handle — the Client-side reference to the Service;
  • proxy — the pointer (BpBinder*) on the Client that should receive the notification.

All of the above is written into the parcel (mOut); the resulting layout is sketched below.
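
After requestDeathNotification returns, mOut therefore holds one command laid out roughly as follows. The struct name here is hypothetical, for illustration only, but the field order matches the get_user() sequence binder_thread_write uses to parse it in 1.9:

#include <cstdint>

// Hypothetical struct illustrating one BC_REQUEST_DEATH_NOTIFICATION
// command as it sits in mOut.
struct RequestDeathNotification {
    uint32_t  cmd;     // BC_REQUEST_DEATH_NOTIFICATION
    uint32_t  handle;  // the Client-side reference to the Service
    uintptr_t cookie;  // the BpBinder* identifying this registration
};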

1.4 IPCThreadState#flushCommands
void IPCThreadState::flushCommands()
{
    if (mProcess->mDriverFD <= 0)
        return;
    talkWithDriver(false);
}
1.5 IPCThreadState#talkWithDriver

The doReceive parameter of talkWithDriver indicates whether the Client, besides sending its data, also wants to wait for and receive the Server's reply. The binder_write_read struct covers both directions and is designed for use with the ioctl system call: a single ioctl can perform both a write and a read, which is why both directions get set up below.
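
For reference, the struct itself looks like this. The typedefs are stubbed with fixed-width stand-ins so the sketch is self-contained; the real definitions come from the binder UAPI header:

#include <cstdint>

// Stand-ins for the UAPI typedefs so this sketch compiles on its own.
typedef uint64_t binder_size_t;
typedef uint64_t binder_uintptr_t;

// Mirrors struct binder_write_read from the binder UAPI header: one
// ioctl(BINDER_WRITE_READ) call carries both directions at once.
struct binder_write_read {
    binder_size_t    write_size;     // bytes available in write_buffer
    binder_size_t    write_consumed; // bytes the driver actually processed
    binder_uintptr_t write_buffer;   // user-space address of mOut.data()
    binder_size_t    read_size;      // capacity of read_buffer
    binder_size_t    read_consumed;  // bytes the driver filled in
    binder_uintptr_t read_buffer;    // user-space address of mIn.data()
};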

status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    if (mProcess->mDriverFD <= 0) {
        return -EBADF;
    }
 
    binder_write_read bwr; 
    
    // Is the read buffer empty?
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();
    
    // doReceive is false here, so outAvail = mOut.dataSize()
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0; 
    
    bwr.write_size = outAvail; // the amount of data to write
    bwr.write_buffer = (uintptr_t)mOut.data();

    // This is what we'll read.
    if (doReceive && needRead) {  
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else { 
        bwr.read_size = 0;     // doReceive is false, so we take the else branch
        bwr.read_buffer = 0;
    }
    
    // Return immediately if there is nothing to do.
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;

    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    do {
        
#if defined(HAVE_ANDROID_OS)
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0) // issue the ioctl system call
            err = NO_ERROR;
        else
            err = -errno;
#else
        err = INVALID_OPERATION;
#endif
        if (mProcess->mDriverFD <= 0) {
            err = -EBADF;
        }
        IF_LOG_COMMANDS() {
            alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
        }
    } while (err == -EINTR);

    if (err >= NO_ERROR) {   // bwr.write_consumed is how much of the write buffer the driver consumed
        if (bwr.write_consumed > 0) {
            if (bwr.write_consumed < mOut.dataSize())
                mOut.remove(0, bwr.write_consumed);
            else
                mOut.setDataSize(0);
        }
        if (bwr.read_consumed > 0) {  // bwr.read_consumed is how much data the driver put in the read buffer
            mIn.setDataSize(bwr.read_consumed);
            mIn.setDataPosition(0);
        }
        return NO_ERROR;
    }
    
    return err;
}
1.6 ioctl

ioctl is one of the function pointers in struct file_operations; when the binder driver initializes, it is pointed at binder_ioctl:

static const struct file_operations binder_fops = { 
    .owner = THIS_MODULE,
    .poll = binder_poll,
    .unlocked_ioctl = binder_ioctl,
    .compat_ioctl = binder_ioctl,
    .mmap = binder_mmap,
    .open = binder_open,
    .flush = binder_flush,
    .release = binder_release, 
};

Here is the relevant excerpt from binder_ioctl:

1.7 binder_ioctl
switch (cmd) {
   case BINDER_WRITE_READ:      // perform binder read/write
       ret = binder_ioctl_write_read(filp, cmd, arg, thread);
       if (ret)
           goto err;
       break;
   case BINDER_SET_MAX_THREADS: // set the maximum number of binder threads
       ...
   case BINDER_SET_CONTEXT_MGR: // register the service manager
       ...
   case BINDER_THREAD_EXIT:     // release a binder thread
       ...
   case BINDER_VERSION: {       // query the binder protocol version
       ...
   }

The main binder ioctl commands and their purposes:

  • BINDER_WRITE_READ — send and receive data across processes; the workhorse command of binder IPC.
  • BINDER_SET_MAX_THREADS — set the maximum number of binder threads for a Service. A Service may handle requests from multiple Clients concurrently, hence its binder thread pool.
  • BINDER_SET_CONTEXT_MGR — register the service manager (a special Service whose handle is 0).
  • BINDER_THREAD_EXIT — release and delete a binder thread when it exits.
  • BINDER_VERSION — query the binder protocol version.
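
These commands are plain ioctl macros in the binder UAPI header. The definitions below are quoted from memory of uapi/linux/android/binder.h and should be treated as illustrative; verify the values against your kernel tree:

// From the binder UAPI header (quoted from memory; may vary by kernel version):
#define BINDER_WRITE_READ       _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_MAX_THREADS  _IOW('b', 5, __u32)
#define BINDER_SET_CONTEXT_MGR  _IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT      _IOW('b', 8, __s32)
#define BINDER_VERSION          _IOWR('b', 9, struct binder_version)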

Registering a death notification on behalf of a Client is just another ordinary exchange between the Client and the driver, so the command used is BINDER_WRITE_READ. The function to focus on is therefore binder_ioctl_write_read.

1.8 binder_ioctl_write_read
static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    unsigned int size = _IOC_SIZE(cmd);
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;

   
    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { // copy ubuf into bwr (user space -> kernel space)
        ret = -EFAULT;
        goto out;
    }

   
    if (bwr.write_size > 0) { // per 1.5, bwr.write_size == mOut.dataSize(), so we enter this block
        // the write buffer has data, so perform the binder write
        ret = binder_thread_write(proc, thread,
                      bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
        trace_binder_write_done(ret);
        if (ret < 0) { // on write failure, copy bwr back to user space and return
            bwr.read_consumed = 0;
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }
    
    // note: the write is handled before the read
    // per 1.5: bwr.read_size = 0 and bwr.read_buffer = 0
    if (bwr.read_size > 0) { // read_size is 0 here, so this block is skipped
        // the read buffer has room, so perform the binder read
        ret = binder_thread_read(proc, thread,
                      bwr.read_buffer, bwr.read_size, &bwr.read_consumed,
                      filp->f_flags & O_NONBLOCK);
        trace_binder_read_done(ret);
        if (!list_empty(&proc->todo))
            wake_up_interruptible(&proc->wait); // wake up waiting threads
        if (ret < 0) { // on read failure, copy bwr back to user space and return
            if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
                ret = -EFAULT;
            goto out;
        }
    }

    if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { // copy bwr from kernel space back to user space (ubuf)
        ret = -EFAULT;
        goto out;
    }
out:
    return ret;
}

As noted in 1.5, bwr.write_size was set to mOut.dataSize() and bwr.read_size to 0, so binder_thread_write gets called.

1.9 binder_thread_write
static int binder_thread_write(struct binder_proc *proc,
      struct binder_thread *thread,
      binder_uintptr_t binder_buffer, size_t size,
      binder_size_t *consumed)
{
  uint32_t cmd;
  // proc and thread describe the calling (initiating) process
  struct binder_context *context = proc->context;
  void __user *buffer = (void __user *)(uintptr_t)binder_buffer; // binder_buffer holds the incoming data
  void __user *ptr = buffer + *consumed; 
  void __user *end = buffer + size;
  while (ptr < end && thread->return_error == BR_OK) {
    get_user(cmd, (uint32_t __user *)ptr); // reads the BC_REQUEST_DEATH_NOTIFICATION cmd
    ptr += sizeof(uint32_t);
    switch (cmd) {
        case BC_REQUEST_DEATH_NOTIFICATION:{ // register a death notification
            uint32_t target;
            void __user *cookie;
            struct binder_ref *ref;
            struct binder_ref_death *death;

            get_user(target, (uint32_t __user *)ptr); // read target (the handle written in 1.3)
            ptr += sizeof(uint32_t);
            get_user(cookie, (void __user * __user *)ptr); // read cookie (the BpBinder* written in 1.3)
            ptr += sizeof(void *);

            ref = binder_get_ref(proc, target); // look up the target Service's binder_ref

            if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
                // only one death notification may be registered per ref
                if (ref->death) {
                    break; 
                }
                death = kzalloc(sizeof(*death), GFP_KERNEL); // allocate a binder_ref_death object

                INIT_LIST_HEAD(&death->work.entry);
                death->cookie = cookie; // the BpBinder* is stored in death
                ref->death = death;     // and death is stored in the binder_ref
                
                
                // if the target Service's process is already dead, deliver the death notification immediately
                if (ref->node->proc == NULL) { 
                    ref->death->work.type = BINDER_WORK_DEAD_BINDER;
                    // if the current thread is a binder thread, add the work directly to its own todo queue
                    if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
                        list_add_tail(&ref->death->work.entry, &thread->todo);
                    } else {
                        list_add_tail(&ref->death->work.entry, &proc->todo);
                        wake_up_interruptible(&proc->wait);
                    }
                }
            } else {
                ...
            }
        } break;
      case ...;
    }
    *consumed = ptr - buffer;
  }
}
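
The structure the driver fills in here is small. Below is an abridged sketch showing only the relevant fields; the real definitions live in the kernel's binder driver, and the _sketch names are mine:

#include <cstdint>

// Simplified stand-in: in the kernel, binder_work is a list_head plus a type tag.
struct binder_work_sketch {
    int type; // e.g. BINDER_WORK_DEAD_BINDER, BINDER_WORK_CLEAR_DEATH_NOTIFICATION
};

// Abridged view of binder_ref_death: one death registration per binder_ref.
struct binder_ref_death_sketch {
    binder_work_sketch work;   // queued onto a todo list when the Service dies
    uintptr_t          cookie; // the BpBinder* the Client registered
};
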
1.10 Summary

Registration of the death notification happens only on the Client side; its purpose is for the Client to be notified and react when the Server goes down, whether it stops normally or not. The whole linkToDeath flow is essentially the story of how the Java-side DeathRecipient object gets recorded through the binder machinery, so we only need to watch how that object is passed along and where it is stored. A brief recap:

  1. The flow spans Java, JNI, and native code;
  2. BinderProxy#linkToDeath is itself a native method; its parameters include the Java-side DeathRecipient object;
  3. In JNI, a JavaDeathRecipient is constructed around the Java DeathRecipient, yielding the pointer jdr. The JNI-level BinderProxy holds a BpBinder pointer, so BpBinder::linkToDeath is invoked with jdr as an argument;
  4. In native code, BpBinder::linkToDeath creates an Obituary object ob and stores jdr in its recipient field; ob then goes into the BpBinder's mObituaries vector. This ties the BpBinder to the DeathRecipient, so from here on only the BpBinder needs to travel. The corresponding data (the BC_REQUEST_DEATH_NOTIFICATION command + handle + BpBinder pointer) is then packed and sent to the binder driver;
  5. In the binder driver, the BpBinder pointer is stored in a binder_ref_death, and the binder_ref_death is stored in the binder_ref (looked up via the Service's handle).

So the Java-side DeathRecipient is handed down layer by layer until it is recorded against the BpBinder; the BpBinder pointer then crosses from the process's user space into kernel space and, via binder_ref_death, ends up stored in the binder_ref, completing the registration. This is intuitive: binder_ref represents the Client's reference to the Service, and since a Service's death must be reported to the Client, the binder_ref is the natural anchor point.

2. Notifying the Client of Death

From the registration flow we know the notification record lives in the kernel, so the notification itself is also triggered from the kernel.

The binder driver is a virtual character device; a process opens it via binder_open before doing IPC. When the Service's process exits (or explicitly closes the driver), the driver's release hook is invoked, which here is binder_release().

2.1 binder_release
static const struct file_operations binder_fops = { 
    .owner = THIS_MODULE, 
    .poll = binder_poll, 
    .unlocked_ioctl = binder_ioctl, 
    .compat_ioctl = binder_ioctl, 
    .mmap = binder_mmap, 
    .open = binder_open, 
    .flush = binder_flush, 
    .release = binder_release, 
 };

The path then goes through binder_deferred_func -> binder_deferred_release -> binder_node_release; we won't examine the intermediate code and just note the call chain here.

2.2 binder_node_release
static int binder_node_release(struct binder_node *node, int refs)
{
    struct binder_ref *ref;
    int death = 0;

    list_del_init(&node->work.entry);

    binder_release_work(&node->async_todo);

    if (hlist_empty(&node->refs)) { // the Service has no references; free its node directly
        kfree(node); 
        binder_stats_deleted(BINDER_STAT_NODE);
        return refs;
    }

    node->proc = NULL;
    node->local_strong_refs = 0;
    node->local_weak_refs = 0;
    hlist_add_head(&node->dead_node, &binder_dead_nodes);

    hlist_for_each_entry(ref, &node->refs, node_entry) { // iterate over node->refs
        refs++;
        if (!ref->death) // this Client registered no death notification; skip it
            continue;
        death++;

        if (list_empty(&ref->death->work.entry)) {
            // queue a BINDER_WORK_DEAD_BINDER work item onto the todo list
            ref->death->work.type = BINDER_WORK_DEAD_BINDER;
            list_add_tail(&ref->death->work.entry, &ref->proc->todo);
            wake_up_interruptible(&ref->proc->wait);
        }
    }
    return refs;
}

Every Service passing through the driver gets a binder_node, and every process referencing that Service gets a binder_ref, so in practice node-to-ref is a one-to-many relationship, and the node records it. binder_node_release walks the node's refs and checks each one for a registered death notification; if one exists, it queues a work item onto the todo list of the process owning the ref and wakes that process. A sketch of the two structures follows.
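
An abridged sketch of that one-to-many linkage, with fields simplified and most omitted (the _sketch names are mine; the real definitions use kernel hlist structures):

struct binder_ref_sketch; // forward declaration

// One binder_node per live Service object.
struct binder_node_sketch {
    void*               proc;      // owning process; set to NULL once it dies
    binder_ref_sketch*  refs_head; // head of the list of refs pointing at this node
};

// One binder_ref per (client process, service) pair.
struct binder_ref_sketch {
    binder_ref_sketch*   next_ref; // sibling in the node's refs list
    binder_node_sketch*  node;     // the Service's node
    void*                proc;     // the client process that owns this ref
    void*                death;    // binder_ref_death*; non-NULL iff a notification is registered
};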

2.3 binder_thread_read
static int binder_thread_read(struct binder_proc *proc,
                              struct binder_thread *thread,
                              binder_uintptr_t binder_buffer, size_t size,
                              binder_size_t *consumed, int non_block){
    ...
    // the binder thread sleeps here until there is work for it
    wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
    ...
    while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w;
        struct binder_transaction *t = NULL;

        // take the binder_work queued earlier off the todo list; its type is BINDER_WORK_DEAD_BINDER
        if (!list_empty(&thread->todo)) {
            w = list_first_entry(&thread->todo, struct binder_work, entry);
        } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
            w = list_first_entry(&proc->todo, struct binder_work, entry);
        }

        switch (w->type) {
            case BINDER_WORK_DEAD_BINDER: {
                struct binder_ref_death *death;
                uint32_t cmd;

                death = container_of(w, struct binder_ref_death, work);
                if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
                    ...
                else
                    cmd = BR_DEAD_BINDER; // we take this branch
                put_user(cmd, (uint32_t __user *)ptr); // copy the BR_DEAD_BINDER cmd to user space
                ptr += sizeof(uint32_t);

                // this cookie is the BpBinder* passed in during registration
                put_user(death->cookie, (binder_uintptr_t __user *)ptr);
                ptr += sizeof(binder_uintptr_t);

                if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
                    ...
                } else
                    // move the work onto the delivered_death list
                    list_move(&w->entry, &proc->delivered_death);
                if (cmd == BR_DEAD_BINDER)
                    goto done;
            } break;
       }
   }
}

Once the proxy-side (Client) process is woken, it processes its todo queue and copies BR_DEAD_BINDER plus the BpBinder pointer into the Client's user space, laid out as sketched below.
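
So what lands at the front of the Client's read buffer (mIn) is just a command word plus a pointer; an illustrative layout (the struct name is hypothetical):

#include <cstdint>

// Hypothetical struct mirroring the two put_user() calls above;
// IPCThreadState reads these two values back in 2.4.
struct DeadBinderNotification {
    uint32_t  cmd;    // BR_DEAD_BINDER
    uintptr_t cookie; // the BpBinder* registered in section 1
};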

2.4 IPCThreadState::getAndExecuteCommand
status_t IPCThreadState::getAndExecuteCommand()
{
    status_t result;
    int32_t cmd;

    result = talkWithDriver();
    if (result >= NO_ERROR) {
        size_t IN = mIn.dataAvail();
        if (IN < sizeof(int32_t)) return result;
        cmd = mIn.readInt32();
        ...
        result = executeCommand(cmd); // cmd here is BR_DEAD_BINDER
        ...
    }
    ...
}

status_t IPCThreadState::executeCommand(int32_t cmd)
{
    BBinder* obj;
    RefBase::weakref_type* refs;
    status_t result = NO_ERROR;
    
    switch ((uint32_t)cmd) {
        case BR_DEAD_BINDER:
        {
            BpBinder *proxy = (BpBinder*)mIn.readPointer();
            proxy->sendObituary();
            ...
        } break;
     ...

executeCommand hits the BR_DEAD_BINDER branch, reads back the pointer to the BpBinder, and calls BpBinder::sendObituary:

2.5 BpBinder::sendObituary
void BpBinder::sendObituary()
{
    mAlive = 0;
    if (mObitsSent) return;

    mLock.lock();
    // recall that registration added each Obituary to mObituaries
    Vector<Obituary>* obits = mObituaries; 
    if(obits != NULL) {
        IPCThreadState* self = IPCThreadState::self();
        // the death is being handled now, so the kernel-side registration can be cleared
        self->clearDeathNotification(mHandle, this);
        self->flushCommands();
        mObituaries = NULL;
    }
    mObitsSent = 1;
    mLock.unlock();

    if (obits != NULL) { // walk the obituary list and notify each recipient
        const size_t N = obits->size();
        for (size_t i=0; i<N; i++) {
            reportOneDeath(obits->itemAt(i));
        }

        delete obits;
    }
}
2.6 BpBinder::reportOneDeath
void BpBinder::reportOneDeath(const Obituary& obit)
{
    sp<DeathRecipient> recipient = obit.recipient.promote(); // promote the recipient stored in the Obituary
    
    if (recipient == NULL) return;

    recipient->binderDied(this); // invoke the death callback
}

For a death notification registered through bindService, the recipient found here is LoadedApk's DeathMonitor (linked to the service binder when the connection is first established). Here is its implementation:

private final class DeathMonitor implements IBinder.DeathRecipient
{
    DeathMonitor(ComponentName name, IBinder service) {
        mName = name;
        mService = service;
    }

    public void binderDied() {
        death(mName, mService);
    }

    final ComponentName mName;
    final IBinder mService;
}
2.7 Summary

The process of notifying the Client when the Service process dies (normally or abnormally):

  1. Closing the binder driver releases the Service's binder_node and the binder_refs associated with that node. Each binder_ref may hold related structures, possibly including the binder_ref_death for a registered death notification. If that structure exists, the death work item, typed BINDER_WORK_DEAD_BINDER, is added to the todo queue of the process owning the ref, and that process is woken;
  2. The ref's process takes the pending binder_work off its todo queue, recovers the binder_ref_death and hence the BpBinder pointer, and copies the BpBinder pointer together with BR_DEAD_BINDER (the cmd is translated from the BC to the BR form) from kernel space into the process's user space;
  3. BpBinder::sendObituary is called. The Obituary objects all live in the BpBinder's mObituaries vector; the vector is walked, calling reportOneDeath for each entry. At the same time the proxy side clears its kernel-side death-notification registration;
  4. reportOneDeath extracts the DeathRecipient from the Obituary and calls its binderDied, which is then relayed up through JNI to the Java layer.

That completes the notification.

3. Clearing a Death Notification

3.1 BinderProxy#unlinkToDeath
public native boolean unlinkToDeath(DeathRecipient recipient, int flags);
static jboolean android_os_BinderProxy_unlinkToDeath(JNIEnv* env, jobject obj,
                                                 jobject recipient, jint flags)
{
    jboolean res = JNI_FALSE;
    if (recipient == NULL) {
        jniThrowNullPointerException(env, NULL);
        return res;
    }

    IBinder* target = (IBinder*)
        env->GetLongField(obj, gBinderProxyOffsets.mObject); // again fetch the pointer to the BpBinder object
    ...

    if (!target->localBinder()) { // a remote proxy enters this block
        status_t err = NAME_NOT_FOUND;

        // If we find the matching recipient, proceed to unlink using that
        DeathRecipientList* list = (DeathRecipientList*)
                env->GetLongField(obj, gBinderProxyOffsets.mOrgue); // fetch the pointer to the DeathRecipientList
        sp<JavaDeathRecipient> origJDR = list->find(recipient);  // linkToDeath added the recipient to this list, so look it up there first
        LOGDEATH("   unlink found list %p and JDR %p", list, origJDR.get());
        if (origJDR != NULL) {
            wp<IBinder::DeathRecipient> dr;
            
            err = target->unlinkToDeath(origJDR, NULL, flags, &dr); // call BpBinder::unlinkToDeath
            if (err == NO_ERROR && dr != NULL) {
                sp<IBinder::DeathRecipient> sdr = dr.promote();
                JavaDeathRecipient* jdr = static_cast<JavaDeathRecipient*>(sdr.get());
                if (jdr != NULL) {
                    jdr->clearReference();
                }
            }
        }

        if (err == NO_ERROR || err == DEAD_OBJECT) {
            res = JNI_TRUE;
        } else {
            jniThrowException(env, "java/util/NoSuchElementException",
                              "Death link does not exist");
        }
    }

    return res;
}
3.2 BpBinder#unlinkToDeath
status_t BpBinder::unlinkToDeath(
    const wp<DeathRecipient>& recipient, void* cookie, uint32_t flags,
    wp<DeathRecipient>* outRecipient)
{
    AutoMutex _l(mLock);

    if (mObitsSent) {
        return DEAD_OBJECT;
    }

    const size_t N = mObituaries ? mObituaries->size() : 0;
    for (size_t i=0; i<N; i++) {
        const Obituary& obit = mObituaries->itemAt(i);
        if ((obit.recipient == recipient
                    || (recipient == NULL && obit.cookie == cookie))
                && obit.flags == flags) {
            if (outRecipient != NULL) {
                *outRecipient = mObituaries->itemAt(i).recipient;
            }
            mObituaries->removeAt(i); // first remove the obituary from the vector
            if (mObituaries->size() == 0) {
                ALOGV("Clearing death notification: %p handle %d\n", this, mHandle);
                IPCThreadState* self = IPCThreadState::self();
                self->clearDeathNotification(mHandle, this); // pack the deregistration data
                self->flushCommands();                       // send the packet to the binder driver
                delete mObituaries;
                mObituaries = NULL;
            }
            return NO_ERROR;
        }
    }

    return NAME_NOT_FOUND;
}

From the registration flow we know the DeathRecipient was stored in two places: the BpBinder's mObituaries vector, and kernel space. BpBinder therefore first removes the obituary from mObituaries, then sends a clear-death-notification request to the binder driver. Here is clearDeathNotification:

3.3 IPCThreadState::clearDeathNotification
status_t IPCThreadState::clearDeathNotification(int32_t handle, BpBinder* proxy)
{
    mOut.writeInt32(BC_CLEAR_DEATH_NOTIFICATION); // binder protocol command
    mOut.writeInt32((int32_t)handle);             // the Service handle (reference)
    mOut.writePointer((uintptr_t)proxy);          // the BpBinder pointer
    return NO_ERROR;
}

IPCThreadState::flushCommands was covered in 1.4; the path runs through IPCThreadState#talkWithDriver -> binder_ioctl -> binder_ioctl_write_read -> binder_thread_write, all described earlier. Jumping straight to binder_thread_write:

3.4 binder_thread_write
static int binder_thread_write(struct binder_proc *proc,
      struct binder_thread *thread,
      binder_uintptr_t binder_buffer, size_t size,
      binder_size_t *consumed)
{
  uint32_t cmd;
  // proc and thread describe the calling (initiating) process
  struct binder_context *context = proc->context;
  void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
  void __user *ptr = buffer + *consumed; 
  void __user *end = buffer + size;
  while (ptr < end && thread->return_error == BR_OK) {
    get_user(cmd, (uint32_t __user *)ptr); // reads the BC_CLEAR_DEATH_NOTIFICATION cmd
    ptr += sizeof(uint32_t);
    switch (cmd) {
        case BC_CLEAR_DEATH_NOTIFICATION: { // clear a death notification
            uint32_t target;
            void __user *cookie;
            struct binder_ref *ref;
            struct binder_ref_death *death;

            get_user(target, (uint32_t __user *)ptr); // read target (the handle)
            ptr += sizeof(uint32_t);
            get_user(cookie, (void __user * __user *)ptr); // read cookie (the BpBinder*)
            ptr += sizeof(void *);

            ref = binder_get_ref(proc, target); // look up the target Service's binder_ref

            if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
                ...
            } else {// cmd == BC_CLEAR_DEATH_NOTIFICATION
              if (ref->death == NULL) {
                break;
              }
              death = ref->death;            // fetch the binder_ref_death stored in the binder_ref
              if (death->cookie != cookie) { // verify it matches the current BpBinder
                break; 
              }
              ref->death = NULL;             // reset the ref's death notification to NULL
              
              if (list_empty(&death->work.entry)) {
                // queue a BINDER_WORK_CLEAR_DEATH_NOTIFICATION work item
                death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
                if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
                  list_add_tail(&death->work.entry, &thread->todo);
                } else {
                  list_add_tail(&death->work.entry, &proc->todo);
                  wake_up_interruptible(&proc->wait);
                }
              } else {
                death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
              }
            }
        } break;
      case ...;
    }
  }
}

On receiving the clear request, the binder driver locates the matching registration (by comparing the cookie against the BpBinder pointer) and sets the binder_ref's death field to NULL. It then sets the binder_ref_death's work type to BINDER_WORK_CLEAR_DEATH_NOTIFICATION and queues it onto the appropriate process/thread todo list for follow-up processing.

3.5 binder_thread_read
static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
    ...
    // the binder thread sleeps here until there is work for it
    wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
    
    ...

    while (1) {
        uint32_t cmd;
        struct binder_transaction_data tr;
        struct binder_work *w;
        struct binder_transaction *t = NULL;

        // take the binder_work queued earlier off the todo list; its type is BINDER_WORK_CLEAR_DEATH_NOTIFICATION
        if (!list_empty(&thread->todo)) {
            w = list_first_entry(&thread->todo, struct binder_work,
                         entry);
        } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
            w = list_first_entry(&proc->todo, struct binder_work,
                         entry);
        }

        switch (w->type) {
          case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
            struct binder_ref_death *death;
            uint32_t cmd;

            death = container_of(w, struct binder_ref_death, work);
            if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
              cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; // clearing is complete
            ...
            
            if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
              list_del(&w->entry); // remove the clear-notification binder_work from the list
              kfree(death);        // free the binder_ref_death
              binder_stats_deleted(BINDER_STAT_DEATH);
            } 
            ...
            
            if (cmd == BR_DEAD_BINDER)
              goto done;
          } break;
        }
    }
    ...
    return 0;
}

For a binder_ref_death whose work type is BINDER_WORK_CLEAR_DEATH_NOTIFICATION, the follow-up work boils down to removing the binder_work from the todo list and freeing the memory held by the binder_ref_death.

3.6 Summary

Clearing a death notification follows largely the same call path as registering one, with the operations inverted. With the registration flow in mind, clearing is easy to follow:

  1. First the DeathRecipient's Obituary is removed from the BpBinder's mObituaries vector;
  2. Then the corresponding data (the BC_CLEAR_DEATH_NOTIFICATION command + handle + BpBinder pointer) is packed and sent to the binder driver;
  3. The binder driver sets the binder_ref's death field to NULL and then reclaims the binder_ref_death.

That completes the clearing.