Binder Series --- Service Addition Process (2)

flatten_binder
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;
    if (binder != NULL) {
        IBinder *local = binder->localBinder(); // non-NULL for a local Binder entity
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.type = BINDER_TYPE_HANDLE;
            obj.binder = 0;
            obj.handle = handle;
            obj.cookie = 0;
        } else { // this branch is taken here: registering a local Binder entity
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs()); // weak-ref pointer
            obj.cookie = reinterpret_cast<uintptr_t>(local); // the local object itself
        }
    } else {
        ...
    }
    return finish_flatten_binder(binder, obj, out);
}

flatten_binder flattens the Binder object into a flat_binder_object.

  • For a Binder entity (a local object), cookie records the pointer to the entity itself, and binder records its weak-reference pointer;
  • For a Binder proxy, handle records the proxy's handle (see the struct sketch below).
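For reference, this is roughly the flat_binder_object definition from the binder UAPI header of this era (uapi/linux/android/binder.h); the union is why an entity fills in binder/cookie while a proxy fills in handle:

struct flat_binder_object {
    __u32 type;   /* BINDER_TYPE_BINDER, BINDER_TYPE_HANDLE, ... */
    __u32 flags;
    union {
        binder_uintptr_t binder; /* entity: weak-ref pointer */
        __u32 handle;            /* proxy: driver-assigned handle */
    };
    binder_uintptr_t cookie;     /* entity: pointer to the object itself */
};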
finish_flatten_binder
inline static status_t finish_flatten_binder(
    const sp<IBinder>& , const flat_binder_object& flat, Parcel* out)
{
    return out->writeObject(flat, false);
}

The flat_binder_object written into the Parcel (Parcel.cpp), schematically:

struct flat_binder_object {
    type:   BINDER_TYPE_BINDER
    binder: weak reference to the service object
    cookie: pointer to the service object itself
}

BpBinder::transact

status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    if (mAlive) {
        // code=ADD_SERVICE_TRANSACTION
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        return status;
    }
    return DEAD_OBJECT;
}

The Binder proxy class calls transact(), but the real work is handed over to IPCThreadState, which carries out the transaction.
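For context, the BpBinder here is the servicemanager proxy (its mHandle is 0), which is obtained through defaultServiceManager(); roughly:

// gDefaultServiceManager wraps the context object, i.e. handle 0:
sp<IServiceManager> sm = interface_cast<IServiceManager>(
        ProcessState::self()->getContextObject(NULL));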

IPCThreadState::self
IPCThreadState* IPCThreadState::self()
{
    if (gHaveTLS) {
restart:
        const pthread_key_t k = gTLS;
        IPCThreadState* st = (IPCThreadState*)pthread_getspecific(k);
        if (st) return st;
        return new IPCThreadState;  // construct this thread's IPCThreadState
    }

    if (gShutdown) return NULL;

    pthread_mutex_lock(&gTLSMutex);
    if (!gHaveTLS) { // gHaveTLS is false on first entry
        if (pthread_key_create(&gTLS, threadDestructor) != 0) { // create the TLS key
            pthread_mutex_unlock(&gTLSMutex);
            return NULL;
        }
        gHaveTLS = true;
    }
    pthread_mutex_unlock(&gTLSMutex);
    goto restart;
}

TLS stands for Thread Local Storage. Every thread owns its own private TLS area, which is never shared with other threads; the pthread_getspecific/pthread_setspecific functions read and write slots in that area. Here, the IPCThreadState object saved earlier is fetched back from the calling thread's TLS.
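As a minimal standalone sketch of the same per-thread-singleton pattern (the ThreadState type is hypothetical; the pthread calls are the same ones used above):

#include <pthread.h>

struct ThreadState { int callCount = 0; }; // hypothetical per-thread state

static pthread_key_t gKey;
static pthread_once_t gKeyOnce = PTHREAD_ONCE_INIT;

static void destroyState(void* st) { delete static_cast<ThreadState*>(st); }
static void makeKey() { pthread_key_create(&gKey, destroyState); }

// Returns the calling thread's private instance, creating it on first
// use -- the same shape as IPCThreadState::self().
ThreadState* selfState() {
    pthread_once(&gKeyOnce, makeKey);
    ThreadState* st = static_cast<ThreadState*>(pthread_getspecific(gKey));
    if (!st) {
        st = new ThreadState;
        pthread_setspecific(gKey, st); // as IPCThreadState's constructor does
    }
    return st;
}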

IPCThreadState initialization
IPCThreadState::IPCThreadState()
    : mProcess(ProcessState::self()),
      mMyThreadId(gettid()),
      mStrictModePolicy(0),
      mLastTransactionBinderFlags(0)
{
    pthread_setspecific(gTLS, this); // save this object in the thread's TLS slot
    clearCaller();
    mIn.setDataCapacity(256);
    mOut.setDataCapacity(256);
}

Every thread has its own IPCThreadState, and every IPCThreadState owns an mIn and an mOut. The member mProcess holds the ProcessState (of which there is exactly one per process).

  • mIn receives data coming from the Binder device; default capacity 256 bytes;
  • mOut stores data to be sent to the Binder device; default capacity 256 bytes.

IPCThreadState::transact

status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    status_t err = data.errorCheck(); // validate the data
    if (err == NO_ERROR) { // write the transaction data
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }
    if ((flags & TF_ONE_WAY) == 0) {
        if (reply) {
            // wait for the response
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
    } else {
        // one-way call: no reply to wait for
        err = waitForResponse(NULL, NULL);
    }
    return err;
}

IPCThreadState handles the transaction in three steps:

  • errorCheck(): validate the data
  • writeTransactionData(): write out the transaction data
  • waitForResponse(): wait for the response

IPCThreadState::writeTransactionData

status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;
    tr.target.ptr = 0;
    tr.target.handle = handle; // handle = 0
    tr.code = code;            // code = ADD_SERVICE_TRANSACTION
    tr.flags = binderFlags;    // binderFlags = 0
    tr.cookie = 0;
    tr.sender_pid = 0;
    tr.sender_euid = 0;

    // data is the Parcel carrying the Media service registration info
    const status_t err = data.errorCheck();
    if (err == NO_ERROR) {
        tr.data_size = data.ipcDataSize();  // mDataSize
        tr.data.ptr.buffer = data.ipcData(); //mData
        tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t); //mObjectsSize
        tr.data.ptr.offsets = data.ipcObjects(); //mObjects
    } else if (statusBuffer) {
        ...
    } else {
        return (mLastError = err);
    }

    mOut.writeInt32(cmd);         //cmd = BC_TRANSACTION
    mOut.write(&tr, sizeof(tr));  //写入binder_transaction_data数据
    return NO_ERROR;
}

The handle identifies the destination. For service registration the destination is servicemanager, and handle = 0 maps to the binder_context_mgr_node object, which is precisely the binder node representing servicemanager. binder_transaction_data is the data structure the binder driver communicates with; this step ultimately writes the Binder request code BC_TRANSACTION together with the binder_transaction_data structure into mOut.
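For reference, binder_transaction_data in the binder UAPI header looks roughly like this:

struct binder_transaction_data {
    union {
        __u32 handle;         /* proxy side: target handle (0 = servicemanager) */
        binder_uintptr_t ptr; /* entity side: target pointer */
    } target;
    binder_uintptr_t cookie;
    __u32 code;               /* e.g. ADD_SERVICE_TRANSACTION */
    __u32 flags;              /* e.g. TF_ONE_WAY */
    pid_t sender_pid;         /* filled in by the driver */
    uid_t sender_euid;        /* filled in by the driver */
    binder_size_t data_size;
    binder_size_t offsets_size;
    union {
        struct {
            binder_uintptr_t buffer;  /* the transaction payload */
            binder_uintptr_t offsets; /* offsets of the flat_binder_objects */
        } ptr;
        __u8 buf[8];
    } data;
};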

So the transact step first fills in binder_transaction_data; the important member variables of the Parcel data map into it as follows:

  • mDataSize: saved in data_size, the size of the transaction data;
  • mData: saved in data.ptr.buffer, the start address of the transaction data;
  • mObjectsSize: saved in offsets_size, derived from the number of flat_binder_object structures (count × sizeof(binder_size_t));
  • mObjects: saved in data.ptr.offsets, the offsets of each flat_binder_object within the data;
What ends up in mOut, schematically:

cmd = BC_TRANSACTION
struct binder_transaction_data {
    target.handle = 0;
    data_size;
    data.ptr.buffer;
}

data_size is the size of data; data.ptr.buffer points to the data itself, whose contents are:

{
    interface token: android.os.IServiceManager
    service name:    media.player
    service:         flat_binder_object
    allowIsolated:   0
}
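As a recap of how that payload was assembled (part 1 of this series), BpServiceManager::addService in IServiceManager.cpp writes exactly these four items before calling transact(); abridged from AOSP of this era:

virtual status_t addService(const String16& name, const sp<IBinder>& service,
                            bool allowIsolated)
{
    Parcel data, reply;
    data.writeInterfaceToken(IServiceManager::getInterfaceDescriptor());
    data.writeString16(name);               // "media.player"
    data.writeStrongBinder(service);        // ends up in flatten_binder
    data.writeInt32(allowIsolated ? 1 : 0); // 0 here
    status_t err = remote()->transact(ADD_SERVICE_TRANSACTION, data, &reply);
    return err == NO_ERROR ? reply.readExceptionCode() : err;
}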

IPCThreadState::waitForResponse

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    int32_t cmd;
    int32_t err;
    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;
        cmd = mIn.readInt32();
        switch (cmd) {
            case BR_TRANSACTION_COMPLETE: ... // write accepted; non-oneway keeps waiting
            case BR_REPLY: ...                // reply arrived; copied into *reply
                goto finish;

            default:
                err = executeCommand(cmd);  
                if (err != NO_ERROR) goto finish;
                break;
        }
    }
finish:
    return err;
}

During waitForResponse, the sender first receives BR_TRANSACTION_COMPLETE (the driver has accepted the write). The target process, meanwhile, receives the work as BR_TRANSACTION and handles it; once it sends its reply back, the current process receives BR_REPLY and the loop finishes.
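Putting the commands for this (non-oneway) registration in order:

client          --BC_TRANSACTION-->           driver   // the mOut built above
client          <--BR_TRANSACTION_COMPLETE--  driver   // write accepted; keep waiting
servicemanager  <--BR_TRANSACTION--           driver   // handles ADD_SERVICE
servicemanager  --BC_REPLY-->                 driver
client          <--BR_REPLY--                 driver   // reply delivered; loop exits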

IPCThreadState::talkWithDriver

status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    binder_write_read bwr;
    const bool needRead = mIn.dataPosition() >= mIn.dataSize(); // mIn fully consumed?
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data();
    if (doReceive && needRead) {
        // Fill in the receive-buffer info; data arriving later
        // lands directly in mIn.
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }
    // If there is nothing to write and nothing to read, return at once
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
    status_t err;
    do {
        // Communicate with the Binder driver through ioctl;
        // this is where the real read/write happens.
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
    } while (err == -EINTR); // retry when the call is interrupted
    return err;
}

binder_write_read is the structure used to exchange data with the Binder device. Communicating with mDriverFD through ioctl is the real data read/write interaction with the Binder driver, and it operates mainly on the mOut and mIn variables.

struct binder_write_read {
    binder_size_t    write_size;     /* size of mOut's data */
    binder_size_t    write_consumed; /* filled by the driver: bytes it processed */
    binder_uintptr_t write_buffer;   /* points at mOut's data */
    binder_size_t    read_size;      /* capacity of mIn */
    binder_size_t    read_consumed;  /* filled by the driver: bytes it returned */
    binder_uintptr_t read_buffer;    /* points at mIn's data */
};


III. Binder Driver

The user-space ioctl enters the driver along the path: ioctl -> binder_ioctl -> binder_ioctl_write_read.
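Part 3 of this series follows the call into the kernel; as a rough, abridged sketch of the entry dispatch in drivers/android/binder.c:

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    struct binder_proc *proc = filp->private_data;
    struct binder_thread *thread = binder_get_thread(proc);
    int ret = 0;
    switch (cmd) {
    case BINDER_WRITE_READ:
        // copy the binder_write_read from user space, then run
        // binder_thread_write() / binder_thread_read() as needed
        ret = binder_ioctl_write_read(filp, cmd, arg, thread);
        break;
    /* ... other commands elided ... */
    }
    return ret;
}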

# Binder Series --- Service Addition Process (1)

# Binder Series --- Service Addition Process (3)

# Binder Series --- Service Addition Process (4)