binder


There are two Binder objects at the Java layer: BinderProxy (the proxy side) and the local Binder.

Correspondingly there are two at the C++ layer: BpBinder (BpBinder.cpp) and JavaBBinder, which derives from BBinder (Binder.cpp) and is defined in android_util_Binder.cpp.

For the data, the Java-layer Parcel maps onto the C++-layer Parcel (Parcel.cpp).

A Java-layer BinderProxy is identified by a handle and is backed by a C++-layer BpBinder holding the same handle.

Send path: BinderProxy (Java) -> BpBinder.cpp (via the android_util_Binder.cpp JNI layer) -> IPCThreadState.cpp -> ioctl

Receive path: BBinder.cpp -> JavaBBinder (android_util_Binder.cpp) -> Binder.java (execTransact/onTransact)
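
To make the two native roles concrete, here is a minimal sketch (not AOSP code; DemoService and the transaction code are made up) of a BBinder-based local object and a caller that only holds an sp<IBinder>, which for a remote object is backed by a BpBinder(handle):

#include <binder/Binder.h>
#include <binder/IBinder.h>
#include <binder/Parcel.h>

using namespace android;

// Server side: a local binder is a BBinder subclass; incoming transactions are
// delivered to transact()/onTransact() by a binder pool thread.
class DemoService : public BBinder {
protected:
    status_t onTransact(uint32_t code, const Parcel& data,
                        Parcel* reply, uint32_t flags) override {
        switch (code) {
        case IBinder::FIRST_CALL_TRANSACTION:   // hypothetical "get magic number" call
            if (reply) reply->writeInt32(42);
            return NO_ERROR;
        default:
            return BBinder::onTransact(code, data, reply, flags);
        }
    }
};

// Client side: the sp<IBinder> is really a BpBinder holding a handle; transact()
// goes through IPCThreadState::transact() and the binder driver.
int32_t callDemo(const sp<IBinder>& binder) {
    Parcel data, reply;
    binder->transact(IBinder::FIRST_CALL_TRANSACTION, data, &reply);
    return reply.readInt32();
}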

ServiceManager.java
  private static IServiceManager getIServiceManager() {
        if (sServiceManager != null) {
            return sServiceManager;
        }

        // Find the service manager
        // Get the C++-layer BpBinder for handle 0, wrap it as a Java BinderProxy, then wrap that in a ServiceManagerNative proxy (IServiceManager)
        sServiceManager = ServiceManagerNative
                .asInterface(Binder.allowBlocking(BinderInternal.getContextObject()));
        return sServiceManager;
    }

    BinderInternal.getContextObject()----->
    sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& /*caller*/)
    {
        return getStrongProxyForHandle(0);
    }

    ProcessState::getStrongProxyForHandle() {
           ...
           b = BpBinder::create(handle);
           ...
    }

    // ServiceManagerNative: addService()
    // mRemote corresponds to the C++-layer BpBinder whose handle is 0
    public void addService(String name, IBinder service, boolean allowIsolated, int dumpPriority)
                throws RemoteException {
            Parcel data = Parcel.obtain();
            Parcel reply = Parcel.obtain();
            data.writeInterfaceToken(IServiceManager.descriptor);
            data.writeString(name);
            data.writeStrongBinder(service);
            data.writeInt(allowIsolated ? 1 : 0);
            data.writeInt(dumpPriority);
            mRemote.transact(ADD_SERVICE_TRANSACTION, data, reply, 0);
            reply.recycle();
            data.recycle();
        }
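
The same registration and lookup can also be done from native code through libbinder; a hedged sketch (the service name "demo.service" is hypothetical, and DemoService stands for any BBinder subclass like the one sketched earlier):

#include <binder/IServiceManager.h>
#include <binder/Binder.h>
#include <utils/String16.h>

using namespace android;

// Server process: register a local binder under a name. This runs the same
// addService() path as above: ADD_SERVICE_TRANSACTION sent to handle 0.
void registerDemo(const sp<IBinder>& service /* e.g. new DemoService() */) {
    defaultServiceManager()->addService(String16("demo.service"), service);
}

// Client process: look the service up by name; the returned IBinder is a
// proxy (BinderProxy in Java, BpBinder in C++) identified by a handle.
sp<IBinder> findDemo() {
    return defaultServiceManager()->getService(String16("demo.service"));
}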

Java Parcel -> C++ Parcel

 // Flattened binder data: what gets stored is a pointer to (or a handle for) the upper-layer object
struct flat_binder_object {
   struct binder_object_header    hdr;
   __u32           flags;

   /* 8 bytes of data. */
   // On a 32-bit machine (address space 0x00000000 ~ 2^32 - 1) a pointer needs 4 bytes;
   // on a 64-bit machine a pointer needs 8 bytes.
   union {
      binder_uintptr_t   binder;    /* local object */ // a local object: effectively just an address (an integer)
      __u32        handle;    /* remote object */ // a remote object: a handle (an integer)
   };

   /* extra data associated with local object */
   binder_uintptr_t   cookie;
};
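
A small illustration of how that union is read, assuming the uapi header <linux/android/binder.h>: a local object carries a pointer (plus a cookie), a remote one carries an integer handle.

#include <cstdio>
#include <linux/android/binder.h>

// Prints the payload of a flat_binder_object depending on its type.
void describe(const flat_binder_object& obj) {
    if (obj.hdr.type == BINDER_TYPE_BINDER)        // local object
        printf("local binder: weakref=0x%llx cookie=0x%llx\n",
               (unsigned long long)obj.binder, (unsigned long long)obj.cookie);
    else if (obj.hdr.type == BINDER_TYPE_HANDLE)   // remote object
        printf("remote binder: handle=%u\n", obj.handle);
}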


inline static status_t finish_flatten_binder(
    const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
{
    return out->writeObject(flat, false);
}

// Flattens an IBinder into a flat_binder_object and appends it to the Parcel.
// The IBinder is a C++-layer BBinder (local) or BpBinder (proxy).
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    if (IPCThreadState::self()->backgroundSchedulingDisabled()) {
        /* minimum priority for all nodes is nice 0 */
        obj.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
    } else {
        /* minimum priority for all nodes is MAX_NICE(19) */
        obj.flags = 0x13 | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    }

    if (binder != NULL) {
        IBinder *local = binder->localBinder();
        if (!local) {
            // proxy (remote) binder
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == NULL) {
                ALOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.hdr.type = BINDER_TYPE_HANDLE; // proxy: type is BINDER_TYPE_HANDLE
            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
            obj.handle = handle; // the proxy's handle
            obj.cookie = 0;
        } else {
            // local binder
            obj.hdr.type = BINDER_TYPE_BINDER; // local: type is BINDER_TYPE_BINDER
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local); // cookie stores the pointer to the local BBinder
        }
    } else {
        obj.hdr.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
    }

    return finish_flatten_binder(binder, obj, out);
}



status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
{
    const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
    const bool enoughObjects = mObjectsSize < mObjectsCapacity;
    // do the data buffer and the objects array both have enough room?
    if (enoughData && enoughObjects) {
restart_write: // retried here after growing the buffers
        *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val; // copy the flat_binder_object into mData at offset mDataPos

        // remember if it's a file descriptor
        if (val.hdr.type == BINDER_TYPE_FD) {
            if (!mAllowFds) {
                // fail before modifying our object index
                return FDS_NOT_ALLOWED;
            }
            mHasFds = mFdsKnown = true;
        }

        // Need to write meta-data?
        if (nullMetaData || val.binder != 0) {
            mObjects[mObjectsSize] = mDataPos; // record the offset of this flat_binder_object within mData in the mObjects array
            acquire_object(ProcessState::self(), val, this, &mOpenAshmemSize);
            mObjectsSize++; // one more recorded object

        }

        return finishWrite(sizeof(flat_binder_object)); // advance mDataPos by sizeof(flat_binder_object)
    }

    if (!enoughData) {
        const status_t err = growData(sizeof(val)); // not enough data space: grow mData
        if (err != NO_ERROR) return err;
    }
    if (!enoughObjects) { // the mObjects offsets array is full: grow it
        size_t newSize = ((mObjectsSize+2)*3)/2;
        if (newSize*sizeof(binder_size_t) < mObjectsSize) return NO_MEMORY;   // overflow
        binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
        if (objects == NULL) return NO_MEMORY;
        mObjects = objects;
        mObjectsCapacity = newSize;
    }

    goto restart_write;
}
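
From user code, the whole flatten_binder()/writeObject() path above is triggered simply by writing a strong binder into a Parcel; a minimal sketch (the interface token and name are made up):

#include <binder/Parcel.h>
#include <binder/IBinder.h>
#include <utils/String16.h>

using namespace android;

void packDemo(const sp<IBinder>& binder, Parcel* data) {
    data->writeInterfaceToken(String16("demo.IDemo")); // plain data: goes into mData only
    data->writeString16(String16("demo.service"));
    data->writeStrongBinder(binder);   // -> flatten_binder() -> Parcel::writeObject():
                                       //    payload into mData, offset recorded in mObjects
}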

BpBinder.cpp

// The C++-layer binder proxy sends the Parcel data
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Once a binder has died, it will never come back to life.
    if (mAlive) {
        // forward through IPCThreadState::transact()
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }

    return DEAD_OBJECT;
}

status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    ...
        // pack the Parcel into a binder_transaction_data and append (cmd + binder_transaction_data) to the IPCThreadState output Parcel mOut
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    ...
    if ((flags & TF_ONE_WAY) == 0) {
        ... // not oneway: synchronous, wait for a reply
        if (reply) {
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply;
            err = waitForResponse(&fakeReply);
        }
        ...
    } else {
        err = waitForResponse(NULL, NULL);
    }
}

enum transaction_flags {
   TF_ONE_WAY = 0x01,    /* this is a one-way call: async, no return */
   TF_ROOT_OBJECT = 0x04,    /* contents are the component's root object */
   TF_STATUS_CODE = 0x08,    /* contents are a 32-bit status code */
   TF_ACCEPT_FDS  = 0x10,    /* allow replies with file descriptors */
};
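
At the libbinder level TF_ONE_WAY corresponds to IBinder::FLAG_ONEWAY: the call returns as soon as the command has been handed to the driver and no reply Parcel comes back (the waitForResponse(NULL, NULL) branch above). A hedged sketch:

#include <binder/IBinder.h>
#include <binder/Parcel.h>

using namespace android;

// Fire-and-forget call: 'code' is whatever transaction code the service defines.
status_t notifyAsync(const sp<IBinder>& binder, uint32_t code) {
    Parcel data;                       // no reply Parcel is needed for a oneway call
    return binder->transact(code, data, nullptr, IBinder::FLAG_ONEWAY);
}
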
// The Parcel is packed into a binder_transaction_data; (cmd + binder_transaction_data) is then
// appended to the mOut Parcel, and mOut becomes the write buffer of binder_write_read.
// Parcel payload -> data.ptr (parcel.mData -> data.ptr.buffer, parcel.mObjects -> data.ptr.offsets);
// payload length -> data_size, mObjects length in bytes -> offsets_size.
struct binder_transaction_data {
   /* The first two are only used for bcTRANSACTION and brTRANSACTION,
    * identifying the target and contents of the transaction.
    */
   union {
      /* target descriptor of command transaction: the target handle */
      __u32  handle;
      /* target descriptor of return transaction */
      binder_uintptr_t ptr;
   } target;
   binder_uintptr_t   cookie;    /* target object cookie: pointer to the target's C++ BBinder */
   __u32     code;     /* transaction command */

   /* General information about the transaction. */
   __u32          flags;
   pid_t     sender_pid;
   uid_t     sender_euid;
   binder_size_t  data_size; /* number of bytes of data */
   binder_size_t  offsets_size;  /* number of bytes of offsets */

   /* If this transaction is inline, the data immediately
    * follows here; otherwise, it ends with a pointer to
    * the data buffer.
    */
   union {
      struct {
         /* transaction data */
         binder_uintptr_t   buffer;
         /* offsets from buffer to flat_binder_object structs */
         binder_uintptr_t   offsets;
      } ptr;
      __u8   buf[8];
   } data;
};

//  Parcel   mIn;  (declared in IPCThreadState.h)
//  Parcel   mOut;
// Wraps the C++ Parcel into a binder_transaction_data and appends it to the mOut output
// Parcel, so mOut holds a stream of (cmd + binder_transaction_data) records.
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
    binder_transaction_data tr;
    tr.target.ptr = 0; /* Don't pass uninitialized stack data to a remote process */
        tr.target.handle = handle; // target handle
        tr.code = code; // transaction code (not the BC_ command)
        tr.flags = binderFlags;
        tr.cookie = 0;
        tr.sender_pid = 0;
        tr.sender_euid = 0;

        const status_t err = data.errorCheck();
        if (err == NO_ERROR) {
            tr.data_size = data.ipcDataSize(); // size of the payload in mData
            tr.data.ptr.buffer = data.ipcData(); // pointer to the start of the payload
            tr.offsets_size = data.ipcObjectsCount()*sizeof(binder_size_t); // size in bytes of the flattened-binder offsets array
            tr.data.ptr.offsets = data.ipcObjects(); // offsets of the flattened binder objects within mData
        } else if (statusBuffer) {
            tr.flags |= TF_STATUS_CODE;
            *statusBuffer = err;
            tr.data_size = sizeof(status_t);
            tr.data.ptr.buffer = reinterpret_cast<uintptr_t>(statusBuffer);
            tr.offsets_size = 0;
            tr.data.ptr.offsets = 0;
        } else {
            return (mLastError = err);
        }

        mOut.writeInt32(cmd);
        // Only sizeof(tr) bytes go into the mOut parcel: the buffer/offsets contents are not copied here, only the pointers to them.
        mOut.write(&tr, sizeof(tr));

    return NO_ERROR;
}

status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
     while (1) {
            // talkWithDriver(): hand the mOut buffer to the (already opened) binder driver via ioctl and read back into mIn
            if ((err=talkWithDriver()) < NO_ERROR) break;
            // mIn now holds whatever the driver returned
            err = mIn.errorCheck();
            if (err < NO_ERROR) break;
            if (mIn.dataAvail() == 0) continue;
            // read the returned command
            cmd = (uint32_t)mIn.readInt32();
             switch (cmd) {
               case BR_TRANSACTION_COMPLETE:
                        if (!reply && !acquireResult) goto finish;
                        break;
                         ...
                 // handle the reply data returned by the binder driver
            case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr)); // read one binder_transaction_data from mIn
                ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;

                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                    // Feed the buffer/offsets back into the C++ reply Parcel:
                    // tr.data.ptr.buffer / tr.data_size become mData / mDataSize (the whole payload);
                    // tr.data.ptr.offsets / tr.offsets_size/sizeof(binder_size_t) become
                    // mObjects / mObjectsSize (the offsets of the flattened binder objects).
                       reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t),
                            freeBuffer, this);
                    } else {
                        err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
                        freeBuffer(NULL,
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t), this);
                    }
                } else {
                    freeBuffer(NULL,
                        reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                        tr.data_size,
                        reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                        tr.offsets_size/sizeof(binder_size_t), this);
                    continue;
                }
            }
            goto finish;

        default:
            err = executeCommand(cmd);
            if (err != NO_ERROR) goto finish;
            break;
        }
    }

finish:
    if (err != NO_ERROR) {
        if (acquireResult) *acquireResult = err;
        if (reply) reply->setError(err);
        mLastError = err;
    }

    return err;
}


status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    binder_write_read bwr;

        // Is the read buffer empty?
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();
    ...
    // fill binder_write_read: the write side from mOut, the read side into mIn
    bwr.write_size = outAvail;
    bwr.write_buffer = (uintptr_t)mOut.data(); // mOut holds the (cmd + binder_transaction_data) stream

        // This is what we'll read.
        if (doReceive && needRead) {
            bwr.read_size = mIn.dataCapacity();
            bwr.read_buffer = (uintptr_t)mIn.data();
        } else {
            bwr.read_size = 0;
            bwr.read_buffer = 0;
        }
        ...
        // exchange data with the binder driver via ioctl: the driver consumes write_buffer and fills read_buffer
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
                    err = NO_ERROR;
                else
                    err = -errno;
        ...
        // adjust mOut/mIn sizes and positions according to what the driver consumed/produced
        if (err >= NO_ERROR) {
        if (bwr.write_consumed > 0) {
            if (bwr.write_consumed < mOut.dataSize())
                mOut.remove(0, bwr.write_consumed);
            else {
                mOut.setDataSize(0);
                processPostWriteDerefs();
            }
        }
        if (bwr.read_consumed > 0) {
            mIn.setDataSize(bwr.read_consumed);
            mIn.setDataPosition(0);
        }
    }

    return err;
}
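
Stripped of Parcel and IPCThreadState, talking to the driver boils down to a few ioctls on /dev/binder. A self-contained sketch (not AOSP code; the buffer size and error handling are simplified) of what ProcessState's driver setup and talkWithDriver() amount to:

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/android/binder.h>

int open_binder() {
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
    binder_version vers{};
    ioctl(fd, BINDER_VERSION, &vers);          // should match BINDER_CURRENT_PROTOCOL_VERSION
    uint32_t maxThreads = 15;
    ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
    // Map the receive buffer; the driver copies incoming transactions into these pages.
    mmap(nullptr, 1024 * 1024 - 2 * 4096, PROT_READ, MAP_PRIVATE | MAP_NORESERVE, fd, 0);
    return fd;
}

void write_read(int fd, void* out, size_t outSize, void* in, size_t inSize) {
    binder_write_read bwr{};
    bwr.write_buffer = (binder_uintptr_t)(uintptr_t)out; // (cmd + binder_transaction_data) stream
    bwr.write_size   = outSize;
    bwr.read_buffer  = (binder_uintptr_t)(uintptr_t)in;  // filled with BR_* commands by the driver
    bwr.read_size    = inSize;
    ioctl(fd, BINDER_WRITE_READ, &bwr);                  // blocks until there is work to read
}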

status_t IPCThreadState::executeCommand(int32_t cmd)
{
    switch ((uint32_t)cmd) {
         // an incoming transaction: data delivered to the server side
        case BR_TRANSACTION:
           binder_transaction_data tr;
            result = mIn.read(&tr, sizeof(tr));
            ALOG_ASSERT(result == NO_ERROR,
                "Not enough command data for brTRANSACTION");
            if (result != NO_ERROR) break;
            // wrap the binder_transaction_data payload into a Parcel
            Parcel buffer;
            buffer.ipcSetDataReference(
                reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                tr.data_size,
                reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);
          ...
          if (tr.target.ptr) {
                          // We only have a weak reference on the target object, so we must first try to
                          // safely acquire a strong reference before doing anything else with it.
                          if (reinterpret_cast<RefBase::weakref_type*>(
                                  tr.target.ptr)->attemptIncStrong(this)) {
                                  // tr.cookie points to the local binder: cast it to BBinder (the binder
                                  // entity) and call transact(); the payload has already been unpacked
                                  // into the Parcel named buffer
                              error = reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer,
                                      &reply, tr.flags);
                              reinterpret_cast<BBinder*>(tr.cookie)->decStrong(this);
                          } else {
                              error = UNKNOWN_TRANSACTION;
                          }

                      } else {
                          error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
                      }
          ...

     case BR_SPAWN_LOOPER:
             // the driver asks this process to spawn an additional binder thread
             mProcess->spawnPooledThread(false);
             break;
    }

}



// spawn a new binder pool thread
void ProcessState::spawnPooledThread(bool isMain)
{
    if (mThreadPoolStarted) {
        String8 name = makeBinderThreadName();
        ALOGV("Spawning new pooled thread, name=%s\n", name.string());
        sp<Thread> t = new PoolThread(isMain);
        t->run(name.string());
    }
}

class PoolThread : public Thread
 {
 public:
     explicit PoolThread(bool isMain)
         : mIsMain(isMain)
     {
     }
 
 protected:
     virtual bool threadLoop()
     {
         IPCThreadState::self()->joinThreadPool(mIsMain);
         return false;
     }
 
     const bool mIsMain;
 };
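
A typical native server therefore starts its binder thread pool like this (hedged sketch): startThreadPool() spawns the first PoolThread (isMain = true) and the driver can later request more via BR_SPAWN_LOOPER; joinThreadPool() turns the calling thread into a binder thread as well, looping over talkWithDriver()/executeCommand().

#include <binder/ProcessState.h>
#include <binder/IPCThreadState.h>

using namespace android;

int main() {
    // ... create and register services here ...
    ProcessState::self()->startThreadPool();   // spawnPooledThread(true) under the hood
    IPCThreadState::self()->joinThreadPool();  // does not return while the pool is alive
    return 0;
}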

Data-protocol flow: Java Parcel -> C++ Parcel -> binder_transaction_data -> (cmd + binder_transaction_data) in the C++ mOut Parcel -> binder_write_read (write_buffer)

Interface layer: wraps the business interface.

ServiceManagerNative -> mRemote (Java-layer BinderProxy) -> BpBinder.cpp (handle)
                     -> IPCThreadState::transact -> IPCThreadState::talkWithDriver -> ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr)

Client side:  -> reply->ipcSetDataReference(
                      reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                      tr.data_size,
                      reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                      tr.offsets_size/sizeof(binder_size_t),
                      freeBuffer, this);

Server side:  -> reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer,
                      &reply, tr.flags);

In the driver, the client-side handle is resolved to the target node, whose cookie becomes tr.cookie (handle -> tr.cookie).

The binder driver

From the process's point of view, client and server both end up in IPCThreadState::talkWithDriver -> ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr).

The client gets BR_REPLY plus a binder_transaction_data back in the bwr read buffer -> reply (C++ Parcel) -> reply (Java Parcel).

The server gets BR_TRANSACTION in the bwr read buffer, unpacks the payload with

 buffer.ipcSetDataReference(
         reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
         tr.data_size,
         reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
         tr.offsets_size/sizeof(binder_size_t), freeBuffer, this);

and then dispatches it with

 reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer, &reply, tr.flags);

From the driver's point of view: for the read side of bwr, the driver pulls work off the process/thread todo queues and writes the transaction data back to the caller.

For the write side of bwr, the driver turns each flattened local binder (flat_binder_object) into a binder_node it keeps, uses the handle to find the target binder_proc / binder_node (the target service), wraps the write data into a transaction queued to the target process, and creates a binder_ref for the flat_binder_object in the target process.

binder.c

binder_proc is the process's representative inside the binder driver: when a process opens the binder driver it gets a file descriptor backed by a struct file, and the binder_proc is stored in file->private_data.
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads; // rb-tree of this process's binder threads
	struct rb_root nodes; // rb-tree of binder_nodes (local binders)
	struct rb_root refs_by_desc; // rb-tree of binder_refs keyed by desc (handle)
	struct rb_root refs_by_node; // rb-tree of binder_refs keyed by node
	struct list_head waiting_threads;
	int pid; // process id
	struct task_struct *tsk;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo; // process-level todo queue
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads; // maximum number of binder threads
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	struct binder_priority default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc; // manages allocation of the transaction buffer (mmap area)
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
	struct dentry *binderfs_entry;
};
// A binder_node represents a local binder: the Java-layer Binder object / C++-layer BBinder. When the C++ Parcel flattens it into a flat_binder_object, the type is BINDER_TYPE_BINDER and cookie stores the pointer to the C++ BBinder.
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc; // the binder_proc this node belongs to
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie; // cookie stores the pointer to the C++-layer local binder (BBinder)
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 sched_policy:2;
		u8 inherit_rt:1;
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};
struct binder_ref_data {
	int debug_id;
	uint32_t desc; // the handle used by proxies; for the same binder_node the desc differs from one referencing process to another
	int strong;
	int weak;
};

struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc; // the binder_proc that owns this reference
	struct binder_node *node; // the binder_node this reference points to
	struct binder_ref_death *death;
};
// The in-driver representation of one of a process's binder (pool) threads
struct binder_thread {
	struct binder_proc *proc; // the owning binder_proc
	struct rb_node rb_node; // node in the owning proc's threads rb-tree
	struct list_head waiting_thread_node;
	int pid; // thread id
	int looper;              /* only modified by this thread */ // looper state
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack; // stack of in-flight transactions
	struct list_head todo; // this thread's todo queue
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
	struct task_struct *task;
};
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

// An in-flight binder transaction
struct binder_transaction {
	int debug_id;
	struct binder_work work; // work item type
	struct binder_thread *from; // the sending binder thread
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc; // the target binder process
	struct binder_thread *to_thread; // the target binder thread
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer; // the payload (see binder_alloc.h)
	unsigned int	code;
	unsigned int	flags;
	struct binder_priority	priority;
	struct binder_priority	saved_priority;
	bool    set_priority_called;
	kuid_t	sender_euid;
	struct list_head fd_fixups;
	binder_uintptr_t security_ctx;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc, *itr;
	struct binder_device *binder_dev;
	struct binderfs_info *info;
	struct dentry *binder_binderfs_dir_entry_proc = NULL;
	bool existing_pid = false;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
		     current->group_leader->pid, current->pid);
    // allocate a binder_proc for this process
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	INIT_LIST_HEAD(&proc->todo); // process-level todo list
	if (binder_supported_policy(current->policy)) {
		proc->default_priority.sched_policy = current->policy;
		proc->default_priority.prio = current->normal_prio;
	} else {
		proc->default_priority.sched_policy = SCHED_NORMAL;
		proc->default_priority.prio = NICE_TO_PRIO(0);
	}

	/* binderfs stashes devices in i_private */
	if (is_binderfs_device(nodp)) {
		binder_dev = nodp->i_private;
		info = nodp->i_sb->s_fs_info;
		binder_binderfs_dir_entry_proc = info->proc_log_dir;
	} else {
		binder_dev = container_of(filp->private_data,
					  struct binder_device, miscdev);
	}
	refcount_inc(&binder_dev->ref);
    // context points at the global binder_dev->context (which holds the servicemanager context node)
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc; // stash the binder_proc in filp->private_data

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == proc->pid) {
			existing_pid = true;
			break;
		}
	}
	hlist_add_head(&proc->proc_node, &binder_procs); // add this proc to the head of the global binder_procs list
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts.
		 * Only create for the first PID to avoid debugfs log spamming
		 * The printing code will anyway print all contexts for a given
		 * PID so this is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&proc_fops);
	}

	if (binder_binderfs_dir_entry_proc && !existing_pid) {
		char strbuf[11];
		struct dentry *binderfs_entry;

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * Similar to debugfs, the process specific log file is shared
		 * between contexts. Only create for the first PID.
		 * This is ok since same as debugfs, the log file will contain
		 * information on all contexts of a given PID.
		 */
		binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
			strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
		if (!IS_ERR(binderfs_entry)) {
			proc->binderfs_entry = binderfs_entry;
		} else {
			int error;

			error = PTR_ERR(binderfs_entry);
			pr_warn("Unable to create file %s in binderfs (error %d)\n",
				strbuf, error);
		}
	}

	return 0;
}
In user space, ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) is issued from the binder pool threads (PoolThread::threadLoop -> IPCThreadState::joinThreadPool), and also from the caller's own thread during a synchronous transact().
          
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    ...
    void __user *ubuf = (void __user *)arg; // pointer to the user-space data
    ...
    thread = binder_get_thread(proc); // binder_thread for the calling thread; created and inserted into the threads rb-tree if it does not exist yet
    switch (cmd) {
       case BINDER_WRITE_READ:
           // read/write data
          ret = binder_ioctl_write_read(filp, cmd, arg, thread);
          if (ret)
             goto err;
          break;
       case BINDER_SET_MAX_THREADS: {
          int max_threads;
          // user space: ioctl(mDriverFD, BINDER_SET_MAX_THREADS, &maxThreads)
          // sets the maximum number of binder threads;
          // copy sizeof(max_threads) bytes from the user buffer ubuf into max_threads
          if (copy_from_user(&max_threads, ubuf,
                   sizeof(max_threads))) {
             ret = -EINVAL;
             goto err;
          }
          binder_inner_proc_lock(proc);
          // record the process's maximum binder thread count
          proc->max_threads = max_threads;
          binder_inner_proc_unlock(proc);
          break;
       }
       // register servicemanager as the context manager
       case BINDER_SET_CONTEXT_MGR_EXT: {
           // the flattened binder object for servicemanager
          struct flat_binder_object fbo; 
            // copy sizeof(fbo) bytes from the user buffer ubuf into fbo
          if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
             ret = -EINVAL;
             goto err;
          }
          ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
          if (ret)
             goto err;
          break;
       }
       // legacy variant: become the context manager without passing a flat_binder_object
       case BINDER_SET_CONTEXT_MGR:
          ret = binder_ioctl_set_ctx_mgr(filp, NULL);
          if (ret)
             goto err;
          break;
       case BINDER_THREAD_EXIT:
          binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
                  proc->pid, thread->pid);
          binder_thread_release(proc, thread);
          thread = NULL;
          break;
       // query the binder driver protocol version
       case BINDER_VERSION: {
          struct binder_version __user *ver = ubuf;
          if (size != sizeof(struct binder_version)) {
             ret = -EINVAL;
             goto err;
          }
          // return the binder protocol version to user space
          if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
                  &ver->protocol_version)) {
             ret = -EINVAL;
             goto err;
          }
          break;
       }
    ...
}

// Make the flat_binder_object (servicemanager) the context-manager node
static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
    // the global binder_context shared through the binder_device
	struct binder_context *context = proc->context; 
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
    // the context-manager node has already been set
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
    // create a new binder_node for servicemanager
	new_node = binder_new_node(proc, fbo);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node; // record the context node, i.e. the servicemanager node
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}
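
The user-space counterpart is servicemanager itself: at startup it asks the driver to make it the context manager. A hedged sketch of that call, assuming a kernel header that defines BINDER_SET_CONTEXT_MGR_EXT:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

// Returns 0 on success; binder_fd is the already-opened /dev/binder descriptor.
int become_context_manager(int binder_fd) {
    flat_binder_object obj;
    memset(&obj, 0, sizeof(obj));
    obj.flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX;  // ask for security contexts on transactions
    // Newer kernels take a flat_binder_object; fall back to the legacy ioctl otherwise.
    if (ioctl(binder_fd, BINDER_SET_CONTEXT_MGR_EXT, &obj) < 0)
        return ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0);
    return 0;
}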

static int binder_ioctl_write_read(struct file *filp,
            unsigned int cmd, unsigned long arg,
            struct binder_thread *thread)
{
    ...
    // the binder_write_read handed down from user space
    void __user *ubuf = (void __user *)arg;
    struct binder_write_read bwr;
    ...
    if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
          ret = -EFAULT;
          goto out;
       }
    ...
    if (bwr.write_size > 0) {
       // process the write buffer (commands from user space)
          ret = binder_thread_write(proc, thread,
                     bwr.write_buffer,
                     bwr.write_size,
                     &bwr.write_consumed);
            ...
       }
       if (bwr.read_size > 0) {
       // fill the read buffer (work items for user space)
          ret = binder_thread_read(proc, thread, bwr.read_buffer,
                    bwr.read_size,
                    &bwr.read_consumed,
                    filp->f_flags & O_NONBLOCK);
         ...
       }

}
binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed)
// Consumes the write_buffer of the binder_write_read structure (the commands written by
// user space); the driver then delivers the transaction data to the target process.
static int binder_thread_write(struct binder_proc *proc,
         struct binder_thread *thread,
         binder_uintptr_t binder_buffer, size_t size,
         binder_size_t *consumed)
{
   uint32_t cmd;
   struct binder_context *context = proc->context;
   void __user *buffer = (void __user *)(uintptr_t)binder_buffer; // start of the user write buffer
   void __user *ptr = buffer + *consumed; // *consumed bytes were already processed; ptr is where parsing resumes
   void __user *end = buffer + size; // end of the user write buffer
   // walk the commands between ptr and end
   while (ptr < end && thread->return_error.cmd == BR_OK) {
      int ret;
        // read a uint32_t command
      if (get_user(cmd, (uint32_t __user *)ptr))
         return -EFAULT;
         // advance past the command and keep parsing
      ptr += sizeof(uint32_t);
      switch (cmd) {
          ...
          case BC_TRANSACTION:
          case BC_REPLY: {
                   // The write_buffer is a stream of (cmd + binder_transaction_data)
                   // records built by writeTransactionData(); the cmd (BC_TRANSACTION /
                   // BC_REPLY) was read above, so a binder_transaction_data follows.
                     struct binder_transaction_data tr;
                      // copy sizeof(tr) bytes of binder_transaction_data from user space
                     if (copy_from_user(&tr, ptr, sizeof(tr)))
                        return -EFAULT;
                     // advance ptr past the record
                     ptr += sizeof(tr);
                     // process the transaction
                     binder_transaction(proc, thread, &tr,
                              cmd == BC_REPLY, 0);
                     break;
                  }
            ...
      }
     }

}
// Handles the BC_TRANSACTION / BC_REPLY commands.
// BC_TRANSACTION: client requesting a server; BC_REPLY: server answering the client.
// reply = (cmd == BC_REPLY)
// By default a request is synchronous (TF_ONE_WAY not set).
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *w;
	struct binder_work *tcomplete;
	binder_size_t buffer_offset = 0;
	binder_size_t off_start_offset, off_end_offset;
	binder_size_t off_min;
	binder_size_t sg_buf_offset, sg_buf_end_offset;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	binder_size_t last_fixup_obj_off = 0;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);
	char *secctx = NULL;
	u32 secctx_sz = 0;

    ...
	if (reply) {
        // BC_REPLY: only a server replying to a client takes this branch
		binder_inner_proc_lock(proc);
        // the transaction being answered was recorded on this thread's transaction stack when the client's request was delivered
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
        // the target thread is the client thread that made the request (the transaction's t->from)
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			/* annotation for sparse */
			__release(&target_thread->proc->inner_lock);
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
        // the target process is the one that owns the target thread
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
         // BC_TRANSACTION: a client sending a request takes this branch
        // a target handle was supplied
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			// look up the binder_ref for this handle in the caller's refs_by_desc tree; the ref points at the target binder_node (and thus its binder_proc)
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
			// a ref exists for this handle: resolve the target binder_node / binder_proc from it
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
		// handle is 0: use the context node binder_context_mgr_node, i.e. servicemanager's binder_node
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
			// resolve target_node/target_proc, i.e. servicemanager's binder_node and binder_proc
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
			if (target_node && target_proc->pid == proc->pid) {
				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		}
		if (!target_node) {
		    // no target binder_node could be found: fail the transaction
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);
        // peek at the binder_work at the head of this thread's todo queue
		w = list_first_entry_or_null(&thread->todo,
					     struct binder_work, entry);
	
		if (!(tr->flags & TF_ONE_WAY) && w &&
		    w->type == BINDER_WORK_TRANSACTION) {
			/*
			 * Do not allow new outgoing transaction from a
			 * thread that has a transaction at the head of
			 * its todo list. Only need to check the head
			 * because binder_select_thread_ilocked picks a
			 * thread from proc->waiting_threads to enqueue
			 * the transaction, and nothing is queued to the
			 * todo list while the thread is on waiting_threads.
			 */
			binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
					  proc->pid, thread->pid);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_bad_todo_list;
		}
        // a client request is synchronous (not one-way) by default; for a fresh request thread->transaction_stack is NULL and this block is skipped
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
    // client -> server request: target_thread is NULL here
    // server -> client reply: target_thread is the client thread that made the request
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

    // allocate a binder_transaction t
	/* TODO: reuse incoming transaction for reply */
	// t is recorded on the sender's transaction stack so the server can later route its reply back to this client
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	INIT_LIST_HEAD(&t->fd_fixups);
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);
    // allocate a binder_work tcomplete: the BINDER_WORK_TRANSACTION_COMPLETE notification for the sender
	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread; // synchronous BC_TRANSACTION from a client: record the sending thread
	else
		t->from = NULL; // one-way, or BC_REPLY (server replying): no sender thread recorded
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc; // record the target process
	// BC_REPLY (server -> client): target_thread was found above via the reply stack.
	// BC_TRANSACTION (client -> server): normally only target_proc is known, so
	// target_thread stays NULL and the driver later picks a thread in the target process.
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	if (!(t->flags & TF_ONE_WAY) &&
	    binder_supported_policy(current->policy)) {
		/* Inherit supported policies for synchronous transactions */
		t->priority.sched_policy = current->policy;
		t->priority.prio = current->normal_prio;
	} else {
		/* Otherwise, fall back to the default priority */
		t->priority = target_proc->default_priority;
	}

	if (target_node && target_node->txn_security_ctx) {
		u32 secid;
		size_t added_size;

		security_task_getsecid(proc->tsk, &secid);
		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
		if (ret) {
			return_error = BR_FAILED_REPLY;
			return_error_param = ret;
			return_error_line = __LINE__;
			goto err_get_secctx_failed;
		}
		added_size = ALIGN(secctx_sz, sizeof(u64));
		extra_buffers_size += added_size;
		if (extra_buffers_size < added_size) {
			/* integer overflow of extra_buffers_size */
			return_error = BR_FAILED_REPLY;
			return_error_param = EINVAL;
			return_error_line = __LINE__;
			goto err_bad_extra_size;
		}
	}

	trace_binder_transaction(reply, t, target_node);
 
    // Allocate a buffer from the target process's binder_alloc. The kernel buffer and the
    // region the target process mmap()ed refer to the same physical pages (the offset is the
    // difference between the two mappings), so once the payload is copied into this buffer
    // the target can read it directly from its own address space.
    // After the copy, every binder object in the payload is translated into entries of the
    // sending/receiving processes' node or ref trees, depending on its type.
	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	if (secctx) {
		int err;
		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
				    ALIGN(tr->offsets_size, sizeof(void *)) +
				    ALIGN(extra_buffers_size, sizeof(void *)) -
				    ALIGN(secctx_sz, sizeof(u64));

		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
						  t->buffer, buf_offset,
						  secctx, secctx_sz);
		if (err) {
			t->security_ctx = 0;
			WARN_ON(1);
		}
		security_release_secctx(secctx, secctx_sz);
		secctx = NULL;
	}
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t; // the transaction this buffer belongs to
	t->buffer->target_node = target_node; // the target node this buffer is addressed to
	trace_binder_transaction_alloc_buf(t->buffer);
	// This is binder's single data copy: the kernel copies the client's buffer + offsets
	// contents into the target process's (mmap()ed) buffer; everything "copied" before this
	// point was only pointers to the data.
	// Copy the plain payload (tr->data.ptr.buffer, tr->data_size bytes) into t->buffer.
	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer, 0,
				(const void __user *)
					(uintptr_t)tr->data.ptr.buffer,
				tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	// Then append the offsets array (tr->data.ptr.offsets, tr->offsets_size bytes) to t->buffer;
	// it lets the driver quickly locate every flattened binder object inside the payload.
	if (binder_alloc_copy_user_to_buffer(
				&target_proc->alloc,
				t->buffer,
				ALIGN(tr->data_size, sizeof(void *)),
				(const void __user *)
					(uintptr_t)tr->data.ptr.offsets,
				tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_start_offset = ALIGN(tr->data_size, sizeof(void *));
	buffer_offset = off_start_offset;
	off_end_offset = off_start_offset + tr->offsets_size;
	sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
	sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
		ALIGN(secctx_sz, sizeof(u64));
	off_min = 0;
	// iterate over every flattened binder object in the payload
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size;
		struct binder_object object;
		binder_size_t object_offset;
		// the payload is already in t->buffer; read each binder object back out of it and install it into the appropriate binder_node / refs_by_desc rb-trees
		if (binder_alloc_copy_from_buffer(&target_proc->alloc,
						  &object_offset,
						  t->buffer,
						  buffer_offset,
						  sizeof(object_offset))) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}
		// read the binder_object at this offset
        /*struct binder_object {
            union {
                struct binder_object_header hdr;
                struct flat_binder_object fbo;
                struct binder_fd_object fdo;
                struct binder_buffer_object bbo;
                struct binder_fd_array_object fdao;
            };
        }; */
		object_size = binder_get_object(target_proc, t->buffer,
						object_offset, &object);
		if (object_size == 0 || object_offset < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid,
					  (u64)object_offset,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = &object.hdr;
		off_min = object_offset + object_size;
		switch (hdr->type) {
		// A local binder is being sent to the target:
		// it gets a binder_node stored in the sender's nodes tree (and a ref in the target's refs tree).
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			// convert hdr back into a flat_binder_object
			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);

			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			binder_size_t fd_offset = object_offset +
				(uintptr_t)&fp->fd - (uintptr_t)fp;
			int ret = binder_translate_fd(fp->fd, fd_offset, t,
						      thread, in_reply_to);

			fp->pad_binder = 0;
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							fp, sizeof(*fp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_object ptr_object;
			binder_size_t parent_offset;
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			size_t num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			struct binder_buffer_object *parent =
				binder_validate_ptr(target_proc, t->buffer,
						    &ptr_object, fda->parent,
						    off_start_offset,
						    &parent_offset,
						    num_valid);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(target_proc, t->buffer,
						   off_start_offset,
						   parent_offset,
						   fda->parent_offset,
						   last_fixup_obj_off,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = parent_offset;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end_offset - sg_buf_offset;
			size_t num_valid;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (binder_alloc_copy_user_to_buffer(
						&target_proc->alloc,
						t->buffer,
						sg_buf_offset,
						(const void __user *)
							(uintptr_t)bp->buffer,
						bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)
				t->buffer->user_data + sg_buf_offset;
			sg_buf_offset += ALIGN(bp->length, sizeof(u64));

			num_valid = (buffer_offset - off_start_offset) /
					sizeof(binder_size_t);
			ret = binder_fixup_parent(t, thread, bp,
						  off_start_offset,
						  num_valid,
						  last_fixup_obj_off,
						  last_fixup_min_off);
			if (ret < 0 ||
			    binder_alloc_copy_to_buffer(&target_proc->alloc,
							t->buffer,
							object_offset,
							bp, sizeof(*bp))) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj_off = object_offset;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		// BC_REPLY (server -> client): queue the tcomplete work on the server thread's own todo queue
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		// Queue t->work (a binder_work; container_of() recovers the binder_transaction from it)
		// on the target thread's todo list. For a reply the target thread is known: it is the
		// client thread that made the original request.
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		// wake up the target thread
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_restore_priority(current, in_reply_to->saved_priority);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		// Synchronous BC_TRANSACTION (client -> server): TF_ONE_WAY is not set, so
		// (t->flags & TF_ONE_WAY) is 0 and !(t->flags & TF_ONE_WAY) is true.
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
		t->need_reply = 1;
		// t->from_parent links to the client thread's previous transaction stack; when the server replies, the driver walks this stack to find the matching transaction and its client thread (t->from)
		t->from_parent = thread->transaction_stack; 
		thread->transaction_stack = t; // push t onto the client thread's transaction stack
		binder_inner_proc_unlock(proc);
		// queue t to the target process (or a specific target thread) and wake it up
		if (!binder_proc_transaction(t, target_proc, target_thread)) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		// one-way case: no reply is expected, so no target thread is pinned (binder_proc_transaction is passed NULL for the thread)
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		binder_enqueue_thread_work(thread, tcomplete);
		if (!binder_proc_transaction(t, target_proc, NULL))
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_dead_proc_or_thread:
	return_error = BR_DEAD_REPLY;
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	binder_free_txn_fixups(t);
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer,
					  buffer_offset, true);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
	if (secctx)
		security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node) {
		binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		binder_restore_priority(current, in_reply_to->saved_priority);
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
}
/** When a third-party service registers with servicemanager, a binder_node is created in the service's own
 ** binder_proc (in its nodes tree) and a reference node keyed by a handle is created in servicemanager's
 ** refs tree; servicemanager's user space records the (handle, serviceName) mapping.
 ** A lookup by name in servicemanager returns the corresponding handle, wrapped in a flat_binder_object of
 ** type BINDER_TYPE_HANDLE and passed back through the binder driver.
 ** For a BINDER_TYPE_BINDER object, the driver creates (or reuses) a binder_node in the sending process's
 ** binder_proc and adds a reference node with a newly assigned handle (desc) to the target process's refs tree.
 **/
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
	    // create a new node and insert it into this process's binder_node tree;
	    // e.g. when registering a service with servicemanager, a binder_node for that service is created
	    // in the service process's binder_proc and inserted into its nodes tree
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}
    // create a reference node (which records its owning binder_proc/binder_node) and insert it into the
    // target process's refs tree, so the target process can later look up this service node by handle
	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;
    
    // The two steps above created, inside the driver, the binder_node in the sending process's node tree
    // and the binder_ref in the target process's refs tree. Now rewrite the flat_binder_object that will
    // be delivered to the target process's user space.
      
	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE; // rewrite the type to BINDER_TYPE_HANDLE
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0; // clear the binder pointer
    // Record the handle value rdata.desc. For example, when servicemanager's user space receives this
    // flat_binder_object it records the (handle, serviceName) mapping; a later lookup by serviceName
    // returns this handle, which resolves to the matching binder_ref in servicemanager's refs tree and,
    // through it, to the service's binder_proc/binder_node.
	fp->handle = rdata.desc;
	fp->cookie = 0; // clear the cookie

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}
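
To make the rewrite above easier to follow, here is a small user-space sketch (plain C, compilable on its own) of what the tail of binder_translate_binder does to the flat_binder_object before it reaches the receiver. The struct and the desc value 3 are simplified stand-ins for illustration, not the real kernel definitions.

#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins for the kernel's flat_binder_object and type constants */
enum { BINDER_TYPE_BINDER = 0, BINDER_TYPE_HANDLE = 1 };

struct demo_flat_binder_object {
    uint32_t type;     /* BINDER_TYPE_BINDER or BINDER_TYPE_HANDLE */
    uint64_t binder;   /* local object pointer (sender side) */
    uint32_t handle;   /* remote object handle (receiver side) */
    uint64_t cookie;   /* local object cookie (sender side) */
};

/* Mimics the tail of binder_translate_binder(): node/ref bookkeeping is done,
 * now the object is rewritten so the receiver only ever sees a handle. */
static void demo_translate_binder(struct demo_flat_binder_object *fp, uint32_t new_desc)
{
    fp->type = BINDER_TYPE_HANDLE; /* the receiver gets a handle, never a raw pointer */
    fp->binder = 0;                /* never leak the sender's address to another process */
    fp->handle = new_desc;         /* desc assigned in the receiver's refs tree */
    fp->cookie = 0;
}

int main(void)
{
    /* what the sender's Parcel wrote: a local BBinder identified by its pointer/cookie */
    struct demo_flat_binder_object fp = {
        .type = BINDER_TYPE_BINDER, .binder = 0x7f00aa00, .handle = 0, .cookie = 0x7f00bb00,
    };
    demo_translate_binder(&fp, 3 /* hypothetical desc from the receiver's refs tree */);
    printf("type=%u binder=%#llx handle=%u cookie=%#llx\n",
           fp.type, (unsigned long long)fp.binder, fp.handle, (unsigned long long)fp.cookie);
    return 0;
}
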
// Handles objects of type BINDER_TYPE_HANDLE (e.g. the flat_binder_object servicemanager sends back), which carry a handle
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;
     // For example, servicemanager's user space resolves a service name to a handle and wraps it in a
     // flat_binder_object sent back to the driver; that handle refers to a binder_ref in servicemanager's
     // refs tree, which in turn records the service's binder_node/binder_proc.
    // Look up the binder_ref for this handle in the current process's refs tree and obtain the referenced
    // binder_node/binder_proc from it.
	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
    // If the service node belongs to the target process itself (i.e. the client requesting the binder
    // service lives in the same process as the service), there is no need to create a reference node in
    // that process's refs tree: the binder_node is already in the same process's node tree.
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
            // the flat_binder_object delivered to user space gets type BINDER_TYPE_BINDER
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
        // The flat_binder_object delivered to user space gets binder = node->ptr and cookie = node->cookie.
        // Since the service node lives in the same process, the cookie can be converted directly back into
        // the C++ BBinder object.
		fp->binder = node->ptr;  
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		else
			__acquire(&node->proc->inner_lock);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		else
			__release(&node->proc->inner_lock);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
        // If the requested binder service belongs to a third process (e.g. a service looked up through
        // servicemanager), create a reference node for it in the target process's (target_proc) refs tree
        // with a newly assigned handle; that new handle is what gets returned to user space.
        // From then on servicemanager is no longer involved: this handle alone resolves, through the
        // process's own refs tree, to the service's binder_proc/binder_node.
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;
        // rewrite the flat_binder_object delivered to user space: clear binder/cookie and record the new handle
		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}
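
A point that is easy to miss here: handles (desc values) are per-process. The same service node can be handle 1 in one client and handle 3 in another, because each binder_proc keeps its own refs tree. The sketch below models descriptor assignment under the assumption that the lowest unused value is chosen and 0 is reserved for the context manager (servicemanager); it is an illustration, not the kernel's code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed allocation rule for illustration: smallest unused desc, with 0 reserved
 * for the context manager. 'used' must be sorted ascending, like refs_by_desc. */
static uint32_t assign_desc(const uint32_t *used, size_t n_used, bool is_context_manager)
{
    if (is_context_manager)
        return 0;                       /* handle 0 always means servicemanager */
    uint32_t candidate = 1;
    for (size_t i = 0; i < n_used; i++)
        if (used[i] == candidate)
            candidate++;
    return candidate;
}

int main(void)
{
    uint32_t client_a[] = {0, 1, 2};    /* client A already references three nodes */
    uint32_t client_b[] = {0};          /* client B only references servicemanager */
    printf("same service -> desc %u in A, desc %u in B\n",
           assign_desc(client_a, 3, false), assign_desc(client_b, 1, false));
    return 0;
}
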
static bool binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	struct binder_priority node_prio;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;

	BUG_ON(!node);
	binder_node_lock(node);
	node_prio.prio = node->min_priority;
	node_prio.sched_policy = node->sched_policy;

    // the oneway flag travels with the transaction data (t->flags)
	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction) {
			pending_async = true;
		} else {
		  // first oneway transaction on this node takes this branch: asynchronous, non-blocking
			node->has_async_transaction = true;
		}
	}

	binder_inner_proc_lock(proc);
    // the target process (or thread) is dead
	if (proc->is_dead || (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return false;
	}
    // If no target thread was handed in and there is no pending async work (i.e. a synchronous request, or
    // the first oneway request on this node), pick an available thread from the target binder_proc.
	if (!thread && !pending_async)
	    // select an available looper thread from the target process
		thread = binder_select_thread_ilocked(proc);

	if (thread) {
		binder_transaction_priority(thread->task, t, node_prio,
					    node->inherit_rt);
		// a thread was found: queue transaction t on that thread's todo list
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	} else if (!pending_async) {
	    // no suitable thread, and this is a synchronous request or the first oneway request:
	    // queue transaction t on the process's todo list
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	} else {
	    // Subsequent oneway transactions skip the two branches above and land here: they are queued on the
	    // target node's async todo list.
	    // (The first oneway picks a thread, or falls back to the process, exactly as above.)
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
	}

	if (!pending_async)
	    // for a sync request or the first oneway request, wake up the target process or thread
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

    // Summary: the first transaction, oneway or not, looks for a suitable thread and falls back to the
    // process, queuing t on that thread's/process's todo list.
    // Further oneway calls to the same target_node are appended to node->async_todo, so oneway calls
    // against a single node are serialized: the first one wakes a thread/process, later ones simply sit on
    // target_node->async_todo until the previous async buffer is freed.
    // Synchronous (non-oneway) calls are not serialized this way: under high concurrency multiple threads
    // can process them in parallel.

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return true;
}
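
The queueing rules above can be condensed into one decision function. The sketch below is a user-space model of binder_proc_transaction's dispatch order, with illustrative names; it only mirrors the decision logic, not the locking or priority handling.

#include <stdbool.h>
#include <stdio.h>

/* Where does a transaction get queued? Condensed model of binder_proc_transaction(). */
enum target_queue { THREAD_TODO, PROC_TODO, NODE_ASYNC_TODO };

static enum target_queue pick_queue(bool oneway, bool node_has_async_pending,
                                    bool have_idle_thread, bool have_target_thread)
{
    bool pending_async = oneway && node_has_async_pending;
    if (have_target_thread || (!pending_async && have_idle_thread))
        return THREAD_TODO;        /* a concrete thread will run this transaction */
    if (!pending_async)
        return PROC_TODO;          /* any looper thread of the target process may pick it up */
    return NODE_ASYNC_TODO;        /* further oneway calls to a busy node wait behind the node */
}

int main(void)
{
    printf("sync, idle thread available   -> %d\n", pick_queue(false, false, true,  false));
    printf("sync, no idle thread          -> %d\n", pick_queue(false, false, false, false));
    printf("first oneway to this node     -> %d\n", pick_queue(true,  false, true,  false));
    printf("second oneway, first pending  -> %d\n", pick_queue(true,  true,  true,  false));
    return 0;
}
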

mmap

// mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
// Binder memory mapping: a range of the process's virtual address space is mapped onto physical pages
// owned by the binder driver, which the driver also accesses from kernel space, so transaction data only
// needs to be copied once.
// mmap() first creates a vm_area_struct in the process's address space recording the mapping parameters
// (start address/size (vm_start, vm_end), offset, ...) and passes it down to the driver as vma.
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
   int ret;
   // the binder_proc created in binder_open() for this process is stored in filp->private_data
   struct binder_proc *proc = filp->private_data;
   const char *failure_string;

   if (proc->tsk != current->group_leader)
      return -EINVAL;

   binder_debug(BINDER_DEBUG_OPEN_CLOSE,
           "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
           __func__, proc->pid, vma->vm_start, vma->vm_end,
           (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
           (unsigned long)pgprot_val(vma->vm_page_prot));

   if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
      ret = -EPERM;
      failure_string = "bad vm_flags";
      goto err_bad_arg;
   }
   vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
   vma->vm_flags &= ~VM_MAYWRITE;

   vma->vm_ops = &binder_vm_ops; // install the binder vma operations
   vma->vm_private_data = proc; // vma->vm_private_data points back to this binder_proc

   ret = binder_alloc_mmap_handler(&proc->alloc, vma); // each binder_proc has its own allocator (binder_alloc.c)
   if (ret)
      return ret;
   return 0;

err_bad_arg:
   pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
          proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
   return ret;
}
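
For reference, this is roughly what the user-space side of that call looks like: open /dev/binder and map a read-only, private region, as ProcessState and servicemanager do. The 128 KB size here is only an example; the driver caps the usable size at 4 MB regardless.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t mapsize = 128 * 1024;                       /* example size only */
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
    if (fd < 0) { perror("open /dev/binder"); return 1; }

    /* PROT_READ + MAP_PRIVATE: user space only ever reads transaction data the driver copied in;
     * writes go through BINDER_WRITE_READ ioctls, never through this mapping. */
    void *mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, fd, 0);
    if (mapped == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

    printf("binder buffer mapped at %p (%zu bytes)\n", mapped, mapsize);
    munmap(mapped, mapsize);
    close(fd);
    return 0;
}
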
// Each binder_proc has its own binder_alloc, which dynamically carves buffers out of the mapped region
// and manages the physical pages shared between the process's address space and the driver.
struct binder_alloc {
   struct mutex mutex;
   struct vm_area_struct *vma; // the user-space vma set up by binder_mmap
   struct mm_struct *vma_vm_mm;
   void __user *buffer; // start address of the mapped user-space region (vma->vm_start)
   struct list_head buffers; // list of all binder_buffers in this mapping
   struct rb_root free_buffers; // red-black tree of free buffers
   struct rb_root allocated_buffers; // red-black tree of allocated (in-use) buffers
   size_t free_async_space; // space remaining for async (oneway) transactions
   struct binder_lru_page *pages; // array tracking the physical pages of the mapping
   size_t buffer_size; // total size of the mapped area
   uint32_t buffer_free; // count of free buffers
   int pid;
   size_t pages_high;
};
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
               struct vm_area_struct *vma)
{
   int ret;
   const char *failure_string;
   struct binder_buffer *buffer;

   mutex_lock(&binder_alloc_mmap_lock);
   if (alloc->buffer_size) {
      ret = -EBUSY;
      failure_string = "already mapped";
      goto err_already_mapped;
   }
   // the driver caps the mapped buffer at 4 MB:
   // even if the process maps a larger range, at most 4 MB of it is used
   alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
               SZ_4M);
   mutex_unlock(&binder_alloc_mmap_lock);
    // start address of the mapped user-space buffer region
   alloc->buffer = (void __user *)vma->vm_start;
    // Allocation is page-granular (PAGE_SIZE per page): compute how many pages the mapping covers and
    // allocate that many binder_lru_page tracking entries. No physical pages are allocated or mapped yet;
    // that happens lazily when buffers are actually used.
   alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
                sizeof(alloc->pages[0]),
                GFP_KERNEL);
   if (alloc->pages == NULL) {
      ret = -ENOMEM;
      failure_string = "alloc page array";
      goto err_alloc_pages_failed;
   }
    // allocate the initial binder_buffer descriptor
   buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
   if (!buffer) {
      ret = -ENOMEM;
      failure_string = "alloc buffer struct";
      goto err_alloc_buf_struct_failed;
   }
    // the first buffer starts at the beginning of the mapped user-space region
   buffer->user_data = alloc->buffer;
   // add it to this process's list of all buffers
   list_add(&buffer->entry, &alloc->buffers);
   // mark it as free
   buffer->free = 1;
   // insert it into the free-buffer red-black tree
   binder_insert_free_buffer(alloc, buffer);
   // async transactions may use at most half of the total buffer space (so at most 2 MB)
   alloc->free_async_space = alloc->buffer_size / 2;
   binder_alloc_set_vma(alloc, vma); // remember the vma in this process's binder_alloc; it is used later to allocate buffers for incoming transactions
   mmgrab(alloc->vma_vm_mm);

   return 0;

err_alloc_buf_struct_failed:
   kfree(alloc->pages);
   alloc->pages = NULL;
err_alloc_pages_failed:
   alloc->buffer = NULL;
   mutex_lock(&binder_alloc_mmap_lock);
   alloc->buffer_size = 0;
err_already_mapped:
   mutex_unlock(&binder_alloc_mmap_lock);
   binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
            "%s: %d %lx-%lx %s failed %d\n", __func__,
            alloc->pid, vma->vm_start, vma->vm_end,
            failure_string, ret);
   return ret;
}
// Allocate a buffer dynamically while handling a transaction
// alloc: the target process's buffer allocator
static struct binder_buffer *binder_alloc_new_buf_locked(
            struct binder_alloc *alloc,
            size_t data_size, // size of the transaction data
            size_t offsets_size, // size of the offsets array locating the flattened binder objects
            size_t extra_buffers_size, // size of any extra buffers (e.g. scatter-gather)
            int is_async) // whether this is an async (oneway) transaction
{
    // root of the free-buffer red-black tree
   struct rb_node *n = alloc->free_buffers.rb_node;
   struct binder_buffer *buffer;
   size_t buffer_size;
   struct rb_node *best_fit = NULL;
   void __user *has_page_addr;
   void __user *end_page_addr;
   size_t size, data_offsets_size;
   int ret;
    // the vma was already saved into alloc at binder_mmap time
   if (!binder_alloc_get_vma(alloc)) {
      binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
               "%d: binder_alloc_buf, no vma\n",
               alloc->pid);
      return ERR_PTR(-ESRCH);
   }

    // data_size: total data length; offsets_size: size of the array of offsets of flattened binder objects
   data_offsets_size = ALIGN(data_size, sizeof(void *)) +
      ALIGN(offsets_size, sizeof(void *));

   if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
      binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
            "%d: got transaction with invalid size %zd-%zd\n",
            alloc->pid, data_size, offsets_size);
      return ERR_PTR(-EINVAL);
   }
   // size: total length (data_size + offsets_size + extra_buffers_size), each part pointer-aligned
   size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
   if (size < data_offsets_size || size < extra_buffers_size) {
      binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
            "%d: got transaction with invalid extra_buffers_size %zd\n",
            alloc->pid, extra_buffers_size);
      return ERR_PTR(-EINVAL);
   }
   // async transactions may only use half of the mmap'd space; check that enough async space is left
   if (is_async &&
       alloc->free_async_space < size + sizeof(struct binder_buffer)) {
      binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
              "%d: binder_alloc_buf size %zd failed, no async space left\n",
               alloc->pid, size);
      return ERR_PTR(-ENOSPC);
   }

   /* Pad 0-size buffers so they get assigned unique addresses */
   size = max(size, sizeof(void *));
    // walk the free-buffer tree for the best-fit free buffer for size
   while (n) {
      buffer = rb_entry(n, struct binder_buffer, rb_node);
      BUG_ON(!buffer->free);
      // size of this free buffer
      buffer_size = binder_alloc_buffer_size(alloc, buffer);
        // descend the tree: best fit is the smallest free buffer that is still large enough
      if (size < buffer_size) {
         best_fit = n;
         n = n->rb_left; 
      } else if (size > buffer_size)
         n = n->rb_right;
      else {
         best_fit = n;
         break;
      }
   }
   // no suitable free buffer found: dump the current allocated/free buffer statistics and return an error
   if (best_fit == NULL) {
      size_t allocated_buffers = 0;
      size_t largest_alloc_size = 0;
      size_t total_alloc_size = 0;
      size_t free_buffers = 0;
      size_t largest_free_size = 0;
      size_t total_free_size = 0;

      for (n = rb_first(&alloc->allocated_buffers); n != NULL;
           n = rb_next(n)) {
         buffer = rb_entry(n, struct binder_buffer, rb_node);
         buffer_size = binder_alloc_buffer_size(alloc, buffer);
         allocated_buffers++;
         total_alloc_size += buffer_size; // total size of all allocated buffers
         if (buffer_size > largest_alloc_size)
            largest_alloc_size = buffer_size; // largest allocated buffer
      }
      for (n = rb_first(&alloc->free_buffers); n != NULL;
           n = rb_next(n)) {
         buffer = rb_entry(n, struct binder_buffer, rb_node);
         buffer_size = binder_alloc_buffer_size(alloc, buffer);
         free_buffers++;
         total_free_size += buffer_size; // total size of all free buffers
         if (buffer_size > largest_free_size)
            largest_free_size = buffer_size; // largest free buffer
      }
      binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
               "%d: binder_alloc_buf size %zd failed, no address space\n",
               alloc->pid, size);
      binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
               "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
               total_alloc_size, allocated_buffers,
               largest_alloc_size, total_free_size,
               free_buffers, largest_free_size);
      return ERR_PTR(-ENOSPC);
   }
   if (n == NULL) {
      buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
      buffer_size = binder_alloc_buffer_size(alloc, buffer);
   }

   binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
           "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
            alloc->pid, size, buffer, buffer_size);

   has_page_addr = (void __user *)
      (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
   WARN_ON(n && buffer_size != size);
   end_page_addr =
      (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
   if (end_page_addr > has_page_addr)
      end_page_addr = has_page_addr;
   // allocate physical pages for this buffer's address range and map them into the process's address space
   ret = binder_update_page_range(alloc, 1, (void __user *)
      PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
   if (ret)
      return ERR_PTR(ret);

   if (buffer_size != size) {
      // The initial mmap created a single large free buffer; buffer_size is derived from the distance to
      // the next buffer's start address. If the chosen free buffer is larger than needed, split it: the
      // unused tail becomes a new free buffer inserted back into the free tree, so no address space is lost.
      struct binder_buffer *new_buffer;

      new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
      if (!new_buffer) {
         pr_err("%s: %d failed to alloc new buffer struct\n",
                __func__, alloc->pid);
         goto err_alloc_buf_struct_failed;
      }
      // the new free buffer starts right after the allocated portion (chosen buffer's start + size)
      new_buffer->user_data = (u8 __user *)buffer->user_data + size;
      list_add(&new_buffer->entry, &buffer->entry);
      new_buffer->free = 1;
      // insert the remainder into the free-buffer tree
      binder_insert_free_buffer(alloc, new_buffer);
   }
    // remove the chosen buffer from the free tree and add it to the allocated tree
   rb_erase(best_fit, &alloc->free_buffers);
   buffer->free = 0; // mark it as in use
   buffer->allow_user_free = 0;
   // insert it into the allocated-buffer red-black tree
   binder_insert_allocated_buffer_locked(alloc, buffer);
   binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
           "%d: binder_alloc_buf size %zd got %pK\n",
            alloc->pid, size, buffer);
   buffer->data_size = data_size;
   buffer->offsets_size = offsets_size;
   buffer->async_transaction = is_async;
   buffer->extra_buffers_size = extra_buffers_size;
   if (is_async) {
      alloc->free_async_space -= size + sizeof(struct binder_buffer);
      binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
              "%d: binder_alloc_buf size %zd async free %zd\n",
               alloc->pid, size, alloc->free_async_space);
   }
   return buffer;

err_alloc_buf_struct_failed:
   binder_update_page_range(alloc, 0, (void __user *)
             PAGE_ALIGN((uintptr_t)buffer->user_data),
             end_page_addr);
   return ERR_PTR(-ENOMEM);
}
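
The best-fit-and-split behaviour above (and the question of the "extra" free buffer) can be seen in a toy model: pick the smallest free chunk that still fits, allocate the front of it, and return the unused tail to the free pool as a new free chunk, so no address space is lost. Plain arrays stand in for the red-black trees; this is an illustration, not the allocator itself.

#include <stddef.h>
#include <stdio.h>

struct chunk { size_t start, size; };

/* return the index of the smallest free chunk with size >= want, or -1 */
static int best_fit(const struct chunk *free_list, int n, size_t want)
{
    int best = -1;
    for (int i = 0; i < n; i++)
        if (free_list[i].size >= want &&
            (best < 0 || free_list[i].size < free_list[best].size))
            best = i;
    return best;
}

int main(void)
{
    struct chunk free_list[4] = { {0, 4096}, {8192, 512}, {12288, 65536} };
    int n = 3;
    size_t want = 1024;

    int i = best_fit(free_list, n, want);
    if (i < 0) { puts("no address space"); return 1; }

    struct chunk allocated = { free_list[i].start, want };
    if (free_list[i].size > want)           /* split: the tail stays free */
        free_list[n++] = (struct chunk){ free_list[i].start + want, free_list[i].size - want };
    free_list[i] = free_list[--n];          /* drop the chosen chunk from the free pool */

    printf("allocated [%zu, %zu)\n", allocated.start, allocated.start + allocated.size);
    for (int k = 0; k < n; k++)
        printf("free      [%zu, %zu)\n", free_list[k].start, free_list[k].start + free_list[k].size);
    return 0;
}
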
// Allocate physical pages for the process virtual address range (start - end) and insert them into the
// process's address space; the driver reaches the same pages from kernel space (via kmap) when copying data
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                void __user *start, void __user *end)
{
   void __user *page_addr;
   unsigned long user_page_addr;
   struct binder_lru_page *page;
   struct vm_area_struct *vma = NULL;
   struct mm_struct *mm = NULL;
   bool need_mm = false;

   binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
           "%d: %s pages %pK-%pK\n", alloc->pid,
           allocate ? "allocate" : "free", start, end);

   if (end <= start)
      return 0;

   trace_binder_update_page_range(alloc, allocate, start, end);

   if (allocate == 0)
      goto free_range;
    // Physical memory is managed page by page; the pages array was created at mmap time and covers the
    // whole mapped size. Check whether any page in [start, end) still lacks a physical page.
   for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
       // compute which page entry this user virtual address falls into
      page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
      if (!page->page_ptr) {
         need_mm = true; // at least one page still needs to be allocated and mapped
         break;
      }
   }

   if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
      mm = alloc->vma_vm_mm;

   if (mm) {
      down_read(&mm->mmap_sem);
      vma = alloc->vma;
   }

   if (!vma && need_mm) {
      binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
               "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
               alloc->pid);
      goto err_no_vma;
   }

   for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
      int ret;
      bool on_lru;
      size_t index;

      index = (page_addr - alloc->buffer) / PAGE_SIZE;
      page = &alloc->pages[index];

      if (page->page_ptr) {
         trace_binder_alloc_lru_start(alloc, index);

         on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
         WARN_ON(!on_lru);

         trace_binder_alloc_lru_end(alloc, index);
         continue;
      }

      if (WARN_ON(!vma))
         goto err_page_ptr_cleared;

      trace_binder_alloc_page_start(alloc, index);
      // allocate a physical page for this entry
      page->page_ptr = alloc_page(GFP_KERNEL |
                   __GFP_HIGHMEM |
                   __GFP_ZERO);
      if (!page->page_ptr) {
         pr_err("%d: binder_alloc_buf failed for page at %pK\n",
            alloc->pid, page_addr);
         goto err_alloc_page_failed;
      }
      page->alloc = alloc;
      INIT_LIST_HEAD(&page->lru);

      user_page_addr = (uintptr_t)page_addr;
      // map the user virtual address user_page_addr onto this physical page (page[0].page_ptr)
      ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
      if (ret) {
         pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
                alloc->pid, user_page_addr);
         goto err_vm_insert_page_failed;
      }

      if (index + 1 > alloc->pages_high)
         alloc->pages_high = index + 1;

      trace_binder_alloc_page_end(alloc, index);
      /* vm_insert_page does not seem to increment the refcount */
   }
   if (mm) {
      up_read(&mm->mmap_sem);
      mmput(mm);
   }
   return 0;

free_range:
   for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
      bool ret;
      size_t index;

      index = (page_addr - alloc->buffer) / PAGE_SIZE;
      page = &alloc->pages[index];

      trace_binder_free_lru_start(alloc, index);

      ret = list_lru_add(&binder_alloc_lru, &page->lru);
      WARN_ON(!ret);

      trace_binder_free_lru_end(alloc, index);
      if (page_addr == start)
         break;
      continue;

err_vm_insert_page_failed:
      __free_page(page->page_ptr);
      page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
      if (page_addr == start)
         break;
   }
err_no_vma:
   if (mm) {
      up_read(&mm->mmap_sem);
      mmput(mm);
   }
   return vma ? -ENOMEM : -ESRCH;
}
// copy user-space data into the target process's mapped buffer
binder_alloc_copy_user_to_buffer(
            &target_proc->alloc,
            t->buffer, 0,  // copy tr->data.ptr.buffer at buffer offset 0
            (const void __user *)
               (uintptr_t)tr->data.ptr.buffer,
            tr->data_size)
binder_alloc_copy_user_to_buffer(
            &target_proc->alloc,
            t->buffer,
            ALIGN(tr->data_size, sizeof(void *)), // copy tr->data.ptr.offsets right after the (aligned) data
            (const void __user *)
               (uintptr_t)tr->data.ptr.offsets,
            tr->offsets_size)           
            
            

unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
             struct binder_buffer *buffer,
             binder_size_t buffer_offset,
             const void __user *from,
             size_t bytes)
{
   if (!check_buffer(alloc, buffer, buffer_offset, bytes))
      return bytes;

   while (bytes) {
      unsigned long size;
      unsigned long ret;
      struct page *page;
      pgoff_t pgoff;
      void *kptr;
      // The process's virtual range (start - end) is backed by physical pages of PAGE_SIZE each. From the
      // buffer's start address plus buffer_offset, work out which physical page this chunk falls into and
      // the offset (pgoff) within that page.
      page = binder_alloc_get_page(alloc, buffer,
                    buffer_offset, &pgoff);
      size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
      kptr = kmap(page) + pgoff; // temporarily map the physical page into kernel space and add pgoff
      ret = copy_from_user(kptr, from, size); // copy size bytes from user space; the same page backs the target's mapped buffer, so the data lands there directly
      kunmap(page);
      if (ret)
         return bytes - size + ret;
      bytes -= size;
      from += size;
      buffer_offset += size;
   }
   return 0;
}
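
The loop above copies at most one physical page per iteration. The user-space model below shows how a copy that starts mid-page is chopped into chunks of at most PAGE_SIZE - pgoff bytes; the 3000/6000 byte figures are made up.

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL   /* assumed page size */

int main(void)
{
    unsigned long buffer_offset = 3000;  /* where in the mapping the copy starts */
    unsigned long bytes = 6000;          /* how much transaction data to copy */

    while (bytes) {
        unsigned long index = buffer_offset / DEMO_PAGE_SIZE;  /* which physical page */
        unsigned long pgoff = buffer_offset % DEMO_PAGE_SIZE;  /* offset inside that page */
        unsigned long size  = bytes < DEMO_PAGE_SIZE - pgoff ? bytes : DEMO_PAGE_SIZE - pgoff;

        printf("copy %4lu bytes into page %lu at offset %lu\n", size, index, pgoff);
        bytes -= size;
        buffer_offset += size;
    }
    return 0;
}
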
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
                 struct binder_buffer *buffer, // buffer; its start address is buffer->user_data
                 binder_size_t buffer_offset, // offset within the buffer
                 pgoff_t *pgoffp)
{
   // A buffer holds two regions: tr->data.ptr.buffer (all the parcel data) and tr->data.ptr.offsets
   // (the array of offsets of flattened binder objects within that data).
   // Compute where this chunk lands relative to the start of the whole mapped region.
   binder_size_t buffer_space_offset = buffer_offset +
      (buffer->user_data - alloc->buffer);
   pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK; // offset within the physical page
   size_t index = buffer_space_offset >> PAGE_SHIFT; // index of the physical page
   struct binder_lru_page *lru_page;

   lru_page = &alloc->pages[index];
   *pgoffp = pgoff;
   return lru_page->page_ptr; // return the physical page
}
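
A worked example of the index/pgoff arithmetic, with made-up addresses and a 4 KB page: the buffer starts two pages into the mapping and we ask for offset 100 within it, which lands in pages[2] at page offset 100.

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE  4096UL
#define DEMO_PAGE_MASK  (~(DEMO_PAGE_SIZE - 1))

int main(void)
{
    uintptr_t alloc_buffer = 0x70000000UL;      /* alloc->buffer: start of the whole mapping (made up) */
    uintptr_t buffer_user_data = 0x70002000UL;  /* buffer->user_data: this buffer's start (made up) */
    uintptr_t buffer_offset = 100;              /* offset of the chunk within the buffer */

    uintptr_t buffer_space_offset = buffer_offset + (buffer_user_data - alloc_buffer);
    uintptr_t pgoff = buffer_space_offset & ~DEMO_PAGE_MASK;   /* offset within the page */
    uintptr_t index = buffer_space_offset / DEMO_PAGE_SIZE;    /* which entry of alloc->pages[] */

    printf("buffer_space_offset = %lu, page index = %lu, pgoff = %lu\n",
           (unsigned long)buffer_space_offset, (unsigned long)index, (unsigned long)pgoff);
    return 0;
}
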