Binder从陌生到熟悉(下)

1,133 阅读5分钟

Binder从陌生到熟悉(上)

Binder从陌生到熟悉(中)

3.JNI

那么Java层是如何跟Native层进行通信的呢,靠的就是JNI了

JNI全称是Java Native Interface,这个是由Java虚拟机提供的机制。这个机制使得native代码可以和Java代码相互通讯。简单的来说就是:我们可以在C/C++端调用Java代码,也可以在Java端调用C/C++代码。

在Android系统启动过程中,Zygote启动时会有一个"虚拟机注册过程",该过程调用AndroidRuntime::startReg()方法来完成JNI方法的注册。

  • 3.1 startReg()
frameworks/base/core/jni/AndroidRuntime.cpp   

//1440
// Registers all framework JNI methods on the given JNIEnv during VM startup.
int AndroidRuntime::startReg(JNIEnv* env)
{
    androidSetCreateThreadFunc((android_create_thread_fn) javaCreateThreadEtc);
    env->PushLocalFrame(200);
    // Core call: register_jni_procs() registers every JNI method listed in gRegJNI
    if (register_jni_procs(gRegJNI, NELEM(gRegJNI), env) < 0) {
        env->PopLocalFrame(NULL);
        return -1;
    }
    env->PopLocalFrame(NULL);
    return 0;
}

gRegJNI 是一个数组,记录所有需要注册的JNI方法,其中有一项便是REG_JNI(register_android_os_Binder)

frameworks/base/core/jni/AndroidRuntime.cpp  

// 1296
// Table of all JNI registration functions run by startReg().
static const RegJNIRec gRegJNI[] = {
...
    REG_JNI(register_android_os_SystemProperties),
    // *****  key entry for Binder  *****
    REG_JNI(register_android_os_Binder),
    // *****  key entry for Binder  *****
    REG_JNI(register_android_os_Parcel),
...  
};

3.1.1 register_android_os_Binder

frameworks/base/core/jni/android_util_Binder.cpp

//1282
// Registers the JNI glue for the three Binder-related Java classes.
int register_android_os_Binder(JNIEnv* env)
{
    // Register the JNI methods of the Binder class
    if (int_register_android_os_Binder(env) < 0)
        return -1;

    // Register the JNI methods of the BinderInternal class
    if (int_register_android_os_BinderInternal(env) < 0)
        return -1;

    // Register the JNI methods of the BinderProxy class
    if (int_register_android_os_BinderProxy(env) < 0)
        return -1;
    ...
    return 0;
}

3.1.1.1 int_register_android_os_Binder

frameworks/base/core/jni/android_util_Binder.cpp

//589
static int int_register_android_os_Binder(JNIEnv* env)
{
    // kBinderPathName = "android/os/Binder"; look up the class at that path
    jclass clazz = FindClassOrDie(env, kBinderPathName);
    // Cache a global ref to the Java-level Binder class in mClass
    gBinderOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    // Cache the method ID of the Java-level execTransact() in mExecTransact
    gBinderOffsets.mExecTransact = GetMethodIDOrDie(env, clazz, "execTransact", "(IJJI)Z");
    // Cache the field ID of the Java-level mObject field in mObject
    gBinderOffsets.mObject = GetFieldIDOrDie(env, clazz, "mObject", "J");
    // Register the native methods of android.os.Binder
    return RegisterMethodsOrDie(env, kBinderPathName, gBinderMethods,
        NELEM(gBinderMethods));
}

gBinderOffsets结构体

frameworks/base/core/jni/android_util_Binder.cpp

//65
// Cached JNI handles into the Java android.os.Binder class,
// filled in once by int_register_android_os_Binder().
static struct bindernative_offsets_t
{
    // Class state.
    // Global ref to the Java Binder class
    jclass mClass; 
    // Method ID of Binder.execTransact()
    jmethodID mExecTransact; 
    // Object state.
    // Field ID of Binder.mObject (a long holding a native pointer)
    jfieldID mObject; 
} gBinderOffsets;

gBinderOffsets保存了Binder.java类本身以及其成员方法execTransact()和成员属性mObject,这为JNI层访问Java层提供了通道。另外,查询到Java层binder信息后将其缓存到gBinderOffsets中,就不再需要每次都去查找binder类信息,能大幅提高效率——每次查询都要花费较多的CPU时间,尤其是频繁访问时。用额外的结构体来保存这些信息,是典型的以空间换时间的做法。

gBinderMethods

frameworks/base/core/jni/android_util_Binder.cpp

//843
// JNI method table for android.os.Binder.
static const JNINativeMethod gBinderMethods[] = {
     /* name, signature, function pointer */
    { "getCallingPid", "()I", (void*)android_os_Binder_getCallingPid },
    { "getCallingUid", "()I", (void*)android_os_Binder_getCallingUid },
    { "clearCallingIdentity", "()J", (void*)android_os_Binder_clearCallingIdentity },
    { "restoreCallingIdentity", "(J)V", (void*)android_os_Binder_restoreCallingIdentity },
    { "setThreadStrictModePolicy", "(I)V", (void*)android_os_Binder_setThreadStrictModePolicy },
    { "getThreadStrictModePolicy", "()I", (void*)android_os_Binder_getThreadStrictModePolicy },
    { "flushPendingCommands", "()V", (void*)android_os_Binder_flushPendingCommands },
    { "init", "()V", (void*)android_os_Binder_init },
    { "destroy", "()V", (void*)android_os_Binder_destroy },
    { "blockUntilThreadAvailable", "()V", (void*)android_os_Binder_blockUntilThreadAvailable }
};

通过RegisterMethodsOrDie(),将为gBinderMethods数组中的方法建立了一一映射关系,从而为Java层访问JNI层提供了通道。

3.1.1.2 int_register_android_os_BinderInternal()

frameworks/base/core/jni/android_util_Binder.cpp

//935
static int int_register_android_os_BinderInternal(JNIEnv* env)
{
    // Look up the BinderInternal class and cache a global ref to it
    jclass clazz = FindClassOrDie(env, kBinderInternalPathName);
    gBinderInternalOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    // Cache the static forceBinderGc() method ID
    gBinderInternalOffsets.mForceGc = GetStaticMethodIDOrDie(env, clazz, "forceBinderGc", "()V");
    // Register the native methods of BinderInternal
    return RegisterMethodsOrDie(
        env, kBinderInternalPathName,
        gBinderInternalMethods, NELEM(gBinderInternalMethods));
}
frameworks/base/core/jni/android_util_Binder.cpp

//92
// JNI method table for com.android.internal.os.BinderInternal.
static const JNINativeMethod gBinderInternalMethods[] = {
    { "getContextObject", "()Landroid/os/IBinder;", (void*)android_os_BinderInternal_getContextObject },
    { "joinThreadPool", "()V", (void*)android_os_BinderInternal_joinThreadPool },
    { "disableBackgroundScheduling", "(Z)V", (void*)android_os_BinderInternal_disableBackgroundScheduling },
    { "handleGc", "()V", (void*)android_os_BinderInternal_handleGc }
};

3.1.1.3 int_register_android_os_BinderProxy()

frameworks/base/core/jni/android_util_Binder.cpp

//1254
static int int_register_android_os_BinderProxy(JNIEnv* env)
{
    // gErrorOffsets caches the java.lang.Error class
    jclass clazz = FindClassOrDie(env, "java/lang/Error");
    gErrorOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    // gBinderProxyOffsets caches the BinderProxy class info,
    // where kBinderProxyPathName = "android/os/BinderProxy"
    clazz = FindClassOrDie(env, kBinderProxyPathName);
    gBinderProxyOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    gBinderProxyOffsets.mConstructor = GetMethodIDOrDie(env, clazz, "<init>", "()V");
    gBinderProxyOffsets.mSendDeathNotice = GetStaticMethodIDOrDie(env, clazz, "sendDeathNotice", "(Landroid/os/IBinder$DeathRecipient;)V");
    gBinderProxyOffsets.mObject = GetFieldIDOrDie(env, clazz, "mObject", "J");
    gBinderProxyOffsets.mSelf = GetFieldIDOrDie(env, clazz, "mSelf", "Ljava/lang/ref/WeakReference;");
    gBinderProxyOffsets.mOrgue = GetFieldIDOrDie(env, clazz, "mOrgue", "J");
    // gClassOffsets caches the Class.getName() method
    clazz = FindClassOrDie(env, "java/lang/Class");
    gClassOffsets.mGetName = GetMethodIDOrDie(env, clazz, "getName", "()Ljava/lang/String;");
    return RegisterMethodsOrDie(
        env, kBinderProxyPathName,
        gBinderProxyMethods, NELEM(gBinderProxyMethods));
}
frameworks/base/core/jni/android_util_Binder.cpp

//1241
// JNI method table for android.os.BinderProxy.
static const JNINativeMethod gBinderProxyMethods[] = {
     /* name, signature, function pointer */
    {"pingBinder",          "()Z", (void*)android_os_BinderProxy_pingBinder},
    {"isBinderAlive",       "()Z", (void*)android_os_BinderProxy_isBinderAlive},
    {"getInterfaceDescriptor", "()Ljava/lang/String;", (void*)android_os_BinderProxy_getInterfaceDescriptor},
    {"transactNative",      "(ILandroid/os/Parcel;Landroid/os/Parcel;I)Z", (void*)android_os_BinderProxy_transact},
    {"linkToDeath",         "(Landroid/os/IBinder$DeathRecipient;I)V", (void*)android_os_BinderProxy_linkToDeath},
    {"unlinkToDeath",       "(Landroid/os/IBinder$DeathRecipient;I)Z", (void*)android_os_BinderProxy_unlinkToDeath},
    {"destroy",             "()V", (void*)android_os_BinderProxy_destroy},
};

通过这三步注册与映射,Binder Java层Native层就可以相互调用了

下面的台子已经搭建好了。Java层要做的就是:Server端把服务注册并添加进ServiceManager,Client端获取到服务后就可以进行通信了。

4. 注册并添加服务(addService)

注册服务在ServiceManager

frameworks/base/core/java/android/os/ServiceManager.java

//87
// Registers a service with the service manager under the given name.
public static void addService(String name, IBinder service) {
        try {
            // getIServiceManager() returns the ServiceManagerProxy singleton;
            // addService() performs the actual registration
            getIServiceManager().addService(name, service, false);
        } catch (RemoteException e) {
            Log.e(TAG, "error in addService", e);
        }
    }

4.1 getIServiceManager()

frameworks/base/core/java/android/os/ServiceManager.java

//33
private static IServiceManager getIServiceManager() {
         // Lazily create and cache the ServiceManagerProxy (singleton)
        if (sServiceManager != null) {
            return sServiceManager;
        }

         // Equivalent to: new ServiceManagerProxy(new BinderProxy())
        sServiceManager = ServiceManagerNative.asInterface(BinderInternal.getContextObject());
        return sServiceManager;
    }

4.1.1 BinderInternal.getContextObject()

frameworks/base/core/java/com/android/internal/os/BinderInternal.java

//88
public static final native IBinder getContextObject();

这是一个native方法,可见BinderInternal.getContextObject()最终会通过JNI由C/C++层来实现

frameworks/base/core/jni/android_util_Binder.cpp

//899
static jobject android_os_BinderInternal_getContextObject(JNIEnv* env, jobject clazz)
{
    // Open the binder driver (ProcessState is a singleton) and create
    // a BpBinder(handle) object for the context manager, then wrap it
    sp<IBinder> b = ProcessState::self()->getContextObject(NULL);
    return javaObjectForIBinder(env, b);
}

上面讲启动ServiceManager时说过,ProcessState::self()->getContextObject(NULL)是创建一个BpBinder

4.1.1.1 javaObjectForIBinder

frameworks/base/core/jni/android_util_Binder.cpp

//547
// Wraps a native IBinder in a Java object: a JavaBBinder maps back to its
// own Java Binder; a BpBinder gets a (possibly cached) Java BinderProxy.
jobject javaObjectForIBinder(JNIEnv* env, const sp<IBinder>& val)
{
    if (val == NULL) return NULL;
	// Returns false in this flow (val is a BpBinder, not our JavaBBinder)
    if (val->checkSubclass(&gBinderOffsets)) {

        jobject object = static_cast<JavaBBinder*>(val.get())->object();
        LOGDEATH("objectForBinder %p: it's our own %p!\n", val.get(), object);
        return object;
    }

    AutoMutex _l(mProxyLock);

    // Look for a BinderProxy already attached to this BpBinder; NULL on first call
    jobject object = (jobject)val->findObject(&gBinderProxyOffsets);
    if (object != NULL) {
        jobject res = jniGetReferent(env, object);
        if (res != NULL) {
            ALOGV("objectForBinder %p: found existing %p!\n", val.get(), res);
            return res;
        }
        LOGDEATH("Proxy object %p of IBinder %p no longer in working set!!!", object, val.get());
        android_atomic_dec(&gNumProxyRefs);
        val->detachObject(&gBinderProxyOffsets);
        env->DeleteGlobalRef(object);
    }
    // Create a new Java BinderProxy object
    object = env->NewObject(gBinderProxyOffsets.mClass, gBinderProxyOffsets.mConstructor);
    if (object != NULL) {
        LOGDEATH("objectForBinder %p: created new proxy %p !\n", val.get(), object);
       // Store the BpBinder pointer in BinderProxy.mObject
        env->SetLongField(object, gBinderProxyOffsets.mObject, (jlong)val.get());
        val->incStrong((void*)javaObjectForIBinder);

        jobject refObject = env->NewGlobalRef(
                env->GetObjectField(object, gBinderProxyOffsets.mSelf));
        // Attach the BinderProxy's info to the BpBinder's mObjects member
        val->attachObject(&gBinderProxyOffsets, refObject,
                jnienv_to_javavm(env), proxy_cleanup);

        // Also remember the death recipients registered on this proxy
        sp<DeathRecipientList> drl = new DeathRecipientList;
        drl->incStrong((void*)javaObjectForIBinder);
        // Store the death-notification list in BinderProxy.mOrgue
        env->SetLongField(object, gBinderProxyOffsets.mOrgue, reinterpret_cast<jlong>(drl.get()));

        // Note that a new object reference has been created.
        android_atomic_inc(&gNumProxyRefs);
        incRefsCreated(env);
    }

    return object;
}

大概流程为

  • 1.第二个入参val在有些时候指向BpBinder,有些时候指向BBinder

  • 2.至于是BpBinder还是BBinder是通过if (val->checkSubclass(&gBinderOffsets)) 这个函数来区分的,如果是BBinder,则为true,就会通过成员函数object(),返回一个Java对象,这个对象就是Java层的Binder对象。由于当前是BpBinder,所以**返回false**

  • 3.如果是BpBinder,会先判断是不是第一次,如果是第一次,下面的object为null;如果不是第一次,就会先查找是否已经存在需要使用的BinderProxy对象,如果找到就会返回引用

  • 4.如果没有找到可用的引用,就new一个BinderProxy对象

所以这个方法主要是根据BpBinder(C++) 生成BinderProxy(Java对象),工作是创建BinderProxy对象,并把BpBinder对象地址保存到BinderProxy.mObject成员变量。到此,可知 **BinderInternal.getContextObject()** 等价于 new BinderProxy()

4.1.2 ServiceManagerNative.asInterface

frameworks/base/core/java/android/os/ServiceManagerNative.java

//33
// Wraps an IBinder in an IServiceManager; for a remote BinderProxy this
// always ends up constructing a ServiceManagerProxy.
static public IServiceManager asInterface(IBinder obj)
    {
...
        // obj is a BinderProxy here, whose queryLocalInterface() returns null
        IServiceManager in =
            (IServiceManager)obj.queryLocalInterface(descriptor);
        if (in != null) {
            return in;
        }
        
        return new ServiceManagerProxy(obj);
    }

obj.queryLocalInterface(descriptor)方法,调用的是IBinder接口中声明的方法,如下

public interface IBinder {
...
    /**
     * Attempt to retrieve a local implementation of an interface
     * for this Binder object.  If null is returned, you will need
     * to instantiate a proxy class to marshall calls through
     * the transact() method.
     */
    // Declared on the IBinder interface; BinderProxy (remote case) leaves it
    // returning null, which is why asInterface() above builds a proxy.
    public IInterface queryLocalInterface(String descriptor);
...
}

通过注释可以看出,queryLocalInterface是查询本地的对象。什么是本地对象?这里的本地对象是指:在进行IPC调用时,如果Client端和Server端在同一个进程内,对象即为本地对象;如果在两个不同的进程里,则返回的是远端的代理类。所以在BBinder的子类BnInterface中,重载了这个方法,返回this;而BpInterface并没有重载这个方法。又因为queryLocalInterface默认返回的是null,所以obj.queryLocalInterface(descriptor)返回null,逻辑会向下执行 return new ServiceManagerProxy(obj);

4.1.2.1 ServiceManagerProxy

frameworks/base/core/java/android/os/ServiceManagerNative.java$ServiceManagerProxy.java

// 109 
class ServiceManagerProxy implements IServiceManager {
    // mRemote为 BinderProxy对象
    public ServiceManagerProxy(IBinder remote) {
        mRemote = remote;
    }

mRemote为BinderProxy对象,该BinderProxy对象对应BpBinder(0),其作为binder代理端,指向native层的Service Manager。

所以得出,ServiceManager.getIServiceManager最终等价于new ServiceManagerProxy(new BinderProxy())

4.2 addService()

frameworks/base/core/java/android/os/ServiceManagerNative.java$ServiceManagerProxy.java

//142
// Marshals the service into a Parcel and sends ADD_SERVICE_TRANSACTION
// through mRemote (a BinderProxy) to the service manager.
public void addService(String name, IBinder service, boolean allowIsolated)
            throws RemoteException {
        Parcel data = Parcel.obtain();
        Parcel reply = Parcel.obtain();
        // This constant is "android.os.IServiceManager"
        data.writeInterfaceToken(IServiceManager.descriptor);
        data.writeString(name);
        // Key step 1: flatten the Binder object into the parcel
        data.writeStrongBinder(service);
        data.writeInt(allowIsolated ? 1 : 0);
        // Key step 2: perform the transaction through the BinderProxy
        mRemote.transact(ADD_SERVICE_TRANSACTION, data, reply, 0);
        reply.recycle();
        data.recycle();
    }

4.2.1 Parcel.writeStrongBinder()

frameworks/base/core/java/android/os/Parcel.java 

//583
	/**
     * Write an object into the parcel at the current dataPosition(),
     * growing dataCapacity() if needed.
     */
    public final void writeStrongBinder(IBinder val) {
        // Delegates to the native Parcel implementation
        nativeWriteStrongBinder(mNativePtr, val);
    }
frameworks/base/core/java/android/os/Parcel.java 

//265
private static native void nativeWriteStrongBinder(long nativePtr, IBinder val);

这是一个native方法

frameworks/base/core/jni/android_os_Parcel.cpp

//298
static void android_os_Parcel_writeStrongBinder(JNIEnv* env, jclass clazz, jlong nativePtr, jobject object)
{
    // Convert the Java-level Parcel to the native Parcel it wraps
    Parcel* parcel = reinterpret_cast<Parcel*>(nativePtr);
    if (parcel != NULL) {
        // Convert the Java binder to its native counterpart, then flatten it
        const status_t err = parcel->writeStrongBinder(ibinderForJavaObject(env, object));
        if (err != NO_ERROR) {
            signalExceptionForError(env, clazz, err);
        }
    }
}

4.2.1.1 ibinderForJavaObject

frameworks/base/core/jni/android_util_Binder.cpp

//603
// Maps a Java object back to its native IBinder: a Java Binder yields its
// JavaBBinder (a BBinder subclass); a Java BinderProxy yields its BpBinder.
sp<IBinder> ibinderForJavaObject(JNIEnv* env, jobject obj)
{
    if (obj == NULL) return NULL;
    // Is this a Java-level Binder object?
    // gBinderOffsets.mClass points to the Java Binder class
    if (env->IsInstanceOf(obj, gBinderOffsets.mClass)) {
        JavaBBinderHolder* jbh = (JavaBBinderHolder*)
            env->GetLongField(obj, gBinderOffsets.mObject);
        // get() returns a JavaBBinder, which extends BBinder
        return jbh != NULL ? jbh->get(env, obj) : NULL;
    }
    // Is this a Java-level BinderProxy object?
    // gBinderProxyOffsets.mClass points to the Java BinderProxy class
	if (env->IsInstanceOf(obj, gBinderProxyOffsets.mClass)) {
    	// Return the BpBinder whose address is stored in mObject
        return (IBinder*)
            env->GetLongField(obj, gBinderProxyOffsets.mObject);
    }

    ALOGW("ibinderForJavaObject: %p is not a Binder object", obj);
    return NULL;
}

根据Binder(Java)生成JavaBBinderHolder(C++)对象,主要工作是创建JavaBBinderHolder对象,并把JavaBBinder对象保存在到Binder.mObject成员变量。

这个函数,本质就是根据传进来的Java对象找到对应的C++对象,这里的obj可能会指向两种对象:Binder对象和BinderProxy对象。

如果传进来的是Binder对象,则会把gBinderOffsets.mObject转化为JavaBBinderHolder,并从中获得一个JavaBBinder对象(JavaBBinder继承自BBinder)。

如果是BinderProxy对象,会返回一个BpBinder,这个BpBinder的地址值保存在gBinderProxyOffsets.mObject

4.2.1.1.1 JavaBBinderHolder.get()

frameworks/base/core/jni/android_util_Binder.cpp$JavaBBinderHolder.cpp

//317
// Returns the cached JavaBBinder, creating it on first use (mBinder is a
// weak pointer, so the object may also have been collected since last time).
sp<JavaBBinder> get(JNIEnv* env, jobject obj)
    {
        AutoMutex _l(mLock);
        // Promote the weak pointer; returns NULL on first use (or after GC)
        sp<JavaBBinder> b = mBinder.promote();
        if (b == NULL) {
            // Create a new JavaBBinder and cache it
            b = new JavaBBinder(env, obj);
            mBinder = b;
        }
        return b;
    }

JavaBBinderHolder有一个成员变量mBinder,保存当前创建的JavaBBinder对象,这是一个wp类型的,可能会被垃圾回收器给回收的,所以每次使用前都需要先判断是否存在

4.2.1.2 (Parcel.cpp)parcel->writeStrongBinder

frameworks/native/libs/binder/Parcel.cpp

// 872
// Flattens the binder into this parcel as a flat_binder_object.
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}

4.2.1.2.1 flatten_binder

frameworks/native/libs/binder/Parcel.cpp

//205
// Serializes an IBinder into a flat_binder_object: local binders are written
// by pointer (BINDER_TYPE_BINDER), remote proxies by handle (BINDER_TYPE_HANDLE).
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != NULL) {
        // In this flow the current process owns the Binder, so local is non-NULL
        IBinder *local = binder->localBinder();
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == NULL) {
                ALOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            // Flatten the remote Binder into a flat_binder_object by handle
            obj.type = BINDER_TYPE_HANDLE;
            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            // Local Binder: record its weak-ref pointer and its address
            obj.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    } else {
        // NULL binder: write an empty BINDER_TYPE_BINDER object
        obj.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
    }

    return finish_flatten_binder(binder, obj, out);
}

4.2.1.2.1.1 finish_flatten_binder

frameworks/native/libs/binder/Parcel.cpp

//199
inline static status_t finish_flatten_binder(
    const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
{
    // Write the flat_binder_object into the output parcel
    return out->writeObject(flat, false);
}

Parcel.writeStrongBinder()的流程就看完了,接下来看看mRemote.transact(ADD_SERVICE_TRANSACTION, data, reply, 0)

mRemote是BinderProxy对象,所以我们先来看下BinderProxy.transact。BinderProxy类是Binder类的内部类。

frameworks/base/core/java/android/os/IBinder.java  

//223
    /**
     * Perform a generic operation with the object.
     * 
     * @param code The action to perform.  This should
     * be a number between {@link #FIRST_CALL_TRANSACTION} and
     * {@link #LAST_CALL_TRANSACTION}.
     * @param data Marshalled data to send to the target.  Must not be null.
     * If you are not sending any data, you must create an empty Parcel
     * that is given here.
     * @param reply Marshalled data to be received from the target.  May be
     * null if you are not interested in the return value.
     * @param flags Additional operation flags.  Either 0 for a normal
     * RPC, or {@link #FLAG_ONEWAY} for a one-way RPC.
     */
    // BinderProxy's implementation (shown below) delegates to transactNative().
    public boolean transact(int code, Parcel data, Parcel reply, int flags)
        throws RemoteException;

注释写的非常清楚:

用对象执行一个操作

参数code为操作码,是介于FIRST_CALL_TRANSACTION和LAST_CALL_TRANSACTION之间

参数data是要发往目标的数据,一定不能null,如果你没有数据要发送,你也要创建一个Parcel,哪怕是空的。

参数reply是从目标发过来的数据,如果你对这个数据没兴趣,这个数据是可以为null的。

参数flags一个操作标志位,要么是0代表普通的RPC,要么是FLAG_ONEWAY代表单一方向的RPC即不管返回值

4.2.2 BinderProxy.transact

frameworks/base/core/java/android/os/Binder.java$BinderProxy.java

//501
public boolean transact(int code, Parcel data, Parcel reply, int flags) throws RemoteException {
        // Sanity-check the parcel size, then delegate to the native transactNative()
        Binder.checkParcel(this, code, data, "Unreasonably large binder buffer");
        return transactNative(code, data, reply, flags);
    }

4.2.2.1 android_os_BinderProxy_transact

frameworks/base/core/jni/android_util_Binder.cpp

// 1083 
static jboolean android_os_BinderProxy_transact(JNIEnv* env, jobject obj,
        jint code, jobject dataObj, jobject replyObj, jint flags) // throws 
RemoteException
...
// Convert the Java data Parcel to its native Parcel
Parcel* data = parcelForJavaObject(env, dataObj);
// Convert the Java reply Parcel to its native Parcel
Parcel* reply = parcelForJavaObject(env, replyObj);
// Fetch the BpBinder pointer stored in BinderProxy.mObject
IBinder* target = (IBinder*)
        env->GetLongField(obj, gBinderProxyOffsets.mObject);
// This is BpBinder::transact(): goes through the native layer into the binder driver
status_t err = target->transact(code, *data, reply, flags);
...

这个方法就是Java层BinderProxy.transact()最终交由Native层的BpBinder::transact()完成

4.2.2.1.1 BpBinder::transact

frameworks/native/libs/binder/BpBinder.cpp

// 159 
// Forwards the transaction to the per-thread IPCThreadState using this
// proxy's handle; marks the proxy dead if the target has gone away.
status_t BpBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags){
    
	if (mAlive) {
        // Hand off to IPCThreadState with this proxy's handle (mHandle)
        status_t status = IPCThreadState::self()->transact(
            mHandle, code, data, reply, flags);
        // DEAD_OBJECT means the remote side died; stop using this proxy
        if (status == DEAD_OBJECT) mAlive = 0;
        return status;
    }

    return DEAD_OBJECT;

4.2.2.1.1.1 IPCThreadState::transact

frameworks/native/libs/binder/IPCThreadState.cpp

//548
status_t IPCThreadState::transact(int32_t handle,
                                  uint32_t code, const Parcel& data,
                                  Parcel* reply, uint32_t flags)
{
    // Validate the outgoing data
    status_t err = data.errorCheck();

    // flags |= TF_ACCEPT_FDS;
    // TF_ACCEPT_FDS = 0x10: the reply may contain file descriptors
    // TF_ONE_WAY: asynchronous call, no need to wait
    // TF_ROOT_OBJECT: the payload is the root object
    // TF_STATUS_CODE: the payload is a 32-bit status value
    flags |= TF_ACCEPT_FDS;

...
    
    if (err == NO_ERROR) {
        LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
            (flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
        // Package the data into mOut; it is only actually handed to the
        // binder driver later, inside talkWithDriver()
        err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
    }
...
     // Synchronous call, so this branch is taken
    if ((flags & TF_ONE_WAY) == 0) {
        #if 0
        if (code == 4) { // relayout
            ALOGI(">>>>>> CALLING transaction 4");
        } else {
            ALOGI(">>>>>> CALLING transaction %d", code);
        }
        #endif
        if (reply) { // reply is non-NULL here
            err = waitForResponse(reply);
        } else {
            Parcel fakeReply;
            // Wait for the response event
            err = waitForResponse(&fakeReply);
        }
...
    return err;
}

4.2.2.1.1.1.1 writeTransactionData

frameworks/native/libs/binder/IPCThreadState.cpp

//904
status_t IPCThreadState::writeTransactionData(int32_t cmd, uint32_t binderFlags,
    int32_t handle, uint32_t code, const Parcel& data, status_t* statusBuffer)
{
...

    // Write the command into mOut (BC_TRANSACTION in this flow)
    mOut.writeInt32(cmd);
    // Followed by the binder_transaction_data payload
    mOut.write(&tr, sizeof(tr));
    
    return NO_ERROR;
}

4.2.2.1.1.1.2 IPCThreadState::waitForResponse

frameworks/native/libs/binder/IPCThreadState.cpp

//712
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
    uint32_t cmd;
    int32_t err;
    // Loop: talk to the driver until a reply (or an error) arrives
    while (1) {
        if ((err=talkWithDriver()) < NO_ERROR) break;
        err = mIn.errorCheck();
        if (err < NO_ERROR) break;
        if (mIn.dataAvail() == 0) continue;
        
        cmd = (uint32_t)mIn.readInt32();
        
...

    return err;
}

4.2.2.1.1.1.2.1 talkWithDriver

frameworks/native/libs/binder/IPCThreadState.cpp

//803
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
    if (mProcess->mDriverFD <= 0) {
        return -EBADF;
    }
    
    binder_write_read bwr;
    
    // Is the read buffer drained? (it is, at this point in the flow)
    const bool needRead = mIn.dataPosition() >= mIn.dataSize();
    
    // mOut must not be written while a read is still pending
    const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
    
    bwr.write_size = outAvail;
    // Fill in the size and buffer of the data to write
    bwr.write_buffer = (uintptr_t)mOut.data();

    // This is what we'll read.
    if (doReceive && needRead) {
        bwr.read_size = mIn.dataCapacity();
        bwr.read_buffer = (uintptr_t)mIn.data();
    } else {// needRead is false here, so take the else branch
        bwr.read_size = 0;
        bwr.read_buffer = 0;
    }

...
    // Return immediately if there is nothing to do.
    if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;

    bwr.write_consumed = 0;
    bwr.read_consumed = 0;
    status_t err;
    // The loop condition never holds here, so the body runs exactly once
    do {
        IF_LOG_COMMANDS() {
            alog << "About to read/write, write size = " << mOut.dataSize() << endl;
        }
#if defined(HAVE_ANDROID_OS)
        // Send the BC_TRANSACTION command to the driver via ioctl
        if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
            err = NO_ERROR;
        else
            err = -errno;
...
    return err;
}

4.2.2.1.1.1.2.1.1 binder_ioctl_write_read

kernel/drivers/staging/android/binder.c

// 3136
static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
...

// Write path: consume the user-space command buffer
ret = binder_thread_write(proc, thread,
              bwr.write_buffer,
              bwr.write_size,
              &bwr.write_consumed);
...

4.2.2.1.1.1.2.1.1.1 binder_thread_write

kernel/drivers/staging/android/binder.c

// 2250
static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
...

case BC_TRANSACTION:
// cmd == BC_TRANSACTION here, so the fourth argument (reply) is false
binder_transaction(proc, thread, &tr,cmd == BC_REPLY, 0); 

...

4.2.2.1.1.1.2.1.1.1.1 binder_transaction

kernel/drivers/staging/android/binder.c

// 1829
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{

...

	//1861 reply is false here (cmd == BC_TRANSACTION)
	if (reply) {
		
        ...
        
	} else {
    	//1899 Not taken: this if is only entered when handle != 0
		if (tr->target.handle) {
        
			...
            
		} else {
			//1914 The target is service_manager, so the global
			//binder_context_mgr_node can be used directly as target_node
			target_node = context->binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				goto err_no_context_mgr_node;
			}
		}
		e->to_node = target_node->debug_id;
		//1919 target_proc is the service_manager process
		target_proc = target_node->proc;
        ...
}
...

		// 1954 Locate the service_manager process's todo queue
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;

...

	// 1960 Allocate a binder_transaction (variable t) describing this
	// transaction; it is eventually appended to target_thread->todo so the
	// target can pull the work off the queue when it wakes up.
	t = kzalloc(sizeof(*t), GFP_KERNEL);

...

	// 1967 Allocate a binder_work (variable tcomplete) marking that the
	// calling thread has an unfinished transaction; it ends up on the
	// caller's own todo queue.
	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
    
...

	// 1996 Fill in the transaction struct (variable t)
	if (!reply && !(tr->flags & TF_ONE_WAY))// non-oneway: record the calling thread in t->from
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	// The target of this transaction is the service_manager process
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	// code = ADD_SERVICE_TRANSACTION in this flow
	t->code = tr->code;
	// flags = 0 in this flow
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);
	// 2009 Allocate a buffer from the service_manager process for this
	// transaction (memory comes from the area set up by binder_mmap)
	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
        
	... 
    
	// 2028 Copy the user-space binder_transaction_data's ptr.buffer and
	// ptr.offsets into the kernel — this is the famous "one copy"
	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
               ...
               
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
               ...
}
...

	// 2059
	for (; offp < off_end; offp++) {
    
...

		// 2075
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			// Create a binder_ref: service_manager's reference to this binder
			ret = binder_translate_binder(fp, t, thread);
            
...

	// 2187
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		//2191 Record this transaction so it can be found later
		//(service_manager uses it to know who called, in order to reply)
		thread->transaction_stack = t;
        
...

	// 2201 Mark t as BINDER_WORK_TRANSACTION
	t->work.type = BINDER_WORK_TRANSACTION;
	// Append t to the target's work queue
	list_add_tail(&t->work.entry, target_list);
	// Mark the binder_work as BINDER_WORK_TRANSACTION_COMPLETE
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	// The current thread now has one pending operation
	list_add_tail(&tcomplete->entry, &thread->todo);
	if (target_wait)
		// Wake up the target, i.e. service_manager
		wake_up_interruptible(target_wait);
...

}

4.2.2.1.1.1.2.1.1.1.1.1 binder_translate_binder

kernel/drivers/staging/android/binder.c

// 1564
static int binder_translate_binder(struct flat_binder_object *fp,
                   struct binder_transaction *t,
                   struct binder_thread *thread)
// Create a binder_ref (the target process's reference to this node)
ref = binder_get_ref_for_node(target_proc, node);

...

// Change the type to BINDER_TYPE_HANDLE
if (fp->hdr.type == BINDER_TYPE_BINDER)
    fp->hdr.type = BINDER_TYPE_HANDLE;
    
    ...
    

talkWithDriver进入Binder驱动后,在binder_transaction中做了很多事情

  • 1.判断target.handle是否等于0,由于目标是service_manager进程,所以为0

  • 2.获取目标对象的 target_node

  • 3.获取目标对象的 target_proc

  • 4.获取目标对象的 todo和wait队列

  • 5.生成一个binder_transaction变量t和一个binder_work变量tcomplete

  • 6.分别拷贝用户空间的binder_transaction_data中的ptr.buffer和ptr.offsets到内核空间,这正是所谓的一次拷贝

  • 7.binder_translate_binder方法,创建binder_ref,即service_manager对该binder的引用对象

  • 8.thread->transaction_stack = t记录本次 transaction,以备后期查询 (service_manager通过这个知道是谁调用的,从而返回数据)

  • 9.设置t的类型为 BINDER_WORK_TRANSACTION,发送给service_manager,唤醒

  • 10.设置tcomplete的类型为 BINDER_WORK_TRANSACTION_COMPLETE,发送给client端,挂起

  • 11.wake_up_interruptible,唤醒service_manager

service_manager被唤醒以后,会读取数据

4.2.2.1.1.1.2.2 binder_ioctl_write_read

kernel/drivers/staging/android/binder.c

// 3136
static int binder_ioctl_write_read(struct file *filp,
                unsigned int cmd, unsigned long arg,
                struct binder_thread *thread)
...

// Read path: copy pending work back into the user-space read buffer
ret = binder_thread_read(proc, thread, bwr.read_buffer,
             bwr.read_size,
             &bwr.read_consumed,
             filp->f_flags & O_NONBLOCK);

...
             
             

4.2.2.1.1.1.2.2.1 binder_thread_read

kernel/drivers/staging/android/binder.c

// 2652
static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
// 2664 If consumed == 0, write a BR_NOOP first
if (*consumed == 0) {
    if (put_user(BR_NOOP, (uint32_t __user *)ptr))
    
...
    
// 2739 A binder_work was queued on thread->todo earlier, so w is non-NULL
// and its type is BINDER_WORK_TRANSACTION_COMPLETE
if (!list_empty(&thread->todo)) {
    w = list_first_entry(&thread->todo, struct binder_work,
                         entry);
                         
...
                         
// 2760 Write the BR_TRANSACTION_COMPLETE command back to user space
case BINDER_WORK_TRANSACTION_COMPLETE: {
    cmd = BR_TRANSACTION_COMPLETE;
    if (put_user(cmd, (uint32_t __user *)ptr))
    
...
    
    

talkWithDriver结束,回到waitForResponse的循环中

4.2.2.1.1.1.2 IPCThreadState::waitForResponse

frameworks/native/libs/binder/IPCThreadState.cpp

// 712
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
	// 786 Handle the BR_NOOP command — it does nothing
    default:
    err = executeCommand(cmd);
	// 718 Back around the while loop: call talkWithDriver() again
	if ((err=talkWithDriver()) < NO_ERROR) break;
	// 812 mIn still holds data, so needRead is false; that makes both
	// bwr.write_size and bwr.read_size 0 and talkWithDriver returns at once
	const bool needRead = mIn.dataPosition() >= mIn.dataSize();
	// 731 Handle the BR_TRANSACTION_COMPLETE command
 case BR_TRANSACTION_COMPLETE:
	if (!reply && !acquireResult) goto finish; // synchronous call: if not taken, keep looping
	// 718 talkWithDriver() again: now bwr.write_size == 0 while bwr.read_size
	// is still > 0, so the driver goes straight into binder_thread_read
	if ((err=talkWithDriver()) < NO_ERROR) break;

再次进入binder_thread_read,这次Client端进入挂起

4.2.2.1.1.1.2.2 binder_thread_read(Client端)

kernel/drivers/staging/android/binder.c

// 2652
static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)

...

//  放入 BR_NOOP命令
if (*consumed == 0) {
        if (put_user(BR_NOOP, (uint32_t __user *)ptr))
        
...

// 2671 此时 wait_for_proc_work为false
wait_for_proc_work = thread->transaction_stack == NULL &&
            list_empty(&thread->todo);
            
...

if (non_block) { // 是阻塞模式的,所以 if不会命中
} else {// 进入等待,直到 service_manager 来唤醒
    ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
    }

...

这次轮到service_manager进入binder_thread_read处理消息

4.2.2.1.1.1.2.2.1 binder_thread_read(service_manager)

kernel/drivers/staging/android/binder.c

// 2652
static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)
                  
...
                  
// 2757 主要是把用户的请求复制到 service_manager 中并对各种队列进行调整
case BINDER_WORK_TRANSACTION: {
    t = container_of(w, struct binder_transaction, work);
    
...

// 2898 设置命令为 BR_TRANSACTION
cmd = BR_TRANSACTION;

...

4.2.2.1.1.1.2.2.1.1 binder_loop

frameworks/native/cmds/servicemanager/binder.c

// 372
void binder_loop(struct binder_state *bs, binder_handler func)

// 397 对 getService请求进行解析
res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);

4.2.2.1.1.1.2.2.1.2 binder_parse

frameworks/native/cmds/servicemanager/binder.c

// 204
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
                 
...

case BR_TRANSACTION: {
    if (func) {
    
    ...
    
        // 为 reply初始化
        bio_init(&reply, rdata, sizeof(rdata), 4);
        
        ...
        
        res = func(bs, txn, &msg, &reply); // 由 svcmgr_handler 处理请求
        binder_send_reply(bs, &reply, txn->data.ptr.buffer, res); // 将 reply发给 binder驱动
    }
    
...

reply初始化

4.2.2.1.1.1.2.2.1.2.1 service_manager

frameworks/native/cmds/servicemanager/service_manager.c

// 251
int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)

...

case SVC_MGR_ADD_SERVICE:

...

    // 注册指定服务
    if (do_add_service(bs, s, len, handle, txn->sender_euid,
        allow_isolated, txn->sender_pid))
        

svcmgr_handler作用是获取或添加Service

4.2.2.1.1.1.2.2.1.2.1.1 do_add_service

frameworks/native/cmds/servicemanager/service_manager.c

// 201
int do_add_service(struct binder_state *bs,
                   const uint16_t *s, size_t len,
                   uint32_t handle, uid_t uid, int allow_isolated,
                   pid_t spid)

...

if (!svc_can_register(s, len, spid, uid)) {

...

si = find_svc(s, len);
if (si) {
    if (si->handle) {
        svcinfo_death(bs, si); // 服务已注册时,释放相应的服务
    }
    si->handle = handle; // 重新放入新的
} else {
    si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
    if (!si) { // 内存不足,无法分配足够内存
        return -1;
    }
    si->handle = handle;
    si->len = len;
    memcpy(si->name, s, (len + 1) * sizeof(uint16_t));// 内存拷贝服务信息
    si->name[len] = '\0';
    si->death.func = (void*) svcinfo_death;
    si->death.ptr = si;
    si->allow_isolated = allow_isolated;
    si->next = svclist; // svclist保存所有已注册的服务
    svclist = si;
}
// 以 BC_ACQUIRE命令,handle为目标的信息,通过 ioctl发送给 binder驱动,binder_ref强引用加 1操作
binder_acquire(bs, handle); 
// 以 BC_REQUEST_DEATH_NOTIFICATION命令的信息,通过 ioctl发送给 binder驱动,主要用于清理内存等收尾工作 
binder_link_to_death(bs, handle, &si->death);

svclist是用来保存Service的

4.2.2.1.1.1.2.2.1.2.2 binder_send_reply

frameworks/native/cmds/servicemanager/binder.c

// 170
void binder_send_reply(struct binder_state *bs,
                       struct binder_io *reply,
                       binder_uintptr_t buffer_to_free,
                       int status)

...

data.cmd_free = BC_FREE_BUFFER; // free buffer命令
data.buffer = buffer_to_free;
data.cmd_reply = BC_REPLY; // reply命令
data.txn.target.ptr = 0;
data.txn.cookie = 0;
data.txn.code = 0;
if (status) { // status == 0
} else {
    data.txn.flags = 0;
    data.txn.data_size = reply->data - reply->data0;
    data.txn.offsets_size = ((char*) reply->offs) - ((char*) reply->offs0);
    data.txn.data.ptr.buffer = (uintptr_t)reply->data0;
    data.txn.data.ptr.offsets = (uintptr_t)reply->offs0;
}
binder_write(bs, &data, sizeof(data)); // 向 Binder驱动通信

...

设置BC_REPLY命令,然后进入binder_thread_write

4.2.2.1.1.1.2.2.1.2.2.1 binder_thread_write

kernel/drivers/staging/android/binder.c

// 2250 
static int binder_thread_write(struct binder_proc *proc,
            struct binder_thread *thread,
            binder_uintptr_t binder_buffer, size_t size,
            binder_size_t *consumed)
binder_transaction(proc, thread, &tr,
                       cmd == BC_REPLY, 0);

4.2.2.1.1.1.2.2.1.2.2.1.1 binder_transaction

kernel/drivers/staging/android/binder.c

//1829
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{

...
	//2201
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);
    if (target_wait)
    	wake_up_interruptible(target_wait);// 唤醒目标,即 client端

    ...
    
}

由于cmd == BC_REPLY,所以这次设置t类型为BINDER_WORK_TRANSACTION是发送给client端将其唤醒,设置tcomplete类型为BINDER_WORK_TRANSACTION_COMPLETE则是发送给service_manager,使其处理完后挂起

Client端被唤醒后,进入binder_thread_read

binder_thread_read

kernel/drivers/staging/android/binder.c

// 2652
static int binder_thread_read(struct binder_proc *proc,
                  struct binder_thread *thread,
                  binder_uintptr_t binder_buffer, size_t size,
                  binder_size_t *consumed, int non_block)

...
			//2900
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;

...

最终在处理 BINDER_WORK_TRANSACTION 时设置 cmd = BR_REPLY,IPCThreadState::waitForResponse继续循环,进入到cmd = BR_REPLY的逻辑中

4.2.2.1.1.1.2 IPCThreadState::waitForResponse

frameworks/native/libs/binder/IPCThreadState.cpp

// 712 
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{

...

        case BR_REPLY:
            {
                binder_transaction_data tr;
                err = mIn.read(&tr, sizeof(tr));
                ALOG_ASSERT(err == NO_ERROR, "Not enough command data for brREPLY");
                if (err != NO_ERROR) goto finish;

                if (reply) {
                    if ((tr.flags & TF_STATUS_CODE) == 0) {
                        reply->ipcSetDataReference(
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t),
                            freeBuffer, this);
                    } else {
                        err = *reinterpret_cast<const status_t*>(tr.data.ptr.buffer);
                        freeBuffer(NULL,
                            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                            tr.data_size,
                            reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                            tr.offsets_size/sizeof(binder_size_t), this);
                    }
                } else {
                    freeBuffer(NULL,
                        reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
                        tr.data_size,
                        reinterpret_cast<const binder_size_t*>(tr.data.ptr.offsets),
                        tr.offsets_size/sizeof(binder_size_t), this);
                    continue;
                }
            }
            goto finish;

...

}

可以看出waitForResponse经历了一层一层的循环,进行处理不同cmd命令时的逻辑,可以根据时序图来看它们执行的顺序 其中BC命令代表发往Binder驱动的(Binder Command),BR命令代表Binder驱动转发给目标进程的(Binder Return)

ServiceManager执行IPCThreadState::waitForResponse在循环的时候最终会执行到default

frameworks/native/libs/binder/IPCThreadState.cpp

// 712 
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{

...

          default:
            err = executeCommand(cmd);
            if (err != NO_ERROR) goto finish;
            break;

...

}

4.2.2.1.1.1.2.3 IPCThreadState::executeCommand

frameworks/native/libs/binder/IPCThreadState.cpp

//947
status_t IPCThreadState::executeCommand(int32_t cmd)
{

...

    case BR_TRANSACTION:
        {
            ...
            
            if (tr.target.ptr) {
                // We only have a weak reference on the target object, so we must first try to
                // safely acquire a strong reference before doing anything else with it.
                if (reinterpret_cast<RefBase::weakref_type*>(
                        tr.target.ptr)->attemptIncStrong(this)) {
                    //1090 调用BBinder.transact
                    error = reinterpret_cast<BBinder*>(tr.cookie)->transact(tr.code, buffer,
                            &reply, tr.flags);
                    reinterpret_cast<BBinder*>(tr.cookie)->decStrong(this);
                } else {
                    error = UNKNOWN_TRANSACTION;
                }

            } else {
                error = the_context_object->transact(tr.code, buffer, &reply, tr.flags);
            }

           ...
    
    return result;
}

进入native层BBinder::transact

4.2.2.1.1.1.2.3.1 BBinder::transact

frameworks/native/libs/binder/Binder.cpp

//97
// Server-side entry point for an incoming binder transaction: answers
// PING_TRANSACTION inline, and forwards every other code to the virtual
// onTransact() hook (overridden by JavaBBinder to call up into Java).
status_t BBinder::transact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    // Rewind the request Parcel so the handler reads it from the beginning.
    data.setDataPosition(0);

    status_t err = NO_ERROR;
    switch (code) {
        case PING_TRANSACTION:
            reply->writeInt32(pingBinder());
            break;
        default:
            //108 invoke the onTransact() method
            err = onTransact(code, data, reply, flags);
            break;
    }

    if (reply != NULL) {
        // Rewind the reply Parcel so the caller reads it from the beginning.
        reply->setDataPosition(0);
    }

    return err;
}

进入native层JavaBBinder的onTransact方法

4.2.2.1.1.1.2.3.1.1 onTransact

frameworks/base/core/jni/android_util_Binder.cpp$JavaBBinder

	//247
    virtual status_t onTransact(
        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags = 0)
    {
        JNIEnv* env = javavm_to_jnienv(mVM);

        ALOGV("onTransact() on %p calling object %p in env %p vm %p\n", this, mObject, env, mVM);

        IPCThreadState* thread_state = IPCThreadState::self();
        const int32_t strict_policy_before = thread_state->getStrictModePolicy();
		//调用CallBooleanMethod
        jboolean res = env->CallBooleanMethod(mObject, gBinderOffsets.mExecTransact,
            code, reinterpret_cast<jlong>(&data), reinterpret_cast<jlong>(reply), flags);

        
        ...
        
        return res != JNI_FALSE ? NO_ERROR : UNKNOWN_TRANSACTION;
    }

CallBooleanMethod方法中会调用gBinderOffsets.mExecTransact

int_register_android_os_Binder

frameworks/base/core/jni/android_util_Binder.cpp

//860
// Caches JNI handles for the Java android.os.Binder class and registers
// its native methods. kBinderPathName = "android/os/Binder".
static int int_register_android_os_Binder(JNIEnv* env)
{
    // Look up the android/os/Binder class by path.
    jclass clazz = FindClassOrDie(env, kBinderPathName);

    // Hold a global reference so the class stays valid across JNI calls.
    gBinderOffsets.mClass = MakeGlobalRefOrDie(env, clazz);
    // Method id of Binder.execTransact(int, long, long, int) -> boolean;
    // native code later invokes it via CallBooleanMethod for each transaction.
    gBinderOffsets.mExecTransact = GetMethodIDOrDie(env, clazz, "execTransact", "(IJJI)Z");
    // Field id of the long field Binder.mObject (presumably stores the
    // native peer pointer -- confirm against Binder.java).
    gBinderOffsets.mObject = GetFieldIDOrDie(env, clazz, "mObject", "J");

    // Register the native method table gBinderMethods on android/os/Binder.
    return RegisterMethodsOrDie(
        env, kBinderPathName,
        gBinderMethods, NELEM(gBinderMethods));
}

native层gBinderOffsets.mExecTransact对应的是Java层execTransact方法

4.2.2.1.1.1.2.3.1.2 execTransact

frameworks/base/core/java/android/os/Binder.java

	//442
    private boolean execTransact(int code, long dataObj, long replyObj,
            int flags) {
        ...
        
        try {
        	//453 调用ServiceManagerNative.onTransact方法
            res = onTransact(code, data, reply, flags);
        } catch (RemoteException e) {
           ...
        } 
        ...

        return res;
    }

最终回到我们ServiceManagerNative的onTransact方法

4.2.2.1.1.1.2.3.1.3 onTransact

frameworks/base/core/java/android/os/ServiceManagerNative.java

	//52
    /**
     * Server-side dispatch for ServiceManager requests: decodes the
     * transaction code, unmarshals arguments from {@code data}, invokes the
     * matching local method, and writes the result into {@code reply}.
     *
     * @return true if the code was recognized and handled, false otherwise
     */
    public boolean onTransact(int code, Parcel data, Parcel reply, int flags)
    {
        try {
            switch (code) {
            // getService(name): look up a service and marshal its IBinder back.
            case IServiceManager.GET_SERVICE_TRANSACTION: {
                data.enforceInterface(IServiceManager.descriptor);
                String name = data.readString();
                IBinder service = getService(name);
                reply.writeStrongBinder(service);
                return true;
            }
    
            // checkService(name): like getService, via checkService().
            case IServiceManager.CHECK_SERVICE_TRANSACTION: {
                data.enforceInterface(IServiceManager.descriptor);
                String name = data.readString();
                IBinder service = checkService(name);
                reply.writeStrongBinder(service);
                return true;
            }
    
            // addService(name, service, allowIsolated): register a service.
            case IServiceManager.ADD_SERVICE_TRANSACTION: {
                data.enforceInterface(IServiceManager.descriptor);
                String name = data.readString();
                IBinder service = data.readStrongBinder();
                boolean allowIsolated = data.readInt() != 0;
                addService(name, service, allowIsolated);
                return true;
            }
    
            // listServices(): return the names of all registered services.
            case IServiceManager.LIST_SERVICES_TRANSACTION: {
                data.enforceInterface(IServiceManager.descriptor);
                String[] list = listServices();
                reply.writeStringArray(list);
                return true;
            }
            
            // setPermissionController(controller): install the permission
            // controller binder.
            case IServiceManager.SET_PERMISSION_CONTROLLER_TRANSACTION: {
                data.enforceInterface(IServiceManager.descriptor);
                IPermissionController controller
                        = IPermissionController.Stub.asInterface(
                                data.readStrongBinder());
                setPermissionController(controller);
                return true;
            }
            }
        } catch (RemoteException e) {
            // NOTE(review): exception is swallowed and false is returned --
            // this mirrors the upstream AOSP source.
        }
        
        return false;
    }

这样,添加注册服务的整个过程就结束了

在整个过程中,服务所在的进程(system_server)是客户端,ServiceManager是服务端

5. 获取服务(getService)

过程与注册添加服务一致,以获取AMS为例,Client进程使用AMS前,需要先向ServiceManager中获取AMS的代理类AMP。在这个过程中,AMP所在进程(app process)是客户端,ServiceManager是服务端

6. 使用服务

以使用AMS为例,app进程根据得到的代理类AMP,便可以直接与AMS所在的进程进行交互。在这个过程中,AMP所在进程(app process)是客户端,AMS所在进程(system_server)是服务端

来一幅完整的Binder IPC通讯流程图

最后

这篇binder原理系列的文章就先写到这里,作者在写作这篇文章的时候参考了许多博客、书籍、源码,在此感谢各位大佬无私的分享。作者能力有限,对于Binder的理解还很粗浅,若各位读者发现文章中的错误、不足、不同意见,欢迎交流探讨。