A Study of Android's Message Handling Mechanism

Original article: blog.csdn.net

Overview of the Message Mechanism

When a thread in an Android application starts, it can first create a message queue internally and then enter an infinite loop that keeps checking whether that queue has new messages to process. If there is a new message, the thread takes it out of the queue and handles it; otherwise the thread goes to sleep and waits until a new message arrives. In this way the execution of an Android application is driven by messages.
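
The pattern below is a minimal sketch of such a message-driven worker thread, similar to the example in the Looper class documentation; the class name LooperThread and the log tag are illustrative only.

import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.util.Log;

class LooperThread extends Thread {
    public Handler mHandler;

    @Override
    public void run() {
        Looper.prepare();                          // create this thread's MessageQueue
        mHandler = new Handler(Looper.myLooper()) {
            @Override
            public void handleMessage(Message msg) {
                Log.d("LooperThread", "got message, what=" + msg.what);
            }
        };
        Looper.loop();                             // enter the message loop; blocks until quit()
    }
}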

Components of the Message Mechanism

The Android framework implements the application message handling mechanism mainly through three classes, MessageQueue, Looper, and Handler:

1. MessageQueue: describes the message queue.

2. Looper: creates the message queue and runs the message loop.

3. Handler: sends and handles messages.

Message Processing Flow

1. When the program starts, the main thread creates a Looper object. The Looper initializes a MessageQueue and then calls loop() to keep reading messages from it.
2. When a Handler is initialized, its constructor obtains the current thread's Looper and, through it, the MessageQueue. This is why a Handler that needs to operate on the UI must be created on the main thread; otherwise you get: "Can't create handler inside thread that has not called Looper.prepare()".
3. When one of the Handler's send/post methods is called, a Message object is obtained, its target field is set to the current Handler, and the message is put into the message queue.
4. loop() pulls messages from the queue one by one and dispatches each of them through the target Handler's dispatchMessage(msg).
5. Message maintains an internal pool used to recycle and cache Message objects.

6. By analogy: the Looper is the engine, the MessageQueue is the conveyor belt, Messages are the items on the belt, and the Handler is the worker. A short usage sketch follows this list.
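
A minimal sketch of this flow from application code, assuming a normal app process where Looper.getMainLooper() is available; the MSG_UPDATE code and the field names are illustrative.

import android.os.Handler;
import android.os.Looper;
import android.os.Message;

public class MessageFlowSketch {
    static final int MSG_UPDATE = 1;

    // Handler bound to the main thread's Looper; handleMessage runs on the main thread.
    static final Handler sUiHandler = new Handler(Looper.getMainLooper()) {
        @Override
        public void handleMessage(Message msg) {
            if (msg.what == MSG_UPDATE) {
                // update the UI with msg.arg1 / msg.obj ...
            }
        }
    };

    public static void startWork() {
        new Thread(new Runnable() {
            @Override
            public void run() {
                // obtain() reuses a Message from the pool instead of allocating a new one
                Message msg = Message.obtain(sUiHandler, MSG_UPDATE);
                msg.arg1 = 42;
                sUiHandler.sendMessage(msg);   // enqueued into the main thread's MessageQueue
            }
        }).start();
    }
}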

Structure of the message loop mechanism (diagram)


Relationship between the Java layer and the C++ layer of the message loop (diagram)


Creating a Thread's Message Queue

1. Create the Java-layer Looper object:

private static void prepare(boolean quitAllowed) {
    if (sThreadLocal.get() != null) {
        throw new RuntimeException("Only one Looper may be created per thread");
    }
    sThreadLocal.set(new Looper(quitAllowed)); // create the Looper object
}

2. The Looper object creates a MessageQueue object (mQueue) internally:

private Looper(boolean quitAllowed) {
       // The Looper creates a MessageQueue when it is constructed
       mQueue = new MessageQueue(quitAllowed); 
       mThread = Thread.currentThread();
}

3. The MessageQueue constructor calls nativeInit to create a NativeMessageQueue object:

MessageQueue(boolean quitAllowed) {
       mQuitAllowed = quitAllowed;
       mPtr = nativeInit(); // stores the address of the native NativeMessageQueue
}


4. nativeInit returns the address of the NativeMessageQueue to mPtr:

static jlong android_os_MessageQueue_nativeInit(JNIEnv* env, jclass clazz) {
      // a NativeMessageQueue object is created here in the C++ layer
      NativeMessageQueue* nativeMessageQueue = new NativeMessageQueue();
      if (!nativeMessageQueue) {
        jniThrowRuntimeException(env, "Unable to allocate native queue");
        return 0;
      }
      nativeMessageQueue->incStrong(env);
      // return the address of nativeMessageQueue to the Java layer
      return reinterpret_cast<jlong>(nativeMessageQueue);
}


5. When the NativeMessageQueue is created, it creates a C++ (native) Looper object internally:

NativeMessageQueue::NativeMessageQueue() : mPollEnv(NULL), mPollObj(NULL), mExceptionObj(NULL) {
     // creating the NativeMessageQueue also creates a native Looper object
     mLooper = Looper::getForThread();
     if (mLooper == NULL) {
        mLooper = new Looper(false);
        Looper::setForThread(mLooper);
     }
}


6. When the native Looper is created, it creates a pipe and manages it through two file descriptors (the read end is registered with epoll):

Looper::Looper(bool allowNonCallbacks) : mAllowNonCallbacks(allowNonCallbacks), mResponseIndex(0) {
     int wakeFds[2];
     int result = pipe(wakeFds); // create a pipe
     LOG_ALWAYS_FATAL_IF(result != 0, "Could not create wake pipe.  errno=%d", errno);

     mWakeReadPipeFd = wakeFds[0];  // read end of the pipe
     mWakeWritePipeFd = wakeFds[1]; // write end of the pipe

     result = fcntl(mWakeReadPipeFd, F_SETFL, O_NONBLOCK);
     LOG_ALWAYS_FATAL_IF(result != 0, "Could not make wake read pipe non-blocking.  errno=%d",errno);

     result = fcntl(mWakeWritePipeFd, F_SETFL, O_NONBLOCK);
     LOG_ALWAYS_FATAL_IF(result != 0, "Could not make wake write pipe non-blocking.  errno=%d",errno);

#ifdef LOOPER_USES_EPOLL
     // Allocate the epoll instance and register the wake pipe.
     mEpollFd = epoll_create(EPOLL_SIZE_HINT); // create the epoll instance
     LOG_ALWAYS_FATAL_IF(mEpollFd < 0, "Could not create epoll instance.  errno=%d", errno);

     struct epoll_event eventItem;
     memset(& eventItem, 0, sizeof(epoll_event)); // zero out unused members of data field union
     eventItem.events = EPOLLIN;
     eventItem.data.fd = mWakeReadPipeFd;
     // register the read end of the wake pipe with epoll so it can be monitored
     result = epoll_ctl(mEpollFd, EPOLL_CTL_ADD, mWakeReadPipeFd, & eventItem); 
     LOG_ALWAYS_FATAL_IF(result != 0, "Could not add wake read pipe to epoll instance.  errno=%d",errno);
#else
     // Add the wake pipe to the head of the request list with a null callback.
     struct pollfd requestedFd;
     requestedFd.fd = mWakeReadPipeFd;
     requestedFd.events = POLLIN;
     mRequestedFds.push(requestedFd);

     Request request;
     request.fd = mWakeReadPipeFd;
     request.callback = NULL;
     request.ident = 0;
     request.data = NULL;
     mRequests.push(request);

     mPolling = false;
     mWaiters = 0;
#endif

#ifdef LOOPER_STATISTICS
    mPendingWakeTime = -1;
    mPendingWakeCount = 0;
    mSampledWakeCycles = 0;
    mSampledWakeCountSum = 0;
    mSampledWakeLatencySum = 0;

    mSampledPolls = 0;
    mSampledZeroPollCount = 0;
    mSampledZeroPollLatencySum = 0;
    mSampledTimeoutPollCount = 0;
    mSampledTimeoutPollLatencySum = 0;
#endif
}
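
This is the same idea as the wake-pipe pattern that can be built with java.nio: a selector waits on the read end of a pipe with a timeout, and another thread wakes it by writing a byte to the write end. The sketch below is only an analogy to illustrate the mechanism and is not code from the Android framework.

import java.nio.ByteBuffer;
import java.nio.channels.Pipe;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;

public class WakePipeSketch {
    public static void main(String[] args) throws Exception {
        Selector selector = Selector.open();
        Pipe pipe = Pipe.open();
        pipe.source().configureBlocking(false);
        pipe.source().register(selector, SelectionKey.OP_READ);  // like epoll_ctl(ADD, mWakeReadPipeFd)

        // "poll" with a timeout, like epoll_wait(timeoutMillis)
        Thread waiter = new Thread(() -> {
            try {
                int n = selector.select(5000);
                System.out.println(n > 0 ? "woken by pipe write" : "timed out");
            } catch (Exception ignored) { }
        });
        waiter.start();

        Thread.sleep(500);
        pipe.sink().write(ByteBuffer.wrap(new byte[]{'W'}));      // like Looper::wake() writing "W"
        waiter.join();
    }
}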

The Message Loop

1. Looper obtains the current thread's MessageQueue and repeatedly calls its next method to check for new messages:

public static void loop() {
      final Looper me = myLooper(); // get the current thread's Looper
      if (me == null) {
          throw new RuntimeException("No Looper; Looper.prepare() wasn't called on this thread.");
      }
      final MessageQueue queue = me.mQueue; // get the current thread's MessageQueue

       // Make sure the identity of this thread is that of the local process,
       // and keep track of what that identity token actually is.
       Binder.clearCallingIdentity();
       final long ident = Binder.clearCallingIdentity();

       for (;;) { // keep checking whether there is a new message to process
            Message msg = queue.next(); // might block
            if (msg == null) {
                // No message indicates that the message queue is quitting.
               return;
            }

            // This must be in a local variable, in case a UI event sets the logger
            Printer logging = me.mLogging;
            if (logging != null) {
                logging.println(">>>>> Dispatching to " + msg.target + " " +
                        msg.callback + ": " + msg.what);
            }
            // msg.target points to a Handler; dispatchMessage dispatches the message to it
            msg.target.dispatchMessage(msg);
            if (logging != null) {
                logging.println("<<<<< Finished to " + msg.target + " " + msg.callback);
            }

            // Make sure that during the course of dispatching the
            // identity of the thread wasn't corrupted.
            final long newIdent = Binder.clearCallingIdentity();
            if (ident != newIdent) {
               Log.wtf(TAG, "Thread identity changed from 0x"
                        + Long.toHexString(ident) + " to 0x"
                        + Long.toHexString(newIdent) + " while dispatching to "
                        + msg.target.getClass().getName() + " "
                        + msg.callback + " what=" + msg.what);
            }

            msg.recycleUnchecked();
        }
    }
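
loop() returns only when next() returns null, which happens after the queue has been asked to quit. A minimal sketch, assuming a worker thread built with HandlerThread (which wraps exactly the Looper.prepare()/loop() pattern above):

import android.os.Handler;
import android.os.HandlerThread;

public class QuitSketch {
    public static void demo() {
        HandlerThread worker = new HandlerThread("worker"); // calls Looper.prepare()/loop() internally
        worker.start();

        Handler handler = new Handler(worker.getLooper());
        handler.post(new Runnable() {
            @Override
            public void run() { /* background work */ }
        });

        // quitSafely(): messages that are already due are still processed,
        // then next() returns null, loop() returns, and the thread ends.
        worker.quitSafely();
    }
}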

2. MessageQueue's next method calls the native nativePollOnce function to check whether the current thread's message queue has new messages to process; pending messages are stored in the mMessages list and returned from here:

Message next() {
        // Return here if the message loop has already quit and been disposed.
        // This can happen if the application tries to restart a looper after quit
        // which is not supported.
        final long ptr = mPtr;
        if (ptr == 0) {
            return null;
        }

        int pendingIdleHandlerCount = -1; // -1 only during first iteration
        int nextPollTimeoutMillis = 0; // how long the thread should sleep waiting for the next message
        for (;;) { // keep calling nativePollOnce to check whether the queue has a new message
            if (nextPollTimeoutMillis != 0) {
                Binder.flushPendingCommands();
            }

            nativePollOnce(ptr, nextPollTimeoutMillis); // may block for up to nextPollTimeoutMillis

            synchronized (this) {
                // Try to retrieve the next message.  Return if found.
                final long now = SystemClock.uptimeMillis();
                Message prevMsg = null;
                Message msg = mMessages; // head of the list of messages pending on this thread
                if (msg != null && msg.target == null) {
                   // Stalled by a barrier.  Find the next asynchronous message in the queue.
                    do {
                        prevMsg = msg;
                        msg = msg.next;
                    } while (msg != null && !msg.isAsynchronous());
                }
                if (msg != null) {
                    if (now < msg.when) {
                        // Next message is not ready.  Set a timeout to wake up when it is ready.
                       nextPollTimeoutMillis = (int) Math.min(msg.when - now, Integer.MAX_VALUE);
                    } else {
                        // Got a message.
                        mBlocked = false;
                        if (prevMsg != null) {
                            prevMsg.next = msg.next;
                        } else {
                            mMessages = msg.next;
                        }
                        msg.next = null;
                       if (DEBUG) Log.v(TAG, "Returning message: " + msg);
                        msg.markInUse();
                        return msg;
                    }
                } else {
                    // No more messages.
                    nextPollTimeoutMillis = -1; // no messages: block indefinitely until woken
                }

                // Process the quit message now that all pending messages have been handled.
                if (mQuitting) {
                    dispose();
                    return null;
                }

                // If first time idle, then get the number of idlers to run.
                // Idle handles only run if the queue is empty or if the first message
                // in the queue (possibly a barrier) is due to be handled in the future.
                if (pendingIdleHandlerCount < 0
                        && (mMessages == null || now < mMessages.when)) {
                    pendingIdleHandlerCount = mIdleHandlers.size();
                }
                if (pendingIdleHandlerCount <= 0) {
                    // No idle handlers to run.  Loop and wait some more.
                    mBlocked = true;
                    continue;
                }

                if (mPendingIdleHandlers == null) {
                    mPendingIdleHandlers = new IdleHandler[Math.max(pendingIdleHandlerCount, 4)];
                }
                mPendingIdleHandlers = mIdleHandlers.toArray(mPendingIdleHandlers);
            }

            // Run the idle handlers.
            // We only ever reach this code block during the first iteration.
            for (int i = 0; i < pendingIdleHandlerCount; i++) {
                final IdleHandler idler = mPendingIdleHandlers[i];
                mPendingIdleHandlers[i] = null; // release the reference to the handler

                boolean keep = false;
                try {
                    keep = idler.queueIdle();
                } catch (Throwable t) {
                    Log.wtf(TAG, "IdleHandler threw exception", t);
                }

                if (!keep) {
                    synchronized (this) {
                        mIdleHandlers.remove(idler);
                    }
                }
            }

            // Reset the idle handler count to 0 so we do not run them again.
            pendingIdleHandlerCount = 0;

            // While calling an idle handler, a new message could have been delivered
            // so go back and look again for a pending message without waiting.
            nextPollTimeoutMillis = 0;
        }
}
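
The IdleHandler branch above is what application code reaches through MessageQueue.addIdleHandler. A minimal sketch, assuming it runs on a thread that already has a Looper (for example the main thread):

import android.os.Looper;
import android.os.MessageQueue;

public class IdleHandlerSketch {
    public static void install() {
        Looper.myQueue().addIdleHandler(new MessageQueue.IdleHandler() {
            @Override
            public boolean queueIdle() {
                // runs only when the queue is empty or the next message is not yet due
                return false; // false = remove this handler after one run; true = keep it
            }
        });
    }
}
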
3. nativePollOnce calls the native Looper's pollOnce function to keep checking whether there are new messages or events to process:

static void android_os_MessageQueue_nativePollOnce(JNIEnv* env, jobject obj,jlong ptr, jint timeoutMillis) {
      // find the NativeMessageQueue from ptr
      NativeMessageQueue* nativeMessageQueue = reinterpret_cast<NativeMessageQueue*>(ptr);
      // call the NativeMessageQueue's pollOnce to check whether this thread has new messages
      nativeMessageQueue->pollOnce(env, obj, timeoutMillis); 
}
4. pollOnce in turn calls pollInner (a non-zero return value means there is something to process):

void NativeMessageQueue::pollOnce(JNIEnv* env, jobject pollObj, int timeoutMillis) {
    mPollEnv = env;
    mPollObj = pollObj;
    // call the native Looper's pollOnce to check whether this thread has new messages to process
    mLooper->pollOnce(timeoutMillis);
    mPollObj = NULL;
    mPollEnv = NULL;

    if (mExceptionObj) {
        env->Throw(mExceptionObj);
        env->DeleteLocalRef(mExceptionObj);
        mExceptionObj = NULL;
    }
}

Looper::pollOnce (native) then loops, calling pollInner:

int Looper::pollOnce(int timeoutMillis, int* outFd,int* outEvents, void** outData) {
    int result = 0;
    for (;;) { // keep calling pollInner to check whether there is anything new
        while (mResponseIndex < mResponses.size()) {
            const Response& response = mResponses.itemAt(mResponseIndex++);
            if (! response.request.callback) {
#if DEBUG_POLL_AND_WAKE
                LOGD("%p ~ pollOnce - returning signalled identifier %d: "
                        "fd=%d, events=0x%x, data=%p", this,
                        response.request.ident, response.request.fd,
                        response.events, response.request.data);
#endif
                if (outFd != NULL) *outFd = response.request.fd;
                if (outEvents != NULL) *outEvents = response.events;
                if (outData != NULL) *outData = response.request.data;
                return response.request.ident;
            }
        }

        if (result != 0) {
#if DEBUG_POLL_AND_WAKE
            LOGD("%p ~ pollOnce - returning result %d", this, result);
#endif
            if (outFd != NULL) *outFd = 0;
            if (outEvents != NULL) *outEvents = NULL;
            if (outData != NULL) *outData = NULL;
            return result;
        }
        result = pollInner(timeoutMillis); // non-zero when there is a new message or event
    }
}


5. pollInner calls awoken to drain the stale wake data from the pipe:

int Looper::pollInner(int timeoutMillis) {
#if DEBUG_POLL_AND_WAKE
    LOGD("%p ~ pollOnce - waiting: timeoutMillis=%d", this, timeoutMillis);
#endif	

    int result = ALOOPER_POLL_WAKE;
    mResponses.clear();
    mResponseIndex = 0;

#ifdef LOOPER_STATISTICS
    nsecs_t pollStartTime = systemTime(SYSTEM_TIME_MONOTONIC);
#endif

#ifdef LOOPER_USES_EPOLL
    struct epoll_event eventItems[EPOLL_MAX_EVENTS];
    int eventCount = epoll_wait(mEpollFd, eventItems, EPOLL_MAX_EVENTS, timeoutMillis);
    bool acquiredLock = false;
#else
    // Wait for wakeAndLock() waiters to run then set mPolling to true.
    mLock.lock();
    while (mWaiters != 0) {
        mResume.wait(mLock);
    }
    mPolling = true;
    mLock.unlock();

    size_t requestedCount = mRequestedFds.size();
    int eventCount = poll(mRequestedFds.editArray(), requestedCount, timeoutMillis);
#endif

    if (eventCount < 0) {
        if (errno == EINTR) {
            goto Done;
        }

        LOGW("Poll failed with an unexpected error, errno=%d", errno);
        result = ALOOPER_POLL_ERROR;
        goto Done;
    }

    if (eventCount == 0) {
#if DEBUG_POLL_AND_WAKE
        LOGD("%p ~ pollOnce - timeout", this);
#endif
        result = ALOOPER_POLL_TIMEOUT;
        goto Done;
    }

#if DEBUG_POLL_AND_WAKE
    LOGD("%p ~ pollOnce - handling events from %d fds", this, eventCount);
#endif

#ifdef LOOPER_USES_EPOLL
    for (int i = 0; i < eventCount; i++) {
        int fd = eventItems[i].data.fd;
        uint32_t epollEvents = eventItems[i].events;

        // check whether the fd that became readable is the read end of this thread's wake pipe (mWakeReadPipeFd)
        if (fd == mWakeReadPipeFd) {
            if (epollEvents & EPOLLIN) {
                awoken();
            } else {
              LOGW("Ignoring unexpected epoll events 0x%x on wake read pipe.", epollEvents);
            }
        } else {
            if (! acquiredLock) {
                mLock.lock();
                acquiredLock = true;
            }

            ssize_t requestIndex = mRequests.indexOfKey(fd);
            if (requestIndex >= 0) {
                int events = 0;
                if (epollEvents & EPOLLIN) events |= ALOOPER_EVENT_INPUT;
                if (epollEvents & EPOLLOUT) events |= ALOOPER_EVENT_OUTPUT;
                if (epollEvents & EPOLLERR) events |= ALOOPER_EVENT_ERROR;
                if (epollEvents & EPOLLHUP) events |= ALOOPER_EVENT_HANGUP;
                pushResponse(events, mRequests.valueAt(requestIndex));
            } else {
             LOGW("Ignoring unexpected epoll events 0x%x on fd %d that is "
                        "no longer registered.", epollEvents, fd);
            }
        }
    }
    if (acquiredLock) {
        mLock.unlock();
    }
Done: ;
#else
    for (size_t i = 0; i < requestedCount; i++) {
        const struct pollfd& requestedFd = mRequestedFds.itemAt(i);

        short pollEvents = requestedFd.revents;
        if (pollEvents) {
            if (requestedFd.fd == mWakeReadPipeFd) {
                if (pollEvents & POLLIN) {
                    awoken();
                } else {
                 LOGW("Ignoring unexpected poll events 0x%x on wake read pipe.", pollEvents);
                }
            } else {
                int events = 0;
                if (pollEvents & POLLIN) events |= ALOOPER_EVENT_INPUT;
                if (pollEvents & POLLOUT) events |= ALOOPER_EVENT_OUTPUT;
                if (pollEvents & POLLERR) events |= ALOOPER_EVENT_ERROR;
                if (pollEvents & POLLHUP) events |= ALOOPER_EVENT_HANGUP;
                if (pollEvents & POLLNVAL) events |= ALOOPER_EVENT_INVALID;
                pushResponse(events, mRequests.itemAt(i));
            }
            if (--eventCount == 0) {
                break;
            }
        }
    }

Done:
    // Set mPolling to false and wake up the wakeAndLock() waiters.
    mLock.lock();
    mPolling = false;
    if (mWaiters != 0) {
        mAwake.broadcast();
    }
    mLock.unlock();
#endif

#ifdef LOOPER_STATISTICS
    nsecs_t pollEndTime = systemTime(SYSTEM_TIME_MONOTONIC);
    mSampledPolls += 1;
    if (timeoutMillis == 0) {
        mSampledZeroPollCount += 1;
        mSampledZeroPollLatencySum += pollEndTime - pollStartTime;
    } else if (timeoutMillis > 0 && result == ALOOPER_POLL_TIMEOUT) {
        mSampledTimeoutPollCount += 1;
        mSampledTimeoutPollLatencySum += pollEndTime - pollStartTime
                - milliseconds_to_nanoseconds(timeoutMillis);
    }
    if (mSampledPolls == SAMPLED_POLLS_TO_AGGREGATE) {
        LOGD("%p ~ poll latency statistics: %0.3fms zero timeout, %0.3fms non-zero timeout", this,
       0.000001f * float(mSampledZeroPollLatencySum) / mSampledZeroPollCount,
 0.000001f * float(mSampledTimeoutPollLatencySum) / mSampledTimeoutPollCount);
        mSampledPolls = 0;
        mSampledZeroPollCount = 0;
        mSampledZeroPollLatencySum = 0;
        mSampledTimeoutPollCount = 0;
        mSampledTimeoutPollLatencySum = 0;
    }
#endif

    for (size_t i = 0; i < mResponses.size(); i++) {
        const Response& response = mResponses.itemAt(i);
        if (response.request.callback) {
#if DEBUG_POLL_AND_WAKE || DEBUG_CALLBACKS
            LOGD("%p ~ pollOnce - invoking callback: fd=%d, events=0x%x, data=%p", this,
           response.request.fd, response.events, response.request.data);
#endif
            int callbackResult = response.request.callback(
              response.request.fd, response.events, response.request.data);
            if (callbackResult == 0) {
                removeFd(response.request.fd);
            }

            result = ALOOPER_POLL_CALLBACK;
        }
    }
    return result;
}
6. The implementation of awoken:
void Looper::awoken() {
#if DEBUG_POLL_AND_WAKE
    LOGD("%p ~ awoken", this);
#endif

#ifdef LOOPER_STATISTICS
    if (mPendingWakeCount == 0) {
        LOGD("%p ~ awoken: spurious!", this);
    } else {
        mSampledWakeCycles += 1;
        mSampledWakeCountSum += mPendingWakeCount;
        mSampledWakeLatencySum += systemTime(SYSTEM_TIME_MONOTONIC) - mPendingWakeTime;
        mPendingWakeCount = 0;
        mPendingWakeTime = -1;
        if (mSampledWakeCycles == SAMPLED_WAKE_CYCLES_TO_AGGREGATE) {
           LOGD("%p ~ wake statistics: %0.3fms wake latency, %0.3f wakes per cycle", this,
              0.000001f * float(mSampledWakeLatencySum) / mSampledWakeCycles,
                    float(mSampledWakeCountSum) / mSampledWakeCycles);
            mSampledWakeCycles = 0;
            mSampledWakeCountSum = 0;
            mSampledWakeLatencySum = 0;
        }
    }
#endif

    char buffer[16];
    ssize_t nRead;
    do {
        nRead = read(mWakeReadPipeFd, buffer, sizeof(buffer)); // drain the data from the pipe
    } while ((nRead == -1 && errno == EINTR) || nRead == sizeof(buffer));
}

Sending a Message to a Thread

1. A message is posted to a message queue through one of the Handler's sendMessageXXX methods:

public boolean sendMessageAtTime(Message msg, long uptimeMillis)
    {
        boolean sent = false;
        MessageQueue queue = mQueue;
        if (queue != null) {
            msg.target = this;
            sent = queue.enqueueMessage(msg, uptimeMillis); // insert the message into the queue
        }
        else {
            RuntimeException e = new RuntimeException(
                this + " sendMessageAtTime() called with no mQueue");
            Log.w("Looper", e.getMessage(), e);
        }
        return sent;
    }
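
All of Handler's send and post variants eventually funnel into sendMessageAtTime with an absolute timestamp based on SystemClock.uptimeMillis(). A minimal sketch; the what value and the delay are arbitrary:

import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.os.SystemClock;

public class SendSketch {
    public static void demo() {
        Handler handler = new Handler(Looper.getMainLooper());
        Message msg = Message.obtain(handler, /* what */ 1);

        // These two calls are equivalent: the delay is converted to an absolute uptime.
        handler.sendMessageDelayed(Message.obtain(msg), 500);
        handler.sendMessageAtTime(msg, SystemClock.uptimeMillis() + 500);
    }
}
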
2. enqueueMessage inserts the message into the queue, either at the head or further in, ordered by delivery time:
boolean enqueueMessage(Message msg, long when) {
        if (msg.target == null) {
            throw new IllegalArgumentException("Message must have a target.");
        }
        if (msg.isInUse()) {
            throw new IllegalStateException(msg + " This message is already in use.");
        }

        synchronized (this) {
            if (mQuitting) {
                IllegalStateException e = new IllegalStateException(
              msg.target + " sending message to a Handler on a dead thread");
                Log.w(TAG, e.getMessage(), e);
                msg.recycle();
                return false;
            }

            msg.markInUse();
            msg.when = when;
            Message p = mMessages;
            boolean needWake;
            if (p == null || when == 0 || when < p.when) {
                // New head, wake up the event queue if blocked.
                msg.next = p;
                mMessages = msg;
                needWake = mBlocked; // mBlocked records whether the thread is asleep in next()
            } else {
                // Inserted within the middle of the queue.  Usually we don't have to wake
                // up the event queue unless there is a barrier at the head of the queue
                // and the message is the earliest asynchronous message in the queue.
                needWake = mBlocked && p.target == null && msg.isAsynchronous();
                Message prev;
                for (;;) {
                    prev = p;
                    p = p.next;
                    if (p == null || when < p.when) {
                        break;
                    }
                    if (needWake && p.isAsynchronous()) {
                        needWake = false;
                    }
                }
                msg.next = p; // invariant: p == prev.next; link the message into the queue
                prev.next = msg;
            }

            // We can assume mPtr != 0 because mQuitting is false.
            if (needWake) {
                nativeWake(mPtr); // wake the target thread if it is sleeping
            }
        }
        return true;
}
3. If the target thread is asleep, the Java layer calls nativeWake to wake it up:
static void android_os_MessageQueue_nativeWake(JNIEnv* env, jclass clazz, jlong ptr) {
      NativeMessageQueue *nativeMessageQueue = reinterpret_cast<NativeMessageQueue*>(ptr);
      // find the NativeMessageQueue from ptr and call its wake method so the target thread processes its messages
      nativeMessageQueue->wake();
}

4. nativeWake calls the NativeMessageQueue's wake method:

void NativeMessageQueue::wake() {
    mLooper->wake(); // call the native Looper's wake method
}
5. wake in turn calls the native Looper's wake method, which writes a byte into the pipe:
void Looper::wake() {
#if DEBUG_POLL_AND_WAKE
    LOGD("%p ~ wake", this);
#endif

#ifdef LOOPER_STATISTICS
    // FIXME: Possible race with awoken() but this code is for testing only and is rarely enabled.
    if (mPendingWakeCount++ == 0) {
        mPendingWakeTime = systemTime(SYSTEM_TIME_MONOTONIC);
   }
#endif

    ssize_t nWrite;
    do {
      nWrite = write(mWakeWritePipeFd, "W", 1); // write one byte into the pipe to wake the thread
    } while (nWrite == -1 && errno == EINTR);

    if (nWrite != 1) {
        if (errno != EAGAIN) {
            LOGW("Could not write wake signal, errno=%d", errno);
        }
    }
}
Processing a Thread's Messages
1. Looper's loop method retrieves messages:
public static void loop() {
        final Looper me = myLooper(); // get the current thread's Looper
        if (me == null) {
            throw new RuntimeException("No Looper; Looper.prepare() wasn't called on this thread.");
        }
        final MessageQueue queue = me.mQueue; // get the current thread's MessageQueue

      // Make sure the identity of this thread is that of the local process,
        // and keep track of what that identity token actually is.
        Binder.clearCallingIdentity();
        final long ident = Binder.clearCallingIdentity();

        for (;;) { // keep checking whether there is a new message to process
            Message msg = queue.next(); // might block
            if (msg == null) {
                // No message indicates that the message queue is quitting.
                return;
            }

            // This must be in a local variable, in case a UI event sets the logger
            Printer logging = me.mLogging;
            if (logging != null) {
                logging.println(">>>>> Dispatching to " + msg.target + " " +
                        msg.callback + ": " + msg.what);
            }

            // msg.target points to a Handler; dispatchMessage dispatches the message to it

            msg.target.dispatchMessage(msg); 
            if (logging != null) {
              logging.println("<<<<< Finished to " + msg.target + " " + msg.callback);
            }

            // Make sure that during the course of dispatching the
            // identity of the thread wasn't corrupted.
            final long newIdent = Binder.clearCallingIdentity();
            if (ident != newIdent) {
                Log.wtf(TAG, "Thread identity changed from 0x"
                        + Long.toHexString(ident) + " to 0x"
                        + Long.toHexString(newIdent) 
                        + " while dispatching to "
                        + msg.target.getClass().getName() + " "
                        + msg.callback + " what=" + msg.what);
            }

            msg.recycleUnchecked();
        }
    }
2. Handler's dispatchMessage dispatches the message:
// Dispatch a message delivered by the Looper
    public void dispatchMessage(Message msg) { 
        if (msg.callback != null) {
            handleCallback(msg);
        } else {
            if (mCallback != null) {
                if (mCallback.handleMessage(msg)) {
                    return;
                }
            }
           handleMessage(msg);
        }
    }
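
dispatchMessage therefore has a fixed priority: a Runnable attached to the message (set by post()) wins, then the Handler.Callback passed to the constructor, and only then the overridden handleMessage. A minimal sketch of all three paths; the log tags and message code are illustrative:

import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.util.Log;

public class DispatchSketch {
    public static void demo() {
        Handler handler = new Handler(Looper.getMainLooper(), new Handler.Callback() {
            @Override
            public boolean handleMessage(Message msg) {
                Log.d("Dispatch", "2) mCallback");     // runs for sent messages
                return false;                          // false = fall through to handleMessage below
            }
        }) {
            @Override
            public void handleMessage(Message msg) {
                Log.d("Dispatch", "3) handleMessage"); // runs only if mCallback returned false
            }
        };

        handler.post(new Runnable() {                  // 1) msg.callback: handled by handleCallback(msg)
            @Override
            public void run() {
                Log.d("Dispatch", "1) posted Runnable");
            }
        });
        handler.sendEmptyMessage(1);                   // goes through 2) and then 3)
    }
}
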
3. The message is finally handled in Handler's handleMessage:
// Subclasses override handleMessage to process their messages; the default implementation is empty
public void handleMessage(Message msg) { }