The Upper Limit on GCD's Thread Count


Experiments

Observed upper bound on the number of GCD worker threads:

Global queue + custom concurrent queues (non-overcommit)

CPU fully loaded: number of CPU cores

CPU idle: 64

Custom serial queues

CPU fully loaded: 512

CPU idle: 512
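A rough sketch of the kind of test behind these numbers (compiled as Objective-C; the queue label, task count, and 3-second sleep are illustrative choices, not the original measurement code): flood a queue with blocking work and watch how many worker threads libdispatch brings up, e.g. in Xcode's debug navigator, or approximate it by counting how many blocks are blocked at the same time.

#include <dispatch/dispatch.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

// Each sleeping block pins one worker thread, so the peak of `active`
// approximates how many threads GCD was willing to create.
static void flood(bool use_serial_queues) {
    static _Atomic int active = 0;
    for (int i = 0; i < 1000; i++) {
        dispatch_queue_t q = use_serial_queues
                // one serial queue per task -> backed by an overcommit root queue
                ? dispatch_queue_create("test.serial", DISPATCH_QUEUE_SERIAL)
                // shared non-overcommit root queue
                : dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0);
        dispatch_async(q, ^{
            printf("concurrently blocked blocks: %d\n", atomic_fetch_add(&active, 1) + 1);
            sleep(3); // keep this worker thread busy so new work needs new threads
            atomic_fetch_sub(&active, 1);
        });
    }
}

With the global (non-overcommit) queue the count plateaus around 64 on an otherwise idle machine; with one serial queue per task (each backed by an overcommit root queue) it climbs to roughly 512.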

GCD Source Code Walkthrough

GCD queues

dispatch_queue_t

typedef struct dispatch_queue_s *dispatch_queue_t;

struct dispatch_queue_s {
        DISPATCH_QUEUE_CLASS_HEADER(queue, void *__dq_opaque1);
        /* 32bit hole on LP64 */
} DISPATCH_ATOMIC64_ALIGN;

#define DISPATCH_QUEUE_CLASS_HEADER(x, __pointer_sized_field__) \
        _DISPATCH_QUEUE_CLASS_HEADER(x, __pointer_sized_field__); \
        /* LP64 global queue cacheline boundary */ \
        unsigned long dq_serialnum; \ // serial number
        const char *dq_label; \ // queue label (name)
        DISPATCH_UNION_LE(uint32_t volatile dq_atomic_flags, \
                const uint16_t dq_width, \ // max number of tasks the queue can run at once; 1 for a serial queue
                const uint16_t __dq_opaque2 \
        ); \
        dispatch_priority_t dq_priority; \ // queue priority
        union { \
                struct dispatch_queue_specific_head_s *dq_specific_head; \
                struct dispatch_source_refs_s *ds_refs; \
                struct dispatch_timer_source_refs_s *ds_timer_refs; \
                struct dispatch_mach_recv_refs_s *dm_recv_refs; \
                struct dispatch_channel_callbacks_s const *dch_callbacks; \
        }; \
        int volatile dq_sref_cnt
        
#if OS_OBJECT_HAVE_OBJC1 //#if TARGET_OS_MAC && !TARGET_OS_SIMULATOR && defined(__i386__)
#define _DISPATCH_QUEUE_CLASS_HEADER(x, __pointer_sized_field__) \
        DISPATCH_OBJECT_HEADER(x); \
        DISPATCH_UNION_LE(uint64_t volatile dq_state, \
                        dispatch_lock dq_state_lock, \
                        uint32_t dq_state_bits \
        ); \
        __pointer_sized_field__
#else
#define _DISPATCH_QUEUE_CLASS_HEADER(x, __pointer_sized_field__) \
        DISPATCH_OBJECT_HEADER(x); \
        __pointer_sized_field__; \ // expands to void *__dq_opaque1 in dispatch_queue_s
        DISPATCH_UNION_LE(uint64_t volatile dq_state, \
                        dispatch_lock dq_state_lock, \
                        uint32_t dq_state_bits \
        )
#endif

#define DISPATCH_OBJECT_HEADER(x) \
        struct dispatch_object_s _as_do[0]; \
        _DISPATCH_OBJECT_HEADER(x)

#define _DISPATCH_OBJECT_HEADER(x) \
        struct _os_object_s _as_os_obj[0]; \
        OS_OBJECT_STRUCT_HEADER(dispatch_##x); \
        struct dispatch_##x##_s *volatile do_next; \ // the next item the queue will process
        struct dispatch_queue_s *do_targetq; \ // the global root queue this queue is bound to
        void *do_ctxt; \
        union { \
                dispatch_function_t DISPATCH_FUNCTION_POINTER do_finalizer; \
                void *do_introspection_ctxt; \
        }

#define OS_OBJECT_STRUCT_HEADER(x) \
        _OS_OBJECT_HEADER(\
        const struct x##_vtable_s *__ptrauth_objc_isa_pointer do_vtable, \ // vtable of function pointers for this object's operations
        do_ref_cnt, \ // internal reference count
        do_xref_cnt) // external reference count

Queue creation

  • dispatch_queue_create
dispatch_queue_t
dispatch_queue_create(const char *label, dispatch_queue_attr_t attr)
{
        return _dispatch_lane_create_with_target(label, attr,
                        DISPATCH_TARGET_QUEUE_DEFAULT, true);
}

DISPATCH_NOINLINE
static dispatch_queue_t
_dispatch_lane_create_with_target(const char *label, dispatch_queue_attr_t dqa,
                dispatch_queue_t tq, bool legacy)
{
        dispatch_queue_attr_info_t dqai = _dispatch_queue_attr_to_info(dqa);

The attr argument passed when creating the queue is decoded into the corresponding queue attributes by _dispatch_queue_attr_to_info:

dispatch_queue_attr_info_t
_dispatch_queue_attr_to_info(dispatch_queue_attr_t dqa)
{
        dispatch_queue_attr_info_t dqai = { };
        //#define DISPATCH_QUEUE_SERIAL NULL
        if (!dqa) return dqai; //dqa is the attr argument; a serial queue (attr == NULL) uses the default attributes

#if DISPATCH_VARIANT_STATIC //build-time setting, 1 here
        // #define DISPATCH_QUEUE_CONCURRENT \
                //DISPATCH_GLOBAL_OBJECT(dispatch_queue_attr_t, \
                //_dispatch_queue_attr_concurrent)
        if (dqa == &_dispatch_queue_attr_concurrent) { //concurrent queue attribute
                dqai.dqai_concurrent = true;
                return dqai;
        }
#endif
        //_dispatch_queue_attrs is an array holding every possible combination of attribute options; _dispatch_queue_attr_concurrent (DISPATCH_QUEUE_CONCURRENT) is its first element
        if (dqa < _dispatch_queue_attrs ||
                        dqa >= &_dispatch_queue_attrs[DISPATCH_QUEUE_ATTR_COUNT]) {
#ifndef __APPLE__
                if (memcmp(dqa, &_dispatch_queue_attrs[0],
                                sizeof(struct dispatch_queue_attr_s)) == 0) {
                        dqa = (dispatch_queue_attr_t)&_dispatch_queue_attrs[0];
                } else
#endif // __APPLE__
                DISPATCH_CLIENT_CRASH(dqa->do_vtable, "Invalid queue attribute");
        }
        //attr's index within _dispatch_queue_attrs encodes each attribute, much like a mixed-radix number where every "digit" selects one option
        size_t idx = (size_t)(dqa - _dispatch_queue_attrs);

        dqai.dqai_inactive = (idx % DISPATCH_QUEUE_ATTR_INACTIVE_COUNT);
        idx /= DISPATCH_QUEUE_ATTR_INACTIVE_COUNT;

        dqai.dqai_concurrent = !(idx % DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT);
        idx /= DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT;

        dqai.dqai_relpri = -(int)(idx % DISPATCH_QUEUE_ATTR_PRIO_COUNT);
        idx /= DISPATCH_QUEUE_ATTR_PRIO_COUNT;

        dqai.dqai_qos = idx % DISPATCH_QUEUE_ATTR_QOS_COUNT;
        idx /= DISPATCH_QUEUE_ATTR_QOS_COUNT;

        dqai.dqai_autorelease_frequency =
                        idx % DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT;
        idx /= DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT;

        dqai.dqai_overcommit = idx % DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT;
        idx /= DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT;

        return dqai;
}
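To make the decoding concrete, here is a small worked example (a sketch; the per-option counts 2, 2, 16, 7, 3, 3 correspond to the DISPATCH_QUEUE_ATTR_*_COUNT constants used above). The index is peeled apart one "digit" at a time, least-significant option first:

#include <stdio.h>

int main(void) {
    // 1472 is the index of the overcommit + background + concurrent attr
    // computed later in this article.
    size_t idx = 1472;
    int inactive   =  (int)(idx % 2);  idx /= 2;   // DISPATCH_QUEUE_ATTR_INACTIVE_COUNT
    int concurrent = !(int)(idx % 2);  idx /= 2;   // DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT
    int relpri     = -(int)(idx % 16); idx /= 16;  // DISPATCH_QUEUE_ATTR_PRIO_COUNT
    int qos        =  (int)(idx % 7);  idx /= 7;   // DISPATCH_QUEUE_ATTR_QOS_COUNT
    int frequency  =  (int)(idx % 3);  idx /= 3;   // DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT
    int overcommit =  (int)(idx % 3);              // DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT
    printf("inactive=%d concurrent=%d relpri=%d qos=%d freq=%d overcommit=%d\n",
           inactive, concurrent, relpri, qos, frequency, overcommit);
    // prints: inactive=0 concurrent=1 relpri=0 qos=2 freq=0 overcommit=1
    // i.e. active, concurrent, background qos (2), overcommit enabled
    return 0;
}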

Handling the qos priority and the overcommit attribute

//_dispatch_lane_create_with_target

dispatch_qos_t qos = dqai.dqai_qos;//dqai_qos was not set above, so it is 0 (DISPATCH_QOS_UNSPECIFIED)
#if !HAVE_PTHREAD_WORKQUEUE_QOS //on iOS HAVE_PTHREAD_WORKQUEUE_QOS should be 1, so this block is compiled out
        if (qos == DISPATCH_QOS_USER_INTERACTIVE) {
                dqai.dqai_qos = qos = DISPATCH_QOS_USER_INITIATED;
        }
        if (qos == DISPATCH_QOS_MAINTENANCE) {
                dqai.dqai_qos = qos = DISPATCH_QOS_BACKGROUND;
        }
#endif // !HAVE_PTHREAD_WORKQUEUE_QOS
        //_dispatch_queue_attr_to_info sets dqai_overcommit for neither serial nor concurrent queues
        _dispatch_queue_attr_overcommit_t overcommit = dqai.dqai_overcommit;//0, unspecified
        //tq is the DISPATCH_TARGET_QUEUE_DEFAULT argument; #define DISPATCH_TARGET_QUEUE_DEFAULT NULL
        if (overcommit != _dispatch_queue_attr_overcommit_unspecified && tq) {
                if (tq->do_targetq) {
                        DISPATCH_CLIENT_CRASH(tq, "Cannot specify both overcommit and "
                                        "a non-global target queue");
                }
        }
        //tq is NULL, so this branch is skipped
        if (tq && dx_type(tq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE) {
                // Handle discrepancies between attr and target queue, attributes win
                if (overcommit == _dispatch_queue_attr_overcommit_unspecified) {
                        if (tq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) {
                                overcommit = _dispatch_queue_attr_overcommit_enabled;
                        } else {
                                overcommit = _dispatch_queue_attr_overcommit_disabled;
                        }
                }
                if (qos == DISPATCH_QOS_UNSPECIFIED) {
                        qos = _dispatch_priority_qos(tq->dq_priority);
                }
                tq = NULL;
        } else if (tq && !tq->do_targetq) {
                // target is a pthread or runloop root queue, setting QoS or overcommit
                // is disallowed
                if (overcommit != _dispatch_queue_attr_overcommit_unspecified) {
                        DISPATCH_CLIENT_CRASH(tq, "Cannot specify an overcommit attribute "
                                        "and use this kind of target queue");
                }
        } else {
                //this branch runs: overcommit is still unspecified (0), so the condition holds
                if (overcommit == _dispatch_queue_attr_overcommit_unspecified) {
                        // Serial queues default to overcommit!
                        overcommit = dqai.dqai_concurrent ?
                                        _dispatch_queue_attr_overcommit_disabled :
                                        _dispatch_queue_attr_overcommit_enabled;
                }
        }
        //tq is NULL
        if (!tq) {
                //queues made with dispatch_queue_create all end up with DISPATCH_QOS_DEFAULT here
                //pick the global root queue that matches qos and overcommit
                tq = _dispatch_get_root_queue(
                                qos == DISPATCH_QOS_UNSPECIFIED ? DISPATCH_QOS_DEFAULT : qos,
                                overcommit == _dispatch_queue_attr_overcommit_enabled)->_as_dq;
                if (unlikely(!tq)) {
                        DISPATCH_CLIENT_CRASH(qos, "Invalid queue attribute");
                }
        }

Getting the global root queue

DISPATCH_ALWAYS_INLINE DISPATCH_CONST
static inline dispatch_queue_global_t
_dispatch_get_root_queue(dispatch_qos_t qos, bool overcommit)
{
        if (unlikely(qos < DISPATCH_QOS_MIN || qos > DISPATCH_QOS_MAX)) {
                DISPATCH_CLIENT_CRASH(qos, "Corrupted priority");
        }
        //the global root queues live in an array whose length is (number of qos classes) * 2 (overcommit or not)
        return &_dispatch_root_queues[2 * (qos - 1) + overcommit];
}
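For example (using DISPATCH_QOS_DEFAULT == 4 and the _dispatch_root_queues array listed later):

// 2 * (qos - 1) + overcommit
int idx_default            = 2 * (4 - 1) + 0; // 6 -> "com.apple.root.default-qos"
int idx_default_overcommit = 2 * (4 - 1) + 1; // 7 -> "com.apple.root.default-qos.overcommit"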

Working out the remaining queue attributes in preparation for initialization

//_dispatch_lane_create_with_target
    // Step 2: Initialize the queue
    //
    //legacy was passed in as true
    if (legacy) {
            //for plain serial/concurrent queues dqai_inactive and dqai_autorelease_frequency are both false
            // if any of these attributes is specified, use non legacy classes
            if (dqai.dqai_inactive || dqai.dqai_autorelease_frequency) {
                    legacy = false;
            }
    }
    
    const void *vtable;//roughly, the (Objective-C) class whose methods will operate on this queue
    //dqf holds the queue's flag bits
    dispatch_queue_flags_t dqf = legacy ? DQF_MUTABLE : 0;
    if (dqai.dqai_concurrent) {
            //_OBJC_CLASS_$_OS_dispatch_queue_concurrent
            vtable = DISPATCH_VTABLE(queue_concurrent);
    } else {
            //_OBJC_CLASS_$_OS_dispatch_queue_serial
            vtable = DISPATCH_VTABLE(queue_serial);
    }
    //dqai_autorelease_frequency is 0, so neither case below is hit
    switch (dqai.dqai_autorelease_frequency) {
    case DISPATCH_AUTORELEASE_FREQUENCY_NEVER:
            dqf |= DQF_AUTORELEASE_NEVER;
            break;
    case DISPATCH_AUTORELEASE_FREQUENCY_WORK_ITEM:
            dqf |= DQF_AUTORELEASE_ALWAYS;
            break;
    }
    if (label) {
            //decide whether the label must be copied; the private _dyld_is_memory_immutable call checks whether it is immutable
            const char *tmp = _dispatch_strdup_if_mutable(label);
            if (tmp != label) {
                    //mark that the copied label must be freed later
                    dqf |= DQF_LABEL_NEEDS_FREE;
                    label = tmp;
            }
    }

Allocating the queue and initializing its fields

    //_dispatch_lane_create_with_target 
        //allocate the object with the chosen class; dispatch_lane_t can be thought of as a subclass/wrapper of dispatch_queue_t
        dispatch_lane_t dq = _dispatch_object_alloc(vtable,
                        sizeof(struct dispatch_lane_s));
        //initialize; see _dispatch_queue_init below
        _dispatch_queue_init(dq, dqf, dqai.dqai_concurrent ?
                        DISPATCH_QUEUE_WIDTH_MAX : 1, DISPATCH_QUEUE_ROLE_INNER |
                        (dqai.dqai_inactive ? DISPATCH_QUEUE_INACTIVE : 0));

        dq->dq_label = label;
        //dqai_qos and dqai_relpri are both 0
        dq->dq_priority = _dispatch_priority_make((dispatch_qos_t)dqai.dqai_qos,
                        dqai.dqai_relpri);
        if (overcommit == _dispatch_queue_attr_overcommit_enabled) {
                dq->dq_priority |= DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
        }
        if (!dqai.dqai_inactive) {
                //inherit some settings from the global root target queue
                _dispatch_queue_priority_inherit_from_target(dq, tq);
                _dispatch_lane_inherit_wlh_from_target(dq, tq);
        }
        _dispatch_retain(tq);//bump tq's reference count
        dq->do_targetq = tq;//bind the queue to its global root target queue
        _dispatch_object_debug(dq, "%s", __func__);
        return _dispatch_trace_queue_create(dq)._dq;//return the new queue
}

static inline dispatch_queue_class_t
_dispatch_queue_init(dispatch_queue_class_t dqu, dispatch_queue_flags_t dqf,
                uint16_t width, uint64_t initial_state_bits)
{
        //width = how many tasks may run at once: 1 for serial queues, DISPATCH_QUEUE_WIDTH_MAX (0x1000ull - 2 = 4094) for concurrent queues
        uint64_t dq_state = DISPATCH_QUEUE_STATE_INIT_VALUE(width);
        dispatch_queue_t dq = dqu._dq;
        
        dispatch_assert((initial_state_bits & ~(DISPATCH_QUEUE_ROLE_MASK |
                        DISPATCH_QUEUE_INACTIVE)) == 0);
        //initial_state_bits is 0 here, so this branch is skipped
        if (initial_state_bits & DISPATCH_QUEUE_INACTIVE) {
                dq->do_ref_cnt += 2; // rdar://8181908 see _dispatch_lane_resume
                if (dx_metatype(dq) == _DISPATCH_SOURCE_TYPE) {
                        dq->do_ref_cnt++; // released when DSF_DELETED is set
                }
        }

        dq_state |= initial_state_bits;
        //the next item the queue will process; DISPATCH_OBJECT_LISTLESS is effectively a null sentinel
        dq->do_next = DISPATCH_OBJECT_LISTLESS;
        dqf |= DQF_WIDTH(width);
        os_atomic_store2o(dq, dq_atomic_flags, dqf, relaxed);
        dq->dq_state = dq_state;
        //assign the queue's serial number (ID)
        dq->dq_serialnum =
                        os_atomic_inc_orig(&_dispatch_queue_serial_numbers, relaxed);
        return dqu;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_queue_class_t
_dispatch_trace_queue_create(dispatch_queue_class_t dqu)
{
        _dispatch_only_if_ktrace_enabled({//only runs when ktrace is enabled; presumably irrelevant here
                uint64_t dq_label[4] = {0}; // So that we get the right null termination
                dispatch_queue_t dq = dqu._dq;
                strncpy((char *)dq_label, (char *)dq->dq_label ?: "", sizeof(dq_label));

                _dispatch_ktrace2(DISPATCH_QOS_TRACE_queue_creation_start,
                                dq->dq_serialnum,
                                _dispatch_priority_to_pp_prefer_fallback(dq->dq_priority));

                _dispatch_ktrace4(DISPATCH_QOS_TRACE_queue_creation_end,
                                                dq_label[0], dq_label[1], dq_label[2], dq_label[3]);
        });
        //_dispatch_introspection_queue_create simply returns dqu; the introspection machinery is not relevant here
        return _dispatch_introspection_queue_create(dqu);
}

Summary

Queue creation is complete. Back to the original question: what is the upper limit on the number of GCD threads? The only field that looks related is the queue's width, but it is 1 for serial queues and 4094 for concurrent queues, so that is clearly not it. Moreover, nothing in queue creation touches thread creation at all.

dispatch_queue_create takes two parameters: label names the queue, and attr controls its attributes. attr is really the address of an element in an array. When attr is NULL it means DISPATCH_QUEUE_SERIAL, a serial queue; DISPATCH_QUEUE_CONCURRENT is the address of the array's first element, _dispatch_queue_attr_concurrent.

Note that attr does not store the attributes itself; its address is used to compute an index into the array, and that index encodes them. The attribute constants and helpers that are publicly exposed:

DISPATCH_QUEUE_SERIAL
DISPATCH_QUEUE_SERIAL_INACTIVE 

DISPATCH_QUEUE_CONCURRENT
DISPATCH_QUEUE_CONCURRENT_INACTIVE

dispatch_queue_attr_t
dispatch_queue_attr_make_initially_inactive(
                dispatch_queue_attr_t _Nullable attr);

dispatch_queue_attr_t
dispatch_queue_attr_make_with_autorelease_frequency(
                dispatch_queue_attr_t _Nullable attr,
                dispatch_autorelease_frequency_t frequency);
                
dispatch_queue_attr_t
dispatch_queue_attr_make_with_qos_class(dispatch_queue_attr_t _Nullable attr,
                dispatch_qos_class_t qos_class, int relative_priority);

When creating a queue you can therefore control initially_inactive, autorelease_frequency, qos and so on. The source also contains a dispatch_queue_attr_make_with_overcommit function for controlling the overcommit attribute, but it is not exposed publicly.

Still, given what we now know about how dispatch_queue_attr_t values are laid out, we can compute the attr we want ourselves:

//how dispatch_queue_attr_info_t is obtained: see _dispatch_queue_attr_to_info above
static dispatch_queue_attr_t
_dispatch_queue_attr_from_info(dispatch_queue_attr_info_t dqai)
{
   size_t idx = 0;

   idx *= DISPATCH_QUEUE_ATTR_OVERCOMMIT_COUNT;
   idx += dqai.dqai_overcommit;

   idx *= DISPATCH_QUEUE_ATTR_AUTORELEASE_FREQUENCY_COUNT;
   idx += dqai.dqai_autorelease_frequency;

   idx *= DISPATCH_QUEUE_ATTR_QOS_COUNT;
   idx += dqai.dqai_qos;

   idx *= DISPATCH_QUEUE_ATTR_PRIO_COUNT;
   idx += (size_t)(-dqai.dqai_relpri);

   idx *= DISPATCH_QUEUE_ATTR_CONCURRENCY_COUNT;
   idx += !dqai.dqai_concurrent;

   idx *= DISPATCH_QUEUE_ATTR_INACTIVE_COUNT;
   idx += dqai.dqai_inactive;

   return (dispatch_queue_attr_t)&_dispatch_queue_attrs[idx];
}

For example, to obtain an overcommitting custom concurrent queue with background priority:

// DISPATCH_QUEUE_CONCURRENT is &_dispatch_queue_attrs[0]
dispatch_queue_attr_t attr = DISPATCH_QUEUE_CONCURRENT;
// index for {overcommit=enabled(1), frequency=0, qos=BACKGROUND(2), relpri=0,
// concurrent, active} is ((1*3*7+2)*16*2)*2 = 1472; each attr element is 16 bytes
uintptr_t testp = ((uintptr_t)attr + ( ((1*3*7+2)*16*2)*2  ) * 16);
dispatch_queue_attr_t new_attr = (__bridge dispatch_queue_attr_t)(struct dispatch_queue_attr_s *)testp;

dispatch_queue_t q1 = dispatch_queue_create("test",  new_attr);


Getting a global queue

dispatch_get_global_queue

dispatch_queue_global_t
dispatch_get_global_queue(intptr_t priority, uintptr_t flags)
{
   //_dispatch_root_queues is the array that holds the global root queues
   dispatch_assert(countof(_dispatch_root_queues) ==
         DISPATCH_ROOT_QUEUE_COUNT);
   //DISPATCH_QUEUE_OVERCOMMIT is 0x2ull; its complement is 0xFFFF...FD, so any flags value with bits set other than that one returns DISPATCH_BAD_INPUT (NULL)
   if (flags & ~(unsigned long)DISPATCH_QUEUE_OVERCOMMIT) {
      return DISPATCH_BAD_INPUT;
   }
   dispatch_qos_t qos = _dispatch_qos_from_queue_priority(priority);
#if !HAVE_PTHREAD_WORKQUEUE_QOS
   if (qos == QOS_CLASS_MAINTENANCE) {
      qos = DISPATCH_QOS_BACKGROUND;
   } else if (qos == QOS_CLASS_USER_INTERACTIVE) {
      qos = DISPATCH_QOS_USER_INITIATED;
   }
#endif
   if (qos == DISPATCH_QOS_UNSPECIFIED) {
      return DISPATCH_BAD_INPUT;
   }
   return _dispatch_get_root_queue(qos, flags & DISPATCH_QUEUE_OVERCOMMIT);
}

============= _dispatch_get_root_queue =============
static inline dispatch_queue_global_t
_dispatch_get_root_queue(dispatch_qos_t qos, bool overcommit)
{
   //DISPATCH_QOS_MIN 1 DISPATCH_QOS_MAX 6
   if (unlikely(qos < DISPATCH_QOS_MIN || qos > DISPATCH_QOS_MAX)) {
      DISPATCH_CLIENT_CRASH(qos, "Corrupted priority");
   }
   //compute the index from the priority (qos) and the overcommit flag
   return &_dispatch_root_queues[2 * (qos - 1) + overcommit];
}

The global root queue array _dispatch_root_queues

struct dispatch_queue_global_s { //not expanded here; think of it as a subclass of dispatch_queue_t
   DISPATCH_QUEUE_ROOT_CLASS_HEADER(lane);
} DISPATCH_CACHELINE_ALIGN;

struct dispatch_queue_global_s _dispatch_root_queues[] = {
#define _DISPATCH_ROOT_QUEUE_IDX(n, flags) \
      ((flags & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) ? \
      DISPATCH_ROOT_QUEUE_IDX_##n##_QOS_OVERCOMMIT : \
      DISPATCH_ROOT_QUEUE_IDX_##n##_QOS)
#define _DISPATCH_ROOT_QUEUE_ENTRY(n, flags, ...) \
   [_DISPATCH_ROOT_QUEUE_IDX(n, flags)] = { \
      DISPATCH_GLOBAL_OBJECT_HEADER(queue_global), \
      .dq_state = DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE, \
      .do_ctxt = _dispatch_root_queue_ctxt(_DISPATCH_ROOT_QUEUE_IDX(n, flags)), \
      .dq_atomic_flags = DQF_WIDTH(DISPATCH_QUEUE_WIDTH_POOL), \
      .dq_priority = flags | ((flags & DISPATCH_PRIORITY_FLAG_FALLBACK) ? \
            _dispatch_priority_make_fallback(DISPATCH_QOS_##n) : \
            _dispatch_priority_make(DISPATCH_QOS_##n, 0)), \
      __VA_ARGS__ \
   }
   _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, 0,//qos class + whether it overcommits
      .dq_label = "com.apple.root.maintenance-qos",
      .dq_serialnum = 4,
   ),
   _DISPATCH_ROOT_QUEUE_ENTRY(MAINTENANCE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
      .dq_label = "com.apple.root.maintenance-qos.overcommit",
      .dq_serialnum = 5,
   ),
   _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, 0,
      .dq_label = "com.apple.root.background-qos",
      .dq_serialnum = 6,
   ),
   _DISPATCH_ROOT_QUEUE_ENTRY(BACKGROUND, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
      .dq_label = "com.apple.root.background-qos.overcommit",
      .dq_serialnum = 7,
   ),
   _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, 0,
      .dq_label = "com.apple.root.utility-qos",
      .dq_serialnum = 8,
   ),
   _DISPATCH_ROOT_QUEUE_ENTRY(UTILITY, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
      .dq_label = "com.apple.root.utility-qos.overcommit",
      .dq_serialnum = 9,
   ),
   _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT, DISPATCH_PRIORITY_FLAG_FALLBACK,
      .dq_label = "com.apple.root.default-qos",
      .dq_serialnum = 10,
   ),
   _DISPATCH_ROOT_QUEUE_ENTRY(DEFAULT,
         DISPATCH_PRIORITY_FLAG_FALLBACK | DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
      .dq_label = "com.apple.root.default-qos.overcommit",
      .dq_serialnum = 11,
   ),
   _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, 0,
      .dq_label = "com.apple.root.user-initiated-qos",
      .dq_serialnum = 12,
   ),
   _DISPATCH_ROOT_QUEUE_ENTRY(USER_INITIATED, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
      .dq_label = "com.apple.root.user-initiated-qos.overcommit",
      .dq_serialnum = 13,
   ),
   _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, 0,
      .dq_label = "com.apple.root.user-interactive-qos",
      .dq_serialnum = 14,
   ),
   _DISPATCH_ROOT_QUEUE_ENTRY(USER_INTERACTIVE, DISPATCH_PRIORITY_FLAG_OVERCOMMIT,
      .dq_label = "com.apple.root.user-interactive-qos.overcommit",
      .dq_serialnum = 15,
   ),
};

Summary

Getting a global queue involves no thread-creation logic either. dispatch_get_global_queue selects a root queue from its priority (qos) and flags arguments: flags == 0 yields the non-overcommit queue, flags == 2 the overcommit one.
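A quick illustration (flags == 2 is the private DISPATCH_QUEUE_OVERCOMMIT flag; only 0 is part of the documented API, so relying on 2 is an implementation detail):

#include <dispatch/dispatch.h>

// flags == 0: non-overcommit root queue ("com.apple.root.default-qos")
dispatch_queue_t q_plain = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

// flags == 2: overcommit root queue ("com.apple.root.default-qos.overcommit")
dispatch_queue_t q_over  = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 2);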

Synchronous execution (dispatch_sync)

DISPATCH_NOINLINE
void
dispatch_sync(dispatch_queue_t dq, dispatch_block_t work)
{
   uintptr_t dc_flags = DC_FLAG_BLOCK;
   //check whether the block's invoke pointer equals ___dispatch_block_create_block_invoke, i.e. whether the block carries private data
   if (unlikely(_dispatch_block_has_private_data(work))) {
      return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
   }
   //normally this goes straight to _dispatch_sync_f
   _dispatch_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
}

DISPATCH_NOINLINE
static void
_dispatch_sync_f(dispatch_queue_t dq, void *ctxt, dispatch_function_t func,
      uintptr_t dc_flags)
{
   _dispatch_sync_f_inline(dq, ctxt, func, dc_flags);
}

If the queue is serial, _dispatch_barrier_sync_f runs:

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_sync_f_inline(dispatch_queue_t dq, void *ctxt,
      dispatch_function_t func, uintptr_t dc_flags)
{
   //as seen in dispatch_queue_create, dq_width == 1 means a serial queue
   if (likely(dq->dq_width == 1)) {
      return _dispatch_barrier_sync_f(dq, ctxt, func, dc_flags);
   }
   
static void
_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt,
      dispatch_function_t func, uintptr_t dc_flags)
{
   _dispatch_barrier_sync_f_inline(dq, ctxt, func, dc_flags);
}

static inline void
_dispatch_barrier_sync_f_inline(dispatch_queue_t dq, void *ctxt,
      dispatch_function_t func, uintptr_t dc_flags)
{
   //get the current thread id
   dispatch_tid tid = _dispatch_tid_self();
   //roughly: verify the queue's class/type
   if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
      DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
   }
   //dl is simply dq viewed as a dispatch_lane_t
   dispatch_lane_t dl = upcast(dq)._dl;

Trying to lock the queue

//_dispatch_barrier_sync_f_inline
    // The more correct thing to do would be to merge the qos of the thread
   // that just acquired the barrier lock into the queue state.
   //
   // However this is too expensive for the fast path, so skip doing it.
   // The chosen tradeoff is that if an enqueue on a lower priority thread
   // contends with this fast path, this thread may receive a useless override.
   //
   // Global concurrent queues and queues bound to non-dispatch threads
   // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
   //try to take the queue's barrier lock
   if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) {
      return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl,
            DC_FLAG_BARRIER | dc_flags);
   }
   
============= _dispatch_queue_try_acquire_barrier_sync =============
 static inline bool
_dispatch_queue_try_acquire_barrier_sync(dispatch_queue_class_t dq, uint32_t tid)
{
   return _dispatch_queue_try_acquire_barrier_sync_and_suspend(dq._dl, tid, 0);
}

static inline bool
_dispatch_queue_try_acquire_barrier_sync_and_suspend(dispatch_lane_t dq,
      uint32_t tid, uint64_t suspend_count)
{
   //at creation, dq_state was initialized to DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width)
   uint64_t init  = DISPATCH_QUEUE_STATE_INIT_VALUE(dq->dq_width);
   //mark the width as fully used and a barrier in flight, and record the owning thread id
   uint64_t value = DISPATCH_QUEUE_WIDTH_FULL_BIT | DISPATCH_QUEUE_IN_BARRIER |
         _dispatch_lock_value_from_tid(tid) |
         DISPATCH_QUEUE_UNCONTENDED_SYNC |
         (suspend_count * DISPATCH_QUEUE_SUSPEND_INTERVAL);
   uint64_t old_state, new_state;

   return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, acquire, {
      uint64_t role = old_state & DISPATCH_QUEUE_ROLE_MASK;
      //if dq_state differs from its initial value, the queue is already running something
      if (old_state != (init | role)) {
         //give up and return false
         os_atomic_rmw_loop_give_up(break);
      }
      //the queue is idle: install the new state and return true
      new_state = value | role;
   });
}
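The os_atomic_rmw_loop2o above is essentially a compare-and-swap retry loop over dq_state. A simplified sketch of the same idea using C11 atomics (a model of the semantics, not libdispatch's actual macro):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

// Succeed only if the queue's state still equals its idle initial value;
// otherwise report contention so the caller can take the slow path.
static bool try_acquire_barrier(_Atomic uint64_t *dq_state,
                                uint64_t init, uint64_t locked_value) {
    uint64_t old_state = atomic_load_explicit(dq_state, memory_order_relaxed);
    do {
        if (old_state != init) {
            return false; // another thread is already draining the queue
        }
        // on failure, compare_exchange reloads old_state and we re-check it
    } while (!atomic_compare_exchange_weak_explicit(dq_state, &old_state,
                                                    locked_value,
                                                    memory_order_acquire,
                                                    memory_order_relaxed));
    return true;
}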

If the lock attempt fails (the queue still has work in flight), _dispatch_sync_f_slow runs:

//_dispatch_barrier_sync_f_inline
static void
_dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt,
      dispatch_function_t func, uintptr_t top_dc_flags,
      dispatch_queue_class_t dqu, uintptr_t dc_flags)
{
   dispatch_queue_t top_dq = top_dqu._dq;
   dispatch_queue_t dq = dqu._dq;
   //do_targetq is the bound global root queue and is normally non-NULL
   if (unlikely(!dq->do_targetq)) {
      return _dispatch_sync_function_invoke(dq, ctxt, func);
   }
   //roughly pthread_getspecific(dispatch_priority_key)
   pthread_priority_t pp = _dispatch_get_priority();
   //capture the calling context
   struct dispatch_sync_context_s dsc = {
      .dc_flags    = DC_FLAG_SYNC_WAITER | dc_flags,
      .dc_func     = _dispatch_async_and_wait_invoke,
      .dc_ctxt     = &dsc,
      .dc_other    = top_dq,
      .dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG,
      .dc_voucher  = _voucher_get(),
      .dsc_func    = func,
      .dsc_ctxt    = ctxt,
      .dsc_waiter  = _dispatch_tid_self(),
   };
   _dispatch_trace_item_push(top_dq, &dsc);//can be ignored on iOS
   //sleep until the work queued ahead of us has finished
   __DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq);

    //once woken up, prepare to run the task
   //this handles dispatch_async_and_wait and similar cases where the block already ran on another thread
   if (dsc.dsc_func == NULL) {
      // dsc_func being cleared means that the block ran on another thread ie.
      // case (2) as listed in _dispatch_async_and_wait_f_slow.
      dispatch_queue_t stop_dq = dsc.dc_other;
      return _dispatch_sync_complete_recurse(top_dq, stop_dq, top_dc_flags);
   }

   _dispatch_introspection_sync_begin(top_dq);//an empty {} method
   _dispatch_trace_item_pop(top_dq, &dsc);//the _dispatch_trace* calls can be ignored
   //run the task
   _dispatch_sync_invoke_and_complete_recurse(top_dq, ctxt, func, top_dc_flags
         DISPATCH_TRACE_ARG(&dsc));
}

============= __DISPATCH_WAIT_FOR_QUEUE__ =============
static void
__DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq)
{
   //read the queue's dq_state
   uint64_t dq_state = _dispatch_wait_prepare(dq);
   //deadlock check: dispatch_sync-ing onto a queue that the current thread is already draining
   //((dq_state ^ dsc->dsc_waiter) & DLOCK_OWNER_MASK) == 0
   if (unlikely(_dq_state_drain_locked_by(dq_state, dsc->dsc_waiter))) {
      DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
            "dispatch_sync called on queue "
            "already owned by current thread");
   }

   // Blocks submitted to the main thread MUST run on the main thread, and
   // dispatch_async_and_wait also executes on the remote context rather than
   // the current thread.
   //
   // For both these cases we need to save the frame linkage for the sake of
   // _dispatch_async_and_wait_invoke
   //
   //per the comment above: blocks submitted to the main thread must run on the main thread
   //blocks submitted with dispatch_async_and_wait may run somewhere other than the current thread; if a synchronous block must strictly honor the target queue's properties, use dispatch_async_and_wait
   //in other words, blocks submitted with dispatch_sync (other than to the main queue) run on the calling thread
   //save the frame linkage for tasks that might run on another thread
   _dispatch_thread_frame_save_state(&dsc->dsc_dtf);
   //choose, based on dq_state, how to wait for the submitted task to finish (the exact policy is not analyzed here)
   if (_dq_state_is_suspended(dq_state) ||
         _dq_state_is_base_anon(dq_state)) {
      dsc->dc_data = DISPATCH_WLH_ANON;
   } else if (_dq_state_is_base_wlh(dq_state)) {
      dsc->dc_data = (dispatch_wlh_t)dq;
   } else {
      _dispatch_wait_compute_wlh(upcast(dq)._dl, dsc);
   }

   if (dsc->dc_data == DISPATCH_WLH_ANON) {
      dsc->dsc_override_qos_floor = dsc->dsc_override_qos =
            (uint8_t)_dispatch_get_basepri_override_qos_floor();
      //sets up a semaphore in dsc->dsc_event->dte_sema
      _dispatch_thread_event_init(&dsc->dsc_event);
   }
   //push this waiter (the task plus its context) onto the queue
   //(&(dq)->do_vtable->_os_obj_vtable)->dq_push(dq, dsc, _dispatch_qos_from_pp(dsc->dc_priority))
   dx_push(dq, dsc, _dispatch_qos_from_pp(dsc->dc_priority));
   _dispatch_trace_runtime_event(sync_wait, dq, 0);
   if (dsc->dc_data == DISPATCH_WLH_ANON) {
      //wait on the semaphore
      _dispatch_thread_event_wait(&dsc->dsc_event); // acquire
   } else if (!dsc->dsc_wlh_self_wakeup) {
      //kevent-based wait
      _dispatch_event_loop_wait_for_ownership(dsc);
   }
   if (dsc->dc_data == DISPATCH_WLH_ANON) {
      //destroy the semaphore
      _dispatch_thread_event_destroy(&dsc->dsc_event);
      // If _dispatch_sync_waiter_wake() gave this thread an override,
      // ensure that the root queue sees it.
      if (dsc->dsc_override_qos > dsc->dsc_override_qos_floor) {
         _dispatch_set_basepri_override_qos(dsc->dsc_override_qos);
      }
   }
}
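The deadlock check at the top of __DISPATCH_WAIT_FOR_QUEUE__ is what turns the classic "sync onto the queue you are already running on" mistake into a crash instead of a silent hang. A minimal reproduction (sketch):

#include <dispatch/dispatch.h>

// The outer block owns the serial queue's barrier lock; the inner dispatch_sync
// targets the same queue, so dsc_waiter matches the lock owner and libdispatch
// aborts with "dispatch_sync called on queue already owned by current thread".
static void deadlock_demo(void) {
    dispatch_queue_t q = dispatch_queue_create("demo.serial", DISPATCH_QUEUE_SERIAL);
    dispatch_sync(q, ^{
        dispatch_sync(q, ^{
            // never reached
        });
    });
}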

============= _dispatch_sync_invoke_and_complete_recurse =============
static void
_dispatch_sync_invoke_and_complete_recurse(dispatch_queue_class_t dq,
      void *ctxt, dispatch_function_t func, uintptr_t dc_flags
      DISPATCH_TRACE_ARG(void *dc))
{
   //run the task
   _dispatch_sync_function_invoke_inline(dq, ctxt, func);
   _dispatch_trace_item_complete(dc);
   _dispatch_sync_complete_recurse(dq._dq, NULL, dc_flags);
}

static inline void
_dispatch_sync_function_invoke_inline(dispatch_queue_class_t dq, void *ctxt,
      dispatch_function_t func)
{
   dispatch_thread_frame_s dtf;
   _dispatch_thread_frame_push(&dtf, dq);
   _dispatch_client_callout(ctxt, func);
   _dispatch_perfmon_workitem_inc();
   _dispatch_thread_frame_pop(&dtf);
}

void
_dispatch_client_callout(void *ctxt, dispatch_function_t f)
{
   _dispatch_get_tsd_base();
   void *u = _dispatch_get_unwind_tsd();
   if (likely(!u)) return f(ctxt);
   _dispatch_set_unwind_tsd(NULL);
   f(ctxt);
   _dispatch_free_unwind_tsd();
   _dispatch_set_unwind_tsd(u);
}

static void
_dispatch_sync_complete_recurse(dispatch_queue_t dq, dispatch_queue_t stop_dq,
      uintptr_t dc_flags)
{
   //barrier is true here
   bool barrier = (dc_flags & DC_FLAG_BARRIER);
   do {
      if (dq == stop_dq) return;
      if (barrier) {
         //_dispatch_lane_wakeup wakes the queue to run the next task: it sets up the next task's context and wakes the waiting thread
         dx_wakeup(dq, 0, DISPATCH_WAKEUP_BARRIER_COMPLETE);
      } else {
         _dispatch_lane_non_barrier_complete(upcast(dq)._dl, 0);
      }
      dq = dq->do_targetq;
      barrier = (dq->dq_width == 1);
   } while (unlikely(dq->do_targetq));
}

Lock acquired (no task currently running on the queue)

//_dispatch_barrier_sync_f_inline
if (unlikely(dl->do_targetq->do_targetq)) {
      return _dispatch_sync_recurse(dl, ctxt, func,
            DC_FLAG_BARRIER | dc_flags);
   }
   _dispatch_introspection_sync_begin(dl);
   _dispatch_lane_barrier_sync_invoke_and_complete(dl, ctxt, func
         DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
               dq, ctxt, func, dc_flags | DC_FLAG_BARRIER)));
}

============= _dispatch_lane_barrier_sync_invoke_and_complete =============

_dispatch_lane_barrier_sync_invoke_and_complete(dispatch_lane_t dq,
      void *ctxt, dispatch_function_t func DISPATCH_TRACE_ARG(void *dc))
{
    //run the task
   _dispatch_sync_function_invoke_inline(dq, ctxt, func);
   _dispatch_trace_item_complete(dc);
   //the queue's list still holds pending items, or this is a concurrent queue
   if (unlikely(dq->dq_items_tail || dq->dq_width > 1)) {
      //wake the queue to process the next task
      return _dispatch_lane_barrier_complete(dq, 0, 0);
   }

   // Presence of any of these bits requires more work that only
   // _dispatch_*_barrier_complete() handles properly
   //
   // Note: testing for RECEIVED_OVERRIDE or RECEIVED_SYNC_WAIT without
   // checking the role is sloppy, but is a super fast check, and neither of
   // these bits should be set if the lock was never contended/discovered.
   const uint64_t fail_unlock_mask = DISPATCH_QUEUE_SUSPEND_BITS_MASK |
         DISPATCH_QUEUE_ENQUEUED | DISPATCH_QUEUE_DIRTY |
         DISPATCH_QUEUE_RECEIVED_OVERRIDE |
         DISPATCH_QUEUE_RECEIVED_SYNC_WAIT;
   uint64_t old_state, new_state;
   dispatch_wakeup_flags_t flags = 0;

    //reset dq's dq_state, i.e. unlock the queue
   // similar to _dispatch_queue_drain_try_unlock
   os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
      new_state  = old_state - DISPATCH_QUEUE_SERIAL_DRAIN_OWNED;
      new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
      new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
      if (unlikely(old_state & fail_unlock_mask)) {
         os_atomic_rmw_loop_give_up({
            return _dispatch_lane_barrier_complete(dq, 0, flags);
         });
      }
   });
   if (_dq_state_is_base_wlh(old_state)) {
      _dispatch_event_loop_assert_not_owned((dispatch_wlh_t)dq);
   }
}

If the queue is concurrent, the task runs directly on the current thread:

//_dispatch_sync_f_inline

if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
      DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
   }

   dispatch_lane_t dl = upcast(dq)._dl;
   // Global concurrent queues and queues bound to non-dispatch threads
   // always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
   //as the comment above explains
   if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) {
      //see _dispatch_sync_f_slow above
      return _dispatch_sync_f_slow(dl, ctxt, func, 0, dl, dc_flags);
   }

   if (unlikely(dq->do_targetq->do_targetq)) {
      return _dispatch_sync_recurse(dl, ctxt, func, dc_flags);
   }
   _dispatch_introspection_sync_begin(dl);
   //run the task on the current thread and reset dq_state; compare _dispatch_sync_invoke_and_complete_recurse
   _dispatch_sync_invoke_and_complete(dl, ctxt, func DISPATCH_TRACE_ARG(
         _dispatch_trace_item_sync_push_pop(dq, ctxt, func, dc_flags)));
}

============= _dispatch_queue_try_reserve_sync_width =============
static inline bool
_dispatch_queue_try_reserve_sync_width(dispatch_lane_t dq)
{
   uint64_t old_state, new_state;

   // <rdar://problem/24738102&24743140> reserving non barrier width
   // doesn't fail if only the ENQUEUED bit is set (unlike its barrier width
   // equivalent), so we have to check that this thread hasn't enqueued
   // anything ahead of this call or we can break ordering
   //if the tail already holds items, return false immediately
   if (unlikely(dq->dq_items_tail)) {
      return false;
   }

   return os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
      //fail if a barrier is in flight (DISPATCH_QUEUE_IN_BARRIER), the queue is dirty (DISPATCH_QUEUE_DIRTY, items enqueued but not yet drained), or a barrier is pending (DISPATCH_QUEUE_PENDING_BARRIER, the concurrent task count is close to the width)
      if (unlikely(!_dq_state_is_sync_runnable(old_state)) ||
            _dq_state_is_dirty(old_state) ||
            _dq_state_has_pending_barrier(old_state)) {
         os_atomic_rmw_loop_give_up(return false);
      }
      new_state = old_state + DISPATCH_QUEUE_WIDTH_INTERVAL;
   });
}

Summary

So far we still have not found any thread creation or management logic. The rough conclusion: apart from work dispatch_sync-ed to the main queue, synchronous blocks generally run on the calling thread, at the calling thread's priority.
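This is easy to observe (a small sketch; the comment shows the expected outcome rather than captured output):

#include <dispatch/dispatch.h>
#include <pthread.h>
#include <stdio.h>

// dispatch_sync to a custom queue runs the block on the calling thread,
// so both lines print the same pthread.
static void sync_runs_on_caller(void) {
    dispatch_queue_t q = dispatch_queue_create("demo.sync", DISPATCH_QUEUE_SERIAL);
    printf("caller thread: %p\n", (void *)pthread_self());
    dispatch_sync(q, ^{
        printf("block  thread: %p\n", (void *)pthread_self()); // same value
    });
}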


Asynchronous execution (dispatch_async)

dispatch_async

void
dispatch_async(dispatch_queue_t dq, dispatch_block_t work)
{
   //roughly pthread_getspecific(dispatch_cache_key); a new continuation is allocated if none is cached
   //dispatch_continuation_t dc can be thought of as a wrapper that stages the block to be executed
   dispatch_continuation_t dc = _dispatch_continuation_alloc();
   uintptr_t dc_flags = DC_FLAG_CONSUME;
   dispatch_qos_t qos;

   qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags);
   _dispatch_continuation_async(dq, dc, qos, dc->dc_flags);
}

After the continuation is fetched or created, it is initialized:

static inline dispatch_qos_t
_dispatch_continuation_init(dispatch_continuation_t dc,
      dispatch_queue_class_t dqu, dispatch_block_t work,
      dispatch_block_flags_t flags, uintptr_t dc_flags)
{
   void *ctxt = _dispatch_Block_copy(work);

   dc_flags |= DC_FLAG_BLOCK | DC_FLAG_ALLOCATED;
   //check whether the block's invoke pointer equals ___dispatch_block_create_block_invoke (block carrying private data)
   if (unlikely(_dispatch_block_has_private_data(work))) {
      dc->dc_flags = dc_flags;
      dc->dc_ctxt = ctxt;
      // will initialize all fields but requires dc_flags & dc_ctxt to be set
      return _dispatch_continuation_init_slow(dc, dqu, flags);
   }

   dispatch_function_t func = _dispatch_Block_invoke(work);
   if (dc_flags & DC_FLAG_CONSUME) {
      //this is the function ultimately called to run (and release) the block
      func = _dispatch_call_block_and_release;
   }
   return _dispatch_continuation_init_f(dc, dqu, ctxt, func, flags, dc_flags);
}


============= _dispatch_continuation_init_f =============
static inline dispatch_qos_t
_dispatch_continuation_init_f(dispatch_continuation_t dc,
      dispatch_queue_class_t dqu, void *ctxt, dispatch_function_t f,
      dispatch_block_flags_t flags, uintptr_t dc_flags)
{
   pthread_priority_t pp = 0;
   dc->dc_flags = dc_flags | DC_FLAG_ALLOCATED;
   dc->dc_func = f;
   dc->dc_ctxt = ctxt;
   // in this context DISPATCH_BLOCK_HAS_PRIORITY means that the priority
   // should not be propagated, only taken from the handler if it has one
   if (!(flags & DISPATCH_BLOCK_HAS_PRIORITY)) {//flags is 0 here
      //priority propagation, roughly pthread_getspecific(dispatch_priority_key)
      pp = _dispatch_priority_propagate();
   }
   _dispatch_continuation_voucher_set(dc, flags);//voucher handling, not examined here
   
   return _dispatch_continuation_priority_set(dc, dqu, pp, flags);
}

============= _dispatch_continuation_priority_set =============
static inline dispatch_qos_t
_dispatch_continuation_priority_set(dispatch_continuation_t dc,
      dispatch_queue_class_t dqu,
      pthread_priority_t pp, dispatch_block_flags_t flags)
{
   dispatch_qos_t qos = DISPATCH_QOS_UNSPECIFIED;
#if HAVE_PTHREAD_WORKQUEUE_QOS//1 here
   dispatch_queue_t dq = dqu._dq;

   if (likely(pp)) {
      bool enforce = (flags & DISPATCH_BLOCK_ENFORCE_QOS_CLASS);
      bool is_floor = (dq->dq_priority & DISPATCH_PRIORITY_FLAG_FLOOR);
      bool dq_has_qos = (dq->dq_priority & DISPATCH_PRIORITY_REQUESTED_MASK);
      if (enforce) {
         pp |= _PTHREAD_PRIORITY_ENFORCE_FLAG;
         qos = _dispatch_qos_from_pp_unsafe(pp);
      } else if (!is_floor && dq_has_qos) {
         pp = 0;
      } else {
         //custom queues should take this branch
         qos = _dispatch_qos_from_pp_unsafe(pp);
      }
   }
   dc->dc_priority = pp;
#else
   (void)dc; (void)dqu; (void)pp; (void)flags;
#endif
   return qos;
}

Enqueueing the task

static inline void
_dispatch_continuation_async(dispatch_queue_class_t dqu,
      dispatch_continuation_t dc, dispatch_qos_t qos, uintptr_t dc_flags)
{
#if DISPATCH_INTROSPECTION
   if (!(dc_flags & DC_FLAG_NO_INTROSPECTION)) {
      _dispatch_trace_item_push(dqu, dc);
   }
#else
   (void)dc_flags;
#endif
   //(&(dqu._dq)->do_vtable->_os_obj_vtable)->dq_push(dqu._dq, dc, qos)
   //dx_push is a macro that ends up calling dq_push from the object's do_vtable
   return dx_push(dqu._dq, dc, qos);
}

============= dq_push =============
//dispatch_queue_class_t can loosely be thought of as a base class that declares various methods (such as dq_push here)
//its "subclasses" then provide different implementations of those methods (a simplification; the real mechanism is more involved)
//the corresponding definitions are in init.c

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_serial, lane,
   .do_type        = DISPATCH_QUEUE_SERIAL_TYPE,
   .do_dispose     = _dispatch_lane_dispose,
   .do_debug       = _dispatch_queue_debug,
   .do_invoke      = _dispatch_lane_invoke,

   .dq_activate    = _dispatch_lane_activate,
   .dq_wakeup      = _dispatch_lane_wakeup,
   .dq_push        = _dispatch_lane_push,
);

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, lane,
   .do_type        = DISPATCH_QUEUE_CONCURRENT_TYPE,
   .do_dispose     = _dispatch_lane_dispose,
   .do_debug       = _dispatch_queue_debug,
   .do_invoke      = _dispatch_lane_invoke,

   .dq_activate    = _dispatch_lane_activate,
   .dq_wakeup      = _dispatch_lane_wakeup,
   .dq_push        = _dispatch_lane_concurrent_push,
);

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_global, lane,
   .do_type        = DISPATCH_QUEUE_GLOBAL_ROOT_TYPE,
   .do_dispose     = _dispatch_object_no_dispose,
   .do_debug       = _dispatch_queue_debug,
   .do_invoke      = _dispatch_object_no_invoke,

   .dq_activate    = _dispatch_queue_no_activate,
   .dq_wakeup      = _dispatch_root_queue_wakeup,
   .dq_push        = _dispatch_root_queue_push,
);

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_main, lane,
   .do_type        = DISPATCH_QUEUE_MAIN_TYPE,
   .do_dispose     = _dispatch_lane_dispose,
   .do_debug       = _dispatch_queue_debug,
   .do_invoke      = _dispatch_lane_invoke,

   .dq_activate    = _dispatch_queue_no_activate,
   .dq_wakeup      = _dispatch_main_queue_wakeup,
   .dq_push        = _dispatch_main_queue_push,
);

Different queue types thus use different push implementations.

Serial queues

When the queue already has a task running, the new task is only appended to the linked list
//serial queue
void
_dispatch_lane_push(dispatch_lane_t dq, dispatch_object_t dou,
      dispatch_qos_t qos)
{
   dispatch_wakeup_flags_t flags = 0;
   struct dispatch_object_s *prev;
   //(dou._dc->dc_flags & (DC_FLAG_SYNC_WAITER | DC_FLAG_ASYNC_AND_WAIT))
   //typically reached when dispatch_sync targets a serial queue that already has work in flight
   if (unlikely(_dispatch_object_is_waiter(dou))) {
      return _dispatch_lane_push_waiter(dq, dou._dsc, qos);//too long to expand here
   }

   dispatch_assert(!_dispatch_object_is_global(dq));
   qos = _dispatch_queue_push_qos(dq, qos);

   // If we are going to call dx_wakeup(), the queue must be retained before
   // the item we're pushing can be dequeued, which means:
   // - before we exchange the tail if we have to override
   // - before we set the head if we made the queue non empty.
   // Otherwise, if preempted between one of these and the call to dx_wakeup()
   // the blocks submitted to the queue may release the last reference to the
   // queue when invoked by _dispatch_lane_drain. <rdar://problem/6932776>
   //swap in the new tail; os_mpsc_push_was_empty(prev) below is true when the queue was empty
   prev = os_mpsc_push_update_tail(os_mpsc(dq, dq_items), dou._do, do_next);
   if (unlikely(os_mpsc_push_was_empty(prev))) {
      _dispatch_retain_2_unsafe(dq);
      flags = DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY;
   } else if (unlikely(_dispatch_queue_need_override(dq, qos))) {
      // There's a race here, _dispatch_queue_need_override may read a stale
      // dq_state value.
      //
      // If it's a stale load from the same drain streak, given that
      // the max qos is monotonic, too old a read can only cause an
      // unnecessary attempt at overriding which is harmless.
      //
      // We'll assume here that a stale load from an a previous drain streak
      // never happens in practice.
      _dispatch_retain_2_unsafe(dq);
      flags = DISPATCH_WAKEUP_CONSUME_2;
   }
   //link the item in
   os_mpsc_push_update_prev(os_mpsc(dq, dq_items), prev, dou._do, do_next);
   if (flags) {
      //wake the queue when it was previously empty, or when _dispatch_queue_need_override(dq, qos) asked for an override
      //(&(dq)->do_vtable->_os_obj_vtable)->dq_wakeup(dq, qos, flags)
      return dx_wakeup(dq, qos, flags);
   }
}

When the queue has nothing running, it is woken up to execute the task
//_dispatch_lane_wakeup
void
_dispatch_lane_wakeup(dispatch_lane_class_t dqu, dispatch_qos_t qos,
      dispatch_wakeup_flags_t flags)
{
   dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;
   //wakeup issued on the synchronous path
   if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) {
      //wakes the waiting thread via the semaphore or kevent mechanism
      return _dispatch_lane_barrier_complete(dqu, qos, flags);
   }
   //check whether the queue is non-empty
   if (_dispatch_queue_class_probe(dqu)) {
      target = DISPATCH_QUEUE_WAKEUP_TARGET;
   }
   return _dispatch_queue_wakeup(dqu, qos, flags, target);
}

============= _dispatch_queue_wakeup =============

void
_dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos,
      dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target)
{
   dispatch_queue_t dq = dqu._dq;
   uint64_t old_state, new_state, enqueue = DISPATCH_QUEUE_ENQUEUED;
   dispatch_assert(target != DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT);

   //{...} a large amount of queue-state-update code omitted here
   
   if (likely((old_state ^ new_state) & enqueue)) {
      dispatch_queue_t tq;
      if (target == DISPATCH_QUEUE_WAKEUP_TARGET) {
         // the rmw_loop above has no acquire barrier, as the last block
         // of a queue asyncing to that queue is not an uncommon pattern
         // and in that case the acquire would be completely useless
         //
         // so instead use depdendency ordering to read
         // the targetq pointer.
         os_atomic_thread_fence(dependency);
         //fetch the global root queue the custom serial queue targets
         tq = os_atomic_load_with_dependency_on2o(dq, do_targetq,
               (long)new_state);
      } else {
         tq = target;
      }
      dispatch_assert(_dq_state_is_enqueued(new_state));
      //push this queue onto the global root queue
      return _dispatch_queue_push_queue(tq, dq, new_state);
   }
}

============= _dispatch_queue_push_queue =============
static inline void
_dispatch_queue_push_queue(dispatch_queue_t tq, dispatch_queue_class_t dq,
      uint64_t dq_state)
{
#if DISPATCH_USE_KEVENT_WORKLOOP
   if (likely(_dq_state_is_base_wlh(dq_state))) {
      _dispatch_trace_runtime_event(worker_request, dq._dq, 1);
      return _dispatch_event_loop_poke((dispatch_wlh_t)dq._dq, dq_state,
            DISPATCH_EVENT_LOOP_CONSUME_2);
   }
#endif // DISPATCH_USE_KEVENT_WORKLOOP
   _dispatch_trace_item_push(tq, dq);
   return dx_push(tq, dq, _dq_state_max_qos(dq_state));
}

Concurrent queues

//concurrent queue
void
_dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou,
                dispatch_qos_t qos)
{
        // <rdar://problem/24738102&24743140> reserving non barrier width
        // doesn't fail if only the ENQUEUED bit is set (unlike its barrier
        // width equivalent), so we have to check that this thread hasn't
        // enqueued anything ahead of this call or we can break ordering
        //when the custom concurrent queue is empty (and the item is neither a waiter nor a barrier), _dispatch_continuation_redirect_push runs
        if (dq->dq_items_tail == NULL &&
                        !_dispatch_object_is_waiter(dou) &&
                        !_dispatch_object_is_barrier(dou) &&
                        _dispatch_queue_try_acquire_async(dq)) {
                return _dispatch_continuation_redirect_push(dq, dou, qos);
        }
        //otherwise (special synchronous cases, barriers, a backed-up queue, global queues, etc.), fall back to _dispatch_lane_push
        _dispatch_lane_push(dq, dou, qos);
}

============= _dispatch_continuation_redirect_push =============
static void
_dispatch_continuation_redirect_push(dispatch_lane_t dl,
      dispatch_object_t dou, dispatch_qos_t qos)
{
   if (likely(!_dispatch_object_is_redirection(dou))) {
      //wrap the item with redirect context information
      dou._dc = _dispatch_async_redirect_wrap(dl, dou);
   } else if (!dou._dc->dc_ctxt) {
      // find first queue in descending target queue order that has
      // an autorelease frequency set, and use that as the frequency for
      // this continuation.
      dou._dc->dc_ctxt = (void *)
      (uintptr_t)_dispatch_queue_autorelease_frequency(dl);
   }

   dispatch_queue_t dq = dl->do_targetq;
   if (!qos) qos = _dispatch_priority_qos(dq->dq_priority);
   //push the item onto do_targetq, which is normally the bound global root queue; see _dispatch_root_queue_push
   dx_push(dq, dou, qos);
}

Global queues

//global root queue
void
_dispatch_root_queue_push(dispatch_queue_global_t rq, dispatch_object_t dou,
      dispatch_qos_t qos)
{
#if DISPATCH_USE_KEVENT_WORKQUEUE //1
   dispatch_deferred_items_t ddi = _dispatch_deferred_items_get();
   if (unlikely(ddi && ddi->ddi_can_stash)) {//not fully analyzed here; being unlikely, it can be ignored
      dispatch_object_t old_dou = ddi->ddi_stashed_dou;
      dispatch_priority_t rq_overcommit;
      rq_overcommit = rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;

      if (likely(!old_dou._do || rq_overcommit)) {
         dispatch_queue_global_t old_rq = ddi->ddi_stashed_rq;
         dispatch_qos_t old_qos = ddi->ddi_stashed_qos;
         ddi->ddi_stashed_rq = rq;
         ddi->ddi_stashed_dou = dou;
         ddi->ddi_stashed_qos = qos;
         _dispatch_debug("deferring item %p, rq %p, qos %d",
               dou._do, rq, qos);
         if (rq_overcommit) {
            ddi->ddi_can_stash = false;
         }
         if (likely(!old_dou._do)) {
            return;
         }
         // push the previously stashed item
         qos = old_qos;
         rq = old_rq;
         dou = old_dou;
      }
   }
#endif
#if HAVE_PTHREAD_WORKQUEUE_QOS //1
   //if qos does not match the root queue's qos, re-fetch the matching root queue and push there instead
   if (_dispatch_root_queue_push_needs_override(rq, qos)) {
      return _dispatch_root_queue_push_override(rq, dou, qos);
   }
#else
   (void)qos;
#endif
   _dispatch_root_queue_push_inline(rq, dou, dou, 1);
}

============= _dispatch_root_queue_push_override =============
static void
_dispatch_root_queue_push_override(dispatch_queue_global_t orig_rq,
      dispatch_object_t dou, dispatch_qos_t qos)
{
   bool overcommit = orig_rq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
   dispatch_queue_global_t rq = _dispatch_get_root_queue(qos, overcommit);
   dispatch_continuation_t dc = dou._dc;
   //dc was set up earlier by _dispatch_async_redirect_wrap
   if (_dispatch_object_is_redirection(dc)) {
      // no double-wrap is needed, _dispatch_async_redirect_invoke will do
      // the right thing
      //item redirected from a custom queue
      dc->dc_func = (void *)orig_rq;
   } else {
      //item pushed directly onto a global queue
      dc = _dispatch_continuation_alloc();
      dc->do_vtable = DC_VTABLE(OVERRIDE_OWNING);
      dc->dc_ctxt = dc;
      dc->dc_other = orig_rq;
      dc->dc_data = dou._do;
      dc->dc_priority = DISPATCH_NO_PRIORITY;
      dc->dc_voucher = DISPATCH_NO_VOUCHER;
   }
   _dispatch_root_queue_push_inline(rq, dc, dc, 1);
}

============= _dispatch_root_queue_push_inline =============
static inline void
_dispatch_root_queue_push_inline(dispatch_queue_global_t dq,
      dispatch_object_t _head, dispatch_object_t _tail, int n)
{
   struct dispatch_object_s *hd = _head._do, *tl = _tail._do;
   //os_mpsc_push_list returns true when the queue was empty
   if (unlikely(os_mpsc_push_list(os_mpsc(dq, dq_items), hd, tl, do_next))) {
      return _dispatch_root_queue_poke(dq, n, 0);
   }
}

============= _dispatch_root_queue_poke =============
void
_dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor)
{
   //bail out if the queue is empty
   if (!_dispatch_queue_class_probe(dq)) {
      return;
   }
#if !DISPATCH_USE_INTERNAL_WORKQUEUE
#if DISPATCH_USE_PTHREAD_POOL //1
   if (likely(dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE))
#endif
   {
      if (unlikely(!os_atomic_cmpxchg2o(dq, dgq_pending, 0, n, relaxed))) {
         _dispatch_root_queue_debug("worker thread request still pending "
               "for global queue: %p", dq);
         return;
      }
   }
#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE
   return _dispatch_root_queue_poke_slow(dq, n, floor);
}

============= _dispatch_root_queue_poke_slow =============

static void
_dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor)
{
   int remaining = n;
#if !defined(_WIN32)
   int r = ENOSYS;
#endif
   //initializes the global root queues, which in turn initializes the pthread workqueue
   _dispatch_root_queues_init();
   _dispatch_debug_root_queue(dq, __func__);
   _dispatch_trace_runtime_event(worker_request, dq, (uint64_t)n);

#if !DISPATCH_USE_INTERNAL_WORKQUEUE //!0
#if DISPATCH_USE_PTHREAD_ROOT_QUEUES //1
   if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)
#endif
   {
      _dispatch_root_queue_debug("requesting new worker thread for global "
            "queue: %p", dq);
      //this is where threads get created; remaining is 1
      //_dispatch_priority_to_pp_prefer_fallback converts the queue's dispatch_priority_t into a pthread_priority_t
      r = _pthread_workqueue_addthreads(remaining,
            _dispatch_priority_to_pp_prefer_fallback(dq->dq_priority));
      (void)dispatch_assume_zero(r);
      return;
   }
#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE
//the remaining code here is harder to follow, but it is never reached when pushing work onto a global queue
}

When pushing work onto a global root queue, we finally reach thread-creation logic.

Making sure the root queues are initialized: _dispatch_root_queues_init
static inline void
_dispatch_root_queues_init(void)
{
   dispatch_once_f(&_dispatch_root_queues_pred, NULL,
         _dispatch_root_queues_init_once);
}

============= _dispatch_root_queues_init_once =============

static void
_dispatch_root_queues_init_once(void *context DISPATCH_UNUSED)
{
   _dispatch_fork_becomes_unsafe();
#if DISPATCH_USE_INTERNAL_WORKQUEUE
   size_t i;
   for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
      _dispatch_root_queue_init_pthread_pool(&_dispatch_root_queues[i], 0,
            _dispatch_root_queues[i].dq_priority);
   }
#else
   int wq_supported = _pthread_workqueue_supported();
   int r = ENOTSUP;

   if (!(wq_supported & WORKQ_FEATURE_MAINTENANCE)) {
      DISPATCH_INTERNAL_CRASH(wq_supported,
            "QoS Maintenance support required");
   }

#if DISPATCH_USE_KEVENT_SETUP
   struct pthread_workqueue_config cfg = {
      .version = PTHREAD_WORKQUEUE_CONFIG_VERSION,//2
      .flags = 0,
      .workq_cb = 0,
      .kevent_cb = 0,
      .workloop_cb = 0,
      .queue_serialno_offs = dispatch_queue_offsets.dqo_serialnum, //offset of the serial number within the queue struct
#if PTHREAD_WORKQUEUE_CONFIG_VERSION >= 2
      .queue_label_offs = dispatch_queue_offsets.dqo_label, //offset of the label within the queue struct
#endif
   };
#endif

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunreachable-code"
   if (unlikely(!_dispatch_kevent_workqueue_enabled)) {
#if DISPATCH_USE_KEVENT_SETUP
      cfg.workq_cb = _dispatch_worker_thread2;//register _dispatch_worker_thread2 as the entry point new worker threads will run
      //call pthread_workqueue_setup in libpthread
      r = pthread_workqueue_setup(&cfg, sizeof(cfg));
#else
       
      r = _pthread_workqueue_init(_dispatch_worker_thread2,
            offsetof(struct dispatch_queue_s, dq_serialnum), 0);
#endif // DISPATCH_USE_KEVENT_SETUP

//... ...

   if (r != 0) {
      DISPATCH_INTERNAL_CRASH((r << 16) | wq_supported,
            "Root queue initialization failed");
   }
#endif // DISPATCH_USE_INTERNAL_WORKQUEUE
}
During initialization, pthread_workqueue_setup in libpthread is called to bring up the workqueue:
int
pthread_workqueue_setup(struct pthread_workqueue_config *cfg, size_t cfg_size)
{
        int rv = EBUSY;
        struct workq_dispatch_config wdc_cfg;
        size_t min_size = 0;

        if (cfg_size < sizeof(uint32_t)) {
                return EINVAL;
        }

        switch (cfg->version) {
                case 1:
                        min_size = offsetof(struct pthread_workqueue_config, queue_label_offs);
                        break;
                case 2:
                        min_size = sizeof(struct pthread_workqueue_config);
                        break;
                default:
                        return EINVAL;
                }

        if (!cfg || cfg_size < min_size) {
                return EINVAL;
        }

        if (cfg->flags & ~PTHREAD_WORKQUEUE_CONFIG_SUPPORTED_FLAGS ||
                cfg->version < PTHREAD_WORKQUEUE_CONFIG_MIN_SUPPORTED_VERSION) {
                return ENOTSUP;
        }

        if (__libdispatch_workerfunction == NULL) {
                __workq_newapi = true;

                wdc_cfg.wdc_version = WORKQ_DISPATCH_CONFIG_VERSION;
                wdc_cfg.wdc_flags = 0;
                wdc_cfg.wdc_queue_serialno_offs = cfg->queue_serialno_offs;
#if WORKQ_DISPATCH_CONFIG_VERSION >= 2
                wdc_cfg.wdc_queue_label_offs = cfg->queue_label_offs;
#endif

                // Tell the kernel about dispatch internals
                //syscall into the kernel to hand the configuration down
                rv = (int) __workq_kernreturn(WQOPS_SETUP_DISPATCH, &wdc_cfg, sizeof(wdc_cfg), 0);
                if (rv == -1) {
                        return errno;
                } else {
                        __libdispatch_keventfunction = cfg->kevent_cb;
                        __libdispatch_workloopfunction = cfg->workloop_cb;
                        __libdispatch_workerfunction = cfg->workq_cb; //_dispatch_worker_thread2 is stored here

                        // Prepare the kernel for workq action
                        (void)__workq_open();
                        if (__is_threaded == 0) {
                                __is_threaded = 1;
                        }

                        return 0;
                }
        }

        return rv;
}
Next, the __workq_kernreturn syscall stores the workqueue configuration in the kernel:
/**
 * Multiplexed call to interact with the workqueue mechanism
 */
int
workq_kernreturn(struct proc *p, struct workq_kernreturn_args *uap, int32_t *retval)
{
    int options = uap->options;
    int arg2 = uap->affinity;
    int arg3 = uap->prio;
    struct workqueue *wq = proc_get_wqptr(p);
    int error = 0;

    if ((p->p_lflag & P_LREGISTER) == 0) {
        return EINVAL;
    }

    switch (options) {
    //{... ...}
    //this is the case that runs
    case WQOPS_SETUP_DISPATCH: {
        /*
         * item = pointer to workq_dispatch_config structure
         * arg2 = sizeof(item)
         */
        struct workq_dispatch_config cfg;
        bzero(&cfg, sizeof(cfg));
        //presumably copies the user-space configuration into kernel space
        error = copyin(uap->item, &cfg, MIN(sizeof(cfg), (unsigned long) arg2));
        if (error) {
            break;
        }

        if (cfg.wdc_flags & ~WORKQ_DISPATCH_SUPPORTED_FLAGS ||
            cfg.wdc_version < WORKQ_DISPATCH_MIN_SUPPORTED_VERSION) {
            error = ENOTSUP;
            break;
        }

        /* Load fields from version 1 */
        //store the configured offsets in the process attributes
        p->p_dispatchqueue_serialno_offset = cfg.wdc_queue_serialno_offs;

        /* Load fields from version 2 */
        if (cfg.wdc_version >= 2) {
            p->p_dispatchqueue_label_offset = cfg.wdc_queue_label_offs;
        }

        break;
    }
    default:
        error = EINVAL;
        break;
    }

    return error;
}
After initialization, _pthread_workqueue_addthreads is called to request threads from the workqueue:
int
_pthread_workqueue_addthreads(int numthreads, pthread_priority_t priority)
{
        int res = 0;

        if (__libdispatch_workerfunction == NULL) {
                return EPERM;
        }

#if TARGET_OS_OSX
        // <rdar://problem/37687655> Legacy simulators fail to boot
        //
        // Older sims set the deprecated _PTHREAD_PRIORITY_ROOTQUEUE_FLAG wrongly,
        // which is aliased to _PTHREAD_PRIORITY_SCHED_PRI_FLAG and that XNU
        // validates and rejects.
        //
        // As a workaround, forcefully unset this bit that cannot be set here
        // anyway.
        priority &= ~_PTHREAD_PRIORITY_SCHED_PRI_FLAG;
#endif
        //syscall into the kernel to request thread creation
        res = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, (int)priority);
        if (res == -1) {
                res = errno;
        }
        return res;
}
The __workq_kernreturn syscall enters the kernel again, this time to run the thread-creation path:
/**
 * Multiplexed call to interact with the workqueue mechanism
 */
int
workq_kernreturn(struct proc *p, struct workq_kernreturn_args *uap, int32_t *retval)
{
    int options = uap->options;
    int arg2 = uap->affinity;
    int arg3 = uap->prio;
    struct workqueue *wq = proc_get_wqptr(p);
    int error = 0;

    if ((p->p_lflag & P_LREGISTER) == 0) {
        return EINVAL;
    }

    switch (options) {
    
    case WQOPS_QUEUE_REQTHREADS: {
        /*
         * arg2 = number of threads to start
         * arg3 = priority
         */
        error = workq_reqthreads(p, arg2, arg3);
        break;
    }
    //{... ...}
    default:
        error = EINVAL;
        break;
    }

    return error;
}

============= workq_reqthreads =============
/**
 * Entry point for libdispatch to ask for threads
 */
//the key routine for thread creation and management
static int
workq_reqthreads(struct proc *p, uint32_t reqcount, pthread_priority_t pp)
{
    thread_qos_t qos = _pthread_priority_thread_qos(pp);
    struct workqueue *wq = proc_get_wqptr(p);
    uint32_t unpaced, upcall_flags = WQ_FLAG_THREAD_NEWSPI;

    if (wq == NULL || reqcount <= 0 || reqcount > UINT16_MAX ||
        qos == THREAD_QOS_UNSPECIFIED) {
        return EINVAL;
    }

    WQ_TRACE_WQ(TRACE_wq_wqops_reqthreads | DBG_FUNC_NONE,
        wq, reqcount, pp, 0, 0);

    workq_threadreq_t req = zalloc(workq_zone_threadreq);
    priority_queue_entry_init(&req->tr_entry);
    req->tr_state = WORKQ_TR_STATE_NEW;
    req->tr_flags = 0;
    req->tr_qos   = qos;

    if (pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) {
        req->tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
        upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
    }

    WQ_TRACE_WQ(TRACE_wq_thread_request_initiate | DBG_FUNC_NONE,
        wq, workq_trace_req_id(req), req->tr_qos, reqcount, 0);

    workq_lock_spin(wq);
    do {
        if (_wq_exiting(wq)) {
            goto exiting;
        }

        /*
         * When userspace is asking for parallelism, wakeup up to (reqcount - 1)
         * threads without pacing, to inform the scheduler of that workload.
         *
         * The last requests, or the ones that failed the admission checks are
         * enqueued and go through the regular creator codepath.
         *
         * If there aren't enough threads, add one, but re-evaluate everything
         * as conditions may now have changed.
         */
         //reqcount is 1 in the typical dispatch path
        if (reqcount > 1 && (req->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) == 0) {
            //non-overcommit (constrained) requests go through workq_constrained_allowance
            unpaced = workq_constrained_allowance(wq, qos, NULL, false);
            if (unpaced >= reqcount - 1) {
                unpaced = reqcount - 1;
            }
        } else {
            //with reqcount == 1 this gives unpaced = 0
            unpaced = reqcount - 1;
        }

        /*
         * This path does not currently handle custom workloop parameters
         * when creating threads for parallelism.
         */
        assert(!(req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS));
        //when multiple threads were requested, the loop below wakes idle threads one by one
        /*
         * This is a trimmed down version of workq_threadreq_bind_and_unlock()
         */
        while (unpaced > 0 && wq->wq_thidlecount) {
            struct uthread *uth;
            bool needs_wakeup;
            uint8_t uu_flags = UT_WORKQ_EARLY_BOUND;

            if (req->tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) {
                uu_flags |= UT_WORKQ_OVERCOMMIT;
            }

            uth = workq_pop_idle_thread(wq, uu_flags, &needs_wakeup);

            _wq_thactive_inc(wq, qos);
            wq->wq_thscheduled_count[_wq_bucket(qos)]++;
            workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
            wq->wq_fulfilled++;

            uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;
            uth->uu_save.uus_workq_park_data.thread_request = req;
            if (needs_wakeup) {
                workq_thread_wakeup(uth);
            }
            unpaced--;
            reqcount--;
        }
    //static uint32_t wq_max_threads             = WORKQUEUE_MAXTHREADS;     // 512
    //static uint32_t wq_max_constrained_threads = WORKQUEUE_MAXTHREADS / 8; // 64
    //wq_max_threads is the upper bound on the number of threads in the workqueue: wq_max_threads = 512
    } while (unpaced && wq->wq_nthreads < wq_max_threads &&
        workq_add_new_idle_thread(p, wq));

    if (_wq_exiting(wq)) {
        goto exiting;
    }

    req->tr_count = (uint16_t)reqcount;
    //enqueue req onto the matching workqueue structure:
    //wq_event_manager_threadreq (manager thread), wq_overcommit_queue (overcommit requests), wq_constrained_queue (non-overcommit requests)
    if (workq_threadreq_enqueue(wq, req)) {
        /* This can drop the workqueue lock, and take it again */
        workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
    }
    workq_unlock(wq);
    return 0;

exiting:
    workq_unlock(wq);
    zfree(workq_zone_threadreq, req);
    return ECANCELED;
}

============= workq_constrained_allowance =============
static uint32_t
workq_constrained_allowance(struct workqueue *wq, thread_qos_t at_qos,
    struct uthread *uth, bool may_start_timer)
{
    assert(at_qos != WORKQ_THREAD_QOS_MANAGER);
    uint32_t count = 0;

    uint32_t max_count = wq->wq_constrained_threads_scheduled;
    if (uth && (uth->uu_workq_flags & UT_WORKQ_OVERCOMMIT) == 0) {
        /*
         * don't count the current thread as scheduled
         */
        assert(max_count > 0);
        max_count--;
    }
    //static uint32_t wq_max_threads              = WORKQUEUE_MAXTHREADS;
    //static uint32_t wq_max_constrained_threads  = WORKQUEUE_MAXTHREADS / 8;
    //wq_max_constrained_threads = 64 is the cap on non-overcommit (constrained) threads
    if (max_count >= wq_max_constrained_threads) {
        WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 1,
            wq->wq_constrained_threads_scheduled,
            wq_max_constrained_threads, 0);
        /*
         * we need 1 or more constrained threads to return to the kernel before
         * we can dispatch additional work
         */
        return 0;
    }
    max_count -= wq_max_constrained_threads;

    /*
     * Compute a metric for many how many threads are active.  We find the
     * highest priority request outstanding and then add up the number of active
     * threads in that and all higher-priority buckets.  We'll also add any
     * "busy" threads which are not currently active but blocked recently enough
     * that we can't be sure that they won't be unblocked soon and start
     * being active again.
     *
     * We'll then compare this metric to our max concurrency to decide whether
     * to add a new thread.
     */

    uint32_t busycount, thactive_count;

    thactive_count = _wq_thactive_aggregate_downto_qos(wq, _wq_thactive(wq),
        at_qos, &busycount, NULL);

    if (uth && uth->uu_workq_pri.qos_bucket != WORKQ_THREAD_QOS_MANAGER &&
        at_qos <= uth->uu_workq_pri.qos_bucket) {
        /*
         * Don't count this thread as currently active, but only if it's not
         * a manager thread, as _wq_thactive_aggregate_downto_qos ignores active
         * managers.
         */
        assert(thactive_count > 0);
        thactive_count--;
    }
    //returns the number of CPUs (logical_cpu); see sched_qos_max_parallelism below
    count = wq_max_parallelism[_wq_bucket(at_qos)];
    if (count > thactive_count + busycount) {
        count -= thactive_count + busycount;
        WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 2,
            thactive_count, busycount, 0);
        return MIN(count, max_count);
    } else {
        WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 3,
            thactive_count, busycount, 0);
    }

    if (may_start_timer) {
        /*
         * If this is called from the add timer, we won't have another timer
         * fire when the thread exits the "busy" state, so rearm the timer.
         */
        workq_schedule_delayed_thread_creation(wq, 0);
    }

    return 0;
}
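
Putting the admission check into plainer terms: a non-overcommit request only gets a thread if both the constrained cap (wq_max_constrained_threads, 64) and the CPU-concurrency cap (wq_max_parallelism, i.e. the logical core count) leave headroom. Below is a minimal sketch of that logic with made-up names; it is a simplification for illustration, not the kernel's actual code.

// Simplified restatement of workq_constrained_allowance (illustrative only).
#include <stdint.h>

static uint32_t
constrained_allowance_sketch(uint32_t scheduled_constrained, // wq_constrained_threads_scheduled
    uint32_t active, uint32_t busy,                           // thactive_count, busycount
    uint32_t ncpu)                                            // wq_max_parallelism[bucket]
{
    const uint32_t kMaxConstrained = 64;   // WORKQUEUE_MAXTHREADS / 8
    if (scheduled_constrained >= kMaxConstrained) {
        return 0;                          // constrained pool is already full
    }
    uint32_t headroom = kMaxConstrained - scheduled_constrained;
    if (ncpu > active + busy) {
        // admit at most one thread per idle CPU, never more than the headroom
        uint32_t idle_cpus = ncpu - (active + busy);
        return idle_cpus < headroom ? idle_cpus : headroom;
    }
    return 0;                              // CPUs saturated: admit nothing
}

This is what produces the experiment's numbers at the top of the article: CPU-bound tasks plateau near the core count, while blocked tasks can grow the constrained pool up to 64.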

============= sched_qos_max_parallelism =============

uint32_t
sched_qos_max_parallelism(__unused int qos, uint64_t options)
{
    host_basic_info_data_t hinfo;
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
    /* Query the machine layer for core information */
    __assert_only kern_return_t kret = host_info(host_self(), HOST_BASIC_INFO,
        (host_info_t)&hinfo, &count);
    assert(kret == KERN_SUCCESS);

    if (options & QOS_PARALLELISM_COUNT_LOGICAL) {
        return hinfo.logical_cpu;
    } else {
        return hinfo.physical_cpu;
    }
}
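
As a side note, the same logical/physical CPU counts that hinfo exposes here can also be read from user space through sysctl; a small sketch, assuming the standard hw.logicalcpu / hw.physicalcpu keys on macOS/iOS:

#include <stdio.h>
#include <sys/sysctl.h>

int main(void)
{
    int logical = 0, physical = 0;
    size_t len = sizeof(int);
    // hw.logicalcpu corresponds to hinfo.logical_cpu, hw.physicalcpu to hinfo.physical_cpu
    sysctlbyname("hw.logicalcpu", &logical, &len, NULL, 0);
    len = sizeof(int);
    sysctlbyname("hw.physicalcpu", &physical, &len, NULL, 0);
    printf("logical=%d physical=%d\n", logical, physical);
    return 0;
}
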
workq_schedule_creator is then called to actually create/schedule the thread:
static void
workq_schedule_creator(proc_t p, struct workqueue *wq,
    workq_kern_threadreq_flags_t flags)
{
    workq_threadreq_t req;
    struct uthread *uth;
    bool needs_wakeup;

    workq_lock_held(wq);
    assert(p || (flags & WORKQ_THREADREQ_CAN_CREATE_THREADS) == 0);

again:
    uth = wq->wq_creator; //wq_creator holds the current creator thread

    if (!wq->wq_reqcount) {
        /*
         * There is no thread request left.
         *
         * If there is a creator, leave everything in place, so that it cleans
         * up itself in workq_push_idle_thread().
         *
         * Else, make sure the turnstile state is reset to no inheritor.
         */
        if (uth == NULL) {
            workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
        }
        return;
    }

    req = workq_threadreq_select_for_creator(wq);
    if (req == NULL) {
        /*
         * There isn't a thread request that passes the admission check.
         *
         * If there is a creator, do not touch anything, the creator will sort
         * it out when it runs.
         *
         * Else, set the inheritor to "WORKQ" so that the turnstile propagation
         * code calls us if anything changes.
         */
        if (uth == NULL) {
            workq_turnstile_update_inheritor(wq, wq, TURNSTILE_INHERITOR_WORKQ);
        }
        return;
    }

    if (uth) {
        /*
         * We need to maybe override the creator we already have
         */
        //re-apply the request's priority to the creator thread that already exists
        if (workq_thread_needs_priority_change(req, uth)) {
            WQ_TRACE_WQ(TRACE_wq_creator_select | DBG_FUNC_NONE,
                wq, 1, thread_tid(uth->uu_thread), req->tr_qos, 0);
            workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
        }
        assert(wq->wq_inheritor == uth->uu_thread);
    } else if (wq->wq_thidlecount) {
        /*
         * We need to unpark a creator thread
         */
        //if the workqueue has idle threads, pop one, reset its priority, and wake it up
        wq->wq_creator = uth = workq_pop_idle_thread(wq, UT_WORKQ_OVERCOMMIT,
            &needs_wakeup);
        /* Always reset the priorities on the newly chosen creator */
        workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
        workq_turnstile_update_inheritor(wq, uth->uu_thread,
            TURNSTILE_INHERITOR_THREAD);
        WQ_TRACE_WQ(TRACE_wq_creator_select | DBG_FUNC_NONE,
            wq, 2, thread_tid(uth->uu_thread), req->tr_qos, 0);
        uth->uu_save.uus_workq_park_data.fulfilled_snapshot = wq->wq_fulfilled;
        uth->uu_save.uus_workq_park_data.yields = 0;
        if (needs_wakeup) {
            workq_thread_wakeup(uth);
        }
    } else {
        /*
         * We need to allocate a thread...
         */
        if (__improbable(wq->wq_nthreads >= wq_max_threads)) {
            /* out of threads, just go away */
            //the workqueue has hit wq_max_threads, so just bail out
            flags = WORKQ_THREADREQ_NONE;
        } else if (flags & WORKQ_THREADREQ_SET_AST_ON_FAILURE) {
            act_set_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ);
        } else if (!(flags & WORKQ_THREADREQ_CAN_CREATE_THREADS)) {
            /* This can drop the workqueue lock, and take it again */
            workq_schedule_immediate_thread_creation(wq);
        } else if (workq_add_new_idle_thread(p, wq)) {//workq_add_new_idle_thread creates the thread:
            //it allocates the stack and adds the new thread to the workqueue
            goto again;
        } else {
            workq_schedule_delayed_thread_creation(wq, 0);
        }

        /*
         * If the current thread is the inheritor:
         *
         * If we set the AST, then the thread will stay the inheritor until
         * either the AST calls workq_kern_threadreq_redrive(), or it parks
         * and calls workq_push_idle_thread().
         *
         * Else, the responsibility of the thread creation is with a thread-call
         * and we need to clear the inheritor.
         */
        if ((flags & WORKQ_THREADREQ_SET_AST_ON_FAILURE) == 0 &&
            wq->wq_inheritor == current_thread()) {
            workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
        }
    }
}

Executing tasks on threads in the pool

As mentioned earlier, _dispatch_root_queues_init registers _dispatch_worker_thread2 as the function that the pool's threads will run.

The call chain is: start_wqthread -> _pthread_wqthread -> __libdispatch_workerfunction (_dispatch_worker_thread2)

static void
_dispatch_worker_thread2(pthread_priority_t pp)
{
   bool overcommit = pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
   dispatch_queue_global_t dq;

   pp &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG | ~_PTHREAD_PRIORITY_FLAGS_MASK;
   //store the priority in thread-specific data under dispatch_priority_key
   _dispatch_thread_setspecific(dispatch_priority_key, (void *)(uintptr_t)pp);
   //look up the global root queue from the QoS and the overcommit flag
   dq = _dispatch_get_root_queue(_dispatch_qos_from_pp(pp), overcommit);

   _dispatch_introspection_thread_add();//record this thread in an introspection linked list
   _dispatch_trace_runtime_event(worker_unpark, dq, 0);

   int pending = os_atomic_dec2o(dq, dgq_pending, relaxed);
   dispatch_assert(pending >= 0);
   //drain the queue: fetch work items and run them
   _dispatch_root_queue_drain(dq, dq->dq_priority,
         DISPATCH_INVOKE_WORKER_DRAIN | DISPATCH_INVOKE_REDIRECTING_DRAIN);
   _dispatch_voucher_debug("root queue clear", NULL);
   _dispatch_reset_voucher(NULL, DISPATCH_THREAD_PARK);
   _dispatch_trace_runtime_event(worker_park, NULL, 0);
}
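
For reference, the 12 global root queues are laid out as 6 QoS levels times { non-overcommit, overcommit }, and _dispatch_get_root_queue simply indexes into that array. A runnable toy version of the index computation (illustrative names, not a verbatim quote of libdispatch):

#include <stdbool.h>
#include <stdio.h>

// qos runs from maintenance (1) up to user-interactive (6); each level has a
// non-overcommit slot followed by an overcommit slot.
static int root_queue_index(int qos, bool overcommit)
{
    return 2 * (qos - 1) + (overcommit ? 1 : 0);
}

int main(void)
{
    // e.g. default QoS (4) on an overcommit queue -> index 7
    printf("%d\n", root_queue_index(4, true));
    return 0;
}
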

============= _dispatch_root_queue_drain =============

static void
_dispatch_root_queue_drain(dispatch_queue_global_t dq,
      dispatch_priority_t pri, dispatch_invoke_flags_t flags)
{
#if DISPATCH_DEBUG
   dispatch_queue_t cq;
   if (unlikely(cq = _dispatch_queue_get_current())) {
      DISPATCH_INTERNAL_CRASH(cq, "Premature thread recycling");
   }
#endif
   _dispatch_queue_set_current(dq);
   _dispatch_init_basepri(pri);
   _dispatch_adopt_wlh_anon();

   struct dispatch_object_s *item;
   bool reset = false;
   dispatch_invoke_context_s dic = { };
#if DISPATCH_COCOA_COMPAT
   _dispatch_last_resort_autorelease_pool_push(&dic);
#endif // DISPATCH_COCOA_COMPAT
   _dispatch_queue_drain_init_narrowing_check_deadline(&dic, pri);
   _dispatch_perfmon_start();
   //pop the next item from the root queue's item list
   while (likely(item = _dispatch_root_queue_drain_one(dq))) {
      if (reset) _dispatch_wqthread_override_reset();
      //execute the item
      _dispatch_continuation_pop_inline(item, &dic, flags, dq);
      reset = _dispatch_reset_basepri_override();
      if (unlikely(_dispatch_queue_drain_should_narrow(&dic))) {
         break;
      }
   }

   // overcommit or not. worker thread
   if (pri & DISPATCH_PRIORITY_FLAG_OVERCOMMIT) {
      _dispatch_perfmon_end(perfmon_thread_worker_oc);
   } else {
      _dispatch_perfmon_end(perfmon_thread_worker_non_oc);
   }

#if DISPATCH_COCOA_COMPAT
   _dispatch_last_resort_autorelease_pool_pop(&dic);
#endif // DISPATCH_COCOA_COMPAT
   _dispatch_reset_wlh();
   _dispatch_clear_basepri();
   _dispatch_queue_set_current(NULL);
}

============= _dispatch_continuation_pop_inline =============
static inline void
_dispatch_continuation_pop_inline(dispatch_object_t dou,
      dispatch_invoke_context_t dic, dispatch_invoke_flags_t flags,
      dispatch_queue_class_t dqu)
{
   dispatch_pthread_root_queue_observer_hooks_t observer_hooks =
         _dispatch_get_pthread_root_queue_observer_hooks();
   if (observer_hooks) observer_hooks->queue_will_execute(dqu._dq);
   flags &= _DISPATCH_INVOKE_PROPAGATE_MASK;
   //a plain submitted block (dou) is a dispatch_continuation_t, which has no vtable set
   if (_dispatch_object_has_vtable(dou)) {
      dx_invoke(dou._dq, dic, flags);
   } else {
      //this ends up in _dispatch_client_callout, which runs the task
      _dispatch_continuation_invoke_inline(dou, flags, dqu);
   }
   if (observer_hooks) observer_hooks->queue_did_execute(dqu._dq);
}

Summary

The later parts of this source walkthrough got fairly dense, so corrections are welcome if anything here is wrong.

Threads for GCD queues are normally created by the kernel via the __workq_kernreturn syscall. The workqueue's overall thread count is capped at 512, while non-overcommit queues are capped at 64 threads (the global queues plus custom concurrent queues share at most 64 threads in total).

//WORKQUEUE_MAXTHREADS 512
static uint32_t wq_max_threads              = WORKQUEUE_MAXTHREADS;
static uint32_t wq_max_constrained_threads  = WORKQUEUE_MAXTHREADS / 8;
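
To reproduce the numbers from the experiment section, one can flood a global (non-overcommit) queue with blocking tasks and count the process's threads. A minimal sketch, assuming sleep() is enough to keep each worker parked and using task_threads() purely for counting:

#include <dispatch/dispatch.h>
#include <mach/mach.h>
#include <stdio.h>
#include <unistd.h>

// Count the threads currently in this process via the Mach task API.
static unsigned int thread_count(void)
{
    thread_act_array_t list;
    mach_msg_type_number_t count = 0;
    if (task_threads(mach_task_self(), &list, &count) != KERN_SUCCESS) {
        return 0;
    }
    for (mach_msg_type_number_t i = 0; i < count; i++) {
        mach_port_deallocate(mach_task_self(), list[i]);
    }
    vm_deallocate(mach_task_self(), (vm_address_t)list, count * sizeof(thread_act_t));
    return count;
}

int main(void)
{
    dispatch_queue_t q = dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0);
    for (int i = 0; i < 1000; i++) {
        dispatch_async(q, ^{
            sleep(60);   // block the worker thread, keeping the CPU idle
        });
    }
    sleep(5);            // give the workqueue time to spin threads up
    // With blocking tasks on a non-overcommit (global) queue this should
    // plateau around wq_max_constrained_threads (64); CPU-bound tasks
    // plateau near the logical core count instead.
    printf("threads: %u\n", thread_count());
    return 0;
}
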