iOS Multithreading (3): GCD Source Code Analysis, Part 2


Preface

In the previous post, iOS Multithreading (2): GCD Basics & Source Code Analysis, Part 1, we made an initial pass over the GCD source. In this post we dig deeper into the source code.


1. Deadlock: Source Code Analysis

Consider the following code:

- (void)deadlock {
    // Serial queue
    dispatch_queue_t queue = dispatch_queue_create("ssl", DISPATCH_QUEUE_SERIAL);
    // Async dispatch
    dispatch_async(queue, ^{
        // Sync dispatch onto the same serial queue
        dispatch_sync(queue, ^{
            NSLog(@"???????");
        });
    });
}
  • This code deadlocks, which has everything to do with calling dispatch_sync on the same serial queue. Let's look at the source to see why.
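For contrast, here is a minimal sketch (the queue labels are made up): synchronously dispatching to a different serial queue from inside the async block does not deadlock, because the inner block is not queued behind the block that is waiting for it.

- (void)noDeadlock {
    // Sketch: two distinct serial queues (labels are arbitrary)
    dispatch_queue_t queueA = dispatch_queue_create("ssl.A", DISPATCH_QUEUE_SERIAL);
    dispatch_queue_t queueB = dispatch_queue_create("ssl.B", DISPATCH_QUEUE_SERIAL);
    dispatch_async(queueA, ^{
        // Waiting on a *different* queue: queueB is free to run this block,
        // so the dispatch_sync returns and no deadlock occurs.
        dispatch_sync(queueB, ^{
            NSLog(@"runs fine");
        });
    });
}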

Step into dispatch_sync -> _dispatch_sync_f -> _dispatch_sync_f_inline

static inline void
_dispatch_sync_f_inline(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func, uintptr_t dc_flags)
{
    if (likely(dq->dq_width == 1)) {
        return _dispatch_barrier_sync_f(dq, ctxt, func, dc_flags);
    }
    ...
}
  • Because this is a serial queue, dq->dq_width == 1 holds.

Step into _dispatch_barrier_sync_f

static void
_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt,
        dispatch_function_t func, uintptr_t dc_flags)
{
    _dispatch_barrier_sync_f_inline(dq, ctxt, func, dc_flags);
}

Step into _dispatch_barrier_sync_f_inline

static inline void
_dispatch_barrier_sync_f_inline(dispatch_queue_t dq, void *ctxt,
        dispatch_function_t func, uintptr_t dc_flags)
{
    dispatch_tid tid = _dispatch_tid_self();

    if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
        DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
    }

    dispatch_lane_t dl = upcast(dq)._dl;
    if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) {
        return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl,
                DC_FLAG_BARRIER | dc_flags);
    }

    if (unlikely(dl->do_targetq->do_targetq)) {
        return _dispatch_sync_recurse(dl, ctxt, func,
                        DC_FLAG_BARRIER | dc_flags);
    }
    _dispatch_introspection_sync_begin(dl);
    _dispatch_lane_barrier_sync_invoke_and_complete(dl, ctxt, func
                DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
                                dq, ctxt, func, dc_flags | DC_FLAG_BARRIER)));
}
  • The function has several return paths, and it is not obvious which one is taken.

Add symbolic breakpoints and run the program:

[Screenshots: the crash backtrace stops in _dispatch_sync_f_slow -> __DISPATCH_WAIT_FOR_QUEUE__]

  • You can see execution goes through the __DISPATCH_WAIT_FOR_QUEUE__ call inside _dispatch_sync_f_slow.

Look at _dispatch_sync_f_slow

static void
_dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt,
        dispatch_function_t func, uintptr_t top_dc_flags,
        dispatch_queue_class_t dqu, uintptr_t dc_flags)
{
    ...
    dispatch_queue_t dq = dqu._dq;
    
    pthread_priority_t pp = _dispatch_get_priority();
    struct dispatch_sync_context_s dsc = {
        .dc_flags    = DC_FLAG_SYNC_WAITER | dc_flags,
        .dc_func     = _dispatch_async_and_wait_invoke,
        .dc_ctxt     = &dsc,
        .dc_other    = top_dq,
        .dc_priority = pp | _PTHREAD_PRIORITY_ENFORCE_FLAG,
        .dc_voucher  = _voucher_get(),
        .dsc_func    = func,
        .dsc_ctxt    = ctxt,
        .dsc_waiter  = _dispatch_tid_self(),
    };
    // Push the waiter item onto the target queue
    _dispatch_trace_item_push(top_dq, &dsc);
    __DISPATCH_WAIT_FOR_QUEUE__(&dsc, dq);
    ...
}

Step into __DISPATCH_WAIT_FOR_QUEUE__

static void
__DISPATCH_WAIT_FOR_QUEUE__(dispatch_sync_context_t dsc, dispatch_queue_t dq)
{
    uint64_t dq_state = _dispatch_wait_prepare(dq);
    // Check whether dq is the queue currently being drained: take its state and
    // match it against the queue the current task is waiting on
    if (unlikely(_dq_state_drain_locked_by(dq_state, dsc->dsc_waiter))) {
        // This is the error message mentioned above
        DISPATCH_CLIENT_CRASH((uintptr_t)dq_state,
                        "dispatch_sync called on queue "
                        "already owned by current thread");
    }
    ...
}
  • dsc->dsc_waiter comes from _dispatch_sync_f_slow -> .dsc_waiter = _dispatch_tid_self() -> #define _dispatch_tid_self() ((dispatch_tid)_dispatch_thread_port()) -> the current thread's id.
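As a side note, that waiter id is essentially the current thread's Mach thread port. A quick sketch using public APIs (not the internal _dispatch_tid_self macro) shows the same kind of value:

#import <Foundation/Foundation.h>
#include <pthread.h>
#include <mach/mach.h>

static void LogCurrentTid(void) {
    // Essentially the same value _dispatch_thread_port() returns: this thread's Mach port
    mach_port_t tid = pthread_mach_thread_np(pthread_self());
    NSLog(@"current thread id (mach port): %u", tid);
}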

Step into _dq_state_drain_locked_by -> _dispatch_lock_is_locked_by

static inline bool
_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_tid tid)
{
    // equivalent to _dispatch_lock_owner(lock_value) == tid
    return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
}
  • DLOCK_OWNER_MASK is a very large value: #define DLOCK_OWNER_MASK ((dispatch_lock)0xfffffffc)
  • Because DLOCK_OWNER_MASK covers everything except the two low flag bits, whenever (lock_value ^ tid) differs in the owner bits the whole expression is non-zero.
  • When lock_value and tid are equal, (lock_value ^ tid) is 0, which means the thread draining the queue is the same one waiting on it: exactly the deadlock case, and the crash fires.
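To make the check concrete, here is a small self-contained sketch of the same logic (the tid values are made up; DLOCK_OWNER_MASK is copied from the source above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t dispatch_lock;
typedef uint32_t dispatch_tid;
#define DLOCK_OWNER_MASK ((dispatch_lock)0xfffffffc)

// Same shape as _dispatch_lock_is_locked_by: XOR cancels identical bits, and the
// mask ignores the two low flag bits, so the result is 0 exactly when the lock's
// owner equals the waiter's tid.
static bool is_locked_by(dispatch_lock lock_value, dispatch_tid tid) {
    return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
}

int main(void) {
    dispatch_tid tid = 0x2403;                   // hypothetical current thread id
    printf("%d\n", is_locked_by(0x2403, tid));   // 1: same owner -> the deadlock crash path
    printf("%d\n", is_locked_by(0x5604, tid));   // 0: different owner -> the waiter just waits
    return 0;
}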

2. Async Function Analysis

Async function + concurrent queue:

dispatch_queue_t queue = dispatch_queue_create("ssl", DISPATCH_QUEUE_CONCURRENT);
dispatch_async(queue, ^{
    NSLog(@"SSL 函数分析");
});

Step into dispatch_async -> _dispatch_continuation_async -> dx_push -> dq_push

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_serial, lane,
    .do_type        = DISPATCH_QUEUE_SERIAL_TYPE,
    .do_dispose     = _dispatch_lane_dispose,
    .do_debug       = _dispatch_queue_debug,
    .do_invoke      = _dispatch_lane_invoke,

    .dq_activate    = _dispatch_lane_activate,
    .dq_wakeup      = _dispatch_lane_wakeup,
    .dq_push        = _dispatch_lane_push,
);

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, lane,
    .do_type        = DISPATCH_QUEUE_CONCURRENT_TYPE,
    .do_dispose     = _dispatch_lane_dispose,
    .do_debug       = _dispatch_queue_debug,
    .do_invoke      = _dispatch_lane_invoke,

    .dq_activate    = _dispatch_lane_activate,
    .dq_wakeup      = _dispatch_lane_wakeup,
    .dq_push        = _dispatch_lane_concurrent_push,
);

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_global, lane,
    .do_type        = DISPATCH_QUEUE_GLOBAL_ROOT_TYPE,
    .do_dispose     = _dispatch_object_no_dispose,
    .do_debug       = _dispatch_queue_debug,
    .do_invoke      = _dispatch_object_no_invoke,

    .dq_activate    = _dispatch_queue_no_activate,
    .dq_wakeup      = _dispatch_root_queue_wakeup,
    .dq_push        = _dispatch_root_queue_push,
);
  • _dispatch_root_queue_push is the dq_push assigned to the global concurrent queue, which we analyzed in the previous article; _dispatch_lane_concurrent_push is the dq_push assigned to a custom concurrent queue, and that is what we analyze next.
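The dq_push/dq_wakeup indirection above is nothing more than a table of function pointers chosen by queue type. A stripped-down sketch of the idea (the types and functions here are illustrative, not the real libdispatch ones):

#include <stdio.h>

// Illustrative types, not the real libdispatch vtables
typedef struct queue_s queue_t;
typedef struct {
    const char *name;
    void (*dq_push)(queue_t *q);    // selected per queue type, like dx_push
    void (*dq_wakeup)(queue_t *q);  // selected per queue type, like dx_wakeup
} queue_vtable_t;

struct queue_s { const queue_vtable_t *vtable; };

static void lane_push(queue_t *q)   { printf("lane push\n"); }
static void root_push(queue_t *q)   { printf("root queue push\n"); }
static void lane_wakeup(queue_t *q) { printf("lane wakeup\n"); }
static void root_wakeup(queue_t *q) { printf("root wakeup\n"); }

static const queue_vtable_t concurrent_vtable = { "concurrent", lane_push, lane_wakeup };
static const queue_vtable_t global_vtable     = { "global", root_push, root_wakeup };

// dx_push(x) in libdispatch expands to dx_vtable(x)->dq_push(x): same shape as this.
#define my_dx_push(q) ((q)->vtable->dq_push(q))

int main(void) {
    queue_t custom = { &concurrent_vtable };
    queue_t global = { &global_vtable };
    my_dx_push(&custom); // -> lane push (like _dispatch_lane_concurrent_push)
    my_dx_push(&global); // -> root queue push (like _dispatch_root_queue_push)
    return 0;
}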

Step into _dispatch_lane_concurrent_push

void
_dispatch_lane_concurrent_push(dispatch_lane_t dq, dispatch_object_t dou,
		dispatch_qos_t qos)
{
    // <rdar://problem/24738102&24743140> reserving non barrier width
    // doesn't fail if only the ENQUEUED bit is set (unlike its barrier
    // width equivalent), so we have to check that this thread hasn't
    // enqueued anything ahead of this call or we can break ordering
    if (dq->dq_items_tail == NULL &&
            !_dispatch_object_is_waiter(dou) &&
            !_dispatch_object_is_barrier(dou) &&
            _dispatch_queue_try_acquire_async(dq)) {
        return _dispatch_continuation_redirect_push(dq, dou, qos);
    }

    _dispatch_lane_push(dq, dou, qos);
}

Step into _dispatch_lane_push

DISPATCH_NOINLINE
void
_dispatch_lane_push(dispatch_lane_t dq, dispatch_object_t dou,
		dispatch_qos_t qos)
{
    dispatch_wakeup_flags_t flags = 0;
    struct dispatch_object_s *prev;

    if (unlikely(_dispatch_object_is_waiter(dou))) {
        return _dispatch_lane_push_waiter(dq, dou._dsc, qos);
    }

    dispatch_assert(!_dispatch_object_is_global(dq));
    qos = _dispatch_queue_push_qos(dq, qos);

    // If we are going to call dx_wakeup(), the queue must be retained before
    // the item we're pushing can be dequeued, which means:
    // - before we exchange the tail if we have to override
    // - before we set the head if we made the queue non empty.
    // Otherwise, if preempted between one of these and the call to dx_wakeup()
    // the blocks submitted to the queue may release the last reference to the
    // queue when invoked by _dispatch_lane_drain. <rdar://problem/6932776>

    prev = os_mpsc_push_update_tail(os_mpsc(dq, dq_items), dou._do, do_next);
    if (unlikely(os_mpsc_push_was_empty(prev))) {
        _dispatch_retain_2_unsafe(dq);
        flags = DISPATCH_WAKEUP_CONSUME_2 | DISPATCH_WAKEUP_MAKE_DIRTY;
    } else if (unlikely(_dispatch_queue_need_override(dq, qos))) {
        // There's a race here, _dispatch_queue_need_override may read a stale
        // dq_state value.
        //
        // If it's a stale load from the same drain streak, given that
        // the max qos is monotonic, too old a read can only cause an
        // unnecessary attempt at overriding which is harmless.
        //
        // We'll assume here that a stale load from an a previous drain streak
        // never happens in practice.
        _dispatch_retain_2_unsafe(dq);
        flags = DISPATCH_WAKEUP_CONSUME_2;
    }
    os_mpsc_push_update_prev(os_mpsc(dq, dq_items), prev, dou._do, do_next);
    if (flags) {
        return dx_wakeup(dq, qos, flags);
    }
}
  • You can see there are two return paths: _dispatch_lane_push_waiter and dx_wakeup.
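The enqueue itself (os_mpsc_push_update_tail / os_mpsc_push_update_prev) is a lock-free MPSC (multi-producer, single-consumer) linked-list push: atomically swap the tail, then link the old tail to the new node. A minimal sketch of that pattern with C11 atomics (this is not the real os_mpsc macros):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

// Sketch of the os_mpsc push pattern, not the real libdispatch types
typedef struct node_s {
    struct node_s *_Atomic do_next;
    void *payload;
} node_t;

typedef struct {
    _Atomic(node_t *) tail;   // producers swap this
    _Atomic(node_t *) head;   // the single consumer reads from here
} mpsc_queue_t;

// Returns true if the queue was empty before the push, which is when
// _dispatch_lane_push decides it needs to call dx_wakeup().
static bool mpsc_push(mpsc_queue_t *q, node_t *n) {
    atomic_store_explicit(&n->do_next, NULL, memory_order_relaxed);
    // 1. publish the new tail (os_mpsc_push_update_tail)
    node_t *prev = atomic_exchange_explicit(&q->tail, n, memory_order_release);
    if (prev == NULL) {
        // queue was empty: set head (the os_mpsc_push_was_empty path)
        atomic_store_explicit(&q->head, n, memory_order_relaxed);
        return true;
    }
    // 2. link the previous tail to the new node (os_mpsc_push_update_prev)
    atomic_store_explicit(&prev->do_next, n, memory_order_relaxed);
    return false;
}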

Look at dx_wakeup

#define dx_wakeup(x, y, z) dx_vtable(x)->dq_wakeup(x, y, z)

Look at dq_wakeup

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, lane,
    .do_type        = DISPATCH_QUEUE_CONCURRENT_TYPE,
    .do_dispose     = _dispatch_lane_dispose,
    .do_debug       = _dispatch_queue_debug,
    .do_invoke      = _dispatch_lane_invoke,

    .dq_activate    = _dispatch_lane_activate,
    .dq_wakeup      = _dispatch_lane_wakeup,
    .dq_push        = _dispatch_lane_concurrent_push,
);

Add symbolic breakpoints on _dispatch_lane_push_waiter and _dispatch_lane_wakeup, then run the program:

[Screenshot: the breakpoint on _dispatch_lane_wakeup is hit]

  • You can see that the program ultimately calls _dispatch_lane_wakeup.

Step into _dispatch_lane_wakeup

void
_dispatch_lane_wakeup(dispatch_lane_class_t dqu, dispatch_qos_t qos,
		dispatch_wakeup_flags_t flags)
{
    dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;

    if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) {
        return _dispatch_lane_barrier_complete(dqu, qos, flags);
    }
    if (_dispatch_queue_class_probe(dqu)) {
        target = DISPATCH_QUEUE_WAKEUP_TARGET;
    }
    return _dispatch_queue_wakeup(dqu, qos, flags, target);
}

Step into _dispatch_queue_wakeup

void
_dispatch_queue_wakeup(dispatch_queue_class_t dqu, dispatch_qos_t qos,
		dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target)
{
    dispatch_queue_t dq = dqu._dq;
    uint64_t old_state, new_state, enqueue = DISPATCH_QUEUE_ENQUEUED;
    dispatch_assert(target != DISPATCH_QUEUE_WAKEUP_WAIT_FOR_EVENT);

    if (target && !(flags & DISPATCH_WAKEUP_CONSUME_2)) {
        _dispatch_retain_2(dq);
        flags |= DISPATCH_WAKEUP_CONSUME_2;
    }

    if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) {
        dispatch_assert(dx_metatype(dq) == _DISPATCH_SOURCE_TYPE);
        qos = _dispatch_queue_wakeup_qos(dq, qos);
        return _dispatch_lane_class_barrier_complete(upcast(dq)._dl, qos,
                        flags, target, DISPATCH_QUEUE_SERIAL_DRAIN_OWNED);
    }
    ...
}

Step into _dispatch_lane_class_barrier_complete

static void
_dispatch_lane_class_barrier_complete(dispatch_lane_t dq, dispatch_qos_t qos,
		dispatch_wakeup_flags_t flags, dispatch_queue_wakeup_target_t target,
		uint64_t owned)
{

    ...
again:
    os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
        if (unlikely(_dq_state_needs_ensure_ownership(old_state))) {
            _dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dq);
            _dispatch_queue_move_to_contended_sync(dq->_as_dq);
            os_atomic_rmw_loop_give_up(goto again);
        }
        new_state  = _dq_state_merge_qos(old_state - owned, qos);
        new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
        if (unlikely(_dq_state_is_suspended(old_state))) {
            if (likely(_dq_state_is_base_wlh(old_state))) {
                new_state &= ~DISPATCH_QUEUE_ENQUEUED;
            }
        } else if (enqueue) {
            if (!_dq_state_is_enqueued(old_state)) {
                new_state |= enqueue;
            }
        } else if (unlikely(_dq_state_is_dirty(old_state))) {
            os_atomic_rmw_loop_give_up({
                os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire);
                flags |= DISPATCH_WAKEUP_BARRIER_COMPLETE;
                return dx_wakeup(dq, qos, flags);
            });
        } else {
            new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
        }
    });
    
    if (tq) {
        if (likely((old_state ^ new_state) & enqueue)) {
            dispatch_assert(_dq_state_is_enqueued(new_state));
            dispatch_assert(flags & DISPATCH_WAKEUP_CONSUME_2);
            return _dispatch_queue_push_queue(tq, dq, new_state);
        }
    }
    ...
}

_dispatch_queue_push_queue pushes dq onto its target root queue, which lands in the root queue's dq_push. Step into _dispatch_root_queue_push

void
_dispatch_root_queue_push(dispatch_queue_global_t rq, dispatch_object_t dou,
		dispatch_qos_t qos)
{
    ...
    _dispatch_root_queue_push_inline(rq, dou, dou, 1);
}

Step into _dispatch_root_queue_push_inline

static inline void
_dispatch_root_queue_push_inline(dispatch_queue_global_t dq,
		dispatch_object_t _head, dispatch_object_t _tail, int n)
{
    struct dispatch_object_s *hd = _head._do, *tl = _tail._do;
    if (unlikely(os_mpsc_push_list(os_mpsc(dq, dq_items), hd, tl, do_next))) {
            return _dispatch_root_queue_poke(dq, n, 0);
    }
}

Step into _dispatch_root_queue_poke

void
_dispatch_root_queue_poke(dispatch_queue_global_t dq, int n, int floor)
{
    return _dispatch_root_queue_poke_slow(dq, n, floor);
}

Step into _dispatch_root_queue_poke_slow

static void
_dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor)
{
    _dispatch_root_queues_init();
    ...
}

Step into _dispatch_root_queues_init

static inline void
_dispatch_root_queues_init(void)
{
    dispatch_once_f(&_dispatch_root_queues_pred, NULL,
                    _dispatch_root_queues_init_once);
}

Step into _dispatch_root_queues_init_once

static void
_dispatch_root_queues_init_once(void *context DISPATCH_UNUSED)
{
    _dispatch_fork_becomes_unsafe();
#if DISPATCH_USE_INTERNAL_WORKQUEUE
    size_t i;
    // Iterate over the root queues and initialize their pthread thread pools
    for (i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
            _dispatch_root_queue_init_pthread_pool(&_dispatch_root_queues[i], 0,
                            _dispatch_root_queues[i].dq_priority);
    }
#else
    int wq_supported = _pthread_workqueue_supported();
    int r = ENOTSUP;

#if DISPATCH_USE_KEVENT_SETUP
    // Workqueue configuration
    struct pthread_workqueue_config cfg = {
        .version = PTHREAD_WORKQUEUE_CONFIG_VERSION,
        .flags = 0,
        .workq_cb = 0,
        .kevent_cb = 0,
        .workloop_cb = 0,
        .queue_serialno_offs = dispatch_queue_offsets.dqo_serialnum,
#if PTHREAD_WORKQUEUE_CONFIG_VERSION >= 2
            .queue_label_offs = dispatch_queue_offsets.dqo_label,
#endif
    };
#endif

    if (unlikely(!_dispatch_kevent_workqueue_enabled)) {
#if DISPATCH_USE_KEVENT_SETUP
        cfg.workq_cb = _dispatch_worker_thread2;
        r = pthread_workqueue_setup(&cfg, sizeof(cfg));
#else
        r = _pthread_workqueue_init(_dispatch_worker_thread2,
                offsetof(struct dispatch_queue_s, dq_serialnum), 0);
#endif // DISPATCH_USE_KEVENT_SETUP
#if DISPATCH_USE_KEVENT_WORKLOOP
    } else if (wq_supported & WORKQ_FEATURE_WORKLOOP) {
#if DISPATCH_USE_KEVENT_SETUP
        cfg.workq_cb = _dispatch_worker_thread2;
        cfg.kevent_cb = (pthread_workqueue_function_kevent_t) _dispatch_kevent_worker_thread;
        cfg.workloop_cb = (pthread_workqueue_function_workloop_t) _dispatch_workloop_worker_thread;
        r = pthread_workqueue_setup(&cfg, sizeof(cfg));
#else
        // Wrapped over pthread workqueues: the kernel decides when worker threads
        // actually run (CPU-scheduled), so execution is not immediate
        r = _pthread_workqueue_init_with_workloop(_dispatch_worker_thread2,
                (pthread_workqueue_function_kevent_t)
                _dispatch_kevent_worker_thread,
                (pthread_workqueue_function_workloop_t)
                _dispatch_workloop_worker_thread,
                offsetof(struct dispatch_queue_s, dq_serialnum), 0);
#endif // DISPATCH_USE_KEVENT_SETUP
#endif // DISPATCH_USE_KEVENT_WORKLOOP
    }
    ...
#endif // DISPATCH_USE_INTERNAL_WORKQUEUE
}

With _dispatch_root_queues_init covered, let's return to _dispatch_root_queue_poke_slow:

static void
_dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor)
{
    int remaining = n;
    int r = ENOSYS;
    
    _dispatch_root_queues_init();
    _dispatch_debug_root_queue(dq, __func__);
    _dispatch_trace_runtime_event(worker_request, dq, (uint64_t)n);
    
#if !DISPATCH_USE_INTERNAL_WORKQUEUE
#if DISPATCH_USE_PTHREAD_ROOT_QUEUES
    // Handling for the global (root) queue type
    if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)
#endif
    {
        // Request new worker threads to execute the work
        _dispatch_root_queue_debug("requesting new worker thread for global "
                        "queue: %p", dq);
        r = _pthread_workqueue_addthreads(remaining,
                        _dispatch_priority_to_pp_prefer_fallback(dq->dq_priority));
        (void)dispatch_assume_zero(r);
        return;
    }
#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE
#if DISPATCH_USE_PTHREAD_POOL

    dispatch_pthread_root_queue_context_t pqc = dq->do_ctxt;
    if (likely(pqc->dpq_thread_mediator.do_vtable)) {
        while (dispatch_semaphore_signal(&pqc->dpq_thread_mediator)) {
            _dispatch_root_queue_debug("signaled sleeping worker for "
                        "global queue: %p", dq);
            if (!--remaining) {
                return;
            }
        }
    }

    int can_request, t_count;
    // seq_cst with atomic store to tail <rdar://problem/16932833>
    // Compute how many threads the pool can still offer; dgq_thread_pool_size keeps changing
    t_count = os_atomic_load2o(dq, dgq_thread_pool_size, ordered);
    do {
        // floor is 0 or 1
        can_request = t_count < floor ? 0 : t_count - floor;
        // remaining was passed in as 1
        if (remaining > can_request) {
            // Not enough capacity: shrink the request and log it
            _dispatch_root_queue_debug("pthread pool reducing request from %d to %d",
                            remaining, can_request);
            os_atomic_sub2o(dq, dgq_pending, remaining - can_request, relaxed);
            remaining = can_request;
        }
        if (remaining == 0) {
            _dispatch_root_queue_debug("pthread pool is full for root queue: "
                    "%p", dq);
            return;
        }
        // Thread-pool bookkeeping; dgq_thread_pool_size starts out at 1
    } while (!os_atomic_cmpxchgv2o(dq, dgq_thread_pool_size, t_count,
            t_count - remaining, &t_count, acquire));
    
#if !defined(_WIN32)
    pthread_attr_t *attr = &pqc->dpq_thread_attr;
    pthread_t tid, *pthr = &tid;
#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES
    if (unlikely(dq == &_dispatch_mgr_root_queue)) {
            pthr = _dispatch_mgr_root_queue_init();
    }
#endif
    do {
        _dispatch_retain(dq); // released in _dispatch_worker_thread
        while ((r = pthread_create(pthr, attr, _dispatch_worker_thread, dq))) {
            if (r != EAGAIN) {
                (void)dispatch_assume_zero(r);
            }
            _dispatch_temporary_resource_shortage();
        }
    } while (--remaining);
#else // defined(_WIN32)
#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES
	if (unlikely(dq == &_dispatch_mgr_root_queue)) {
            _dispatch_mgr_root_queue_init();
	}
#endif
    do {
        _dispatch_retain(dq); // released in _dispatch_worker_thread
    #if DISPATCH_DEBUG
        unsigned dwStackSize = 0;
    #else
        unsigned dwStackSize = 64 * 1024;
    #endif
        uintptr_t hThread = 0;
        while (!(hThread = _beginthreadex(NULL, dwStackSize, _dispatch_worker_thread_thunk, dq, STACK_SIZE_PARAM_IS_A_RESERVATION, NULL))) {
            if (errno != EAGAIN) {
                (void)dispatch_assume(hThread);
            }
            _dispatch_temporary_resource_shortage();
        }
    #if DISPATCH_USE_PTHREAD_ROOT_QUEUES
        if (_dispatch_mgr_sched.prio > _dispatch_mgr_sched.default_prio) {
            (void)dispatch_assume_zero(SetThreadPriority((HANDLE)hThread, _dispatch_mgr_sched.prio) == TRUE);
        }
    #endif
        CloseHandle((HANDLE)hThread);
    } while (--remaining);
#endif // defined(_WIN32)
#else
    (void)floor;
#endif // DISPATCH_USE_PTHREAD_POOL
    ...
}
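Stripped of libdispatch's bookkeeping, the pthread-pool branch boils down to: atomically reserve capacity from a counter, then pthread_create that many workers. A compact sketch of that pattern (pool_t and worker_main are made up for illustration):

#include <pthread.h>
#include <stdatomic.h>

// Illustrative pool: `available` plays the role of dgq_thread_pool_size
typedef struct {
    atomic_int available;
} pool_t;

static void *worker_main(void *arg) {
    (void)arg;
    // ... drain work items, then exit or park ...
    return NULL;
}

// Try to spin up `n` workers, but never more than the pool still allows.
static void pool_poke(pool_t *pool, int n) {
    int avail = atomic_load(&pool->available);
    do {
        if (n > avail) n = avail;   // shrink the request, like poke_slow does
        if (n == 0) return;         // pool is full
    } while (!atomic_compare_exchange_weak(&pool->available, &avail, avail - n));

    for (int i = 0; i < n; i++) {
        pthread_t tid;
        if (pthread_create(&tid, NULL, worker_main, pool) == 0) {
            pthread_detach(tid);
        }
    }
}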

Check where dgq_thread_pool_size gets its value:

[Screenshot: dgq_thread_pool_size is assigned from DISPATCH_WORKQ_MAX_PTHREAD_COUNT]

Find the definition of DISPATCH_WORKQ_MAX_PTHREAD_COUNT:

#ifndef DISPATCH_WORKQ_MAX_PTHREAD_COUNT
#define DISPATCH_WORKQ_MAX_PTHREAD_COUNT 255
#endif
  • 255 is the theoretical maximum size of the thread pool.

Open the official documentation and look at the table below:

[Table from Apple's documentation: thread creation costs and stack sizes]

  • A secondary thread gets 512 KB of stack space, while the minimum space a thread occupies is 16 KB. In other words, with a fixed amount of stack memory, the more memory each thread needs, the fewer threads can be created.
  • Suppose an iOS device has 4 GB of memory, split between kernel space and user space. If kernel space, say 1 GB, were used entirely for creating threads, then in theory at most 1 GB / 16 KB = 65,536 threads could be spawned (see the quick check below).
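A quick back-of-the-envelope check of those numbers (the 1 GB figure is the same assumption made above):

#include <stdio.h>

int main(void) {
    const long kernel_space_kb  = 1L * 1024 * 1024; // assume 1 GB usable, as above
    const long min_stack_kb     = 16;               // minimum thread stack
    const long default_stack_kb = 512;              // default secondary-thread stack

    printf("max threads at 16 KB each:  %ld\n", kernel_space_kb / min_stack_kb);     // 65536
    printf("max threads at 512 KB each: %ld\n", kernel_space_kb / default_stack_kb); // 2048
    return 0;
}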

3. How Singletons Work Under the Hood

Using a singleton:

static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^{
    
});
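In practice the once token usually guards a shared instance. A typical sketch (the class name MyManager is hypothetical):

#import <Foundation/Foundation.h>

// MyManager is a made-up class used only for illustration
@interface MyManager : NSObject
+ (instancetype)sharedManager;
@end

@implementation MyManager
+ (instancetype)sharedManager {
    static MyManager *shared = nil;
    static dispatch_once_t onceToken;
    // The block runs exactly once, even if several threads race here;
    // later callers return immediately because onceToken is already DLOCK_ONCE_DONE.
    dispatch_once(&onceToken, ^{
        shared = [[MyManager alloc] init];
    });
    return shared;
}
@end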

Step into dispatch_once

void
dispatch_once(dispatch_once_t *val, dispatch_block_t block)
{
    dispatch_once_f(val, block, _dispatch_Block_invoke(block));
}

Step into dispatch_once_f

void
dispatch_once_f(dispatch_once_t *val, void *ctxt, dispatch_function_t func)
{
    dispatch_once_gate_t l = (dispatch_once_gate_t)val;

#if !DISPATCH_ONCE_INLINE_FASTPATH || DISPATCH_ONCE_USE_QUIESCENT_COUNTER
    uintptr_t v = os_atomic_load(&l->dgo_once, acquire);
    // If it is already DLOCK_ONCE_DONE, the block has run before: return
    if (likely(v == DLOCK_ONCE_DONE)) {
        return;
    }
#if DISPATCH_ONCE_USE_QUIESCENT_COUNTER
    if (likely(DISPATCH_ONCE_IS_GEN(v))) {
        return _dispatch_once_mark_done_if_quiesced(l, v);
    }
#endif
#endif
    // Nothing is executing yet: take the gate and run the block
    if (_dispatch_once_gate_tryenter(l)) {
        return _dispatch_once_callout(l, ctxt, func);
    }
    // Someone else is already executing: wait
    return _dispatch_once_wait(l);
}

Step into _dispatch_once_gate_tryenter

static inline bool
_dispatch_once_gate_tryenter(dispatch_once_gate_t l)
{
    // Atomic compare-and-swap, so only one of several racing threads wins
    return os_atomic_cmpxchg(&l->dgo_once, DLOCK_ONCE_UNLOCKED,
                (uintptr_t)_dispatch_lock_value_for_self(), relaxed);
}
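The gate is essentially a compare-and-swap from "unlocked" to "owned by the current thread". A minimal sketch of the same idea with C11 atomics (the ONCE_* values are stand-ins, not the real DLOCK_* constants):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define ONCE_UNLOCKED 0u
#define ONCE_DONE     ~0u   // stand-in for DLOCK_ONCE_DONE

typedef _Atomic uintptr_t once_gate_t;

// Returns true if we won the race and should run the block;
// mirrors _dispatch_once_gate_tryenter's cmpxchg(UNLOCKED -> self).
static bool once_tryenter(once_gate_t *gate, uintptr_t self_id) {
    uintptr_t expected = ONCE_UNLOCKED;
    return atomic_compare_exchange_strong(gate, &expected, self_id);
}

// Mirrors _dispatch_once_mark_done: publish DONE and return the previous value
// so the winner can tell whether anyone queued up behind it.
static uintptr_t once_mark_done(once_gate_t *gate) {
    return atomic_exchange(gate, (uintptr_t)ONCE_DONE);
}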

Step into _dispatch_once_callout

static void
_dispatch_once_callout(dispatch_once_gate_t l, void *ctxt,
            dispatch_function_t func)
{
    // Invoke the block
    _dispatch_client_callout(ctxt, func);
    // Done: close the gate and wake up any waiters
    _dispatch_once_gate_broadcast(l);
}

Step into _dispatch_once_gate_broadcast

static inline void
_dispatch_once_gate_broadcast(dispatch_once_gate_t l)
{
    dispatch_lock value_self = _dispatch_lock_value_for_self();
    uintptr_t v;
#if DISPATCH_ONCE_USE_QUIESCENT_COUNTER
    v = _dispatch_once_mark_quiescing(l);
#else
    v = _dispatch_once_mark_done(l);
#endif
    if (likely((dispatch_lock)v == value_self)) return;
    _dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)v);
}

Step into _dispatch_once_mark_done

static inline uintptr_t
_dispatch_once_mark_done(dispatch_once_gate_t dgo)
{
    // Mark the token as DLOCK_ONCE_DONE
    return os_atomic_xchg(&dgo->dgo_once, DLOCK_ONCE_DONE, release);
}

This corresponds to the check back in dispatch_once_f:

[Screenshot: the v == DLOCK_ONCE_DONE check in dispatch_once_f]