“这是我参与8月更文挑战的第4天,活动详情查看:8月更文挑战”
一.课程答疑
// Interview demo: predict the NSLog order when all blocks target a SERIAL queue.
- (void)wbinterDemo{
// A serial queue runs blocks strictly one at a time, in FIFO order.
dispatch_queue_t queue = dispatch_queue_create("com.demo.test", DISPATCH_QUEUE_SERIAL);
dispatch_async(queue, ^{
NSLog(@"1");
});
dispatch_async(queue, ^{
NSLog(@"2");
});
// dispatch_sync blocks the caller until this block has run; FIFO ordering on
// the serial queue means "1" and "2" must have printed first, so "3" prints
// before "0".
dispatch_sync(queue, ^{ NSLog(@"3"); });
NSLog(@"0");
// Enqueued only after "0" has printed; they then run in FIFO order.
dispatch_async(queue, ^{
NSLog(@"7");
});
dispatch_async(queue, ^{
NSLog(@"8");
});
dispatch_async(queue, ^{
NSLog(@"9");
});
// A: 1230789
// B: 1237890
// C: 3120798
// D: 2137890
// Answer: A — the serial queue forces 1,2,3 before 0, then 7,8,9 in order.
}
上面的代码打印顺序是选择A
因为串行队列的宽度 dq_width = 1,即使任务是异步提交的,也只能一个接一个按入队顺序执行
保证FIFO
异步函数串行队列:开启一条新线程 任务一个接着一个
// Interview demo: the same code, but now targeting a CONCURRENT queue.
- (void)wbinterDemo{
dispatch_queue_t queue = dispatch_queue_create("com.demo.test", DISPATCH_QUEUE_CONCURRENT);
// On a concurrent queue async blocks may run in any order relative to each
// other; "1" and "2" do not have to finish before the sync block below runs.
dispatch_async(queue, ^{
NSLog(@"1");
});
dispatch_async(queue, ^{
NSLog(@"2");
});
// dispatch_sync still blocks the caller, so "3" always prints before "0".
dispatch_sync(queue, ^{ NSLog(@"3"); });
NSLog(@"0");
// "7"/"8"/"9" are enqueued after "0" prints, but are unordered among
// themselves (scheduling-dependent).
dispatch_async(queue, ^{
NSLog(@"7");
});
dispatch_async(queue, ^{
NSLog(@"8");
});
dispatch_async(queue, ^{
NSLog(@"9");
});
// A: 1230789
// B: 1237890
// C: 3120798
// D: 2137890
// Answer: A or C — the only guarantees are "3 before 0" and
// "0 before 7/8/9 are enqueued"; 1/2/3 and 7/8/9 are otherwise unordered.
}
如果把串行队列改为并发队列,答案是 A 或 C。
因为 dispatch_sync 同步阻塞当前线程,3 肯定打印在 0 之前;
1、2、3 之间顺序不定,7、8、9 之间顺序也不定。
异步函数并发队列:开启线程,在当前线程执行任务 任务异步执行,没有顺序,CPU
调度有关
主队列与全局队列
死锁现象
- 主线程因为你同步函数的原因等着先执⾏任务
- 主队列等着主线程的任务执⾏完毕再执⾏⾃⼰的任务
- 主队列和主线程相互等待会造成死锁
二.同步函数死锁
同步函数和异步函数的区别
能否开辟线程
任务的回调是否具备异步性-同步性
同步串行死锁底层源码分析
查找 dispatch_sync
查找_dispatch_sync_f
查找_dispatch_sync_f_inline
先看一下_dispatch_barrier_sync_f
查找_dispatch_barrier_sync_f_inline
我们运行一个demo 发现死锁
死锁最后执行的堆栈先是_dispatch_sync_f_slow
最后执行的是 __DISPATCH_WAIT_FOR_QUEUE__
所以分析刚刚查看的_dispatch_barrier_sync_f_inline
应该走的是 _dispatch_sync_f_slow
查找一下
然后查找 __DISPATCH_WAIT_FOR_QUEUE__
看着里面的日志 和我们 崩溃的死锁最后执行__DISPATCH_WAIT_FOR_QUEUE__
的截图日志是一样的 意味着死锁就在这里 你调用的队列被当前线程持有
我们先查看一下 dsc->dsc_waiter
我们查看一下 _dispatch_tid_self()
是tid
我们查看一下 _dq_state_drain_locked_by
我们查看一下 _dispatch_lock_is_locked_by
判断条件是 ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0:
只有当 lock_value 的持有者位与 tid 相同(即 lock_value == tid)时,
异或结果的 owner 位全为 0,整个表达式才等于 0,判定为"被当前线程持有"
dq_state
和 dsc->dsc_waiter
代表这两个值相同才发生死锁 这就是死锁的探究流程
三.同步函数任务同步
同步全局队列并发底层源码分析
我们先看一个细节_dispatch_sync_invoke_and_complete
这个方法传参以func
开头的这个为什么这么写
看一下 DISPATCH_TRACE_ARG
把逗号封装到这里 可选
并发到底走的是 _dispatch_sync_f_slow
还是 _dispatch_sync_recurse
接下来我们用个demo设置这两个系统断点看怎么走的
运行看结果
然后又走到 _dispatch_sync_function_invoke
然后走_dispatch_sync_function_invoke_inline
然后_dispatch_client_callout
回调
同步并发队列是这么走的流程
四.异步函数分析上
能否开辟线程
函数的异步性
异步并发
查找dispatch_async
进入 _dispatch_continuation_async
进入 dx_push
进入 dq_push
进入 _dispatch_lane_concurrent_push
进入_dispatch_lane_push
进入 dx_wakeup
进入 dq_wakeup
进入 _dispatch_lane_wakeup
进入 _dispatch_lane_barrier_complete
进入 _dispatch_lane_class_barrier_complete
os_atomic_rmw_loop2o
递归执行
_dispatch_root_queue_push
这里是怎么进入的
进入 _dispatch_lane_concurrent_push
进入 _dispatch_continuation_redirect_push,这里的 do_targetq
变更为queue_pthread_root
进入dx_push
->dq_push
找到 _dispatch_root_queue_push
进入_dispatch_root_queue_push_inline
进入 _dispatch_root_queue_poke
进入_dispatch_root_queue_poke_slow
接下来我们看一下 _dispatch_root_queues_init
五.单例底层原理
我们平时用的gcd单例
// Typical GCD singleton pattern: the block is executed at most once for the
// lifetime of the process, keyed by the static onceToken.
static dispatch_once_t onceToken;
dispatch_once(&onceToken, ^{
});
接下来我们探索一下dispatch_once
进入dispatch_once_f
_dispatch_once_gate_tryenter
上锁
进入_dispatch_once_callout
_dispatch_client_callout
回调block
_dispatch_once_gate_broadcast
解锁
_dispatch_once_mark_done
设置DLOCK_ONCE_DONE
_dispatch_once_wait
等待检测 DLOCK_ONCE_DONE
六.异步函数分析下
单例了解完 我们继续之前的异步代码分析
进入_dispatch_root_queue_poke_slow
接下来我们看一下 _dispatch_root_queues_init
让_dispatch_root_queues_init_once
执行一次 这个任务是包装在 _dispatch_worker_thread2
这里,其实包装在pthread
中API
中,GCD
是封装了pthread
。
这里有 _pthread_workqueue_init_with_workloop
工作循环调起,并不是及时调用的,受我们当前的OS
管控。
// Quoted from libdispatch (queue.c): requests worker threads for a root queue.
// dq:    the global/root queue being poked
// n:     number of worker threads requested (typically 1)
// floor: number of pool threads that must remain in reserve
DISPATCH_NOINLINE
static void
_dispatch_root_queue_poke_slow(dispatch_queue_global_t dq, int n, int floor)
{
// n defaults to 1: a global concurrent queue requests one thread at a time.
int remaining = n;
#if !defined(_WIN32)
int r = ENOSYS;
#endif
// One-time initialization of the root queues / workqueue hooks.
_dispatch_root_queues_init();
_dispatch_debug_root_queue(dq, __func__);
_dispatch_trace_runtime_event(worker_request, dq, (uint64_t)n);
#if !DISPATCH_USE_INTERNAL_WORKQUEUE
#if DISPATCH_USE_PTHREAD_ROOT_QUEUES
if (dx_type(dq) == DISPATCH_QUEUE_GLOBAL_ROOT_TYPE)
#endif
{
// GLOBAL root queue: delegate thread creation to the kernel workqueue.
_dispatch_root_queue_debug("requesting new worker thread for global "
"queue: %p", dq);
r = _pthread_workqueue_addthreads(remaining,
_dispatch_priority_to_pp_prefer_fallback(dq->dq_priority));
(void)dispatch_assume_zero(r);
return;
}
#endif // !DISPATCH_USE_INTERNAL_WORKQUEUE
#if DISPATCH_USE_PTHREAD_POOL
dispatch_pthread_root_queue_context_t pqc = dq->do_ctxt;
// Prefer waking already-sleeping pool workers before creating new threads.
if (likely(pqc->dpq_thread_mediator.do_vtable)) {
while (dispatch_semaphore_signal(&pqc->dpq_thread_mediator)) {
_dispatch_root_queue_debug("signaled sleeping worker for "
"global queue: %p", dq);
if (!--remaining) {
return;
}
}
}
bool overcommit = dq->dq_priority & DISPATCH_PRIORITY_FLAG_OVERCOMMIT;
if (overcommit) {
os_atomic_add2o(dq, dgq_pending, remaining, relaxed);
} else {
if (!os_atomic_cmpxchg2o(dq, dgq_pending, 0, remaining, relaxed)) {
_dispatch_root_queue_debug("worker thread request still pending for "
"global queue: %p", dq);
return;
}
}
// Ordinary (pthread-pool) queue: reserve thread slots via a CAS loop.
// remaining = number of threads still to be created.
int can_request, t_count;
// seq_cst with atomic store to tail <rdar://problem/16932833>
t_count = os_atomic_load2o(dq, dgq_thread_pool_size, ordered);
do {
can_request = t_count < floor ? 0 : t_count - floor;
// Request exceeds what the pool can provide: clamp it down.
if (remaining > can_request) {
_dispatch_root_queue_debug("pthread pool reducing request from %d to %d",
remaining, can_request);
os_atomic_sub2o(dq, dgq_pending, remaining - can_request, relaxed);
remaining = can_request;
}
// Pool is exhausted: nothing left to create, bail out.
if (remaining == 0) {
_dispatch_root_queue_debug("pthread pool is full for root queue: "
"%p", dq);
return;
}
} while (!os_atomic_cmpxchgv2o(dq, dgq_thread_pool_size, t_count,
t_count - remaining, &t_count, acquire));
// dgq_thread_pool_size has now been decremented by `remaining` slots.
#if !defined(_WIN32)
pthread_attr_t *attr = &pqc->dpq_thread_attr;
pthread_t tid, *pthr = &tid;
#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES
if (unlikely(dq == &_dispatch_mgr_root_queue)) {
pthr = _dispatch_mgr_root_queue_init();
}
#endif
do {
_dispatch_retain(dq); // released in _dispatch_worker_thread
while ((r = pthread_create(pthr, attr, _dispatch_worker_thread, dq))) {
if (r != EAGAIN) {
(void)dispatch_assume_zero(r);
}
_dispatch_temporary_resource_shortage();
}
} while (--remaining);
#else // defined(_WIN32)
#if DISPATCH_USE_MGR_THREAD && DISPATCH_USE_PTHREAD_ROOT_QUEUES
if (unlikely(dq == &_dispatch_mgr_root_queue)) {
_dispatch_mgr_root_queue_init();
}
#endif
do {
_dispatch_retain(dq); // released in _dispatch_worker_thread
#if DISPATCH_DEBUG
unsigned dwStackSize = 0;
#else
// Worker thread stack size: 64 * 1024 bytes = 64KB.
unsigned dwStackSize = 64 * 1024;
#endif
uintptr_t hThread = 0;
while (!(hThread = _beginthreadex(NULL, dwStackSize, _dispatch_worker_thread_thunk, dq, STACK_SIZE_PARAM_IS_A_RESERVATION, NULL))) {
if (errno != EAGAIN) {
(void)dispatch_assume(hThread);
}
_dispatch_temporary_resource_shortage();
}
#if DISPATCH_USE_PTHREAD_ROOT_QUEUES
if (_dispatch_mgr_sched.prio > _dispatch_mgr_sched.default_prio) {
(void)dispatch_assume_zero(SetThreadPriority((HANDLE)hThread, _dispatch_mgr_sched.prio) == TRUE);
}
#endif
CloseHandle((HANDLE)hThread);
} while (--remaining);
#endif // defined(_WIN32)
#else
(void)floor;
#endif // DISPATCH_USE_PTHREAD_POOL
}
dgq_thread_pool_size
标记为1
DISPATCH_QUEUE_WIDTH_POOL
全局队列 比 并发大1
通过os_atomic_inc2o
自增加++
dgq_thread_pool_size
查看创建线程大小
DISPATCH_WORKQ_MAX_PTHREAD_COUNT
最大线程数
unsigned dwStackSize = 64 * 1024;
64 * 1024 = 65536 字节,即 64KB 的线程栈(作为对比:1GB / 16KB = 1024*1024 / 16 = 1024 * 64 = 65536)
通过代码打印bt
_dispatch_worker_thread2
->_dispatch_root_queue_drain
->_dispatch_async_redirect_invoke
->_dispatch_continuation_pop
->_dispatch_client_callout
->_dispatch_call_block_and_release