iOS Under the Hood (25) - GCD Analysis, Part 2


Barrier functions

dispatch_barrier_async

- (void)demo1{
    
    dispatch_queue_t concurrentQueue = dispatch_queue_create("cooci", DISPATCH_QUEUE_CONCURRENT);

//    dispatch_queue_t concurrentQueue = dispatch_get_global_queue(0, 0);

    /* 1. Async tasks */
    dispatch_async(concurrentQueue, ^{
        NSLog(@"1");
    });
    
    dispatch_async(concurrentQueue, ^{
        NSLog(@"2");
    });
    /* 2. Barrier function */ // - dispatch_barrier_sync
    dispatch_barrier_async(concurrentQueue, ^{
        NSLog(@"----3%@-----",[NSThread currentThread]);
    });
    /* 3. Async task */
    dispatch_async(concurrentQueue, ^{
        NSLog(@"4");
    });
    /* 4. Runs directly on the current thread */
    NSLog(@"5");
 
}

Output:

5
1
2
4
----3<NSThread: 0x600001608e80>{number = 7, name = (null)}-----

dispatch_barrier_sync

Replace the dispatch_barrier_async above with dispatch_barrier_sync and check the output:

2
1
----3<_NSMainThread: 0x6000009ac000>{number = 1, name = main}-----
5
4

Differences between dispatch_barrier_sync and dispatch_barrier_async

dispatch_barrier_async: runs only after all tasks submitted before it have finished, without blocking the current thread. dispatch_barrier_sync: same ordering behavior, but it also blocks the calling thread, so the code and tasks after it have to wait.

Note: a barrier function only works on a concurrent queue you created yourself; it has no effect on the global concurrent queue.
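
To see this, point the same demo at the global queue. A minimal sketch (the method name is illustrative, not from the original code):

- (void)barrierOnGlobalQueueDemo {
    dispatch_queue_t globalQueue = dispatch_get_global_queue(0, 0);

    dispatch_async(globalQueue, ^{ NSLog(@"1"); });
    dispatch_async(globalQueue, ^{ NSLog(@"2"); });

    // On a global queue the barrier degrades to a plain dispatch_async:
    // "3" is no longer guaranteed to run after 1 and 2, and 4 does not wait for it.
    dispatch_barrier_async(globalQueue, ^{ NSLog(@"3"); });

    dispatch_async(globalQueue, ^{ NSLog(@"4"); });
}

demo2 below shows a case where a barrier on a custom concurrent queue is genuinely needed: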

- (void)demo2{
    dispatch_queue_t concurrentQueue = dispatch_queue_create("cooci", DISPATCH_QUEUE_CONCURRENT);
//    dispatch_queue_t concurrentQueue = dispatch_get_global_queue(0, 0);
    // Mutate mArray from many threads at once
    for (int i = 0; i<1000; i++) {
        dispatch_async(concurrentQueue, ^{
            NSString *imageName = [NSString stringWithFormat:@"%d.jpg", (i % 10)];
            NSURL *url = [[NSBundle mainBundle] URLForResource:imageName withExtension:nil];
            NSData *data = [NSData dataWithContentsOfURL:url];
            UIImage *image = [UIImage imageWithData:data];
            // self.mArray itself always lives at the same address,
            // but its contents keep changing (0 - 1 - 2 ...).
            // Reads and writes go through the property's getter/setter (retain/release).
            // With several threads writing at once, e.g. 1: (1,2)  2: (1,2,3)  3: (1,2,4),
            // the same piece of memory is mutated concurrently, which is not thread-safe.
            dispatch_barrier_async(concurrentQueue, ^{
                [self.mArray addObject:image];
            });
        });
    }
}

Without the dispatch_barrier_async, the code above will eventually crash: adding to mArray boils down to a release of the old value and a retain of the new one, and because many threads are doing this asynchronously at the same time, back-to-back releases can occur; the retain then effectively operates on an already-freed (nil) value and the app crashes. The barrier serializes the writes and avoids this.
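
Based on the same idea, a common pattern is to let reads run concurrently and funnel all writes through a barrier on one custom concurrent queue. A minimal sketch, with illustrative names (KCImageStore, _rwQueue) that are not from the original code:

#import <UIKit/UIKit.h>

@interface KCImageStore : NSObject
- (void)addImage:(UIImage *)image;
- (NSArray *)allImages;
@end

@implementation KCImageStore {
    dispatch_queue_t _rwQueue;
    NSMutableArray  *_images;
}

- (instancetype)init {
    if (self = [super init]) {
        _rwQueue = dispatch_queue_create("com.demo.rw", DISPATCH_QUEUE_CONCURRENT);
        _images  = [NSMutableArray array];
    }
    return self;
}

// Write: the barrier waits for in-flight reads, runs alone, then lets later reads proceed.
- (void)addImage:(UIImage *)image {
    dispatch_barrier_async(_rwQueue, ^{
        [self->_images addObject:image];
    });
}

// Read: synchronous so the caller gets a value; concurrent with other reads.
- (NSArray *)allImages {
    __block NSArray *snapshot;
    dispatch_sync(_rwQueue, ^{
        snapshot = [self->_images copy];
    });
    return snapshot;
}
@end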

How barrier functions work under the hood

dispatch_barrier_sync

void
dispatch_barrier_sync(dispatch_queue_t dq, dispatch_block_t work)
{
	uintptr_t dc_flags = DC_FLAG_BARRIER | DC_FLAG_BLOCK;
	if (unlikely(_dispatch_block_has_private_data(work))) {
		return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
	}
	_dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
}

_dispatch_barrier_sync_f

static void
_dispatch_barrier_sync_f(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func, uintptr_t dc_flags)
{
	_dispatch_barrier_sync_f_inline(dq, ctxt, func, dc_flags);
}

_dispatch_barrier_sync_f_inline

static inline void
_dispatch_barrier_sync_f_inline(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func, uintptr_t dc_flags)
{
	dispatch_tid tid = _dispatch_tid_self();

	if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
		DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
	}

	dispatch_lane_t dl = upcast(dq)._dl;
	if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) {
		return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl,
				DC_FLAG_BARRIER | dc_flags);
	}

	if (unlikely(dl->do_targetq->do_targetq)) {
		return _dispatch_sync_recurse(dl, ctxt, func,
				DC_FLAG_BARRIER | dc_flags);
	}
	_dispatch_introspection_sync_begin(dl);
	_dispatch_lane_barrier_sync_invoke_and_complete(dl, ctxt, func
			DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
					dq, ctxt, func, dc_flags | DC_FLAG_BARRIER)));
}

If you step through the code above, you will find it lands in the following function: _dispatch_sync_recurse

static void
_dispatch_sync_recurse(dispatch_lane_t dq, void *ctxt,
		dispatch_function_t func, uintptr_t dc_flags)
{
	dispatch_tid tid = _dispatch_tid_self();
	dispatch_queue_t tq = dq->do_targetq;

	do {
		if (likely(tq->dq_width == 1)) {
			if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(tq, tid))) {
				return _dispatch_sync_f_slow(dq, ctxt, func, dc_flags, tq,
						DC_FLAG_BARRIER);
			}
		} else {
			dispatch_queue_concurrent_t dl = upcast(tq)._dl;
			if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) {
				return _dispatch_sync_f_slow(dq, ctxt, func, dc_flags, tq, 0);
			}
		}
		tq = tq->do_targetq;
	} while (unlikely(tq->do_targetq));

	_dispatch_introspection_sync_begin(dq);
	_dispatch_sync_invoke_and_complete_recurse(dq, ctxt, func, dc_flags
			DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
					dq, ctxt, func, dc_flags)));
}

Inside the do-while above, _dispatch_sync_f_slow is where the call waits as it recurses through the target queues; in other words, the barrier has to wait until the tasks already in the queue ahead of it have finished. Only then does it move on to the following function: _dispatch_sync_invoke_and_complete_recurse

static void
_dispatch_sync_invoke_and_complete_recurse(dispatch_queue_class_t dq,
		void *ctxt, dispatch_function_t func, uintptr_t dc_flags
		DISPATCH_TRACE_ARG(void *dc))
{
	_dispatch_sync_function_invoke_inline(dq, ctxt, func);
	_dispatch_trace_item_complete(dc);
	_dispatch_sync_complete_recurse(dq._dq, NULL, dc_flags);
}

_dispatch_sync_complete_recurse

static void
_dispatch_sync_complete_recurse(dispatch_queue_t dq, dispatch_queue_t stop_dq,
		uintptr_t dc_flags)
{
	bool barrier = (dc_flags & DC_FLAG_BARRIER);
	do {
		if (dq == stop_dq) return;
		if (barrier) {
			dx_wakeup(dq, 0, DISPATCH_WAKEUP_BARRIER_COMPLETE);
		} else {
			_dispatch_lane_non_barrier_complete(upcast(dq)._dl, 0);
		}
		dq = dq->do_targetq;
		barrier = (dq->dq_width == 1);
	} while (unlikely(dq->do_targetq));
}

_dispatch_lane_non_barrier_complete

static void
_dispatch_lane_non_barrier_complete(dispatch_lane_t dq,
		dispatch_wakeup_flags_t flags)
{
	uint64_t old_state, new_state, owner_self = _dispatch_lock_value_for_self();

	// see _dispatch_lane_resume()
	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, relaxed, {
		new_state = old_state - DISPATCH_QUEUE_WIDTH_INTERVAL;
		if (unlikely(_dq_state_drain_locked(old_state))) {
			// make drain_try_unlock() fail and reconsider whether there's
			// enough width now for a new item
			new_state |= DISPATCH_QUEUE_DIRTY;
		} else if (likely(_dq_state_is_runnable(new_state))) {
			new_state = _dispatch_lane_non_barrier_complete_try_lock(dq,
					old_state, new_state, owner_self);
		}
	});

	_dispatch_lane_non_barrier_complete_finish(dq, flags, old_state, new_state);
}

dx_wakeup

#define dx_wakeup(x, y, z) dx_vtable(x)->dq_wakeup(x, y, z)

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_serial, lane,
	.do_type        = DISPATCH_QUEUE_SERIAL_TYPE,
	.do_dispose     = _dispatch_lane_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_lane_invoke,

	.dq_activate    = _dispatch_lane_activate,
	.dq_wakeup      = _dispatch_lane_wakeup,
	.dq_push        = _dispatch_lane_push,
);

DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, lane,
	.do_type        = DISPATCH_QUEUE_CONCURRENT_TYPE,
	.do_dispose     = _dispatch_lane_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_lane_invoke,

	.dq_activate    = _dispatch_lane_activate,
	.dq_wakeup      = _dispatch_lane_wakeup,
	.dq_push        = _dispatch_lane_concurrent_push,
);
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_global, lane,
	.do_type        = DISPATCH_QUEUE_GLOBAL_ROOT_TYPE,
	.do_dispose     = _dispatch_object_no_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_object_no_invoke,

	.dq_activate    = _dispatch_queue_no_activate,
	.dq_wakeup      = _dispatch_root_queue_wakeup,
	.dq_push        = _dispatch_root_queue_push,
);

_dispatch_root_queue_wakeup

void
_dispatch_root_queue_wakeup(dispatch_queue_global_t dq,
		DISPATCH_UNUSED dispatch_qos_t qos, dispatch_wakeup_flags_t flags)
{
	if (!(flags & DISPATCH_WAKEUP_BLOCK_WAIT)) {
		DISPATCH_INTERNAL_CRASH(dq->dq_priority,
				"Don't try to wake up or override a root queue");
	}
	if (flags & DISPATCH_WAKEUP_CONSUME_2) {
		return _dispatch_release_2_tailcall(dq);
	}
}

_dispatch_lane_wakeup

void
_dispatch_lane_wakeup(dispatch_lane_class_t dqu, dispatch_qos_t qos,
		dispatch_wakeup_flags_t flags)
{
	dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;

	if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) {
		return _dispatch_lane_barrier_complete(dqu, qos, flags);
	}
	if (_dispatch_queue_class_probe(dqu)) {
		target = DISPATCH_QUEUE_WAKEUP_TARGET;
	}
	return _dispatch_queue_wakeup(dqu, qos, flags, target);
}

From _dispatch_lane_wakeup and _dispatch_root_queue_wakeup we can see that only custom serial and concurrent queues implement barrier behavior; the global queue does not.

_dispatch_lane_barrier_complete

static void
_dispatch_lane_barrier_complete(dispatch_lane_class_t dqu, dispatch_qos_t qos,
		dispatch_wakeup_flags_t flags)
{
	dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;
	dispatch_lane_t dq = dqu._dl;

	if (dq->dq_items_tail && !DISPATCH_QUEUE_IS_SUSPENDED(dq)) {
		struct dispatch_object_s *dc = _dispatch_queue_get_head(dq);
                // serial queue
		if (likely(dq->dq_width == 1 || _dispatch_object_is_barrier(dc))) {
			if (_dispatch_object_is_waiter(dc)) {
				return _dispatch_lane_drain_barrier_waiter(dq, dc, flags, 0);
			}
                        
		} else if (dq->dq_width > 1 && !_dispatch_object_is_barrier(dc)) { // concurrent queue
			return _dispatch_lane_drain_non_barriers(dq, dc, flags);
		}

		if (!(flags & DISPATCH_WAKEUP_CONSUME_2)) {
			_dispatch_retain_2(dq);
			flags |= DISPATCH_WAKEUP_CONSUME_2;
		}
		target = DISPATCH_QUEUE_WAKEUP_TARGET;
	}

	uint64_t owned = DISPATCH_QUEUE_IN_BARRIER +
			dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
	return _dispatch_lane_class_barrier_complete(dq, qos, flags, target, owned);
}

Once the barrier block itself has finished executing, the following call, _dispatch_lane_class_barrier_complete, lets the tasks queued after the barrier continue.

Semaphores

dispatch_queue_t queue = dispatch_get_global_queue(0, 0);
dispatch_semaphore_t sem = dispatch_semaphore_create(0);
// Task 1
dispatch_async(queue, ^{
    dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER); // wait
    NSLog(@"Task 1 running");
    NSLog(@"Task 1 done");
});
// Task 2
dispatch_async(queue, ^{
    sleep(2);
    NSLog(@"Task 2 running");
    NSLog(@"Task 2 done");
    dispatch_semaphore_signal(sem); // signal
});

Output:

Task 2 running
Task 2 done
Task 1 running
Task 1 done

Three functions matter here:

dispatch_semaphore_create // create the semaphore
dispatch_semaphore_wait   // wait on (decrement) the semaphore
dispatch_semaphore_signal // signal (increment) the semaphore

Let's analyze them one by one:

dispatch_semaphore_signal

intptr_t
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
// os_atomic_inc2o is an atomic ++ on dsema_value
	long value = os_atomic_inc2o(dsema, dsema_value, release);
	if (likely(value > 0)) {
		return 0;
	}
        
        // LONG_MIN after the increment means an unbalanced call - crash
	if (unlikely(value == LONG_MIN)) {
		DISPATCH_CLIENT_CRASH(value,
				"Unbalanced call to dispatch_semaphore_signal()");
	}
        // slow path: wake up a waiting thread
	return _dispatch_semaphore_signal_slow(dsema);
}

After the increment, if the value is greater than 0 there is no waiting thread and the function simply returns 0; if the value is still less than or equal to 0, some thread is blocked in dispatch_semaphore_wait, so _dispatch_semaphore_signal_slow is called to actually wake it up:

intptr_t
_dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema)
{
	_dispatch_sema4_create(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO);
	_dispatch_sema4_signal(&dsema->dsema_sema, 1);
	return 1;
}

dispatch_semaphore_create

dispatch_semaphore_t
dispatch_semaphore_create(intptr_t value)
{
	dispatch_semaphore_t dsema;

	// If the internal value is negative, then the absolute of the value is
	// equal to the number of waiting threads. Therefore it is bogus to
	// initialize the semaphore with a negative value.
	if (value < 0) {
		return DISPATCH_BAD_INPUT;
	}

	dsema = _dispatch_object_alloc(DISPATCH_VTABLE(semaphore),
			sizeof(struct dispatch_semaphore_s));
	dsema->do_next = DISPATCH_OBJECT_LISTLESS;
	dsema->do_targetq = _dispatch_get_default_queue(false);
	dsema->dsema_value = value;
	_dispatch_sema4_init(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO);
	dsema->dsema_orig = value;
	return dsema;
}

dispatch_semaphore_wait

intptr_t
dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
        // os_atomic_dec2o is an atomic -- on dsema_value
	long value = os_atomic_dec2o(dsema, dsema_value, acquire);
	if (likely(value >= 0)) {
		return 0;
	}
	return _dispatch_semaphore_wait_slow(dsema, timeout);
}

_dispatch_semaphore_wait_slow

static intptr_t
_dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema,
		dispatch_time_t timeout)
{
	long orig;

	_dispatch_sema4_create(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO);
	switch (timeout) {
	default:
		if (!_dispatch_sema4_timedwait(&dsema->dsema_sema, timeout)) {
			break;
		}
		// Fall through and try to undo what the fast path did to
		// dsema->dsema_value
	case DISPATCH_TIME_NOW:
		orig = dsema->dsema_value;
		while (orig < 0) {
			if (os_atomic_cmpxchgv2o(dsema, dsema_value, orig, orig + 1,
					&orig, relaxed)) {
				return _DSEMA4_TIMEOUT();
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		_dispatch_sema4_wait(&dsema->dsema_sema);
		break;
	}
	return 0;
}

_dispatch_sema4_wait

void
_dispatch_sema4_wait(_dispatch_sema4_t *sema)
{
	int ret = 0;
	do {
        // sem_wait is the underlying POSIX semaphore wait; the do-while retries when interrupted (EINTR)
		ret = sem_wait(sema);
	} while (ret == -1 && errno == EINTR);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
}

Summary: in this example dispatch_semaphore_create initializes the semaphore's value to 0; each dispatch_semaphore_wait decrements it by 1 and each dispatch_semaphore_signal increments it by 1. When the value drops below 0, wait blocks the current thread; once it is back to >= 0, execution continues with the following tasks.

A semaphore can therefore be used to cap the maximum number of tasks running concurrently, as sketched below.
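
A minimal sketch (the task bodies are illustrative): a semaphore created with a value of 2 allows at most two of the ten tasks to run at the same time.

dispatch_queue_t queue = dispatch_get_global_queue(0, 0);
dispatch_semaphore_t sem = dispatch_semaphore_create(2); // at most 2 concurrent tasks

for (int i = 0; i < 10; i++) {
    dispatch_async(queue, ^{
        // Blocks as soon as 2 tasks are already running (the value would go below 0).
        dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER);
        NSLog(@"task %d running", i);
        sleep(1);
        // Gives the slot back so a waiting task can start.
        dispatch_semaphore_signal(sem);
    });
}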

Dispatch groups

Code example 1

dispatch_group_t group = dispatch_group_create();
dispatch_queue_t queue = dispatch_get_global_queue(0, 0);
dispatch_group_async(group, queue, ^{
    NSLog(@"Task 1");
});
dispatch_group_async(group, queue, ^{
    NSLog(@"Task 2");
});
dispatch_group_notify(group, dispatch_get_main_queue(), ^{
    NSLog(@"Task 3");
});

Output:

Task 2
Task 1
Task 3

Task 1 and Task 2 run asynchronously, so their relative order is not deterministic, but Task 3 only runs after both Task 1 and Task 2 have finished.

Code example 2

dispatch_group_async(group, queue, ^{
        NSLog(@"Task 2");
    });

is equivalent to:

dispatch_group_enter(group);
dispatch_async(queue, ^{
        NSLog(@"Task 2");
        dispatch_group_leave(group);
    });
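
The manual enter/leave form matters when the work finishes inside a completion callback rather than when the block returns; dispatch_group_async would leave the group too early in that case. A minimal sketch, assuming a hypothetical fetchDataWithCompletion: method:

dispatch_group_t group = dispatch_group_create();

dispatch_group_enter(group);
[self fetchDataWithCompletion:^(NSData *data) {   // hypothetical async API
    NSLog(@"request 1 finished");
    dispatch_group_leave(group);                  // balance the enter
}];

dispatch_group_enter(group);
[self fetchDataWithCompletion:^(NSData *data) {
    NSLog(@"request 2 finished");
    dispatch_group_leave(group);
}];

dispatch_group_notify(group, dispatch_get_main_queue(), ^{
    NSLog(@"all requests finished");
});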

How dispatch groups work

dispatch_group_create / _dispatch_group_create_with_count

static inline dispatch_group_t
_dispatch_group_create_with_count(uint32_t n)
{
	dispatch_group_t dg = _dispatch_object_alloc(DISPATCH_VTABLE(group),
			sizeof(struct dispatch_group_s));
	dg->do_next = DISPATCH_OBJECT_LISTLESS;
	dg->do_targetq = _dispatch_get_default_queue(false);
	if (n) {
		os_atomic_store2o(dg, dg_bits,
				(uint32_t)-n * DISPATCH_GROUP_VALUE_INTERVAL, relaxed);
		os_atomic_store2o(dg, do_ref_cnt, 1, relaxed); // <rdar://22318411>
	}
	return dg;
}

dispatch_group_t
dispatch_group_create(void)
{
	return _dispatch_group_create_with_count(0);
}

dispatch_group_enter

void
dispatch_group_enter(dispatch_group_t dg)
{
	// The value is decremented on a 32bits wide atomic so that the carry
	// for the 0 -> -1 transition is not propagated to the upper 32bits.
	uint32_t old_bits = os_atomic_sub_orig2o(dg, dg_bits,
			DISPATCH_GROUP_VALUE_INTERVAL, acquire);
	uint32_t old_value = old_bits & DISPATCH_GROUP_VALUE_MASK;
	if (unlikely(old_value == 0)) {
		_dispatch_retain(dg); // <rdar://problem/22318411>
	}
	if (unlikely(old_value == DISPATCH_GROUP_VALUE_MAX)) {
		DISPATCH_CLIENT_CRASH(old_bits,
				"Too many nested calls to dispatch_group_enter()");
	}
}

dispatch_group_leave

void
dispatch_group_leave(dispatch_group_t dg)
{
	// The value is incremented on a 64bits wide atomic so that the carry for
	// the -1 -> 0 transition increments the generation atomically.
	uint64_t new_state, old_state = os_atomic_add_orig2o(dg, dg_state,
			DISPATCH_GROUP_VALUE_INTERVAL, release);
	uint32_t old_value = (uint32_t)(old_state & DISPATCH_GROUP_VALUE_MASK);

	if (unlikely(old_value == DISPATCH_GROUP_VALUE_1)) {
		old_state += DISPATCH_GROUP_VALUE_INTERVAL;
		do {
			new_state = old_state;
			if ((old_state & DISPATCH_GROUP_VALUE_MASK) == 0) {
				new_state &= ~DISPATCH_GROUP_HAS_WAITERS;
				new_state &= ~DISPATCH_GROUP_HAS_NOTIFS;
			} else {
				// If the group was entered again since the atomic_add above,
				// we can't clear the waiters bit anymore as we don't know for
				// which generation the waiters are for
				new_state &= ~DISPATCH_GROUP_HAS_NOTIFS;
			}
			if (old_state == new_state) break;
		} while (unlikely(!os_atomic_cmpxchgv2o(dg, dg_state,
				old_state, new_state, &old_state, relaxed)));
		return _dispatch_group_wake(dg, old_state, true);
	}

	if (unlikely(old_value == 0)) {
		DISPATCH_CLIENT_CRASH((uintptr_t)old_value,
				"Unbalanced call to dispatch_group_leave()");
	}
}

_dispatch_group_notify

static inline void
_dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq,
		dispatch_continuation_t dsn)
{
	uint64_t old_state, new_state;
	dispatch_continuation_t prev;

	dsn->dc_data = dq;
	_dispatch_retain(dq);

	prev = os_mpsc_push_update_tail(os_mpsc(dg, dg_notify), dsn, do_next);
	if (os_mpsc_push_was_empty(prev)) _dispatch_retain(dg);
	os_mpsc_push_update_prev(os_mpsc(dg, dg_notify), prev, dsn, do_next);
	if (os_mpsc_push_was_empty(prev)) {
		os_atomic_rmw_loop2o(dg, dg_state, old_state, new_state, release, {
			new_state = old_state | DISPATCH_GROUP_HAS_NOTIFS;
			if ((uint32_t)old_state == 0) {
				os_atomic_rmw_loop_give_up({
					return _dispatch_group_wake(dg, new_state, false);
				});
			}
		});
	}
}

A call to dispatch_group_enter moves the group state from 0 to -1; the matching dispatch_group_leave moves it back from -1 to 0. Only when the state returns to 0 does the block registered via _dispatch_group_notify get executed (through _dispatch_group_wake).

dispatch_group_async

void
dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
		dispatch_block_t db)
{
	dispatch_continuation_t dc = _dispatch_continuation_alloc();
	uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_GROUP_ASYNC;
	dispatch_qos_t qos;

	qos = _dispatch_continuation_init(dc, dq, db, 0, dc_flags);
	_dispatch_continuation_group_async(dg, dq, dc, qos);
}

_dispatch_continuation_group_async

static inline void
_dispatch_continuation_group_async(dispatch_group_t dg, dispatch_queue_t dq,
		dispatch_continuation_t dc, dispatch_qos_t qos)
{
	dispatch_group_enter(dg);
	dc->dc_data = dg;
	_dispatch_continuation_async(dq, dc, qos, dc->dc_flags);
}

_dispatch_continuation_async

static inline void
_dispatch_continuation_async(dispatch_queue_class_t dqu,
		dispatch_continuation_t dc, dispatch_qos_t qos, uintptr_t dc_flags)
{
#if DISPATCH_INTROSPECTION
	if (!(dc_flags & DC_FLAG_NO_INTROSPECTION)) {
		_dispatch_trace_item_push(dqu, dc);
	}
#else
	(void)dc_flags;
#endif
	return dx_push(dqu._dq, dc, qos);
}

Eventually execution reaches the following function: _dispatch_continuation_with_group_invoke

static inline void
_dispatch_continuation_with_group_invoke(dispatch_continuation_t dc)
{
	struct dispatch_object_s *dou = dc->dc_data;
	unsigned long type = dx_type(dou);
	if (type == DISPATCH_GROUP_TYPE) {
		_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
		_dispatch_trace_item_complete(dc);
		dispatch_group_leave((dispatch_group_t)dou);
	} else {
		DISPATCH_INTERNAL_CRASH(dx_type(dou), "Unexpected object type");
	}
}

Summary: dispatch_group_async is essentially a wrapper around dispatch_group_enter and dispatch_group_leave; the enter happens in _dispatch_continuation_group_async, and the leave happens in _dispatch_continuation_with_group_invoke after the block has run.

Dispatch_source


Code example:

/*
 When an event fires at a very high rate over a short window, the Dispatch Source accumulates the triggers (e.g. by ADDing their values together) and delivers them in one batch when the system gets a chance to run the handler; if the triggers are spread out, each one is delivered separately.
*/
self.queue = dispatch_queue_create("queue1", NULL);
self.source = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_ADD, 0, 0, dispatch_get_main_queue());
dispatch_source_set_event_handler(self.source, ^{
    NSLog(@"%@",[NSThread currentThread]);
    NSUInteger value = dispatch_source_get_data(self.source);
    NSLog(@"%lu",(**unsigned** **long**)value);
});
    
dispatch_resume(self.source);

// Approach 1
dispatch_source_merge_data(self.source, 1);
dispatch_source_merge_data(self.source, 1);

// Approach 2
dispatch_async(self.queue, ^{
    sleep(1);
    dispatch_source_merge_data(self.source, 1);
});
dispatch_async(self.queue, ^{
    sleep(1);
    dispatch_source_merge_data(self.source, 1);
});

Approach 1:

With DISPATCH_SOURCE_TYPE_DATA_ADD, the two calls to dispatch_source_merge_data are coalesced, so the handler prints 2.

Approach 2:

With DISPATCH_SOURCE_TYPE_DATA_ADD, the two merges arrive far enough apart that they are not coalesced, so the handler runs twice and prints 1 each time.
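
A typical use of this coalescing behavior is batching progress updates: the background work merges +1 many times, and the handler updates the UI with the accumulated count only as often as the main queue gets around to running it. A minimal sketch (progressSource is a hypothetical property, not from the original code):

self.progressSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_ADD, 0, 0,
                                             dispatch_get_main_queue());
__block NSUInteger finished = 0;
dispatch_source_set_event_handler(self.progressSource, ^{
    // dispatch_source_get_data returns the sum of all merges since the
    // handler last ran; the pending value then resets to 0.
    finished += dispatch_source_get_data(self.progressSource);
    NSLog(@"progress: %lu / 100", (unsigned long)finished);
});
dispatch_resume(self.progressSource);

dispatch_queue_t workQueue = dispatch_get_global_queue(0, 0);
for (int i = 0; i < 100; i++) {
    dispatch_async(workQueue, ^{
        // ... do one unit of work ...
        dispatch_source_merge_data(self.progressSource, 1); // +1, coalesced
    });
}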

For more usage details, see this blog post: blog.csdn.net/weixin_3863…