GCD函数分析

693 阅读7分钟

栅栏函数

举个🌰

// Demo: dispatch_barrier_async on a custom concurrent queue.
// The barrier block runs only after tasks submitted before it have finished,
// and tasks submitted after the barrier wait for the barrier to complete.
- (void)demo2{
    
  dispatch_queue_t concurrentQueue = dispatch_queue_create("cooci", DISPATCH_QUEUE_CONCURRENT);
  /* 1. async tasks submitted before the barrier */
  dispatch_async(concurrentQueue, ^{
      NSLog(@"123");
     
  });
  
  dispatch_async(concurrentQueue, ^{
      NSLog(@"456");
      
  });
  
  /* 2. barrier function */ // fences the concurrent queue
  dispatch_barrier_async(concurrentQueue, ^{
      NSLog(@"---------------------%@------------------------",[NSThread currentThread]);
  });
  /* 3. async task submitted after the barrier — blocked by it */
  dispatch_async(concurrentQueue, ^{
      NSLog(@"加载那么多,喘口气!!!");
  });
  // barrier_async does not block the current thread, so this can print
  // before the barrier block runs.
  NSLog(@"**********起来干!!");

}

输出结果:

** 123**
** 456**
** **********起来干!!**
** ---------------------<NSThread: 0x600002344780>{number = 6, name = (null)}------------------------**
** 加载那么多,喘口气!!!**

dispatch_barrier_async阻碍的是在barrier里block后面加入到concurrentQueue的任务。

我们把dispatch_barrier_async改成dispatch_barrier_sync,输出结果如下

** 123**
** 456**
** ---------------------<NSThread: 0x600002344780>{number = 6, name = (null)}------------------------**
** **********起来干!!**
** 加载那么多,喘口气!!!**

从上面的例子中我们看到 dispatch_barrier_async 前面的任务执行完毕才会到它里面的块的代码中来 dispatch_barrier_sync 作用相同,但是这个会阻塞线程,影响后面的任务执行 非常重要的一点:栅栏函数只能控制同一并发队列,而且必须是自定义并发队列,全局并发队列是不能加栅栏的,那么我们来看一下同步是怎么实现的。

dispatch_barrier_sync的源码分析

// Public entry point for a barrier-sync block submission.
// Tags the continuation with DC_FLAG_BARRIER | DC_FLAG_BLOCK, then hands the
// block's invoke thunk to _dispatch_barrier_sync_f.
void
dispatch_barrier_sync(dispatch_queue_t dq, dispatch_block_t work)
{
	uintptr_t dc_flags = DC_FLAG_BARRIER | DC_FLAG_BLOCK;
	// Blocks carrying private data (presumably ones made with
	// dispatch_block_create(), with QoS/voucher attached — TODO confirm)
	// take a dedicated slow path.
	if (unlikely(_dispatch_block_has_private_data(work))) {
		return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
	}
	_dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
}

dispatch_barrier_sync->_dispatch_barrier_sync_f _dispatch_barrier_sync_f里面封装的是_dispatch_barrier_sync_f_inline

// Fast path for barrier-sync on a lane (serial or custom concurrent queue).
// Tries to grab the barrier lock with the caller's tid; on contention (or for
// global/root queues, which never take the fast path) falls into
// _dispatch_sync_f_slow. Recurses when the queue targets another queue.
static inline void
_dispatch_barrier_sync_f_inline(dispatch_queue_t dq, void *ctxt,
		dispatch_function_t func, uintptr_t dc_flags)
{
	dispatch_tid tid = _dispatch_tid_self();

	// Only lane-type queues support dispatch_sync; anything else is a
	// client error and crashes deliberately.
	if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
		DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
	}

	dispatch_lane_t dl = upcast(dq)._dl;
	// The more correct thing to do would be to merge the qos of the thread
	// that just acquired the barrier lock into the queue state.
	//
	// However this is too expensive for the fast path, so skip doing it.
	// The chosen tradeoff is that if an enqueue on a lower priority thread
	// contends with this fast path, this thread may receive a useless override.
	//
	// Global concurrent queues and queues bound to non-dispatch threads
	// always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
	if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) {
		// Could not take the barrier lock uncontended: slow path (this is
		// also where serial-queue deadlock detection happens).
		return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl,
				DC_FLAG_BARRIER | dc_flags);
	}

	// Queue targets a non-root queue: recurse through the target hierarchy.
	if (unlikely(dl->do_targetq->do_targetq)) {
		return _dispatch_sync_recurse(dl, ctxt, func,
				DC_FLAG_BARRIER | dc_flags);
	}
	_dispatch_introspection_sync_begin(dl);
	// DISPATCH_TRACE_ARG expands to ", arg" (or nothing) — hence no comma
	// after `func` here.
	_dispatch_lane_barrier_sync_invoke_and_complete(dl, ctxt, func
			DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
					dq, ctxt, func, dc_flags | DC_FLAG_BARRIER)));
}

截屏2021-08-22 上午10.01.28.png

添加符号断点,走的是_dispatch_sync_f_slow

// Slow path for sync/barrier-sync (body elided by the article with "...").
// After the waiting logic, it invokes the client function and completes,
// recursing up the target-queue chain.
DISPATCH_NOINLINE
static void
_dispatch_sync_f_slow(dispatch_queue_class_t top_dqu, void *ctxt,
		dispatch_function_t func, uintptr_t top_dc_flags,
		dispatch_queue_class_t dqu, uintptr_t dc_flags)
{
         ...
	_dispatch_sync_invoke_and_complete_recurse(top_dq, ctxt, func,top_dc_flags
			DISPATCH_TRACE_ARG(&dsc));
}

// Invokes the client function on the current thread, then completes the
// sync operation by recursing through the queue hierarchy (which is where
// barrier completion / wakeup happens).
static void
_dispatch_sync_invoke_and_complete_recurse(dispatch_queue_class_t dq,
		void *ctxt, dispatch_function_t func, uintptr_t dc_flags
		DISPATCH_TRACE_ARG(void *dc))
{
	_dispatch_sync_function_invoke_inline(dq, ctxt, func);
	_dispatch_trace_item_complete(dc);
	_dispatch_sync_complete_recurse(dq._dq, NULL, dc_flags);
}

调用_dispatch_sync_complete_recurse,这个函数中有一个do..while如果没有barrier就直接走_dispatch_lane_non_barrier_complete,如果有barrier就走dx_wakeup(dq, 0,DISPATCH_WAKEUP_BARRIER_COMPLETE);唤醒队列中的任务开始执行

// Walks up the target-queue chain completing the sync at each level.
// With DC_FLAG_BARRIER set, dx_wakeup(..., DISPATCH_WAKEUP_BARRIER_COMPLETE)
// wakes the queue so tasks parked behind the barrier can run; otherwise a
// plain non-barrier completion is performed.
static void
_dispatch_sync_complete_recurse(dispatch_queue_t dq, dispatch_queue_t stop_dq,
		uintptr_t dc_flags)
{
	bool barrier = (dc_flags & DC_FLAG_BARRIER);
	do {
		if (dq == stop_dq) return;
		if (barrier) {
			dx_wakeup(dq, 0, DISPATCH_WAKEUP_BARRIER_COMPLETE);
		} else {
			_dispatch_lane_non_barrier_complete(upcast(dq)._dl, 0);
		}
		dq = dq->do_targetq;
		// A serial target queue (width 1) is implicitly a barrier.
		barrier = (dq->dq_width == 1);
	} while (unlikely(dq->do_targetq));
}
// dx_wakeup dispatches through the class vtable's dq_wakeup slot, so the
// actual wakeup behavior depends on the concrete queue type below.
#define dx_wakeup(x, y, z) dx_vtable(x)->dq_wakeup(x, y, z)
// Custom concurrent queues: dq_wakeup is _dispatch_lane_wakeup, which
// handles DISPATCH_WAKEUP_BARRIER_COMPLETE (barriers work here).
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_concurrent, lane,
	.do_type        = DISPATCH_QUEUE_CONCURRENT_TYPE,
	.do_dispose     = _dispatch_lane_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_lane_invoke,

	.dq_activate    = _dispatch_lane_activate,
	.dq_wakeup      = _dispatch_lane_wakeup,
	.dq_push        = _dispatch_lane_concurrent_push,
);

// Global (root) queues: dq_wakeup is _dispatch_root_queue_wakeup, which has
// no barrier handling — this is why barriers have no effect on global queues.
DISPATCH_VTABLE_SUBCLASS_INSTANCE(queue_global, lane,
	.do_type        = DISPATCH_QUEUE_GLOBAL_ROOT_TYPE,
	.do_dispose     = _dispatch_object_no_dispose,
	.do_debug       = _dispatch_queue_debug,
	.do_invoke      = _dispatch_object_no_invoke,

	.dq_activate    = _dispatch_queue_no_activate,
	.dq_wakeup      = _dispatch_root_queue_wakeup,
	.dq_push        = _dispatch_root_queue_push,
);

并发队列调用的是_dispatch_lane_wakeup,而全局并发队列调用的是_dispatch_root_queue_wakeup _dispatch_lane_wakeup源码实现,如果if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) { flags是DISPATCH_WAKEUP_BARRIER_COMPLETE则走_dispatch_lane_barrier_complete

// Wakeup entry for lanes (serial / custom concurrent queues).
// DISPATCH_WAKEUP_BARRIER_COMPLETE routes to _dispatch_lane_barrier_complete;
// otherwise, if the queue has pending work, wake it toward its target.
void
_dispatch_lane_wakeup(dispatch_lane_class_t dqu, dispatch_qos_t qos,
		dispatch_wakeup_flags_t flags)
{
	dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;

	if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) {
		return _dispatch_lane_barrier_complete(dqu, qos, flags);
	}
	// Queue has items pending — wake toward the target queue.
	if (_dispatch_queue_class_probe(dqu)) {
		target = DISPATCH_QUEUE_WAKEUP_TARGET;
	}
	return _dispatch_queue_wakeup(dqu, qos, flags, target);
}

_dispatch_lane_barrier_complete这是一个循环操作:如果队首对象是barrier(_dispatch_object_is_barrier)且是同步等待者,就走_dispatch_lane_drain_barrier_waiter;一直等到没有barrier了,然后走_dispatch_lane_drain_non_barriers。_dispatch_lane_drain_barrier_waiter里面做了一些barrier的唤醒和重新标记barrier状态的事情


_dispatch_lane_barrier_complete(dispatch_lane_class_t dqu, dispatch_qos_t qos,
		dispatch_wakeup_flags_t flags)
{
	dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;
	dispatch_lane_t dq = dqu._dl;

	if (dq->dq_items_tail && !DISPATCH_QUEUE_IS_SUSPENDED(dq)) {
		struct dispatch_object_s *dc = _dispatch_queue_get_head(dq);
		if (likely(dq->dq_width == 1 || _dispatch_object_is_barrier(dc))) {
			if (_dispatch_object_is_waiter(dc)) {
				return _dispatch_lane_drain_barrier_waiter(dq, dc, flags, 0);
			}
		} else if (dq->dq_width > 1 && !_dispatch_object_is_barrier(dc)) {
			return _dispatch_lane_drain_non_barriers(dq, dc, flags);
		}

		if (!(flags & DISPATCH_WAKEUP_CONSUME_2)) {
			_dispatch_retain_2(dq);
			flags |= DISPATCH_WAKEUP_CONSUME_2;
		}
		target = DISPATCH_QUEUE_WAKEUP_TARGET;
	}

	uint64_t owned = DISPATCH_QUEUE_IN_BARRIER +
			dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
	return _dispatch_lane_class_barrier_complete(dq, qos, flags, target, owned);

dispatch_lane_drain_barrier_waiter调用了_dispatch_barrier_waiter_redirect_or_wake里面传入了新状态和旧状态

dispatch_lane_drain_barrier_waiter(dispatch_lane_t dq,
		struct dispatch_object_s *dc, dispatch_wakeup_flags_t flags,
		uint64_t enqueued_bits)
{
	dispatch_sync_context_t dsc = (dispatch_sync_context_t)dc;
	struct dispatch_object_s *next_dc;
	uint64_t next_owner = 0, old_state, new_state;

	next_owner = _dispatch_lock_value_from_tid(dsc->dsc_waiter);
	next_dc = _dispatch_queue_pop_head(dq, dc);

transfer_lock_again:
	os_atomic_rmw_loop2o(dq, dq_state, old_state, new_state, release, {
		if (unlikely(_dq_state_needs_ensure_ownership(old_state))) {
			_dispatch_event_loop_ensure_ownership((dispatch_wlh_t)dq);
			_dispatch_queue_move_to_contended_sync(dq->_as_dq);
			os_atomic_rmw_loop_give_up(goto transfer_lock_again);
		}

		new_state  = old_state;
		new_state &= ~DISPATCH_QUEUE_DRAIN_UNLOCK_MASK;
		new_state &= ~DISPATCH_QUEUE_DIRTY;
		new_state |= next_owner;

		if (_dq_state_is_base_wlh(old_state)) {
			if (next_dc) {
				// we know there's a next item, keep the enqueued bit if any
			} else if (unlikely(_dq_state_is_dirty(old_state))) {
				os_atomic_rmw_loop_give_up({
					os_atomic_xor2o(dq, dq_state, DISPATCH_QUEUE_DIRTY, acquire);
					next_dc = os_atomic_load2o(dq, dq_items_head, relaxed);
					goto transfer_lock_again;
				});
			} else {
				new_state &= ~DISPATCH_QUEUE_MAX_QOS_MASK;
				new_state &= ~DISPATCH_QUEUE_ENQUEUED;
			}
		} else {
			new_state -= enqueued_bits;
		}
	});

	return _dispatch_barrier_waiter_redirect_or_wake(dq, dc, flags,
			old_state, new_state);
}
// Either redirects the popped barrier waiter onto the target queue (when
// this lane is an inner queue of a hierarchy) or wakes the waiting thread.
// Body partially elided by the article with "...".
DISPATCH_NOINLINE
static void
_dispatch_barrier_waiter_redirect_or_wake(dispatch_queue_class_t dqu,
		dispatch_object_t dc, dispatch_wakeup_flags_t flags,
		uint64_t old_state, uint64_t new_state)
{
	...
    // Re-mark the waiter's barrier flag for the next level: a serial target
    // (width 1) keeps it a barrier, a wide target drops the barrier bit.
	if (unlikely(_dq_state_is_inner_queue(old_state))) {
		dispatch_queue_t tq = dq->do_targetq;
		if (dsc->dc_flags & DC_FLAG_ASYNC_AND_WAIT) {
			_dispatch_async_waiter_update(dsc, dq);
		}
		if (likely(tq->dq_width == 1)) {
			dsc->dc_flags |= DC_FLAG_BARRIER;
		} else {
			dispatch_lane_t dl = upcast(tq)._dl;
			dsc->dc_flags &= ~DC_FLAG_BARRIER;
			if (_dispatch_queue_try_reserve_sync_width(dl)) {
				return _dispatch_non_barrier_waiter_redirect_or_wake(dl, dc);
			}
		}
		// passing the QoS of `dq` helps pushing on low priority waiters with
		// legacy workloops.
		dsc->dsc_from_async = false;
        // Push the waiter down the hierarchy, level by level.
		return dx_push(tq, dsc, _dq_state_max_qos(old_state));
	}

    ...
	return _dispatch_waiter_wake(dsc, wlh, old_state, new_state);
}

当barrier前面的都执行完了,也就没有barrier了,然后调用barrier里面的block执行,调用完之后

  • _dispatch_sync_complete_recurse
  • _dispatch_lane_barrier_complete
  • _dispatch_lane_class_barrier_complete 我们看下全局并发队列的
// Wakeup entry for global (root) queues. Note there is NO handling of
// DISPATCH_WAKEUP_BARRIER_COMPLETE here — root queues cannot be fenced by a
// barrier; unexpected wakeups crash deliberately.
void
_dispatch_root_queue_wakeup(dispatch_queue_global_t dq,
		DISPATCH_UNUSED dispatch_qos_t qos, dispatch_wakeup_flags_t flags)
{
	if (!(flags & DISPATCH_WAKEUP_BLOCK_WAIT)) {
		DISPATCH_INTERNAL_CRASH(dq->dq_priority,
				"Don't try to wake up or override a root queue");
	}
	if (flags & DISPATCH_WAKEUP_CONSUME_2) {
		return _dispatch_release_2_tailcall(dq);
	}
}

它里面没有对barrier的处理,所以栅栏函数只对自定义的队列有用。从上面的源码中看到,栅栏函数会去队列中找到之前队列的任务进行一个唤醒,前面的都唤醒执行完之后才会执行栅栏函数的block,栅栏执行完之后标记完成。栅栏函数起到一个阻塞队列的效果,全局并发队列里面除了处理用户加入的一些事件,还要处理很多系统的事件,如果给全局并发队列中加入栅栏,系统将可能出现一些意想不到的情况。所以栅栏不能阻塞全局并发队列。

信号量

由前面知道,栅栏函数只能阻塞当前自定义的队列。我们有时候会用到一些三方库,如果我们想在别的队列里面做一些处理,栅栏函数就不在起作用了。 信号量常用函数

  • dispatch_semaphore_create 创建信号量
  • dispatch_semaphore_wait 信号量等待
  • dispatch_semaphore_signal 信号量释放
  // Demo: a semaphore created with value 2 caps concurrency at 2.
  dispatch_semaphore_t semap = dispatch_semaphore_create(2);
    // task 1: wait (decrement) before working, signal (increment) when done
    dispatch_async(queue, ^{
        dispatch_semaphore_wait(semap, DISPATCH_TIME_FOREVER);
        NSLog(@"执行任务1");
        sleep(1);
        NSLog(@"任务1完成");
        dispatch_semaphore_signal(semap);
    });
    
    // task 2
    dispatch_async(queue, ^{
        // NOTE(review): task 2 signals without a matching wait — presumably
        // intentional for the article's demonstration; verify against intent.
        dispatch_semaphore_signal(semap);
      
        NSLog(@"执行任务2");
        sleep(1);
        NSLog(@"任务2完成");

    });

dispatch_semaphore_t semap = dispatch_semaphore_create(2);可以控制最大并发量为2。 截屏2021-08-22 上午11.38.11.png 当信号量小于0的时候,将返回NULL 我们来看一下工作原理: dispatch_semaphore_wait

// Decrements the semaphore value atomically. If the result is still >= 0
// the caller proceeds immediately (returns 0); a negative value means the
// caller must block in the slow path until signaled or timed out.
intptr_t
dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{       
	long value = os_atomic_dec2o(dsema, dsema_value, acquire);
	if (likely(value >= 0)) {
		return 0;
	}
	return _dispatch_semaphore_wait_slow(dsema, timeout);
}

os_atomic_dec2o是减一操作:执行wait则信号量减一,如果value>=0,则返回0;如果小于0则执行_dispatch_semaphore_wait_slow


// Slow path taken when the decremented semaphore value went negative.
// DISPATCH_TIME_FOREVER blocks in _dispatch_sema4_wait; DISPATCH_TIME_NOW
// undoes the fast-path decrement and reports a timeout; any other deadline
// does a timed wait, falling through to the NOW case on expiry.
  static intptr_t
_dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema,
		dispatch_time_t timeout)
{
	long orig;

	_dispatch_sema4_create(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO);
	switch (timeout) {
	default:
		if (!_dispatch_sema4_timedwait(&dsema->dsema_sema, timeout)) {
			break;
		}
		// Fall through and try to undo what the fast path did to
		// dsema->dsema_value
	case DISPATCH_TIME_NOW:
		orig = dsema->dsema_value;
		while (orig < 0) {
			// Re-increment to cancel the fast path's decrement, then
			// report timeout to the caller.
			if (os_atomic_cmpxchgv2o(dsema, dsema_value, orig, orig + 1,
					&orig, relaxed)) {
				return _DSEMA4_TIMEOUT();
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		_dispatch_sema4_wait(&dsema->dsema_sema);
		break;
	}
	return 0;
}

我们一般传参为DISPATCH_TIME_FOREVER 看下_dispatch_sema4_wait的实现:

// Blocks on the underlying OS semaphore; the do..while retries sem_wait()
// when it is interrupted by a signal (EINTR), so the wait is uninterruptible
// from the caller's point of view.
void
_dispatch_sema4_wait(_dispatch_sema4_t *sema)
{
	int ret = 0;
	do {
		ret = sem_wait(sema);
	} while (ret == -1 && errno == EINTR);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
}

里面是一个do..while循环,do..while循环里面执行了sem_wait(sema) 所以当我们调用dispatch_semaphore_wait(semap, DISPATCH_TIME_FOREVER);如果信号量<0,则相当于加了一个do...while循环,此时就处于等待中

// Increments the semaphore value atomically. A resulting value > 0 means no
// one was waiting (return 0). LONG_MIN indicates an increment overflow from
// unbalanced signal calls and crashes deliberately; otherwise a waiter
// exists and the slow path wakes it.
intptr_t
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
	long value = os_atomic_inc2o(dsema, dsema_value, release);
	if (likely(value > 0)) {
		return 0;
	}
	if (unlikely(value == LONG_MIN)) {
		DISPATCH_CLIENT_CRASH(value,
				"Unbalanced call to dispatch_semaphore_signal()");
	}
	return _dispatch_semaphore_signal_slow(dsema);
}

os_atomic_inc2o是一个加一的操作。当加一后的value>0时直接返回0;如果value等于LONG_MIN,说明signal调用不配对,报错"Unbalanced call to dispatch_semaphore_signal()";否则说明有线程在等待,执行_dispatch_semaphore_signal_slow去唤醒等待者

// Slow path for signal when a waiter exists (value <= 0 after increment):
// lazily creates the OS semaphore if needed and posts it once to wake one
// waiting thread.
DISPATCH_NOINLINE
intptr_t
_dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema)
{
	_dispatch_sema4_create(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO);
	_dispatch_sema4_signal(&dsema->dsema_sema, 1);
	return 1;
}

调度组

调度组函数常用API如下: dispatch_group_create 创建组 dispatch_group_async 进组 dispatch_group_notify 进组任务执行完成通知 dispatch_group_wait 进组任务执行等待时间

dispatch_group_enter 进组 dispatch_group_leave 出组 注意搭配使用

eg:

// Demo: dispatch groups. Two dispatch_group_async blocks download image
// data; a manual enter (paired with leave in touchesBegan) keeps the group
// open, so the notify block fires only after the user taps the screen.
- (void)groupDemo{

    self.group = dispatch_group_create();
    dispatch_queue_t queue = dispatch_get_global_queue(0, 0);
    dispatch_group_async(_group, queue, ^{
        // group task 1: download image data 1
        NSString *logoStr1 = @"https://f12.baidu.com/it/u=711217113,818398466&fm=72";
        NSData *data1 = [NSData dataWithContentsOfURL:[NSURL URLWithString:logoStr1]];
        [self.mArray addObject:data1];
    });


    dispatch_group_async( self.group , queue, ^{
        // group task 2: download image data 2
       NSString *logoStr2 = @"https://f12.baidu.com/it/u=3172787957,1000491180&fm=72";
        NSData *data2 = [NSData dataWithContentsOfURL:[NSURL URLWithString:logoStr2]];
        [self.mArray addObject:data2];
    });
    
     // enter/leave must come in pairs; the matching leave is in
     // touchesBegan:withEvent: below
   
    dispatch_group_enter( self.group );
    dispatch_async(queue, ^{
        // manually-entered task: download image data again
       NSString *logoStr2 = @"https://f12.baidu.com/it/u=3172787957,1000491180&fm=72";
        NSData *data2 = [NSData dataWithContentsOfURL:[NSURL URLWithString:logoStr2]];
        [self.mArray addObject:data2];
       
    });
    
//    long time = dispatch_group_wait(group, 1);
//
//    if (time == 0) {
//
//    }
    
    
    // Fires on the main queue once every enter has a matching leave.
    dispatch_group_notify( self.group , dispatch_get_main_queue(), ^{
        UIImage *newImage = nil;
       NSLog(@"数组个数:%ld",self.mArray.count);

    });

}

// Balances the dispatch_group_enter in groupDemo: tapping the screen leaves
// the group, which lets the group's notify block run.
- (void)touchesBegan:(NSSet<UITouch *> *)touches withEvent:(UIEvent *)event {
    
    dispatch_group_leave( self.group );
    
}

当点击屏幕的时候,打印**数组个数:3**

我们看到 dispatch_group_enter dispatch_group_leave 这一对组合 就相当于dispatch_group_async ,然后他们执行完之后dispatch_group_notify才会执行。 dispatch_group_create之前的源码中底层是由信号量封装,但是818之后,重写了一套,实现和信号量的比较相似 底层源码分析:

// Public constructor: a fresh group with an initial enter-count of 0.
dispatch_group_t
dispatch_group_create(void)
{
	return _dispatch_group_create_with_count(0);
}

我们看下_dispatch_group_create_with_count

// Allocates a dispatch group, optionally pre-entered n times (n > 0 stores
// -n * DISPATCH_GROUP_VALUE_INTERVAL into dg_bits and takes an extra
// reference, mirroring what n dispatch_group_enter calls would do).
static inline dispatch_group_t
_dispatch_group_create_with_count(uint32_t n)
{
	dispatch_group_t dg = _dispatch_object_alloc(DISPATCH_VTABLE(group),
			sizeof(struct dispatch_group_s));
	dg->do_next = DISPATCH_OBJECT_LISTLESS;
	dg->do_targetq = _dispatch_get_default_queue(false);
	if (n) {
		os_atomic_store2o(dg, dg_bits,
				(uint32_t)-n * DISPATCH_GROUP_VALUE_INTERVAL, relaxed);
		os_atomic_store2o(dg, do_ref_cnt, 1, relaxed); // <rdar://22318411>
	}
	return dg;
}

我们看下dispatch_group_enter

// Enter the group: atomically subtracts DISPATCH_GROUP_VALUE_INTERVAL from
// the 32-bit dg_bits. The first enter (old value 0) retains the group;
// hitting DISPATCH_GROUP_VALUE_MAX means the enter count overflowed.
void
dispatch_group_enter(dispatch_group_t dg)
{
	// The value is decremented on a 32bits wide atomic so that the carry
	// for the 0 -> -1 transition is not propagated to the upper 32bits.
	uint32_t old_bits = os_atomic_sub_orig2o(dg, dg_bits,
			DISPATCH_GROUP_VALUE_INTERVAL, acquire);
	uint32_t old_value = old_bits & DISPATCH_GROUP_VALUE_MASK;
	if (unlikely(old_value == 0)) {
		_dispatch_retain(dg); // <rdar://problem/22318411>
	}
	if (unlikely(old_value == DISPATCH_GROUP_VALUE_MAX)) {
		DISPATCH_CLIENT_CRASH(old_bits,
				"Too many nested calls to dispatch_group_enter()");
	}
}

dispatch_group_leave


// Leave the group: atomically adds DISPATCH_GROUP_VALUE_INTERVAL to the
// 64-bit dg_state. When this leave balances the last outstanding enter
// (old value == DISPATCH_GROUP_VALUE_1, i.e. the -1 -> 0 transition), clear
// the waiter/notify bits via a CAS loop and wake the group so pending
// notify blocks / waiters run. Leaving a group whose value is already 0 is
// an unbalanced call and crashes deliberately.
void
dispatch_group_leave(dispatch_group_t dg)
{
	// The value is incremented on a 64bits wide atomic so that the carry for
	// the -1 -> 0 transition increments the generation atomically.
	uint64_t new_state, old_state = os_atomic_add_orig2o(dg, dg_state,
			DISPATCH_GROUP_VALUE_INTERVAL, release);
	uint32_t old_value = (uint32_t)(old_state & DISPATCH_GROUP_VALUE_MASK);

	if (unlikely(old_value == DISPATCH_GROUP_VALUE_1)) {
		old_state += DISPATCH_GROUP_VALUE_INTERVAL;
		do {
			new_state = old_state;
			if ((old_state & DISPATCH_GROUP_VALUE_MASK) == 0) {
				new_state &= ~DISPATCH_GROUP_HAS_WAITERS;
				new_state &= ~DISPATCH_GROUP_HAS_NOTIFS;
			} else {
				// If the group was entered again since the atomic_add above,
				// we can't clear the waiters bit anymore as we don't know for
				// which generation the waiters are for
				new_state &= ~DISPATCH_GROUP_HAS_NOTIFS;
			}
			if (old_state == new_state) break;
		} while (unlikely(!os_atomic_cmpxchgv2o(dg, dg_state,
				old_state, new_state, &old_state, relaxed)));
		return _dispatch_group_wake(dg, old_state, true);
	}

	if (unlikely(old_value == 0)) {
		DISPATCH_CLIENT_CRASH((uintptr_t)old_value,
				"Unbalanced call to dispatch_group_leave()");
	}
}

可以看到dispatch_group_enter里面是一个减一的操作(os_atomic_sub_orig2o),而dispatch_group_leave里面是加一操作。由于#define DISPATCH_GROUP_VALUE_1 DISPATCH_GROUP_VALUE_MASK,如果 if (unlikely(old_value == DISPATCH_GROUP_VALUE_1)) 成立,说明这次leave配平了最后一次enter。在这个if里面,执行了old_state += DISPATCH_GROUP_VALUE_INTERVAL;和_dispatch_group_wake(dg, old_state, true);。_dispatch_group_wake唤起了_dispatch_group_notify注册的块。

dispatch_group_enter和dispatch_group_leave阻塞的是_dispatch_group_notify的执行,我们看下_dispatch_group_notify

// Registers a notify continuation on the group: pushes dsn onto the group's
// MPSC notify list (retaining the group when the list was empty). If the
// group's value is already 0 at registration time — all enters already
// balanced — wake the group immediately so the notify block runs; otherwise
// the wake happens later from dispatch_group_leave.
static inline void
_dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq,
		dispatch_continuation_t dsn)
{
	uint64_t old_state, new_state;
	dispatch_continuation_t prev;

	// Remember the queue the notify block should run on.
	dsn->dc_data = dq;
	_dispatch_retain(dq);

	prev = os_mpsc_push_update_tail(os_mpsc(dg, dg_notify), dsn, do_next);
	if (os_mpsc_push_was_empty(prev)) _dispatch_retain(dg);
	os_mpsc_push_update_prev(os_mpsc(dg, dg_notify), prev, dsn, do_next);
	if (os_mpsc_push_was_empty(prev)) {
		os_atomic_rmw_loop2o(dg, dg_state, old_state, new_state, release, {
			new_state = old_state | DISPATCH_GROUP_HAS_NOTIFS;
			if ((uint32_t)old_state == 0) {
				// Group already balanced: fire the notify right away.
				os_atomic_rmw_loop_give_up({
					return _dispatch_group_wake(dg, new_state, false);
				});
			}
		});
	}
}

如果old_state == 0会执行_dispatch_group_wake,在dispatch_group_leave里面也有唤醒的函数,说明dispatch_group_notify函数中将block块和group绑定在了一起。由于异步函数,可能直接先执行到dispatch_group_notify函数中,但是此时dispatch_group_leave还没有回来,当dispatch_group_leave执行完的时候,也会进行一个唤醒,由于dispatch_group_notify已经将block和group绑定在了一起,所以dispatch_group_leave也可以进行唤醒。

为什么dispatch_group_async就等于dispatch_group_enterdispatch_group_leave,是不是底层封装了dispatch_group_enterdispatch_group_leave。 我们看一下源码:

// Public group-async entry: wraps the block in a continuation tagged with
// DC_FLAG_GROUP_ASYNC (so the invoke path knows to leave the group after the
// block runs) and hands it to _dispatch_continuation_group_async.
void
dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
		dispatch_block_t db)
{
	dispatch_continuation_t dc = _dispatch_continuation_alloc();
	uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_GROUP_ASYNC;
	dispatch_qos_t qos;

	qos = _dispatch_continuation_init(dc, dq, db, 0, dc_flags);
	_dispatch_continuation_group_async(dg, dq, dc, qos);
}

我们看下_dispatch_continuation_group_async

// Enters the group BEFORE enqueuing the work (the matching leave happens in
// _dispatch_continuation_with_group_invoke after the block runs), stashing
// the group in dc_data so the invoke path can find it.
static inline void
_dispatch_continuation_group_async(dispatch_group_t dg, dispatch_queue_t dq,
		dispatch_continuation_t dc, dispatch_qos_t qos)
{
	dispatch_group_enter(dg);
	dc->dc_data = dg;
	_dispatch_continuation_async(dq, dc, qos, dc->dc_flags);
}

dispatch_group_leave函数在哪里呢,应该在block执行完之后。我们继续追踪源码 经过一系列的调用栈,最后到了下面这个函数

// Continuation invoke path (body partially elided by the article with "...").
// Continuations tagged DC_FLAG_GROUP_ASYNC are routed to the group-aware
// invoke, which leaves the group after the client callout; plain
// continuations just call out and complete.
static inline void
_dispatch_continuation_invoke_inline(dispatch_object_t dou,
		dispatch_invoke_flags_t flags, dispatch_queue_class_t dqu)
{
	    ...
		if (unlikely(dc_flags & DC_FLAG_GROUP_ASYNC)) {
			_dispatch_continuation_with_group_invoke(dc);
		} else {
			_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
			_dispatch_trace_item_complete(dc);
		}
		if (unlikely(dc1)) {
			_dispatch_continuation_free_to_cache_limit(dc1);
		}
	});
	_dispatch_perfmon_workitem_inc();
}

_dispatch_continuation_with_group_invoke这个函数实现如下:

// Group-aware invoke: runs the client block, then calls
// dispatch_group_leave on the group stashed in dc_data — this is the hidden
// "leave" that makes dispatch_group_async equivalent to an enter/leave pair.
static inline void
_dispatch_continuation_with_group_invoke(dispatch_continuation_t dc)
{
	struct dispatch_object_s *dou = dc->dc_data;
	unsigned long type = dx_type(dou);
	if (type == DISPATCH_GROUP_TYPE) {
		_dispatch_client_callout(dc->dc_ctxt, dc->dc_func);
		_dispatch_trace_item_complete(dc);
		dispatch_group_leave((dispatch_group_t)dou);
	} else {
		DISPATCH_INTERNAL_CRASH(dx_type(dou), "Unexpected object type");
	}
}

所以dispatch_group_async底层是对dispatch_group_enter和dispatch_group_leave的封装。