本章内容
- 栅栏函数
- 信号量
- 调度组
- dispatch_source
本章目标
- 熟悉栅栏函数、信号量、调度组、dispatch_source 的使用
- 了解其底层原理
栅栏函数
栅栏函数分为同步dispatch_barrier_sync和异步dispatch_barrier_async。跟我们的同步和异步有着相似的过程。先举例看其使用过程
栅栏函数的特性是:对队列的拦截
栅栏函数问题:1. 全局并发不生效,所以要使用自定义并发队列。2. 必须在同一队列中(所以跨层的话栅栏函数很鸡肋,所以简单用还可以。那么解决这个问题推荐调度组)
使用
// What order do these tasks run in on a concurrent queue?
dispatch_queue_t workQueue = dispatch_queue_create("cooci", DISPATCH_QUEUE_CONCURRENT);
dispatch_async(workQueue, ^{
    NSLog(@"1");
});
dispatch_async(workQueue, ^{
    sleep(1);
    NSLog(@"2");
});
dispatch_barrier_async(workQueue, ^{
    NSLog(@"----%@-----",[NSThread currentThread]);
    sleep(2);
});
dispatch_async(workQueue, ^{
    NSLog(@"3");
});
dispatch_async(workQueue, ^{
    NSLog(@"4");
});
NSLog(@"5");
// With dispatch_barrier_async: task 2 sleeps and everything is async,
// so the relative order of 1 and 5 is not fixed.
// One possible order: 5, 1, 2, [NSThread currentThread], 3, 4.
// With dispatch_barrier_sync instead: 1, 2, [NSThread currentThread],
// then 3/4/5 (their relative order is not fixed).
// Note the sync variant also blocks the current thread until the barrier block returns.
栅栏函数作为锁应用
// NSMutableArray is not thread-safe (why is covered later); the barrier
// serializes the mutation against the concurrent image loads.
dispatch_queue_t imageQueue = dispatch_queue_create("cooci", DISPATCH_QUEUE_CONCURRENT);
for (int index = 0; index < 1000; index++) {
    dispatch_async(imageQueue, ^{
        NSString *imageName = [NSString stringWithFormat:@"%d.jpg", (index % 10)];
        NSURL *url = [[NSBundle mainBundle] URLForResource:imageName withExtension:nil];
        NSData *data = [NSData dataWithContentsOfURL:url];
        UIImage *image = [UIImage imageWithData:data];
        // Without the barrier, concurrent addObject: calls can corrupt the
        // array (crash on a dangling pointer). With it, the append runs alone
        // on the queue once the in-flight async work ahead of it has drained.
        dispatch_barrier_async(imageQueue , ^{
            [self.mArray addObject:image];
        });
    });
}
同步栅栏
会因为同步特性阻塞线程,看其源码跟dispatch_sync是不是有一定相似性,而且当同步串行的情况下会走_dispatch_barrier_sync_f函数。说明底层源码是趋于相似的
dispatch_barrier_sync
// Public entry point for a synchronous barrier submission.
// Tags the continuation with DC_FLAG_BARRIER so the queue treats it as a barrier.
void
dispatch_barrier_sync(dispatch_queue_t dq, dispatch_block_t work)
{
uintptr_t dc_flags = DC_FLAG_BARRIER | DC_FLAG_BLOCK;
// Blocks created via dispatch_block_create() carry private data (QoS, flags)
// and take a dedicated slow path.
if (unlikely(_dispatch_block_has_private_data(work))) {
return _dispatch_sync_block_with_privdata(dq, work, dc_flags);
}
// _dispatch_barrier_sync_f is a thin wrapper that just forwards to
// _dispatch_barrier_sync_f_inline, so read that function directly.
//_dispatch_barrier_sync_f_inline(dq, ctxt, func, dc_flags);
_dispatch_barrier_sync_f(dq, work, _dispatch_Block_invoke(work), dc_flags);
}
_dispatch_barrier_sync_f_inline
// Core of dispatch_barrier_sync. DC_FLAG_BARRIER marks the item as a barrier;
// the overall shape closely mirrors _dispatch_sync_f_inline for plain
// dispatch_sync (compare the two).
static inline void
_dispatch_barrier_sync_f_inline(dispatch_queue_t dq, void *ctxt,
dispatch_function_t func, uintptr_t dc_flags)
{
dispatch_tid tid = _dispatch_tid_self();
if (unlikely(dx_metatype(dq) != _DISPATCH_LANE_TYPE)) {
DISPATCH_CLIENT_CRASH(0, "Queue type doesn't support dispatch_sync");
}
dispatch_lane_t dl = upcast(dq)._dl;
// The more correct thing to do would be to merge the qos of the thread
// that just acquired the barrier lock into the queue state.
//
// However this is too expensive for the fast path, so skip doing it.
// The chosen tradeoff is that if an enqueue on a lower priority thread
// contends with this fast path, this thread may receive a useless override.
//
// Global concurrent queues and queues bound to non-dispatch threads
// always fall into the slow case, see DISPATCH_ROOT_QUEUE_STATE_INIT_VALUE
if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(dl, tid))) {
// Same slow path as dispatch_sync -- which also means a barrier
// submitted to the queue the caller is already on can deadlock here.
return _dispatch_sync_f_slow(dl, ctxt, func, DC_FLAG_BARRIER, dl,
DC_FLAG_BARRIER | dc_flags);
}
// Non-trivial target-queue chain: recurse down the hierarchy (which may
// itself end up calling _dispatch_sync_f_slow).
if (unlikely(dl->do_targetq->do_targetq)) {
return _dispatch_sync_recurse(dl, ctxt, func,
DC_FLAG_BARRIER | dc_flags);
}
_dispatch_introspection_sync_begin(dl);
_dispatch_lane_barrier_sync_invoke_and_complete(dl, ctxt, func
DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
dq, ctxt, func, dc_flags | DC_FLAG_BARRIER)));
}
_dispatch_sync_recurse
这个函数是上一节没有展示出来的部分,本身并不复杂:沿着目标队列链逐层检查,只要还有下一层目标队列就在 do-while 中继续往下走。回想栅栏函数的流程:想让栅栏任务执行,必须先把队列中排在它前面的任务执行完毕,之后才能走到完成(complete)代码。
// Walks the target-queue chain, acquiring each hop before the sync/barrier
// item may run. A barrier can only execute once everything queued ahead of it
// has drained, so each level is either acquired here or the call parks in the
// slow path; only after the whole chain is held does the completion code run.
static void
_dispatch_sync_recurse(dispatch_lane_t dq, void *ctxt,
dispatch_function_t func, uintptr_t dc_flags)
{
dispatch_tid tid = _dispatch_tid_self();
dispatch_queue_t tq = dq->do_targetq;
do {
// Serial hop (width 1): must take the barrier lock.
if (likely(tq->dq_width == 1)) {
if (unlikely(!_dispatch_queue_try_acquire_barrier_sync(tq, tid))) {
return _dispatch_sync_f_slow(dq, ctxt, func, dc_flags, tq,
DC_FLAG_BARRIER);
}
} else {
// Concurrent hop: just reserve one unit of the queue's width.
dispatch_queue_concurrent_t dl = upcast(tq)._dl;
if (unlikely(!_dispatch_queue_try_reserve_sync_width(dl))) {
return _dispatch_sync_f_slow(dq, ctxt, func, dc_flags, tq, 0);
}
}
tq = tq->do_targetq;
} while (unlikely(tq->do_targetq));
_dispatch_introspection_sync_begin(dq);
_dispatch_sync_invoke_and_complete_recurse(dq, ctxt, func, dc_flags
DISPATCH_TRACE_ARG(_dispatch_trace_item_sync_push_pop(
dq, ctxt, func, dc_flags)));
}
_dispatch_sync_invoke_and_complete_recurse
这个是中间过渡
// Thin glue: invoke the client's function, then unwind the queue hierarchy
// via the completion path below.
static void
_dispatch_sync_invoke_and_complete_recurse(dispatch_queue_class_t dq,
void *ctxt, dispatch_function_t func, uintptr_t dc_flags
DISPATCH_TRACE_ARG(void *dc))
{
_dispatch_sync_function_invoke_inline(dq, ctxt, func);
_dispatch_trace_item_complete(dc);
_dispatch_sync_complete_recurse(dq._dq, NULL, dc_flags);
}
_dispatch_sync_complete_recurse
到了这里就代表了前面任务执行完了,就要开始执行barrier了,其实同步函数也是走的这个流程,所以说gcd底层都是越来越相似,只是因为特性不同前面加了不同的api执行流程。而到这里就能说明为什么全局并发队列没有栅栏效果
// Runs once the submitted work has finished: wakes each queue up the target
// chain. Plain dispatch_sync shares this path; the barrier flag only changes
// which wakeup is requested. This is also where the "barriers have no effect
// on global queues" story is decided, because dx_wakeup dispatches to a
// per-queue-class implementation.
static void
_dispatch_sync_complete_recurse(dispatch_queue_t dq, dispatch_queue_t stop_dq,
uintptr_t dc_flags)
{
bool barrier = (dc_flags & DC_FLAG_BARRIER);
do {
if (dq == stop_dq) return;
// Barrier present on this queue: request a barrier-complete wakeup.
if (barrier) {
// dx_wakeup is a macro resolving to the queue class's wakeup:
//   global concurrent queues -> _dispatch_root_queue_wakeup
//   custom (lane) queues     -> _dispatch_lane_wakeup
dx_wakeup(dq, 0, DISPATCH_WAKEUP_BARRIER_COMPLETE);
} else {
// Non-barrier completion: just give back one unit of width.
_dispatch_lane_non_barrier_complete(upcast(dq)._dl, 0);
}
dq = dq->do_targetq;
barrier = (dq->dq_width == 1);
} while (unlikely(dq->do_targetq));
}
全局并发和自定义并发唤醒函数对比
_dispatch_root_queue_wakeup 全局并发
可以看到全局并发并没有对栅栏函数做处理。为什么呢?因为全局并发队列并不一定只有你在使用,可能系统还在使用,如果你突然加个栅栏那么就会造成了线程阻塞。
// Wakeup for GLOBAL concurrent (root) queues. Note there is no handling of
// DISPATCH_WAKEUP_BARRIER_COMPLETE at all: root queues are shared with the
// system, so honoring a client barrier could stall unrelated work -- which is
// why dispatch_barrier_* has no barrier effect on a global queue.
void
_dispatch_root_queue_wakeup(dispatch_queue_global_t dq,
DISPATCH_UNUSED dispatch_qos_t qos, dispatch_wakeup_flags_t flags)
{
// Root queues may only be woken for a block wait; anything else is misuse.
if (!(flags & DISPATCH_WAKEUP_BLOCK_WAIT)) {
DISPATCH_INTERNAL_CRASH(dq->dq_priority,
"Don't try to wake up or override a root queue");
}
// Root queues just drop the retain the waker transferred in; lane queues,
// by contrast, go through a much longer wakeup path.
if (flags & DISPATCH_WAKEUP_CONSUME_2) {
return _dispatch_release_2_tailcall(dq);
}
}
_dispatch_lane_wakeup 普通并发
// Wakeup for custom (lane) queues -- this one DOES understand barriers.
void
_dispatch_lane_wakeup(dispatch_lane_class_t dqu, dispatch_qos_t qos,
dispatch_wakeup_flags_t flags)
{
dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;
// Barrier completion takes its own dedicated path.
if (unlikely(flags & DISPATCH_WAKEUP_BARRIER_COMPLETE)) {
return _dispatch_lane_barrier_complete(dqu, qos, flags);
}
if (_dispatch_queue_class_probe(dqu)) {
target = DISPATCH_QUEUE_WAKEUP_TARGET;
}
// Plain sync/async submissions end up here.
return _dispatch_queue_wakeup(dqu, qos, flags, target);
}
_dispatch_lane_barrier_complete
看到这个函数其实就已经差不多了,至于完成的代码有兴趣可以再跟,里面还有对栅栏的处理
// Finishes a barrier on a lane queue: hand off to the next waiter, drain the
// non-barrier items a concurrent queue still holds, and finally return the
// full width the barrier owned.
static void
_dispatch_lane_barrier_complete(dispatch_lane_class_t dqu, dispatch_qos_t qos,
dispatch_wakeup_flags_t flags)
{
dispatch_queue_wakeup_target_t target = DISPATCH_QUEUE_WAKEUP_NONE;
dispatch_lane_t dq = dqu._dl;
if (dq->dq_items_tail && !DISPATCH_QUEUE_IS_SUSPENDED(dq)) {
struct dispatch_object_s *dc = _dispatch_queue_get_head(dq);
// Serial queue, or the next item is itself a barrier: if it is a
// waiter, hand execution to it directly.
if (likely(dq->dq_width == 1 || _dispatch_object_is_barrier(dc))) {
if (_dispatch_object_is_waiter(dc)) {
return _dispatch_lane_drain_barrier_waiter(dq, dc, flags, 0);
}
} else if (dq->dq_width > 1 && !_dispatch_object_is_barrier(dc)) {
// Concurrent queue with non-barrier work queued: drain those items
// (i.e. "lift" the barrier) before running the completion code.
return _dispatch_lane_drain_non_barriers(dq, dc, flags);
}
if (!(flags & DISPATCH_WAKEUP_CONSUME_2)) {
_dispatch_retain_2(dq);
flags |= DISPATCH_WAKEUP_CONSUME_2;
}
target = DISPATCH_QUEUE_WAKEUP_TARGET;
}
// Give back the barrier bit plus the entire width the barrier held.
uint64_t owned = DISPATCH_QUEUE_IN_BARRIER +
dq->dq_width * DISPATCH_QUEUE_WIDTH_INTERVAL;
return _dispatch_lane_class_barrier_complete(dq, qos, flags, target, owned);
}
异步栅栏
异步栅栏不会阻塞其他线程的执行。
dispatch_barrier_async
可以对照上一章异步函数的处理:两者非常相似,执行流程几乎一模一样,这里便不再重复展示。不同之处只是最终找到的是 _dispatch_lane_push 函数;而 wakeup 流程其实与同步栅栏是一致的。
// Async barrier: same pipeline as dispatch_async, except the continuation
// also carries DC_FLAG_BARRIER; the push (_dispatch_lane_push) and wakeup
// machinery -- shared with the sync path -- does the rest.
void
dispatch_barrier_async(dispatch_queue_t dq, dispatch_block_t work)
{
dispatch_continuation_t dc = _dispatch_continuation_alloc();
uintptr_t dc_flags = DC_FLAG_CONSUME | DC_FLAG_BARRIER;
dispatch_qos_t qos;
qos = _dispatch_continuation_init(dc, dq, work, 0, dc_flags);
_dispatch_continuation_async(dq, dc, qos, dc_flags);
}
信号量
题外话:早期的调度组是基于信号量封装的,但现在的调度组是独立实现的一套逻辑。
信号量使用场景:1.当锁,2.控制最大并发数
信号量的主要api:
dispatch_semaphore_create:创建信号量,初始值传 1 时通常当锁使用。dispatch_semaphore_wait:信号量等待(计数 -1)。dispatch_semaphore_signal:释放信号量(计数 +1),与 wait 配套使用。
使用
我们可以用信号量做上传,或者也可以起到同步加锁效果
// With a starting value of 1, tasks 1 and 2 effectively execute as one unit:
// task 1 takes the semaphore and never signals it, so task 3 can only start
// once task 2 signals. In other words, at most one "slot" runs at a time.
// With an initial value of 2 there would be two slots -- tasks 1+2 and task 3
// could then run together, in no fixed order.
dispatch_queue_t globalQueue = dispatch_get_global_queue(0, 0);
// dispatch_semaphore_create only succeeds for values >= 0; otherwise NULL.
dispatch_semaphore_t semaphore = dispatch_semaphore_create(1);
dispatch_async(globalQueue, ^{
    dispatch_semaphore_wait(semaphore, DISPATCH_TIME_FOREVER);
    NSLog(@"执行任务1");
    NSLog(@"任务1完成");
});
dispatch_async(globalQueue, ^{
    NSLog(@"执行任务2");
    NSLog(@"任务2完成");
    dispatch_semaphore_signal(semaphore);
});
dispatch_async(globalQueue, ^{
    dispatch_semaphore_wait(semaphore, DISPATCH_TIME_FOREVER);
    NSLog(@"执行任务3");
    NSLog(@"任务3完成");
    dispatch_semaphore_signal(semaphore);
});
如果说上面代码改为下面的话,那么任务2先执行,然后才是任务1。这是为什么?
因为signal会发信号给上面说等待的不用再等待了。这个我们看源码解析
dispatch_queue_t globalQueue = dispatch_get_global_queue(0, 0);
dispatch_semaphore_t semaphore = dispatch_semaphore_create(0);
dispatch_async(globalQueue, ^{
    // Blocks until task 2 signals, so task 1 always runs after task 2.
    dispatch_semaphore_wait(semaphore, DISPATCH_TIME_FOREVER);
    NSLog(@"执行任务1");
    NSLog(@"任务1完成");
});
dispatch_async(globalQueue, ^{
    NSLog(@"执行任务2");
    NSLog(@"任务2完成");
    dispatch_semaphore_signal(semaphore);
});
dispatch_semaphore_signal
// Increment the semaphore. If the prior value was negative, a waiter is
// parked in the kernel and must be woken via the slow path.
intptr_t
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
// Atomic +1: e.g. a value of 0 becomes 1, meaning a wait may now proceed.
long value = os_atomic_inc2o(dsema, dsema_value, release);
if (likely(value > 0)) {
return 0;
}
// Wrapped past LONG_MIN: far more signals than waits -- client bug.
if (unlikely(value == LONG_MIN)) {
DISPATCH_CLIENT_CRASH(value,
"Unbalanced call to dispatch_semaphore_signal()");
}
// value <= 0: someone is blocked in dispatch_semaphore_wait; wake them.
return _dispatch_semaphore_signal_slow(dsema);
}
dispatch_semaphore_wait
就是等待信号量的值变为正数
// Decrement the semaphore; if the result goes negative the caller must block
// until a matching dispatch_semaphore_signal brings the value back up.
intptr_t
dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
// Atomic -1, mirroring the +1 in signal; starting from 0 this yields -1.
long value = os_atomic_dec2o(dsema, dsema_value, acquire);
if (likely(value >= 0)) {
return 0;
}
return _dispatch_semaphore_wait_slow(dsema, timeout);
}
_dispatch_semaphore_wait_slow
// Slow path of dispatch_semaphore_wait: park the thread on a kernel semaphore
// (or undo the decrement and time out, depending on the timeout mode).
static intptr_t
_dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema,
dispatch_time_t timeout)
{
long orig;
_dispatch_sema4_create(&dsema->dsema_sema, _DSEMA4_POLICY_FIFO);
// timeout is the caller's second argument: an arbitrary deadline,
// DISPATCH_TIME_NOW, or DISPATCH_TIME_FOREVER.
switch (timeout) {
default:
// Finite deadline: block until signaled or the deadline passes.
if (!_dispatch_sema4_timedwait(&dsema->dsema_sema, timeout)) {
break;
}
// Fall through and try to undo what the fast path did to
// dsema->dsema_value
case DISPATCH_TIME_NOW:
orig = dsema->dsema_value;
// NOW (or an already-expired deadline): give the decrement back and
// report a timeout immediately.
while (orig < 0) {
if (os_atomic_cmpxchgv2o(dsema, dsema_value, orig, orig + 1,
&orig, relaxed)) {
return _DSEMA4_TIMEOUT();
}
}
// Another thread called semaphore_signal().
// Fall through and drain the wakeup.
case DISPATCH_TIME_FOREVER:
// Ends in semaphore_wait()/sem_wait() (Mach/POSIX kernel waits),
// retried in a loop until the wakeup arrives -- i.e. "waiting" is
// just the thread parked in a retry loop on the kernel semaphore.
_dispatch_sema4_wait(&dsema->dsema_sema);
break;
}
return 0;
}
调度组
几个api
- dispatch_group_create:创建组
- dispatch_group_async:提交进组任务(相当于封装了 enter + async + leave 这三个函数)
- dispatch_group_notify:进组任务全部执行完毕后的通知
- dispatch_group_wait:等待进组任务执行完毕(可设置超时时间)
- dispatch_group_enter 和 dispatch_group_leave:进组和出组,与 dispatch_async 搭配使用时等价于 dispatch_group_async(看底层即可知这层封装关系)
使用
// Tasks 1 and 2 run independently of each other; the notify block only runs
// after both of them have left the group.
dispatch_group_t group = dispatch_group_create();
dispatch_queue_t globalQueue = dispatch_get_global_queue(0, 0);
dispatch_group_async(group, globalQueue, ^{
    // task 1
});
// enter/leave must be balanced: enter before dispatching, leave when done.
dispatch_group_enter(group);
dispatch_async(globalQueue, ^{
    // task 2
    dispatch_group_leave(group);
});
dispatch_group_notify(group, dispatch_get_main_queue(), ^{
    // all of the group's tasks have finished
});
分析
之前的时候是封装的信号量。现在是自己写一套
// Create a group with an initial outstanding-enter count of zero.
dispatch_group_t
dispatch_group_create(void)
{
return _dispatch_group_create_with_count(0);
}
_dispatch_group_create_with_count
与信号量的create相似。只是一个创建。
// Allocates the group object -- structurally similar to semaphore creation.
// n pre-taken "enters" are encoded into dg_bits (n is 0 for the public
// constructor, so the if-branch is skipped there).
static inline dispatch_group_t
_dispatch_group_create_with_count(uint32_t n)
{
dispatch_group_t dg = _dispatch_object_alloc(DISPATCH_VTABLE(group),
sizeof(struct dispatch_group_s));
dg->do_next = DISPATCH_OBJECT_LISTLESS;
dg->do_targetq = _dispatch_get_default_queue(false);
if (n) {
os_atomic_store2o(dg, dg_bits,
(uint32_t)-n * DISPATCH_GROUP_VALUE_INTERVAL, relaxed);
os_atomic_store2o(dg, do_ref_cnt, 1, relaxed); // <rdar://22318411>
}
return dg;
}
dispatch_group_enter
与信号量不同的是,计数变为 -1 时并不会像 wait 那样阻塞线程,但正是这个计数在此函数中"挡住"了 notify 任务的执行。可以把 dispatch_group_leave 与这个函数对照着看。
// Enter the group: conceptually the 0 -> -1 transition (a decrement). Unlike
// a semaphore wait, nothing blocks here -- the outstanding count merely keeps
// notify/wait from firing until the matching leaves arrive.
void
dispatch_group_enter(dispatch_group_t dg)
{
// The value is decremented on a 32bits wide atomic so that the carry
// for the 0 -> -1 transition is not propagated to the upper 32bits.
uint32_t old_bits = os_atomic_sub_orig2o(dg, dg_bits,
DISPATCH_GROUP_VALUE_INTERVAL, acquire);
uint32_t old_value = old_bits & DISPATCH_GROUP_VALUE_MASK;
if (unlikely(old_value == 0)) {
// First enter: keep the group alive until the matching leave.
_dispatch_retain(dg); // <rdar://problem/22318411>
}
if (unlikely(old_value == DISPATCH_GROUP_VALUE_MAX)) {
DISPATCH_CLIENT_CRASH(old_bits,
"Too many nested calls to dispatch_group_enter()");
}
}
dispatch_group_leave
当离开组以后就会去唤醒dispatch_group_notify,就是将enter从0变为-1,然后leave就是要将-1变为0,那么dispatch_group_notify这个函数就会执行了
// Leave the group: the -1 -> 0 transition (an increment). When the last leave
// brings the count back to zero, queued notify blocks and waiters are woken.
void
dispatch_group_leave(dispatch_group_t dg)
{
// The value is incremented on a 64bits wide atomic so that the carry for
// the -1 -> 0 transition increments the generation atomically.
// Compare with enter's subtraction: "-1" stored unsigned reads as 0xffff...
uint64_t new_state, old_state = os_atomic_add_orig2o(dg, dg_state,
DISPATCH_GROUP_VALUE_INTERVAL, release);
// If the old count was -1, old_value equals DISPATCH_GROUP_VALUE_1
// (DISPATCH_GROUP_VALUE_MASK and DISPATCH_GROUP_VALUE_1 coincide).
uint32_t old_value = (uint32_t)(old_state & DISPATCH_GROUP_VALUE_MASK);
// Last leave: clear the waiter/notify bits and wake, retrying against
// concurrent enters so a wakeup is neither lost nor misattributed.
if (unlikely(old_value == DISPATCH_GROUP_VALUE_1)) {
old_state += DISPATCH_GROUP_VALUE_INTERVAL;
do {
new_state = old_state;
if ((old_state & DISPATCH_GROUP_VALUE_MASK) == 0) {
new_state &= ~DISPATCH_GROUP_HAS_WAITERS;
new_state &= ~DISPATCH_GROUP_HAS_NOTIFS;
} else {
// If the group was entered again since the atomic_add above,
// we can't clear the waiters bit anymore as we don't know for
// which generation the waiters are for
new_state &= ~DISPATCH_GROUP_HAS_NOTIFS;
}
if (old_state == new_state) break;
} while (unlikely(!os_atomic_cmpxchgv2o(dg, dg_state,
old_state, new_state, &old_state, relaxed)));
// Wake notify blocks and dispatch_group_wait callers.
return _dispatch_group_wake(dg, old_state, true);
}
// Leave without a matching enter -- client bug.
if (unlikely(old_value == 0)) {
DISPATCH_CLIENT_CRASH((uintptr_t)old_value,
"Unbalanced call to dispatch_group_leave()");
}
}
_dispatch_group_notify
任务组的执行
// Queue a notify continuation on the group. If this push made the notify list
// non-empty, inspect dg_state: with a zero outstanding count the notification
// can fire immediately, otherwise the HAS_NOTIFS bit defers it to the final
// dispatch_group_leave.
static inline void
_dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq,
dispatch_continuation_t dsn)
{
uint64_t old_state, new_state;
dispatch_continuation_t prev;
dsn->dc_data = dq;
_dispatch_retain(dq);
prev = os_mpsc_push_update_tail(os_mpsc(dg, dg_notify), dsn, do_next);
if (os_mpsc_push_was_empty(prev)) _dispatch_retain(dg);
os_mpsc_push_update_prev(os_mpsc(dg, dg_notify), prev, dsn, do_next);
if (os_mpsc_push_was_empty(prev)) {
// Atomically set HAS_NOTIFS, retrying until a consistent view of
// dg_state is observed; a zero count means all leaves are done and the
// queued notify blocks may run right now.
os_atomic_rmw_loop2o(dg, dg_state, old_state, new_state, release, {
new_state = old_state | DISPATCH_GROUP_HAS_NOTIFS;
if ((uint32_t)old_state == 0) {
os_atomic_rmw_loop_give_up({
return _dispatch_group_wake(dg, new_state, false);
});
}
});
}
}
dispatch_source
其 CPU 负荷很小,会尽量不占用资源。它的原理是:在任一线程调用 dispatch_source_merge_data 后,会执行事先定义好的句柄(可以理解为 block),这个过程叫 Custom Event(用户事件),是 dispatch_source 支持处理的一种事件(通过条件来控制 block 的执行)。而且它不受 runloop 运行循环的影响(配合的是 workloop)。
补充:句柄是一种指向指针的指针,它指向的就是一个类或者结构,与系统有很密切的关系。如:实例句柄(HINSTANCE),位图句柄(HBITMAP),设备表述句柄(HDC),图标句柄(HICON),通用句柄(HANDLE)
常见的几个函数:
dispatch_source_create:创建源,参数 type 代表事件源的类型(常用 DISPATCH_SOURCE_TYPE_TIMER 做定时器),handle 是源的句柄(多传 0),mask 是事件标志的掩码(多传 0)。dispatch_source_set_event_handler:设置源事件回调。dispatch_source_merge_data:为源事件设置(合并)数据。dispatch_source_get_data:获取源事件数据,与 merge_data 一设一取。dispatch_resume:继续;dispatch_suspend:挂起。
使用场景
下载模型
@interface ViewController ()
// Progress bar showing accumulated download progress (0..1).
@property (weak, nonatomic) IBOutlet UIProgressView *progressView;
// Custom dispatch source (DATA_ADD type) driving the progress updates.
@property (nonatomic, strong) dispatch_source_t source;
// Serial queue the simulated download work runs on.
@property (nonatomic, strong) dispatch_queue_t queue;
// Units completed so far, out of 100.
@property (nonatomic, assign) NSUInteger totalComplete;
// Whether the source/queue are currently resumed (not suspended).
@property (nonatomic) BOOL isRunning;
@end
@implementation ViewController
// Sets up the DATA_ADD dispatch source whose handler (on the main queue)
// accumulates merged values into totalComplete and updates the progress bar.
// FIX: the original handler captured self strongly while self.source retains
// the handler -- a retain cycle. Use the weak/strong dance to break it.
- (void)viewDidLoad {
    [super viewDidLoad];
    self.totalComplete = 0;
    self.queue = dispatch_queue_create("seginal", NULL);
    // DATA_ADD coalesces values merged between handler invocations by adding them.
    self.source = dispatch_source_create(DISPATCH_SOURCE_TYPE_DATA_ADD, 0, 0, dispatch_get_main_queue());
    __weak typeof(self) weakSelf = self;
    dispatch_source_set_event_handler(self.source, ^{
        __strong typeof(weakSelf) strongSelf = weakSelf;
        if (!strongSelf) return;
        NSLog(@"%@",[NSThread currentThread]);
        // Read the value(s) merged via dispatch_source_merge_data below.
        NSUInteger value = dispatch_source_get_data(strongSelf.source);
        strongSelf.totalComplete += value;
        NSLog(@"进度: %.2f",strongSelf.totalComplete/100.0);
        strongSelf.progressView.progress = strongSelf.totalComplete/100.0;
    });
    self.isRunning = YES;
    // Sources are created suspended; resume before events can be delivered.
    dispatch_resume(self.source);
}
// Toggles the download between paused and running.
// FIX: the original set the button title to @"暂停中.." in BOTH branches, so
// the title never reflected the resumed state; the resume branch now shows
// a running title.
- (IBAction)didClickStartOrPauseAction:(id)sender {
    if (self.isRunning) {
        // Pause: suspend the source so the handler stops firing, and the work
        // queue so queued blocks stop draining. Suspends must be balanced by
        // resumes before either object is released.
        dispatch_suspend(self.source);
        dispatch_suspend(self.queue);
        NSLog(@"已经暂停");
        self.isRunning = NO;
        [sender setTitle:@"暂停中.." forState:UIControlStateNormal];
    } else {
        dispatch_resume(self.source);
        dispatch_resume(self.queue);
        NSLog(@"已经执行了");
        self.isRunning = YES;
        // Was @"暂停中.." here too -- show the running state instead.
        [sender setTitle:@"执行中.." forState:UIControlStateNormal];
    }
}
// Simulates 100 units of download work, merging one unit into the source per
// tick; the source's event handler (main queue) picks the data up.
// FIX: forward to super so the rest of the responder chain still sees the
// touch (the original override dropped it).
- (void)touchesBegan:(NSSet<UITouch *> *)touches withEvent:(UIEvent *)event{
    [super touchesBegan:touches withEvent:event];
    NSLog(@"开始了");
    for (int i= 0; i<100; i++) {
        dispatch_async(self.queue, ^{
            // Best-effort early-out; blocks already queued before a pause will
            // still run this check when the queue is resumed.
            if (!self.isRunning) {
                NSLog(@"已经暂停");
                return;
            }
            sleep(1);
            // Merge one unit; read back via dispatch_source_get_data in the
            // event handler.
            dispatch_source_merge_data(self.source, 1);
        });
    }
}
计时器,只写用法
// Initializer for a GCD-timer wrapper.
// target/selector: callback invoked on every tick (on a global queue).
// time: tick interval in seconds. delay: seconds before the first tick.
// NOTE(review): the event-handler block strongly captures `target`; if the
// target also owns this timer object that is a retain cycle -- confirm with
// callers.
-(instancetype)initWithTarget:(id )target withSelector:(SEL )selector withTime:(double )time withDelayTime:(double)delay
{
self = [super init];
if (self) {
self.queue = dispatch_get_global_queue(0, 0);
self.source = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, self.queue);
dispatch_time_t start = dispatch_time(DISPATCH_TIME_NOW, (delay * NSEC_PER_SEC));
// Timer leeway: smaller is more precise but costs more CPU; 0 is exact.
uint64_t leeway = 1 * NSEC_PER_SEC;
dispatch_source_set_timer(self.source, start, time * NSEC_PER_SEC, leeway);
dispatch_source_set_event_handler(self.source, ^{
// performSelector: with a runtime-chosen selector produces an ARC
// leak warning; tolerable only for void-returning selectors.
if ([target respondsToSelector:selector]) {
[target performSelector:selector];
}
});
}
return self;
}
// Start (or resume) the timer. Dispatch sources are created suspended, so
// this must be called once before any ticks fire.
-(void)start
{
dispatch_resume(self.source);
}
// Suspend the timer. Each suspend must be balanced by a resume; releasing a
// source while it is still suspended crashes.
-(void)suspend
{
dispatch_suspend(self.source);
}
// Cancel the timer permanently. (Method name "cancle" [sic] kept as-is since
// callers depend on it.) NOTE(review): a suspended source should be resumed
// before cancel/release -- confirm call sites honor this.
-(void)cancle
{
dispatch_source_cancel(self.source);
}