为了解决查询任务太大导致服务无法一次性返回全部数据的问题,异步调用用到了 CompletableFuture 和 Reactor 两种并行框架,借鉴了 ForkJoin 的思想,并使用 Guava 对任务集合进行拆分(Lists#partition 只能拆分 List 类型,其他类型需按业务自行拆分)。使用 CompletableFuture 时需要指定线程池,不指定则会默认使用 ForkJoinPool.commonPool() 线程池;parallelStream 并行流同样默认使用 ForkJoinPool.commonPool() 线程池。
CompletableFuture 是JDK8并行包中提供的API:CompletableFuture对Future进行了扩展,可以通过设置回调的方式处理计算结果,同时也支持组合操作,支持进一步的编排,同时一定程度解决了回调地狱的问题
Reactor是这些年比较流行的响应式编程,现在Spring WebFlux 中有用到,还有在高德的技术文章中看到有落地项目
package com.example.dragon.org.utils;
import com.google.common.collect.Lists;
import org.apache.commons.collections4.CollectionUtils;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.concurrent.ForkJoinPool;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
* @author Administrator
*/
/**
 * Parallel batch-query helper: splits a large collection of ids into batches,
 * invokes a query function on each batch concurrently, and merges the results.
 * Two execution strategies are provided: CompletableFuture-based ({@code exec})
 * and Reactor Flux-based ({@code fluxExec}).
 *
 * @author Administrator
 */
public class CompletableFutureUtils {

    private CompletableFutureUtils() {}

    /**
     * Default batch size (50).
     */
    private static final Integer DEFAULT_BATCH_SIZE = 50;

    /** Parallelism of the Reactor pipeline. */
    private static final int FLUX_PARALLELISM = 10;

    /** Shared Reactor scheduler; elastic is suited to blocking batch calls. */
    private static final Scheduler SCHEDULER = Schedulers.elastic();

    /**
     * Batch call with the default batch size (50), CompletableFuture strategy.
     *
     * @param ids      ids to query; null-safe, nulls and duplicates are removed
     * @param callFunc function invoked once per batch of ids
     * @param <T>      result element type
     * @return merged results of all batches; empty list when there are no usable ids
     */
    public static <T> List<T> callBatchJoin(Collection<Long> ids, Function<Collection<Long>, List<T>> callFunc) {
        // Delegate to the sized overload instead of duplicating the dedup/exec logic.
        return callBatchJoin(ids, callFunc, DEFAULT_BATCH_SIZE);
    }

    /**
     * Batch call with an explicit batch size, CompletableFuture strategy.
     *
     * <p>Example:
     * <pre>{@code
     * CompletableFutureUtils.callBatchJoin(inputDTO.getOrderIds(), (ids) -> {
     *     SignOffOrderQueryInputDTO newInput = new SignOffOrderQueryInputDTO();
     *     newInput.setOrderIds(new ArrayList<>(ids));
     *     return Optional.ofNullable(clinicalOrderQueryRpcService.querySignOffOrderByExample(newInput))
     *             .map(WinRpcResponse::getData).orElse(Lists.newArrayList());
     * }, 100);
     * }</pre>
     *
     * @param ids       ids to query; null-safe, nulls and duplicates are removed
     * @param callFunc  function invoked once per batch of ids
     * @param batchSize batch size; null falls back to the default (50)
     * @param <T>       result element type
     * @return merged results of all batches; empty list when there are no usable ids
     */
    public static <T> List<T> callBatchJoin(Collection<Long> ids, Function<Collection<Long>, List<T>> callFunc, Integer batchSize) {
        List<Long> idList = dedup(ids);
        if (CollectionUtils.isEmpty(idList)) {
            return new ArrayList<>();
        }
        return exec(callFunc, idList, Optional.ofNullable(batchSize).orElse(DEFAULT_BATCH_SIZE), null);
    }

    /**
     * Batch call using the Reactor (Flux) strategy.
     *
     * @param ids          ids to query; null-safe, nulls and duplicates are removed
     * @param callFunc     function invoked once per batch of ids
     * @param batchSize    batch size; null falls back to the default (50)
     * @param executorPool executor that runs the batches; null uses the shared elastic scheduler
     * @param <T>          result element type
     * @return merged results of all batches; empty list when there are no usable ids
     */
    public static <T> List<T> callBatchJoin(Collection<Long> ids, Function<Collection<Long>, List<T>> callFunc, Integer batchSize, Executor executorPool) {
        List<Long> idList = dedup(ids);
        if (CollectionUtils.isEmpty(idList)) {
            return new ArrayList<>();
        }
        return fluxExec(callFunc, idList, Optional.ofNullable(batchSize).orElse(DEFAULT_BATCH_SIZE), executorPool);
    }

    /** Removes nulls and duplicates; returns an empty list for a null input. */
    private static List<Long> dedup(Collection<Long> ids) {
        if (ids == null) {
            return new ArrayList<>();
        }
        return ids.stream().filter(Objects::nonNull).distinct().collect(Collectors.toList());
    }

    /**
     * CompletableFuture-based execution: one async task per batch, all on the
     * same executor, joined and merged in partition order.
     *
     * @param callFunc     function invoked once per batch
     * @param idList       deduplicated ids
     * @param batchSize    batch size (already defaulted by the caller)
     * @param executorPool executor to use; null falls back to the project pool,
     *                     then to ForkJoinPool.commonPool()
     * @param <T>          result element type
     * @return merged results of all batches
     */
    private static <T> List<T> exec(Function<Collection<Long>, List<T>> callFunc, List<Long> idList, Integer batchSize, Executor executorPool) {
        List<List<Long>> partition = Lists.partition(idList, batchSize);
        // Resolve the executor lazily so the project pool is only looked up when
        // the caller did not supply one; never run supplyAsync without an explicit
        // executor unless both fallbacks are unavailable.
        Executor pool = Optional.ofNullable(executorPool)
                .orElseGet(() -> Optional.ofNullable(ThreadPoolUtil.getThreadPoolExecutor()).orElse(ForkJoinPool.commonPool()));
        List<CompletableFuture<List<T>>> completableFutures = new ArrayList<>(partition.size());
        for (List<Long> subList : partition) {
            // Run each batch asynchronously on the resolved pool.
            completableFutures.add(CompletableFuture.supplyAsync(() -> callFunc.apply(subList), pool));
        }
        // BUG FIX: the original built the allOf future but never joined it, so the
        // "wait for all" step was a no-op; join it so all batches complete (and any
        // failure surfaces) before results are merged.
        CompletableFuture.allOf(completableFutures.toArray(new CompletableFuture[0])).join();
        // Merge all non-empty batch results into a single list.
        return completableFutures.stream()
                .map(CompletableFuture::join)
                .filter(CollectionUtils::isNotEmpty)
                .flatMap(Collection::stream)
                .collect(Collectors.toList());
    }

    /**
     * Reactor-based execution: runs each batch on a parallel Flux rail and merges
     * the per-batch results. Blocks until all batches complete.
     *
     * @param callFunc     function invoked once per batch
     * @param idList       deduplicated ids
     * @param batchSize    batch size (already defaulted by the caller)
     * @param executorPool executor backing the scheduler; null uses the shared elastic scheduler
     * @param <T>          result element type
     * @return merged results of all batches
     */
    public static <T> List<T> fluxExec(Function<Collection<Long>, List<T>> callFunc, List<Long> idList, Integer batchSize, Executor executorPool) {
        List<List<Long>> partition = Lists.partition(idList, batchSize);
        // BUG FIX: the original accepted executorPool but never used it; wrap the
        // caller-supplied pool in a scheduler so it actually runs the batches.
        Scheduler scheduler = executorPool == null ? SCHEDULER : Schedulers.fromExecutor(executorPool);
        List<List<T>> listList = Flux.fromIterable(partition)
                .parallel(FLUX_PARALLELISM)
                .runOn(scheduler)
                .flatMap(ids -> Mono.fromSupplier(() -> callFunc.apply(ids)))
                .sequential()
                .collectList()
                .block();
        if (CollectionUtils.isEmpty(listList)) {
            return Collections.emptyList();
        }
        // Merge all batch results into a single list.
        return listList.stream().flatMap(Collection::stream).collect(Collectors.toList());
    }
}
参考: