Integrating ShardingSphere-JDBC 5.1 with Spring Boot 2.6.4

Preface: This sharding algorithm uses a time-based sharding key. The time range covered by each physical table is configurable, e.g. one table per 3 days or per 5 days, so the window can be tuned dynamically to the data volume. Only code fragments are shown below; the complete code is on GitHub: [yguanrong/sharding-jdbc](https://github.com/yguanrong/sharding-jdbc). The project structure is shown in the figure below:

(Figure: project code structure)

1. Add the dependencies

<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter</artifactId>
        <version>2.6.4</version>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
        <version>2.6.4</version>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-netflix-eureka-client</artifactId>
        <version>3.1.4</version>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-actuator</artifactId>
        <version>2.6.4</version>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-commons</artifactId>
        <version>3.1.5</version>
    </dependency>
    <dependency>
        <groupId>org.flywaydb</groupId>
        <artifactId>flyway-core</artifactId>
        <!--            <version>5.2.4</version>-->
    </dependency>

    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-openfeign</artifactId>
        <version>3.1.5</version>
    </dependency>

    <!-- https://mvnrepository.com/artifact/com.alibaba/druid -->
    <dependency>
        <groupId>com.alibaba</groupId>
        <artifactId>druid</artifactId>
        <version>1.2.20</version>
    </dependency>


    <dependency>
        <groupId>org.projectlombok</groupId>
        <artifactId>lombok</artifactId>
        <version>1.18.20</version>
    </dependency>

    <!-- MySQL JDBC driver -->
    <dependency>
        <groupId>mysql</groupId>
        <artifactId>mysql-connector-java</artifactId>
        <version>8.0.22</version>
    </dependency>

    <dependency>
        <groupId>org.apache.shardingsphere</groupId>
        <artifactId>shardingsphere-jdbc-core-spring-boot-starter</artifactId>
        <version>5.1.0</version>
    </dependency>
    <!-- ShardingSphere-JDBC 5.1.0 needs tomcat-dbcp on the classpath when using the Druid connection pool -->
    <dependency>
        <groupId>org.apache.tomcat</groupId>
        <artifactId>tomcat-dbcp</artifactId>
        <version>10.0.16</version>
    </dependency>

    <!-- MyBatis-Plus -->
    <dependency>
        <groupId>com.baomidou</groupId>
        <artifactId>mybatis-plus-boot-starter</artifactId>
        <version>3.3.1</version>
    </dependency>

    <dependency>
        <groupId>com.baomidou</groupId>
        <artifactId>mybatis-plus-generator</artifactId>
        <version>3.3.1</version>
    </dependency>

    <!-- MyBatis pagination plugin -->
    <dependency>
        <groupId>com.github.pagehelper</groupId>
        <artifactId>pagehelper-spring-boot-starter</artifactId>
        <version>1.3.0</version>
    </dependency>

    <dependency>
        <groupId>org.apache.commons</groupId>
        <artifactId>commons-lang3</artifactId>
        <version>3.9</version>
    </dependency>

    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-freemarker</artifactId>
        <version>2.6.4</version>
    </dependency>
</dependencies>

2. Add the configuration file

  • Database configuration is added as usual; the Druid connection pool is used by default.
  • Sharding-rule configuration:

(1) Annotated sharding-rule excerpt:

# Actual data nodes (physical tables) behind the logic table; must reference the datasource name ("mydb" below)
spring.shardingsphere.rules.sharding.tables.t_user.actual-data-nodes=mydb.t_user
# Sharding column: rows are routed by this field
spring.shardingsphere.rules.sharding.tables.t_user.table-strategy.standard.sharding-column=create_time
# Name of the sharding algorithm to apply
spring.shardingsphere.rules.sharding.tables.t_user.table-strategy.standard.sharding-algorithm-name=time-sharding-algorithm
# Primary-key column of the sharded table
spring.shardingsphere.rules.sharding.tables.t_user.key-generate-strategy.column=id
# Key generator: snowflake by default, to avoid primary-key collisions across shards
spring.shardingsphere.rules.sharding.tables.t_user.key-generate-strategy.key-generator-name=snowflake

# Algorithm type CLASS_BASED: a custom algorithm implemented in a Java class
spring.shardingsphere.rules.sharding.sharding-algorithms.time-sharding-algorithm.type=CLASS_BASED
# Strategy type: standard (precise + range sharding)
spring.shardingsphere.rules.sharding.sharding-algorithms.time-sharding-algorithm.props.strategy=standard
# Fully qualified class name of the custom algorithm (TimeShardingAlgorithm).
# Note: the algorithm name in the property key must not contain uppercase letters; use hyphens instead of camelCase.
spring.shardingsphere.rules.sharding.sharding-algorithms.time-sharding-algorithm.props.algorithmClassName=com.yuzi.sharding.TimeShardingAlgorithm

(2) The complete application.properties:

server.port=9002
spring.application.name=ifaas-file

eureka.client.service-url.defaultZone=http://127.0.0.1:9527/eureka/
eureka.instance.lease-expiration-duration-in-seconds=65
eureka.instance.lease-renewal-interval-in-seconds=20
eureka.client.healthcheck.enabled=true
eureka.instance.prefer-ip-address=true

spring.main.allow-bean-definition-overriding=true
spring.main.allow-circular-references=true

spring.datasource.host=jdbc:mysql://localhost:3306
spring.datasource.url=${spring.datasource.host}/ifaas_client?autoReconnect=true&useUnicode=true&characterEncoding=UTF-8&useSSL=false&serverTimezone=Asia/Shanghai
spring.datasource.password=123456
spring.datasource.driver-class-name=com.mysql.cj.jdbc.Driver
spring.datasource.username=root

spring.datasource.hikari.maximum-pool-size=30
spring.datasource.hikari.minimum-idle=10
spring.datasource.hikari.max-lifetime=2000

spring.shardingsphere.props.sql-show=true
spring.shardingsphere.datasource.names=mydb
spring.shardingsphere.datasource.mydb.type=com.alibaba.druid.pool.DruidDataSource
spring.shardingsphere.datasource.mydb.url=${spring.datasource.host}/ifaas_client?useUnicode=true&characterEncoding=utf8&useSSL=false&serverTimezone=Asia/Shanghai
spring.shardingsphere.datasource.mydb.driver-class-name=com.mysql.cj.jdbc.Driver
spring.shardingsphere.datasource.mydb.username=root
spring.shardingsphere.datasource.mydb.password=123456
spring.shardingsphere.datasource.mydb.initial-size=5
spring.shardingsphere.datasource.mydb.min-idle=5
spring.shardingsphere.datasource.mydb.max-active=20
spring.shardingsphere.datasource.mydb.max-wait=60000
spring.shardingsphere.datasource.mydb.time-between-eviction-runs-millis=60000
spring.shardingsphere.datasource.mydb.min-evictable-idle-time-millis=300000
spring.shardingsphere.datasource.mydb.validation-query=SELECT 1 FROM DUAL
spring.shardingsphere.datasource.mydb.test-while-idle=true
spring.shardingsphere.datasource.mydb.test-on-borrow=false
spring.shardingsphere.datasource.mydb.test-on-return=false
spring.shardingsphere.datasource.mydb.pool-prepared-statements=true
spring.shardingsphere.datasource.mydb.max-pool-prepared-statement-per-connection-size=20
spring.shardingsphere.datasource.mydb.use-global-data-source-stat=true
spring.shardingsphere.datasource.mydb.connection-properties=druid.stat.mergeSql=true;druid.stat.slowSqlMillis=500

spring.shardingsphere.rules.sharding.tables.t_user.actual-data-nodes=mydb.t_user
spring.shardingsphere.rules.sharding.tables.t_user.table-strategy.standard.sharding-column=create_time
spring.shardingsphere.rules.sharding.tables.t_user.table-strategy.standard.sharding-algorithm-name=time-sharding-algorithm
spring.shardingsphere.rules.sharding.tables.t_user.key-generate-strategy.column=id
spring.shardingsphere.rules.sharding.tables.t_user.key-generate-strategy.key-generator-name=snowflake
spring.shardingsphere.rules.sharding.sharding-algorithms.time-sharding-algorithm.type=CLASS_BASED
spring.shardingsphere.rules.sharding.sharding-algorithms.time-sharding-algorithm.props.strategy=standard
spring.shardingsphere.rules.sharding.sharding-algorithms.time-sharding-algorithm.props.algorithmClassName=com.yuzi.sharding.TimeShardingAlgorithm

mybatis-plus.mapper-locations=classpath*:mybatis/mapper/*.xml
mybatis-plus.type-aliases-package=cn.yuzi.entity
mybatis-plus.configuration.log-impl=org.apache.ibatis.logging.stdout.StdOutImpl

pagehelper.helperDialect=mysql

spring.flyway.enabled=true
spring.flyway.encoding=UTF-8
spring.flyway.locations=classpath:db/migration
spring.flyway.url=jdbc:mysql://localhost:3306/ifaas_client?useUnicode=true&characterEncoding=utf8&useSSL=false&serverTimezone=Asia/Shanghai
spring.flyway.user=root
spring.flyway.password=123456

3. Write the sharding algorithm (the core table-sharding logic)

The TimeShardingAlgorithm class must implement the StandardShardingAlgorithm interface. It shards on a time-based sharding key, and the time range each shard covers is configurable.

// DateUtil, ShardingRecord and ShardingTableCacheEnum are project-local classes
// (see the sketch after this listing and the GitHub repository).
import com.google.common.collect.Range;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.shardingsphere.sharding.api.sharding.standard.PreciseShardingValue;
import org.apache.shardingsphere.sharding.api.sharding.standard.RangeShardingValue;
import org.apache.shardingsphere.sharding.api.sharding.standard.StandardShardingAlgorithm;
import org.springframework.util.CollectionUtils;

import java.time.LocalDateTime;
import java.util.*;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;

@Slf4j
public class TimeShardingAlgorithm implements StandardShardingAlgorithm<LocalDateTime> {

    /**
     * Table-name separator, e.g. in t_contract_202201 the separator is "_"
     */
    private static final String TABLE_SPLIT_SYMBOL = "_";


    /**
     * Precise sharding
     * @param tableNames all physical tables of the sharded logic table
     * @param preciseShardingValue sharding value; logicTableName is the logic table, columnName the sharding column, value the sharding-key value parsed from the SQL
     * @return physical table name
     */
    @Override
    public String doSharding(Collection<String> tableNames, PreciseShardingValue<LocalDateTime> preciseShardingValue) {
        String logicTableName = preciseShardingValue.getLogicTableName();
        ShardingTableCacheEnum logicTable = ShardingTableCacheEnum.of(logicTableName);
        if (logicTable == null) {
            log.error(">>>>>>>>>> [ERROR] unknown logic table, please retry later, logicTableNames:{}, logicTableName:{}",
                    ShardingTableCacheEnum.logicTableNames(), logicTableName);
            throw new IllegalArgumentException("unknown logic table, please retry later");
        }

        // Find the shard table whose time window contains the value
        LocalDateTime dateTime = preciseShardingValue.getValue();
        String resultTableName = getActualTable(logicTable, dateTime);

        if (StringUtils.isBlank(resultTableName)) {
            // Fall back to the default table if no shard matches
            resultTableName = logicTableName + TABLE_SPLIT_SYMBOL + "0";
        }
        // Log the routing decision
        log.info(">>>>>>>>>> [INFO] precise sharding, logic table:{}, physical table:{}", logicTableName, resultTableName);
        return resultTableName;
    }

    /**
     * Range sharding
     * @param tableNames all physical tables of the sharded logic table
     * @param rangeShardingValue sharding value range
     * @return physical table names
     */
    @Override
    public Collection<String> doSharding(Collection<String> tableNames, RangeShardingValue<LocalDateTime> rangeShardingValue) {
        String logicTableName = rangeShardingValue.getLogicTableName();
        ShardingTableCacheEnum logicTable = ShardingTableCacheEnum.of(logicTableName);
        if (logicTable == null) {
            log.error(">>>>>>>>>> [ERROR] invalid logic table range, please retry later, logicTableNames:{}, logicTableName:{}",
                    ShardingTableCacheEnum.logicTableNames(), logicTableName);
            throw new IllegalArgumentException("invalid logic table range, please retry later");
        }

        // Collect all shard tables whose time window intersects the queried range
        Set<String> resultTableNames = getActualTableSet(logicTable, rangeShardingValue.getValueRange());
        if (CollectionUtils.isEmpty(resultTableNames)) {
            // Fall back to the default table if no shard matches
            resultTableNames = new HashSet<>();
            resultTableNames.add(logicTableName + TABLE_SPLIT_SYMBOL + "0");
        }
        // Log the routing decision
        log.info(">>>>>>>>>> [INFO] range sharding, logic table:{}, physical tables:{}", logicTableName, resultTableNames);
        return resultTableNames;
    }

    @Override
    public String getType() {
        // Not used for CLASS_BASED algorithms
        return null;
    }

    @Override
    public void init() {
        // No initialization required
    }

    // --------------------------------------------------------------------------------------------------------------
    // Private methods
    // --------------------------------------------------------------------------------------------------------------

    /**
     * Get the earliest shard start time
     * @param tableNames shard records
     * @return earliest start time
     */
    private LocalDateTime getLowerEndpoint(Collection<ShardingRecord> tableNames) {
        Optional<LocalDateTime> optional = tableNames.stream()
                .map(ShardingRecord::getStartTime)
                .min(Comparator.comparing(Function.identity()));
        if (optional.isPresent()) {
            return optional.get();
        } else {
            log.error(">>>>>>>>>> [ERROR] failed to resolve the earliest shard, please retry later, tableNames:{}", tableNames);
            throw new IllegalArgumentException("failed to resolve the earliest shard, please retry later");
        }
    }

    /**
     * Get the latest shard end time
     * @param tableNames shard records
     * @return latest end time
     */
    private LocalDateTime getUpperEndpoint(Collection<ShardingRecord> tableNames) {
        Optional<LocalDateTime> optional = tableNames.stream()
                .map(ShardingRecord::getEndTime)
                .max(Comparator.comparing(Function.identity()));
        if (optional.isPresent()) {
            return optional.get();
        } else {
            log.error(">>>>>>>>>> [ERROR] failed to resolve the latest shard, please retry later, tableNames:{}", tableNames);
            throw new IllegalArgumentException("failed to resolve the latest shard, please retry later");
        }
    }

    /**
     * Find the physical table whose [startTime, endTime] window contains the given value.
     */
    private String getActualTable(ShardingTableCacheEnum logicTable, LocalDateTime dateTime) {
        AtomicReference<String> tableName = new AtomicReference<>();
        long dateTimeLong = DateUtil.localDateTimeToLong(dateTime);
        logicTable.resultTableNamesCache().forEach(shardingRecord -> {
            if (DateUtil.localDateTimeToLong(shardingRecord.getStartTime()) <= dateTimeLong &&
                    DateUtil.localDateTimeToLong(shardingRecord.getEndTime()) >= dateTimeLong) {
                tableName.set(shardingRecord.getActualTable());
            }
        });
        return tableName.get();
    }

    /**
     * Find all physical tables whose time window intersects the queried range.
     */
    private Set<String> getActualTableSet(ShardingTableCacheEnum logicTable, Range<LocalDateTime> valueRange) {
        Set<String> tableNameList = new HashSet<>();
        // Bounds of the BETWEEN ... AND ... condition
        boolean hasLowerBound = valueRange.hasLowerBound();
        boolean hasUpperBound = valueRange.hasUpperBound();

        // Resolve the effective min and max, falling back to the cached shard endpoints for open-ended ranges
        Set<ShardingRecord> tableNameCache = logicTable.resultTableNamesCache();
        LocalDateTime min = hasLowerBound ? valueRange.lowerEndpoint() : getLowerEndpoint(tableNameCache);
        LocalDateTime max = hasUpperBound ? valueRange.upperEndpoint() : getUpperEndpoint(tableNameCache);

        long minLong = DateUtil.localDateTimeToLong(min);
        long maxLong = DateUtil.localDateTimeToLong(max);

        logicTable.resultTableNamesCache().forEach(shardingRecord -> {
            long startTimeLong = DateUtil.localDateTimeToLong(shardingRecord.getStartTime());
            long endTimeLong = DateUtil.localDateTimeToLong(shardingRecord.getEndTime());
            if (!(maxLong < startTimeLong || endTimeLong < minLong)) {
                // The shard's window intersects [min, max]
                tableNameList.add(shardingRecord.getActualTable());
            }
        });
        return tableNameList;
    }
}
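
The algorithm relies on two project-local helpers that this post never lists in full: ShardingRecord, one row of shard metadata (logic table, physical table, shard index, and the time window the shard covers), and ShardingTableCacheEnum, an in-memory cache of those records per logic table. The complete versions are in the GitHub repository; the following is only a minimal sketch of the shape the algorithm assumes, with field names inferred from the calls above.

import lombok.Data;

import java.time.LocalDateTime;
import java.util.*;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.stream.Collectors;

/** One row of shard metadata (sketch; persisted via IShardingRecordService). */
@Data
public class ShardingRecord {
    private Long id;
    private String logicTable;       // e.g. t_user
    private String actualTable;      // e.g. t_user_1
    private Long indexNum;           // shard index parsed from the table-name suffix
    private LocalDateTime startTime; // inclusive start of the shard's time window
    private LocalDateTime endTime;   // inclusive end of the shard's time window
}

/** One constant per sharded logic table, caching its ShardingRecords (sketch). */
public enum ShardingTableCacheEnum {

    T_USER("t_user");

    private final String logicTableName;
    private final Set<ShardingRecord> cache = new CopyOnWriteArraySet<>();

    ShardingTableCacheEnum(String logicTableName) {
        this.logicTableName = logicTableName;
    }

    /** Resolve the enum constant for a logic table name, or null if the table is not sharded. */
    public static ShardingTableCacheEnum of(String logicTableName) {
        for (ShardingTableCacheEnum value : values()) {
            if (value.logicTableName.equals(logicTableName)) {
                return value;
            }
        }
        return null;
    }

    public static Set<String> logicTableNames() {
        return Arrays.stream(values()).map(v -> v.logicTableName).collect(Collectors.toSet());
    }

    public String logicTableName() {
        return logicTableName;
    }

    public Set<ShardingRecord> resultTableNamesCache() {
        return cache;
    }

    /** Update the cache and push the new table list into ShardingSphere's actualDataNodes. */
    public synchronized void atomicUpdateCacheAndActualDataNodes(List<ShardingRecord> records) {
        ShardingAlgorithmTool.actualDataNodesRefresh(logicTableName, records);
    }
}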

4. Custom sharding-algorithm utility class

ShardingAlgorithmTool loads shard metadata from the database, maintains the per-table caches, refreshes ShardingSphere's actualDataNodes configuration at runtime, and creates new physical tables on demand.

// ShardingSphere 5.1.0 internals used for the runtime rule refresh
import org.apache.shardingsphere.driver.jdbc.core.datasource.ShardingSphereDataSource;
import org.apache.shardingsphere.infra.config.RuleConfiguration;
import org.apache.shardingsphere.mode.manager.ContextManager;
import org.apache.shardingsphere.sharding.algorithm.config.AlgorithmProvidedShardingRuleConfiguration;
import org.apache.shardingsphere.sharding.api.config.rule.ShardingTableRuleConfiguration;

import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.springframework.core.env.Environment;
import org.springframework.util.CollectionUtils;

import java.sql.*;
import java.time.LocalDateTime;
import java.util.*;
import java.util.stream.Collectors;

// SpringUtil (an ApplicationContext holder), IShardingRecordService, ShardingRecord and
// ShardingTableCacheEnum are project-local classes from the GitHub repository.
@Slf4j
public class ShardingAlgorithmTool {

    private static IShardingRecordService shardingRecordService;

    /** Table-name separator, e.g. in t_user_202201 the separator is "_" */
    private static final String TABLE_SPLIT_SYMBOL = "_";

    /** Database connection settings */
    private static final Environment ENV = SpringUtil.getApplicationContext().getEnvironment();
    private static final String DATASOURCE_URL = ENV.getProperty("spring.shardingsphere.datasource.mydb.url");
    private static final String DATASOURCE_USERNAME = ENV.getProperty("spring.shardingsphere.datasource.mydb.username");
    private static final String DATASOURCE_PASSWORD = ENV.getProperty("spring.shardingsphere.datasource.mydb.password");

    /**
     * Inject the sharding-record service (called once at startup)
     * @param service sharding-record service
     */
    public static void initService(IShardingRecordService service) {
        shardingRecordService = service;
    }


    /**
     * Reload all table-name caches
     */
    public static void tableNameCacheReloadAll() {
        Arrays.stream(ShardingTableCacheEnum.values()).forEach(ShardingAlgorithmTool::tableNameCacheReload);
    }

    /**
     * Reload the cache of one sharded logic table
     * @param logicTable logic table, e.g. t_user
     */
    public static void tableNameCacheReload(ShardingTableCacheEnum logicTable) {
        // Read all shard records of this logic table from the database
        List<ShardingRecord> tableNameList = getAllTableNameBySchema(logicTable);
        // Update the cache and the actualDataNodes configuration (atomic operation)
        logicTable.atomicUpdateCacheAndActualDataNodes(tableNameList);
        // Drop the stale cache entries (if any)
        logicTable.resultTableNamesCache().clear();
        // Write the fresh entries
        logicTable.resultTableNamesCache().addAll(tableNameList);
    }

    /**
     * Get all shard records of a logic table
     * @param logicTable logic table
     * @return shard records
     */
    public static List<ShardingRecord> getAllTableNameBySchema(ShardingTableCacheEnum logicTable) {
        List<ShardingRecord> tableNames = new ArrayList<>();
        try {
            String logicTableName = logicTable.logicTableName();
            tableNames = shardingRecordService.queryAllShardingRecordByLogicName(logicTableName);
        } catch (Exception e) {
            log.error(">>>>>>>>>> [ERROR] database connection failed, please retry later, cause:{}", e.getMessage(), e);
        }
        return tableNames;
    }

    /**
     * Dynamically refresh the actualDataNodes configuration
     *
     * @param logicTableName  logic table name
     * @param tableNamesCache physical table records
     */
    public static void actualDataNodesRefresh(String logicTableName, List<ShardingRecord> tableNamesCache) {
        try {
            if (CollectionUtils.isEmpty(tableNamesCache)) {
                // During initial startup there may be no shard tables yet
                return;
            }
            // Datasource name of the sharding data nodes
            String dbName = "mydb";
            log.info(">>>>>>>>>> [INFO] refreshing sharding config, logicTableName:{}, tableNamesCache:{}", logicTableName, tableNamesCache);

            // Generate the new actualDataNodes string, e.g. "mydb.t_user_1,mydb.t_user_2"
            String newActualDataNodes = tableNamesCache.stream().map(ShardingRecord::getActualTable).map(o -> String.format("%s.%s", dbName, o)).collect(Collectors.joining(","));
            ShardingSphereDataSource shardingSphereDataSource = SpringUtil.getBean(ShardingSphereDataSource.class);
            updateShardRuleActualDataNodes(shardingSphereDataSource, logicTableName, newActualDataNodes);
        } catch (Exception e) {
            log.error("failed to refresh dynamic sharding config, cause:{}", e.getMessage(), e);
        }
    }


    // --------------------------------------------------------------------------------------------------------------
    // Private methods
    // --------------------------------------------------------------------------------------------------------------


    /**
     * Rebuild the sharding rule configuration with the new actualDataNodes and push it into ShardingSphere's context
     */
    private static void updateShardRuleActualDataNodes(ShardingSphereDataSource dataSource, String logicTableName, String newActualDataNodes) {
        // Context manager.
        ContextManager contextManager = dataSource.getContextManager();
        // Rule configuration.
        String schemaName = "logic_db";
        Collection<RuleConfiguration> newRuleConfigList = new LinkedList<>();
        Collection<RuleConfiguration> oldRuleConfigList = dataSource.getContextManager()
                .getMetaDataContexts()
                .getMetaData(schemaName)
                .getRuleMetaData()
                .getConfigurations();

        for (RuleConfiguration oldRuleConfig : oldRuleConfigList) {
            if (oldRuleConfig instanceof AlgorithmProvidedShardingRuleConfiguration) {

                // Algorithm provided sharding rule configuration
                AlgorithmProvidedShardingRuleConfiguration oldAlgorithmConfig = (AlgorithmProvidedShardingRuleConfiguration) oldRuleConfig;
                AlgorithmProvidedShardingRuleConfiguration newAlgorithmConfig = new AlgorithmProvidedShardingRuleConfiguration();

                // Sharding table rule configuration Collection
                Collection<ShardingTableRuleConfiguration> newTableRuleConfigList = new LinkedList<>();
                Collection<ShardingTableRuleConfiguration> oldTableRuleConfigList = oldAlgorithmConfig.getTables();

                oldTableRuleConfigList.forEach(oldTableRuleConfig -> {
                    if (logicTableName.equals(oldTableRuleConfig.getLogicTable())) {
                        ShardingTableRuleConfiguration newTableRuleConfig = new ShardingTableRuleConfiguration(oldTableRuleConfig.getLogicTable(), newActualDataNodes);
                        newTableRuleConfig.setTableShardingStrategy(oldTableRuleConfig.getTableShardingStrategy());
                        newTableRuleConfig.setDatabaseShardingStrategy(oldTableRuleConfig.getDatabaseShardingStrategy());
                        newTableRuleConfig.setKeyGenerateStrategy(oldTableRuleConfig.getKeyGenerateStrategy());

                        newTableRuleConfigList.add(newTableRuleConfig);
                    } else {
                        newTableRuleConfigList.add(oldTableRuleConfig);
                    }
                });

                newAlgorithmConfig.setTables(newTableRuleConfigList);
                newAlgorithmConfig.setAutoTables(oldAlgorithmConfig.getAutoTables());
                newAlgorithmConfig.setBindingTableGroups(oldAlgorithmConfig.getBindingTableGroups());
                newAlgorithmConfig.setBroadcastTables(oldAlgorithmConfig.getBroadcastTables());
                newAlgorithmConfig.setDefaultDatabaseShardingStrategy(oldAlgorithmConfig.getDefaultDatabaseShardingStrategy());
                newAlgorithmConfig.setDefaultTableShardingStrategy(oldAlgorithmConfig.getDefaultTableShardingStrategy());
                newAlgorithmConfig.setDefaultKeyGenerateStrategy(oldAlgorithmConfig.getDefaultKeyGenerateStrategy());
                newAlgorithmConfig.setDefaultShardingColumn(oldAlgorithmConfig.getDefaultShardingColumn());
                newAlgorithmConfig.setShardingAlgorithms(oldAlgorithmConfig.getShardingAlgorithms());
                newAlgorithmConfig.setKeyGenerators(oldAlgorithmConfig.getKeyGenerators());

                newRuleConfigList.add(newAlgorithmConfig);
            }
        }

        // update context
        contextManager.alterRuleConfiguration(schemaName, newRuleConfigList);
    }

    /**
     * Create a shard table
     * @param logicTable logic table
     * @param tableName physical table name, e.g. t_user_1
     * @param startTime inclusive start of the shard's time window
     * @param endTime inclusive end of the shard's time window
     * @return creation result (true = created)
     */
    public static boolean createShardingTable(ShardingTableCacheEnum logicTable, String tableName, LocalDateTime startTime, LocalDateTime endTime) {
        String index = tableName.replace(logicTable.logicTableName() + TABLE_SPLIT_SYMBOL, "");
        synchronized (logicTable.logicTableName().intern()) {
            // Create the physical table (no-op if it already exists)
            executeSql(Collections.singletonList("CREATE TABLE IF NOT EXISTS `" + tableName + "` LIKE `" + logicTable.logicTableName() + "`;"));
            // Persist the shard metadata record
            ShardingRecord shardingRecord = new ShardingRecord();
            shardingRecord.setLogicTable(logicTable.logicTableName());
            shardingRecord.setActualTable(tableName);
            shardingRecord.setIndexNum(Long.valueOf(index));
            shardingRecord.setStartTime(startTime);
            shardingRecord.setEndTime(endTime);
            shardingRecordService.insert(shardingRecord);
            // Reload the cache
            tableNameCacheReload(logicTable);
        }
        return true;
    }

    /**
     * Execute SQL statements in one transaction
     * @param sqlList SQL statements
     */
    private static void executeSql(List<String> sqlList) {
        if (StringUtils.isEmpty(DATASOURCE_URL) || StringUtils.isEmpty(DATASOURCE_USERNAME) || StringUtils.isEmpty(DATASOURCE_PASSWORD)) {
            log.error(">>>>>>>>>> [ERROR] invalid datasource configuration, please retry later, URL:{}, username:{}", DATASOURCE_URL, DATASOURCE_USERNAME);
            throw new IllegalArgumentException("invalid datasource configuration, please retry later");
        }
        try (Connection conn = DriverManager.getConnection(DATASOURCE_URL, DATASOURCE_USERNAME, DATASOURCE_PASSWORD)) {
            try (Statement st = conn.createStatement()) {
                conn.setAutoCommit(false);
                for (String sql : sqlList) {
                    st.execute(sql);
                }
                // Commit the batch (MySQL DDL auto-commits, but an explicit commit keeps this correct for DML too)
                conn.commit();
            } catch (Exception e) {
                conn.rollback();
                log.error(">>>>>>>>>> [ERROR] failed to create shard table, please retry later, cause:{}", e.getMessage(), e);
                throw new IllegalArgumentException("failed to create shard table, please retry later");
            }
        } catch (SQLException e) {
            log.error(">>>>>>>>>> [ERROR] database connection failed, please retry later, cause:{}", e.getMessage(), e);
            throw new IllegalArgumentException("database connection failed, please retry later");
        }
    }

}
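
ShardingAlgorithmTool is a plain static helper, so something has to inject the IShardingRecordService and warm the caches once the Spring context is up. The repository presumably wires this at startup; a minimal sketch (the runner class name is my own):

import org.springframework.boot.ApplicationArguments;
import org.springframework.boot.ApplicationRunner;
import org.springframework.stereotype.Component;

import javax.annotation.Resource;

@Component
public class ShardingCacheInitializer implements ApplicationRunner {

    @Resource
    private IShardingRecordService shardingRecordService;

    @Override
    public void run(ApplicationArguments args) {
        // Hand the service to the static helper, then load all shard metadata into the caches
        ShardingAlgorithmTool.initService(shardingRecordService);
        ShardingAlgorithmTool.tableNameCacheReloadAll();
    }
}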

5. Scheduled job that creates and initializes the shard tables

@Component
@Slf4j
public class TableInitJob {

    @Resource
    private ConfigProperty configProperty;

    private final String TABLE_SPLIT_SYMBOL = "_";

    @Resource
    private IShardingRecordService shardingRecordService;

    @Scheduled(cron = "${sharding.table.init.job:0 0 2 * * ?}")
    public void createTableAndDeleteExpiredTable() {
        // Pre-create shard tables covering the near future (configProperty.getShardingTimeDys() days ahead)
        ShardingTableCacheEnum.logicTableNames().forEach(logicTable -> {
            List<ShardingRecord> shardingRecords = shardingRecordService.queryAllShardingRecordByLogicName(logicTable);
            if (CollectionUtils.isEmpty(shardingRecords)) {
                int index = 1;

                // No shards yet: create from the configured start time until endTime passes now + shardingTimeDys
                LocalDateTime startTime = DateUtil.string2LocalDateTime(configProperty.getShardingStartTime(), null);
                LocalDateTime endTime = DateUtil.getEndDateTime(DateUtil.addDays(startTime, configProperty.getShardingTimeDys()));
                while (!endTime.isAfter(DateUtil.addDays(LocalDateTime.now(), configProperty.getShardingTimeDys()))) {
                    String tableName = logicTable + TABLE_SPLIT_SYMBOL + index;
                    ShardingAlgorithmTool.createShardingTable(ShardingTableCacheEnum.of(logicTable), tableName, startTime, endTime);
                    startTime = DateUtil.addDays(startTime, configProperty.getShardingTimeDys() + 1);
                    endTime = DateUtil.addDays(endTime, configProperty.getShardingTimeDys() + 1);
                    index++;
                }

            } else {
                // Shards exist: continue from the latest record until endTime passes now + shardingTimeDys
                ShardingRecord shardingRecord = shardingRecords.get(shardingRecords.size() - 1);
                int index = shardingRecord.getIndexNum().intValue() + 1;

                LocalDateTime startTime = shardingRecord.getStartTime();
                LocalDateTime endTime = shardingRecord.getEndTime();

                while (!endTime.isAfter(DateUtil.addDays(LocalDateTime.now(), configProperty.getShardingTimeDys()))) {
                    String tableName = logicTable + TABLE_SPLIT_SYMBOL + index;
                    startTime = DateUtil.addDays(startTime, configProperty.getShardingTimeDys() + 1);
                    endTime = DateUtil.addDays(endTime, configProperty.getShardingTimeDys() + 1);
                    ShardingAlgorithmTool.createShardingTable(ShardingTableCacheEnum.of(logicTable), tableName, startTime, endTime);
                    index++;
                }
            }

        });
    }

}
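
The job reads its window settings from a ConfigProperty bean that this post does not show. A plausible sketch, assuming the property prefix and field names (the getters mirror the calls above, including the getShardingTimeDys spelling):

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;

@Data
@Component
@ConfigurationProperties(prefix = "sharding")
public class ConfigProperty {

    /** Start of the first shard's time window, e.g. "2022-01-01 00:00:00" (binds to sharding.sharding-start-time) */
    private String shardingStartTime;

    /** Extra days each shard table covers beyond its start day, e.g. 2 for a 3-day table (binds to sharding.sharding-time-dys) */
    private Integer shardingTimeDys;
}

Also note that @Scheduled only fires if scheduling is enabled, e.g. with @EnableScheduling on a configuration class.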