Maven Dependencies
<dependency>
    <groupId>io.shardingsphere</groupId>
    <artifactId>sharding-jdbc-core</artifactId>
    <version>3.0.0</version>
</dependency>
<!-- MySQL JDBC driver -->
<dependency>
    <groupId>mysql</groupId>
    <artifactId>mysql-connector-java</artifactId>
    <version>6.0.5</version>
</dependency>
<!-- Druid connection pool (data source) -->
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>druid</artifactId>
    <version>1.0.26</version>
</dependency>
Sample Database Tables
CREATE SCHEMA IF NOT EXISTS `ds_0`;
CREATE TABLE IF NOT EXISTS `ds_0`.`t_order_0` (`order_id` BIGINT PRIMARY KEY AUTO_INCREMENT, `user_id` BIGINT NOT NULL, `status` VARCHAR(50));
CREATE TABLE IF NOT EXISTS `ds_0`.`t_order_1` (`order_id` BIGINT PRIMARY KEY AUTO_INCREMENT, `user_id` BIGINT NOT NULL, `status` VARCHAR(50));
CREATE TABLE IF NOT EXISTS `ds_0`.`t_order_item_0` (`item_id` INT NOT NULL, `order_id` BIGINT NOT NULL, `user_id` INT NOT NULL, PRIMARY KEY (`item_id`));
CREATE TABLE IF NOT EXISTS `ds_0`.`t_order_item_1` (`item_id` INT NOT NULL, `order_id` BIGINT NOT NULL, `user_id` INT NOT NULL, PRIMARY KEY (`item_id`));
CREATE SCHEMA IF NOT EXISTS `ds_1`;
CREATE TABLE IF NOT EXISTS `ds_1`.`t_order_0` (`order_id` BIGINT PRIMARY KEY AUTO_INCREMENT, `user_id` BIGINT NOT NULL, `status` VARCHAR(50));
CREATE TABLE IF NOT EXISTS `ds_1`.`t_order_1` (`order_id` BIGINT PRIMARY KEY AUTO_INCREMENT, `user_id` BIGINT NOT NULL, `status` VARCHAR(50));
CREATE TABLE IF NOT EXISTS `ds_1`.`t_order_item_0` (`item_id` INT NOT NULL, `order_id` BIGINT NOT NULL, `user_id` INT NOT NULL, PRIMARY KEY (`item_id`));
CREATE TABLE IF NOT EXISTS `ds_1`.`t_order_item_1` (`item_id` INT NOT NULL, `order_id` BIGINT NOT NULL, `user_id` INT NOT NULL, PRIMARY KEY (`item_id`));
Code Sample
package com.xiaofeng.cn;
import com.alibaba.druid.pool.DruidDataSource;
import com.mysql.cj.jdbc.Driver;
import io.shardingsphere.api.config.ShardingRuleConfiguration;
import io.shardingsphere.api.config.TableRuleConfiguration;
import io.shardingsphere.api.config.strategy.InlineShardingStrategyConfiguration;
import io.shardingsphere.shardingjdbc.api.ShardingDataSourceFactory;
import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
/**
 * 1. Build the underlying data sources with Druid.
 * 2. Build the sharding data source with ShardingDataSourceFactory.
 * 3. Test: obtain a Connection from the DataSource and execute SQL.
 */
public class App {

    public DataSource createDataSource(String url, String username, String pwd) {
        DruidDataSource ds = new DruidDataSource();
        // driver       : JDBC driver class
        // url          : database URL
        // username/pwd : account and password
        ds.setDriverClassName(Driver.class.getName());
        ds.setUrl(url);
        ds.setUsername(username);
        ds.setPassword(pwd);
        return ds;
    }
    public DataSource getShardingDataSource() throws SQLException {
        // 1. The map of underlying data sources: dataSourceMap
        Map<String, DataSource> dataSourceMap = new HashMap<String, DataSource>();
        // Register the two data sources, ds_0 and ds_1
        dataSourceMap.put("ds_0", createDataSource("jdbc:mysql://127.0.0.1:3306/ds_0?nullNamePatternMatchesAll=true", "root", "xiaofeng_123"));
        dataSourceMap.put("ds_1", createDataSource("jdbc:mysql://127.0.0.1:3306/ds_1?nullNamePatternMatchesAll=true", "root", "xiaofeng_123"));
        /**
         * Build the table rule:
         *   - specify the logic table
         *   - configure the actual data nodes
         *   - specify the generated-key column
         *   - configure the database and table sharding rules
         *
         * Table sharding key:    order_id
         * Database sharding key: user_id
         */
        TableRuleConfiguration tableRuleConfiguration = new TableRuleConfiguration();
        tableRuleConfiguration.setLogicTable("t_order");
        tableRuleConfiguration.setActualDataNodes("ds_${0..1}.t_order_${0..1}");
        tableRuleConfiguration.setKeyGeneratorColumnName("order_id");
        tableRuleConfiguration.setDatabaseShardingStrategyConfig(new InlineShardingStrategyConfiguration("user_id", "ds_${user_id%2}"));
        tableRuleConfiguration.setTableShardingStrategyConfig(new InlineShardingStrategyConfiguration("order_id", "t_order_${order_id%2}"));
        ShardingRuleConfiguration shardingRuleConfiguration = new ShardingRuleConfiguration();
        shardingRuleConfiguration.getTableRuleConfigs().add(tableRuleConfiguration);
        DataSource ds = ShardingDataSourceFactory.createDataSource(dataSourceMap, shardingRuleConfiguration, new HashMap<String, Object>(), new Properties());
        return ds;
    }
    public static void main(String[] args) throws SQLException {
        App app = new App();
        // 1. Get the sharding DataSource
        DataSource dataSource = app.getShardingDataSource();
        // 2. Obtain a Connection from the DataSource
        Connection connection = dataSource.getConnection();
        // 3. Define a SQL statement
        String sql = "insert into t_order(user_id,status) values (1005,'insert')";
        // String sql = "insert into t_order(order_id,user_id,status) values (10,1007,'insert')";
        // 4. Create a PreparedStatement from the Connection
        PreparedStatement preparedStatement = connection.prepareStatement(sql);
        // 5. Execute the SQL statement
        preparedStatement.execute();
        preparedStatement.close();

        sql = "insert into t_order(user_id,status) values (1006,'insert')";
        // sql = "insert into t_order(order_id,user_id,status) values (11,1007,'insert')";
        preparedStatement = connection.prepareStatement(sql);
        preparedStatement.execute();
        // 6. Close the statement and the connection
        preparedStatement.close();
        connection.close();

        /*
         * Observation from testing: the auto-increment keys generated by Sharding-Sphere
         * are not consecutive, and most of them are even. As a result the rows can be
         * sharded across databases by user_id, but sharding across tables by order_id
         * does not spread them.
         *
         * Why? Sharding-Sphere uses the snowflake algorithm as its default distributed
         * key generation strategy, so unique, increasing keys can be generated without a
         * central coordinator. The keys are therefore increasing but not consecutive.
         * The last 4 bits of a snowflake key are a counter that increments within the
         * same millisecond, so when in-millisecond concurrency is low those bits are
         * very likely to be zero, and a low-concurrency application is much more likely
         * to generate even keys.
         */
    }
}
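
The even-key behaviour described in the closing comment follows directly from the snowflake bit layout. The sketch below is a minimal, self-contained illustration, not the Sharding-Sphere implementation: the class name, the epoch value, and the 41/10/12 bit widths are assumptions chosen only to demonstrate the effect. It shows that low-concurrency inserts, where the in-millisecond sequence stays at 0, always yield even IDs, so order_id % 2 routes every row to t_order_0.

/**
 * Minimal snowflake-style sketch (assumed 41-bit timestamp / 10-bit worker id /
 * 12-bit sequence layout, illustrative only).
 */
public class SnowflakeSketch {

    private static final long EPOCH = 1514764800000L; // 2018-01-01T00:00:00Z, arbitrary custom epoch
    private static final long WORKER_ID_BITS = 10L;
    private static final long SEQUENCE_BITS = 12L;
    private static final long WORKER_ID = 1L;

    // Compose an ID: timestamp in the high bits, worker id in the middle,
    // in-millisecond sequence in the lowest bits.
    static long makeId(long timestampMillis, long sequence) {
        return ((timestampMillis - EPOCH) << (WORKER_ID_BITS + SEQUENCE_BITS))
                | (WORKER_ID << SEQUENCE_BITS)
                | sequence;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        // Low concurrency: each insert falls in its own millisecond, the sequence
        // stays at 0, and every generated ID is even -> always routed to t_order_0.
        for (int i = 0; i < 5; i++) {
            long id = makeId(now + i, 0);
            System.out.println(id + " -> t_order_" + (id % 2));
        }
        // High concurrency within one millisecond: the sequence increments, IDs
        // alternate between even and odd, and both t_order_0 and t_order_1 are used.
        for (long seq = 0; seq < 4; seq++) {
            long id = makeId(now, seq);
            System.out.println(id + " -> t_order_" + (id % 2));
        }
    }
}

For the test in the sample above, the commented-out insert statements that set order_id explicitly are a simple way to exercise both t_order_0 and t_order_1; under sufficiently concurrent traffic the snowflake keys also start to include odd values and spread across both tables.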