环境使用: Springboot 2.6.0 + Mybatis 2.1.4 + ShardingSphere 5.1.0 + Druid 1.1.22
相关的 Maven依赖
<dependency>
<groupId>org.apache.shardingsphere</groupId>
<artifactId>shardingsphere-jdbc-core-spring-boot-starter</artifactId>
<version>5.1.0</version>
</dependency>
<dependency>
<groupId>org.mybatis.spring.boot</groupId>
<artifactId>mybatis-spring-boot-starter</artifactId>
<version>2.1.4</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid</artifactId>
<version>1.1.22</version>
</dependency>
<dependency>
<groupId>org.apache.tomcat</groupId>
<artifactId>tomcat-dbcp</artifactId>
<version>10.0.16</version>
</dependency>
按照水平拆分的方式进行分库分表
db0与db1数据库以及表创建如下:
create database db0
use db0
DROP TABLE IF EXISTS `address`;
CREATE TABLE `address` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT '自增ID',
`student_id` bigint(20) NOT NULL COMMENT '学生id',
`name` int(64) NOT NULL COMMENT '学生地址',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8mb4 COMMENT='学生';
DROP TABLE IF EXISTS `student0`;
CREATE TABLE `student0` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT '自增ID',
`age` int(20) NOT NULL COMMENT '年龄',
`name` varchar(64) CHARACTER SET utf8mb4 DEFAULT NULL COMMENT '姓名',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8mb4 COMMENT='学生';
DROP TABLE IF EXISTS `student1`;
CREATE TABLE `student1` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT '自增ID',
`age` int(20) NOT NULL COMMENT '年龄',
`name` varchar(64) CHARACTER SET utf8mb4 DEFAULT NULL COMMENT '姓名',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8mb4 COMMENT='学生';
DROP TABLE IF EXISTS `student2`;
CREATE TABLE `student2` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT '自增ID',
`age` int(4) NOT NULL COMMENT '年龄',
`name` varchar(64) CHARACTER SET utf8mb4 DEFAULT NULL COMMENT '姓名',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8mb4 COMMENT='学生';
create database db1
use db1
DROP TABLE IF EXISTS `address`;
CREATE TABLE `address` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT '自增ID',
`student_id` bigint(20) NOT NULL COMMENT '学生id',
`name` int(64) NOT NULL COMMENT '学生地址',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8mb4 COMMENT='学生';
DROP TABLE IF EXISTS `student0`;
CREATE TABLE `student0` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT '自增ID',
`age` int(20) NOT NULL COMMENT '年龄',
`name` varchar(64) CHARACTER SET utf8mb4 DEFAULT NULL COMMENT '姓名',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8mb4 COMMENT='学生';
DROP TABLE IF EXISTS `student1`;
CREATE TABLE `student1` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT '自增ID',
`age` int(20) NOT NULL COMMENT '年龄',
`name` varchar(64) CHARACTER SET utf8mb4 DEFAULT NULL COMMENT '姓名',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8mb4 COMMENT='学生';
DROP TABLE IF EXISTS `student2`;
CREATE TABLE `student2` (
`id` bigint(20) NOT NULL AUTO_INCREMENT COMMENT '自增ID',
`age` int(4) NOT NULL COMMENT '年龄',
`name` varchar(64) CHARACTER SET utf8mb4 DEFAULT NULL COMMENT '姓名',
PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=utf8mb4 COMMENT='学生';
项目的目录结构:
学生实体类:
@Data
public class Student {
private Long id;
private Integer age;
private String name;
}
学生Dao:
@Mapper
public interface StudentDao {
// 插入一条数据
int insert (Student student);
// 查询所以学生数据
List<Student> selectAll ();
// 通过表连接查询所以学生信息以及地址
Map<String, Object> studentInfo (Long id);
}
Service类:
@Service
public class StudentService implements IStudentService {
@Resource
StudentDao studentDao;
@Override
public int insert(Student student) {
return studentDao.insert(student);
}
@Override
public List<Student> selectAll() {
return studentDao.selectAll();
}
@Override
public Map<String, Object> studentInfo(Long id) {
return studentDao.studentInfo(id);
}
}
resources 下xml 文件:
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.example.shardingsphere.dao.StudentDao">
<insert id="insert" parameterType="com.example.shardingsphere.entity.Student">
INSERT INTO student (age, name) VALUES (#{age}, #{name})
</insert>
<select id="selectAll" resultType="com.example.shardingsphere.entity.Student">
select * from student
</select>
<select id="studentInfo" parameterType="java.lang.Long" resultType="java.util.Map">
select student.id as student_id, address.name as address_name,
student.name as student_name from `student`
join `address` on `student`.id = `address`.student_id
where student.id = #{id}
</select>
</mapper>
ShardingSphere配置文件如下:
# mybatis 的 xml文件路径
mybatis:
mapper-locations: classpath:/mapper/*.xml
spring:
shardingsphere:
# 打开sql 执行的日志输出
props:
sql-show: true
datasource:
# 配置第一个数据库连接
ds0:
driver-class-name: com.mysql.cj.jdbc.Driver
type: com.alibaba.druid.pool.DruidDataSource
password: chenpeng1998..
username: root
url: jdbc:mysql://localhost:3306/ds0?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=GMT&allowPublicKeyRetrieval=true
# 配置第二个数据库连接
ds1:
driver-class-name: com.mysql.cj.jdbc.Driver
type: com.alibaba.druid.pool.DruidDataSource
password: chenpeng1998..
username: root
url: jdbc:mysql://localhost:3306/ds1?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=GMT&allowPublicKeyRetrieval=true
# 记录你所使用的所有数据库
names: ds0,ds1
# 配置规则信息, 确定某一条数据在哪个数据库、表里进行插入或查询
rules:
sharding:
sharding-algorithms:
# 自定义的分库的规则
custom-db-inline:
props:
# 根据 id 进行分库
algorithm-expression: ds$->{id%2}
type: INLINE
# 自定义的分表的规则
custom-table-inline:
props:
# 根据 id 进行分表
algorithm-expression: student$->{id%3}
type: INLINE
# 自定义的分库的规则
address-db-inline:
props:
# 根据 student_id 进行分库
algorithm-expression: ds$->{student_id%2}
type: INLINE
# 选择主键生成的方式,本案例使用雪花算法进行生成id
key-generators:
snowflake:
type: SNOWFLAKE
props:
worker-id: 666
# 根据上面自定义的规则对表进行分库分表
tables:
# 对 address 表进行分库
address:
# 对 address 表进行分库, {0..1} 代表 可供选择的数据库为ds0和ds1
actual-data-nodes: ds$->{0..1}.address
# 利用address中的student_id 进行分库
database-strategy:
standard:
sharding-algorithm-name: address-db-inline
sharding-column: student_id
# 对 student 表进行分表分库
student:
# 对student的id 字段进行雪花算法生成
key-generate-strategy:
column: id
key-generator-name: snowflake
# 对 student 表进行分库分库, {0..1} 代表 可供选择的数据库为ds0和ds1
# {0..2} 代表可供选择的表为student0、student1和student2
actual-data-nodes: ds$->{0..1}.student$->{0..2}
# 利用student中的 id进行分库
database-strategy:
standard:
sharding-algorithm-name: custom-db-inline
sharding-column: id
# 利用student中的 id进行分表
table-strategy:
standard:
sharding-algorithm-name: custom-table-inline
sharding-column: id
# 启动ShardingSphere
enabled: true
Test 类下进行测试:
@RunWith(SpringRunner.class)
@SpringBootTest
public class TestApi {
@Resource
IStudentService studentService;
@Test
public void insertStudent() {
for(int i = 0 ; i < 6 ; i++) {
Student student = new Student();
student.setAge(18);
student.setName("chen");
studentService.insert(student);
}
}
@Test
public void selectAllTest() {
List<Student> studentList = studentService.selectAll();
System.out.println(studentList.size());
}
@Test
public void studentInfo() {
Long id = 871345639999406080L;
Map<String, Object> studentInfo = studentService.studentInfo(id);
System.out.println(studentInfo);
}
}
插入多条学生测试结果:从打印的记录中,6条学生记录分别插入了不同库、不同表中
查询所有学生测试结果:查询过程中对所有库所有表进行总计,一共54条
多表连接查询:对Student 表和 Address 表进行关联查询出学生信息以及地址信息
注意: 在多表进行关联查询时, Student表与Address表应该在一个数据库下,否则会报错。