上文我们曾经分析了sharding-jdbc ShardingSphereConnection 和ShardingSphereDatasource 这两个类的源码和实现,接下去这篇文章会分析另两个jdbc这个包里最重要的类ShardingSphereStatement 和 ShardingSphereResultSet
ShardingSphereStatement
Statement是sharding-jdbc真正的执行器,所有逻辑都封装在 Statement 中。
Sharding-Jdbc 执行流程分析
- SimpleQueryShardingEngine(或 PreparedQueryShardingEngine):完成 SQL 解析、路由、改写,位于 sharding-jdbc-core 工程中。SimpleQueryShardingEngine 直接将路由的功能委托给 StatementRoutingEngine(PreparedQueryShardingEngine 则委托给 PreparedStatementRoutingEngine),本质是对 StatementRoutingEngine、SQLParseEngine、ShardingSQLRewriteEngine 的封装。
- StatementExecutor(或 PreparedStatementExecutor): 提供 SQL 执行的操作,位于 sharding-jdbc-core 工程中。本质是对 ShardingExecuteEngine 的封装。
- StatementRoutingEngine:SQL 路由引擎,位于 sharding-core-route 工程中。路由引擎包装了 SQL 解析、路由、改写三点。SQL 路由分两步,先进行数据分片路由(ShardingRouter),再进行主从路由(ShardingMasterSlaveRouter)。
- SQLParseEngine:SQL 解析引擎,位于 shardingsphere-sql-parser 工程中。目前有 MySQL和 PostgreSQL 两种。
- ShardingSQLRewriteEngine:SQL 改写引擎,位于 sharding-core-rewrite 工程中。
- ShardingExecuteEngine:执行引擎,位于 sharding-core-execute 工程中。StatementExecutor 对其进行了封装。
- MergeEngine:结果合并引擎,位于 sharding-core-merge 工程中。
ShardingStatement 内部有三个核心的类,一是 SimpleQueryShardingEngine 完成 SQL 解析、路由、改写;一是 StatementExecutor 进行 SQL 执行;最后调用 MergeEngine 对结果进行合并处理。
/**
 * Convenience constructor: delegates to the full constructor with JDBC defaults —
 * a forward-only, read-only result set whose cursors are held over commit.
 *
 * @param connection owning ShardingSphere connection
 */
public ShardingSphereStatement(final ShardingSphereConnection connection) {
this(connection, ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT);
}
/**
 * Convenience constructor: caller chooses result set type and concurrency,
 * holdability defaults to {@code HOLD_CURSORS_OVER_COMMIT}.
 *
 * @param connection owning ShardingSphere connection
 * @param resultSetType JDBC result set type constant
 * @param resultSetConcurrency JDBC result set concurrency constant
 */
public ShardingSphereStatement(final ShardingSphereConnection connection, final int resultSetType, final int resultSetConcurrency) {
this(connection, resultSetType, resultSetConcurrency, ResultSet.HOLD_CURSORS_OVER_COMMIT);
}
/**
 * Full constructor: wires up every collaborator this statement needs for SQL execution.
 *
 * @param connection owning ShardingSphere connection
 * @param resultSetType JDBC result set type constant
 * @param resultSetConcurrency JDBC result set concurrency constant
 * @param resultSetHoldability JDBC result set holdability constant
 */
public ShardingSphereStatement(final ShardingSphereConnection connection, final int resultSetType, final int resultSetConcurrency, final int resultSetHoldability) {
super(Statement.class);
this.connection = connection;
// Metadata contexts are shared with the owning connection.
metaDataContexts = connection.getMetaDataContexts();
statements = new LinkedList<>();
statementOption = new StatementOption(resultSetType, resultSetConcurrency, resultSetHoldability);
// connection.isHoldTransaction() feeds JDBCExecutor's 'serial' flag: while a transaction
// is held, execution is forced onto the serial (single-threaded) path.
JDBCExecutor jdbcExecutor = new JDBCExecutor(metaDataContexts.getExecutorEngine(), connection.isHoldTransaction());
driverJDBCExecutor = new DriverJDBCExecutor(connection.getDataSourceMap(), metaDataContexts, jdbcExecutor);
rawExecutor = new RawExecutor(metaDataContexts.getExecutorEngine(), connection.isHoldTransaction());
kernelProcessor = new KernelProcessor();
}
初始化过程中调用了 JDBCExecutor 的构造方法:
/**
 * Thin JDBC execution facade: delegates grouped execution units to {@link ExecutorEngine}.
 */
public final class JDBCExecutor {
private final ExecutorEngine executorEngine;
// When true, groups run serially on the calling thread (set from connection.isHoldTransaction()
// at construction time — see ShardingSphereStatement's constructor).
private final boolean serial;
/**
 * Convenience overload: no dedicated callback for the first execution group.
 */
public <T> List<T> execute(final Collection<ExecutionGroup<JDBCExecutionUnit>> executionGroups, final JDBCExecutorCallback<T> callback) throws SQLException {
return execute(executionGroups, null, callback);
}
/**
 * Executes all groups via the engine, translating failures through the shared exception handler.
 *
 * @param executionGroups groups of JDBC execution units to run
 * @param firstCallback optional callback dedicated to the first group (may be null)
 * @param callback callback applied to the remaining groups
 * @return execution results, or an empty list when the handler swallows the exception
 */
public <T> List<T> execute(final Collection<ExecutionGroup<JDBCExecutionUnit>> executionGroups,
final JDBCExecutorCallback<T> firstCallback, final JDBCExecutorCallback<T> callback) throws SQLException {
try {
return executorEngine.execute(executionGroups, firstCallback, callback, serial);
} catch (final SQLException ex) {
// NOTE(review): assumes handleException rethrows when exception propagation is enabled;
// otherwise the error is swallowed and an empty result is returned — confirm handler config.
SQLExecutorExceptionHandler.handleException(ex);
return Collections.emptyList();
}
}
}
JDBCExecutor 作为执行类,内部调用了 ExecutorEngine。ExecutorEngine 包含了 serialExecute(单线程串行执行)、parallelExecute(多线程并行执行)等多个执行方法。
public final class ExecutorEngine implements AutoCloseable {
private final ExecutorServiceManager executorServiceManager;
/**
 * Creates the engine with a worker pool of the given size, managed by ExecutorServiceManager.
 *
 * @param executorSize number of executor threads
 */
public ExecutorEngine(final int executorSize) {
executorServiceManager = new ExecutorServiceManager(executorSize);
}
/**
 * Convenience overload: no dedicated first-group callback, parallel execution (serial = false).
 */
public <I, O> List<O> execute(final Collection<ExecutionGroup<I>> executionGroups, final ExecutorCallback<I, O> callback) throws SQLException {
return execute(executionGroups, null, callback, false);
}
/**
 * Executes every execution group and collects the results.
 *
 * @param executionGroups groups of execution units; an empty collection short-circuits to an empty result
 * @param firstCallback optional callback dedicated to the first group (falls back to {@code callback} when null)
 * @param callback callback applied to the remaining groups
 * @param serial true runs groups one by one on the caller thread; false runs them in parallel
 * @return merged results of all groups
 * @throws SQLException if any execution unit fails
 */
public <I, O> List<O> execute(final Collection<ExecutionGroup<I>> executionGroups,
final ExecutorCallback<I, O> firstCallback, final ExecutorCallback<I, O> callback, final boolean serial) throws SQLException {
    if (executionGroups.isEmpty()) {
        return Collections.emptyList();
    }
    Iterator<ExecutionGroup<I>> groupIterator = executionGroups.iterator();
    if (serial) {
        return serialExecute(groupIterator, firstCallback, callback);
    }
    return parallelExecute(groupIterator, firstCallback, callback);
}
/**
 * Runs each execution group sequentially on the calling thread.
 * The first group may use a dedicated callback; all later groups use the common one.
 *
 * @return accumulated results of every group, in group order
 */
private <I, O> List<O> serialExecute(final Iterator<ExecutionGroup<I>> executionGroups, final ExecutorCallback<I, O> firstCallback, final ExecutorCallback<I, O> callback) throws SQLException {
    ExecutorCallback<I, O> headCallback = null == firstCallback ? callback : firstCallback;
    List<O> results = new LinkedList<>(syncExecute(executionGroups.next(), headCallback));
    while (executionGroups.hasNext()) {
        results.addAll(syncExecute(executionGroups.next(), callback));
    }
    return results;
}
/**
 * Executes the first group synchronously on the caller thread while the remaining
 * groups run asynchronously, then waits for and merges all group results.
 */
private <I, O> List<O> parallelExecute(final Iterator<ExecutionGroup<I>> executionGroups, final ExecutorCallback<I, O> firstCallback, final ExecutorCallback<I, O> callback) throws SQLException {
ExecutionGroup<I> firstInputs = executionGroups.next();
// Kick off async work for the rest of the groups before blocking on the first one,
// so the caller thread's work overlaps with the pool's.
Collection<ListenableFuture<Collection<O>>> restResultFutures = asyncExecute(executionGroups, callback);
return getGroupResults(syncExecute(firstInputs, null == firstCallback ? callback : firstCallback), restResultFutures);
}
该方法会根据初始化的 serial参数选择不同的多线程或者单线程执行方法
if (executionGroups.isEmpty()) {
return Collections.emptyList();
}
return serial ? serialExecute(executionGroups.iterator(), firstCallback, callback) : parallelExecute(executionGroups.iterator(), firstCallback, callback);
serialExecute 中全部采用同步执行(syncExecute),而 parallelExecute 中则是异步与同步执行相结合:
/**
 * Runs each execution group sequentially on the calling thread (all synchronous).
 * The first group may use a dedicated callback; subsequent groups use the common one.
 */
private <I, O> List<O> serialExecute(final Iterator<ExecutionGroup<I>> executionGroups, final ExecutorCallback<I, O> firstCallback, final ExecutorCallback<I, O> callback) throws SQLException {
ExecutionGroup<I> firstInputs = executionGroups.next();
// First group: prefer the dedicated callback when supplied.
List<O> result = new LinkedList<>(syncExecute(firstInputs, null == firstCallback ? callback : firstCallback));
while (executionGroups.hasNext()) {
result.addAll(syncExecute(executionGroups.next(), callback));
}
return result;
}
executeQuery 执行过程
首先调用 createExecutionContext 创建了 ExecutionContext:
/**
 * Parses the SQL, runs authority/validity checks, then lets the kernel
 * route and rewrite it into an ExecutionContext.
 */
private ExecutionContext createExecutionContext(final String sql) throws SQLException {
// Presumably releases statements cached from the previous execution — see clearStatements().
clearStatements();
LogicSQL logicSQL = createLogicSQL(sql);
// Validate the parsed statement against metadata and authentication before execution.
SQLCheckEngine.check(logicSQL.getSqlStatementContext().getSqlStatement(), logicSQL.getParameters(), metaDataContexts.getDefaultMetaData(), metaDataContexts.getAuthentication());
return kernelProcessor.generateExecutionContext(logicSQL, metaDataContexts.getDefaultMetaData(), metaDataContexts.getProps());
}
LogicSQL
/**
 * Immutable value object pairing a logical SQL string with its parsed statement
 * context and parameter list. Lombok generates the all-args constructor and getters,
 * whose signatures follow the declared field order.
 */
@RequiredArgsConstructor
@Getter
public final class LogicSQL {
private final SQLStatementContext<?> sqlStatementContext;
private final String sql;
private final List<Object> parameters;
}
LogicSQL包含SQLStatementContext的上下文和 sql以及参数
/**
 * Parses the raw SQL into a LogicSQL: resolves the trunk database type, parses the
 * statement, and builds its context against the default schema.
 */
private LogicSQL createLogicSQL(final String sql) {
ShardingSphereSchema schema = metaDataContexts.getDefaultMetaData().getSchema();
// Parser engine is keyed by the trunk database type (e.g. a MySQL-family dialect maps to MySQL).
ShardingSphereSQLParserEngine sqlParserEngine = new ShardingSphereSQLParserEngine(
DatabaseTypeRegistry.getTrunkDatabaseTypeName(metaDataContexts.getDefaultMetaData().getResource().getDatabaseType()));
// NOTE(review): second argument 'false' presumably disables the parse-result cache — confirm.
SQLStatement sqlStatement = sqlParserEngine.parse(sql, false);
// Plain Statement carries no bound parameters, hence the empty parameter lists.
SQLStatementContext<?> sqlStatementContext = SQLStatementContextFactory.newInstance(schema, Collections.emptyList(), sqlStatement);
return new LogicSQL(sqlStatementContext, sql, Collections.emptyList());
}
/**
 * Groups the routed execution units into JDBC execution groups, honoring the
 * max.connections.size.per.query property when acquiring connections.
 */
private Collection<ExecutionGroup<JDBCExecutionUnit>> createExecutionGroups() throws SQLException {
int maxConnectionsSizePerQuery = metaDataContexts.getProps().<Integer>getValue(ConfigurationPropertyKey.MAX_CONNECTIONS_SIZE_PER_QUERY);
// STATEMENT driver type: units are executed through plain java.sql.Statement objects.
DriverExecutionPrepareEngine<JDBCExecutionUnit, Connection> prepareEngine = new DriverExecutionPrepareEngine<>(
JDBCDriverType.STATEMENT, maxConnectionsSizePerQuery, connection, statementOption, metaDataContexts.getDefaultMetaData().getRuleMetaData().getRules());
return prepareEngine.prepare(executionContext.getRouteContext(), executionContext.getExecutionUnits());
}
/**
 * Groups the routed execution units into raw (non-JDBC) execution groups,
 * honoring the max.connections.size.per.query property.
 */
private Collection<ExecutionGroup<RawSQLExecutionUnit>> createRawExecutionGroups() throws SQLException {
    int maxConnectionsSizePerQuery = metaDataContexts.getProps().<Integer>getValue(ConfigurationPropertyKey.MAX_CONNECTIONS_SIZE_PER_QUERY);
    RawExecutionPrepareEngine prepareEngine = new RawExecutionPrepareEngine(maxConnectionsSizePerQuery, metaDataContexts.getDefaultMetaData().getRuleMetaData().getRules());
    return prepareEngine.prepare(executionContext.getRouteContext(), executionContext.getExecutionUnits());
}
/**
 * Collects the underlying JDBC statements from every execution group into the
 * statement cache, then replays stored settings onto them (see replay()).
 */
private void cacheStatements(final Collection<ExecutionGroup<JDBCExecutionUnit>> executionGroups) {
    for (ExecutionGroup<JDBCExecutionUnit> group : executionGroups) {
        for (JDBCExecutionUnit unit : group.getInputs()) {
            statements.add(unit.getStorageResource());
        }
    }
    replay();
}
接下来才是执行的方法
/**
 * Executes a logical query: builds the ExecutionContext (parse/check/route/rewrite),
 * runs the routed units, merges the per-shard results, and wraps them in a
 * ShardingSphereResultSet.
 *
 * @param sql logical SQL to execute; must be non-empty
 * @return merged result set over all routed shards
 * @throws SQLException when the SQL is null/empty or execution fails
 */
@Override
public ResultSet executeQuery(final String sql) throws SQLException {
if (Strings.isNullOrEmpty(sql)) {
throw new SQLException(SQLExceptionConstant.SQL_STRING_NULL_OR_EMPTY);
}
ResultSet result;
try {
executionContext = createExecutionContext(sql);
// One QueryResult per routed execution unit.
List<QueryResult> queryResults = executeQuery0();
// Merge sharded results into a single logical result.
MergedResult mergedResult = mergeQuery(queryResults);
result = new ShardingSphereResultSet(getResultSetsForShardingSphereResultSet(), mergedResult, this, executionContext);
} finally {
// Always reset first so a failed execution never leaves a stale result set visible.
currentResultSet = null;
}
// Reached only on success; the finally block has already run, so assign the fresh result.
currentResultSet = result;
return result;
}