在处理分布式事务的过程中,TM(事务发起者)向TC请求开启分布式事务,TM和RM(事务参与者)向TC(seata-server)请求注册分支事务并报告一阶段的执行结果,那么,他们之间是如何进行交互的呢?接下来就介绍一下TM、RM和TC的通信机制。
RM、TM与TC之间是通过netty进行通信的。
TC在启动时,会初始化一个netty server,用于接收RM和TM的请求;引入了seata依赖的服务在启动时,会启动一个RM client和一个TM client。一个服务可以同时是TM和RM。
1 netty client的启动流程
GlobalTransactionScanner实现了InitializingBean的afterPropertiesSet初始化方法,spring容器会在bean初始化的时候回调该方法。
在GlobalTransactionScanner.afterPropertiesSet中,初始化了一个netty 客户端。
/**
 * Spring hook that bootstraps the Seata netty clients. A service that pulls in
 * the seata dependency starts one TM client and one RM client through this
 * scanner, so a single service can act as both TM and RM.
 */
public class GlobalTransactionScanner extends AbstractAutoProxyCreator
        implements InitializingBean, ApplicationContextAware,
        DisposableBean {

    /**
     * InitializingBean callback — Spring invokes this after the bean's
     * properties have been populated. Does nothing when global transactions
     * are disabled.
     */
    public void afterPropertiesSet() {
        if (disableGlobalTransaction) {
            if (LOGGER.isInfoEnabled()) {
                LOGGER.info("Global transaction is disabled.");
            }
            return;
        }
        // Initialize the netty clients for both roles: TM and RM.
        initClient();
    }

    /**
     * Creates and initializes the TM and RM clients, then registers a Spring
     * shutdown hook. Requires a non-empty applicationId and txServiceGroup
     * (injected elsewhere in this class — not visible in this excerpt).
     */
    private void initClient() {
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info("Initializing Global Transaction Clients ... ");
        }
        if (StringUtils.isNullOrEmpty(applicationId) || StringUtils.isNullOrEmpty(txServiceGroup)) {
            throw new IllegalArgumentException(String.format("applicationId: %s, txServiceGroup: %s", applicationId, txServiceGroup));
        }
        /**
         * TM (Transaction Manager): controls the boundary of the global
         *     transaction — begin, global commit, global rollback.
         * RM (Resource Manager): controls branch transactions — branch
         *     registration, status reporting, and applying the coordinator's
         *     instructions to commit or roll back the branch (local)
         *     transaction.
         * TC (Transaction Coordinator): maintains the running state of
         *     global transactions and coordinates/drives global commit or
         *     rollback.
         */
        // Initialize the TM client.
        TMClient.init(applicationId, txServiceGroup);
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info("Transaction Manager Client is initialized. applicationId[{}] txServiceGroup[{}]", applicationId, txServiceGroup);
        }
        // Initialize the RM client.
        RMClient.init(applicationId, txServiceGroup);
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info("Resource Manager is initialized. applicationId[{}] txServiceGroup[{}]", applicationId, txServiceGroup);
        }
        if (LOGGER.isInfoEnabled()) {
            LOGGER.info("Global Transaction Clients are initialized. ");
        }
        registerSpringShutdownHook();
    }
}
一个服务,既可以是TM,也可以是RM
1.1 TM客户端
TMClient.init
public class TMClient {
public static void init(String applicationId, String transactionServiceGroup) {
//创建TM客户端
TmRpcClient tmRpcClient = TmRpcClient.getInstance(applicationId, transactionServiceGroup);
//TM客户端初始化
tmRpcClient.init();
}
}
TmRpcClient
/**
 * RPC client for the TM (Transaction Manager) role. A process-wide singleton
 * created via double-checked locking in getInstance(); the shared netty
 * plumbing lives in the AbstractRpcRemotingClient base class.
 */
public final class TmRpcClient extends AbstractRpcRemotingClient {

    private TmRpcClient(NettyClientConfig nettyClientConfig,
                        EventExecutorGroup eventExecutorGroup,
                        ThreadPoolExecutor messageExecutor) {
        // The parent constructor builds seata's wrapper around the netty
        // client bootstrap, tagged with the TM role.
        super(nettyClientConfig, eventExecutorGroup, messageExecutor, NettyPoolKey.TransactionRole.TMROLE);
    }

    /**
     * Returns the singleton, (re)binding it to the given application id and
     * transaction service group.
     */
    public static TmRpcClient getInstance(String applicationId, String transactionServiceGroup) {
        // Fetch the singleton.
        TmRpcClient tmRpcClient = getInstance();
        tmRpcClient.setApplicationId(applicationId);
        tmRpcClient.setTransactionServiceGroup(transactionServiceGroup);
        return tmRpcClient;
    }

    /**
     * Lazily creates the singleton using double-checked locking. The message
     * executor is a fixed-size pool with a bounded queue; when the queue is
     * full the rejection policy runs the oldest queued task
     * (RejectedPolicies.runsOldestTaskPolicy).
     */
    public static TmRpcClient getInstance() {
        if (null == instance) {
            synchronized (TmRpcClient.class) {
                if (null == instance) {
                    NettyClientConfig nettyClientConfig = new NettyClientConfig();
                    final ThreadPoolExecutor messageExecutor = new ThreadPoolExecutor(
                        nettyClientConfig.getClientWorkerThreads(), nettyClientConfig.getClientWorkerThreads(),
                        KEEP_ALIVE_TIME, TimeUnit.SECONDS,
                        new LinkedBlockingQueue<>(MAX_QUEUE_SIZE),
                        new NamedThreadFactory(nettyClientConfig.getTmDispatchThreadPrefix(),
                            nettyClientConfig.getClientWorkerThreads()),
                        RejectedPolicies.runsOldestTaskPolicy());
                    instance = new TmRpcClient(nettyClientConfig, null, messageExecutor);
                }
            }
        }
        return instance;
    }

    /**
     * Initializes the client at most once (CAS guard), reading the degrade
     * switch from configuration before delegating to the parent class, which
     * starts the netty bootstrap.
     */
    public void init() {
        if (initialized.compareAndSet(false, true)) {
            enableDegrade = CONFIG.getBoolean(ConfigurationKeys.SERVICE_PREFIX + ConfigurationKeys.ENABLE_DEGRADE_POSTFIX);
            // Parent class initialization (see AbstractRpcRemotingClient).
            super.init();
        }
    }
}
AbstractRpcRemotingClient.init
/**
 * Common base class for the TM and RM netty clients. Owns the seata-wrapped
 * netty bootstrap and a channel manager that creates/reconnects channels to
 * the TC.
 */
public abstract class AbstractRpcRemotingClient extends AbstractRpcRemoting
    implements RegisterMsgListener, ClientMessageSender {

    public AbstractRpcRemotingClient(NettyClientConfig nettyClientConfig, EventExecutorGroup eventExecutorGroup,
                                     ThreadPoolExecutor messageExecutor, NettyPoolKey.TransactionRole transactionRole) {
        super(messageExecutor);
        this.transactionRole = transactionRole;
        // Create seata's wrapper around netty's client Bootstrap.
        clientBootstrap = new RpcClientBootstrap(nettyClientConfig, eventExecutorGroup, transactionRole);
        clientChannelManager = new NettyClientChannelManager(
            new NettyPoolableFactory(this, clientBootstrap), getPoolKeyFunction(), nettyClientConfig);
    }

    /**
     * Starts the client: installs the pipeline handler, configures the netty
     * bootstrap, schedules a periodic reconnect task for the transaction
     * service group, and — if client batch sending is enabled — starts the
     * merged-send worker.
     */
    public void init() {
        // Install the inbound handler on the seata netty bootstrap.
        clientBootstrap.setChannelHandlers(new ClientHandler());
        // Configure the netty client startup flow (no connection yet).
        clientBootstrap.start();
        // Scheduled task that (re)connects channels for the service group.
        timerExecutor.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                clientChannelManager.reconnect(getTransactionServiceGroup());
            }
        }, SCHEDULE_DELAY_MILLS, SCHEDULE_INTERVAL_MILLS, TimeUnit.MILLISECONDS);
        if (NettyClientConfig.isEnableClientBatchSendRequest()) {
            // Fixed-size pool driving the request-merging sender.
            mergeSendExecutorService = new ThreadPoolExecutor(MAX_MERGE_SEND_THREAD,
                MAX_MERGE_SEND_THREAD,
                KEEP_ALIVE_TIME, TimeUnit.MILLISECONDS,
                new LinkedBlockingQueue<>(),
                new NamedThreadFactory(getThreadPrefix(), MAX_MERGE_SEND_THREAD));
            mergeSendExecutorService.submit(new MergedSendRunnable());
        }
        super.init();
    }
}
RpcClientBootstrap.start netty client的启动流程
/**
 * Seata's wrapper around netty's client {@code Bootstrap}. start() only
 * configures the bootstrap (event loop group, channel options, pipeline);
 * actual connections to a server are opened later via getNewChannel().
 */
public class RpcClientBootstrap implements RemotingClient {

    public RpcClientBootstrap(NettyClientConfig nettyClientConfig, final EventExecutorGroup eventExecutorGroup,
                              NettyPoolKey.TransactionRole transactionRole) {
        if (null == nettyClientConfig) {
            nettyClientConfig = new NettyClientConfig();
            if (LOGGER.isInfoEnabled()) {
                LOGGER.info("use default netty client config.");
            }
        }
        this.nettyClientConfig = nettyClientConfig;
        int selectorThreadSizeThreadSize = this.nettyClientConfig.getClientSelectorThreadSize();
        this.transactionRole = transactionRole;
        // Create the NIO event loop group used as the worker group.
        this.eventLoopGroupWorker = new NioEventLoopGroup(selectorThreadSizeThreadSize,
            new NamedThreadFactory(getThreadPrefix(this.nettyClientConfig.getClientSelectorThreadPrefix()),
                selectorThreadSizeThreadSize));
        this.defaultEventExecutorGroup = eventExecutorGroup;
    }

    /**
     * Configures the netty client startup flow; note that no connection to
     * the netty server is established here yet.
     */
    public void start() {
        if (this.defaultEventExecutorGroup == null) {
            this.defaultEventExecutorGroup = new DefaultEventExecutorGroup(nettyClientConfig.getClientWorkerThreads(),
                new NamedThreadFactory(getThreadPrefix(nettyClientConfig.getClientWorkerThreadPrefix()),
                    nettyClientConfig.getClientWorkerThreads()));
        }
        // Attach the event loop group.
        this.bootstrap.group(this.eventLoopGroupWorker).channel(
            // Channel class — determines the I/O model.
            nettyClientConfig.getClientChannelClazz()).option(
            // TCP connection options.
            ChannelOption.TCP_NODELAY, true).option(ChannelOption.SO_KEEPALIVE, true).option(
            ChannelOption.CONNECT_TIMEOUT_MILLIS, nettyClientConfig.getConnectTimeoutMillis()).option(
            ChannelOption.SO_SNDBUF, nettyClientConfig.getClientSocketSndBufSize()).option(ChannelOption.SO_RCVBUF,
            nettyClientConfig.getClientSocketRcvBufSize());
        if (nettyClientConfig.enableNative()) {
            if (PlatformDependent.isOsx()) {
                if (LOGGER.isInfoEnabled()) {
                    LOGGER.info("client run on macOS");
                }
            } else {
                bootstrap.option(EpollChannelOption.EPOLL_MODE, EpollMode.EDGE_TRIGGERED)
                    .option(EpollChannelOption.TCP_QUICKACK, true);
            }
        }
        // Pipeline handlers for each client channel.
        bootstrap.handler(
            new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) {
                    ChannelPipeline pipeline = ch.pipeline();
                    pipeline.addLast(
                        // Idle-state handler (read/write/all idle detection).
                        new IdleStateHandler(nettyClientConfig.getChannelMaxReadIdleSeconds(),
                            nettyClientConfig.getChannelMaxWriteIdleSeconds(),
                            nettyClientConfig.getChannelMaxAllIdleSeconds()))
                        // Protocol decoder.
                        .addLast(new ProtocolV1Decoder())
                        // Protocol encoder.
                        .addLast(new ProtocolV1Encoder());
                    if (null != channelHandlers) {
                        // Append the user handlers to the pipeline;
                        // by default there is only a ClientHandler.
                        addChannelPipelineLast(ch, channelHandlers);
                    }
                }
            });
        if (initialized.compareAndSet(false, true) && LOGGER.isInfoEnabled()) {
            LOGGER.info("RpcClientBootstrap has started");
        }
    }

    /**
     * Gets new channel.
     * Opens a connection to the given server address, waiting up to the
     * configured connect timeout.
     * @param address the address
     * @return the new channel
     */
    public Channel getNewChannel(InetSocketAddress address) {
        Channel channel;
        ChannelFuture f = this.bootstrap.connect(address);
        try {
            f.await(this.nettyClientConfig.getConnectTimeoutMillis(), TimeUnit.MILLISECONDS);
            if (f.isCancelled()) {
                throw new FrameworkException(f.cause(), "connect cancelled, can not connect to services-server.");
            } else if (!f.isSuccess()) {
                throw new FrameworkException(f.cause(), "connect failed, can not connect to services-server.");
            } else {
                channel = f.channel();
            }
        } catch (Exception e) {
            throw new FrameworkException(e, "can not connect to services-server.");
        }
        return channel;
    }
}
1.2 RM客户端
RMClient.init
/**
 * Thin bootstrap facade for the Resource Manager client.
 */
public class RMClient {

    /**
     * Bootstraps the RM client: obtains the singleton RmRpcClient for the
     * given application/service group, wires in the resource manager and the
     * listener that dispatches messages arriving from the TC, and finally
     * initializes the client.
     */
    public static void init(String applicationId, String transactionServiceGroup) {
        // Look up (or lazily create) the RM RPC client singleton.
        RmRpcClient client = RmRpcClient.getInstance(applicationId, transactionServiceGroup);
        // Wire the default resource manager into the client.
        client.setResourceManager(DefaultResourceManager.get());
        // Listener that dispatches incoming TC messages to the RM handler.
        client.setClientMessageListener(new RmMessageListener(DefaultRMHandler.get(), client));
        // Start the RM client.
        client.init();
    }
}
RmRpcClient
/**
 * RPC client for the RM (Resource Manager) role. A process-wide singleton
 * created via double-checked locking; mirrors TmRpcClient but uses the RM
 * dispatch thread prefix and a CallerRunsPolicy when the queue is full.
 */
public final class RmRpcClient extends AbstractRpcRemotingClient {

    /**
     * Returns the singleton, (re)binding it to the given application id and
     * transaction service group.
     */
    public static RmRpcClient getInstance(String applicationId, String transactionServiceGroup) {
        RmRpcClient rmRpcClient = getInstance();
        rmRpcClient.setApplicationId(applicationId);
        rmRpcClient.setTransactionServiceGroup(transactionServiceGroup);
        return rmRpcClient;
    }

    /**
     * Lazily creates the singleton using double-checked locking. The message
     * executor is a fixed-size pool with a bounded queue; saturated submits
     * run on the caller's thread (CallerRunsPolicy).
     */
    public static RmRpcClient getInstance() {
        if (null == instance) {
            synchronized (RmRpcClient.class) {
                if (null == instance) {
                    NettyClientConfig nettyClientConfig = new NettyClientConfig();
                    final ThreadPoolExecutor messageExecutor = new ThreadPoolExecutor(
                        nettyClientConfig.getClientWorkerThreads(), nettyClientConfig.getClientWorkerThreads(),
                        KEEP_ALIVE_TIME, TimeUnit.SECONDS, new LinkedBlockingQueue<>(MAX_QUEUE_SIZE),
                        new NamedThreadFactory(nettyClientConfig.getRmDispatchThreadPrefix(),
                            nettyClientConfig.getClientWorkerThreads()), new ThreadPoolExecutor.CallerRunsPolicy());
                    instance = new RmRpcClient(nettyClientConfig, null, messageExecutor);
                }
            }
        }
        return instance;
    }
}
RM的netty client启动流程和TM类似,这里就不再赘述了。
到此,RM和TM的netty client就启动完成了。
2 netty server启动流程
netty server的启动入口是Server#main。
/**
 * Entry point of the TC (seata-server). Wires up the RpcServer, session
 * store, UUID generator, coordinator and XID address, then starts the netty
 * server.
 */
public class Server {
    public static void main(String[] args) throws IOException {
        ParameterParser parameterParser = new ParameterParser(args);
        //initialize the metrics
        MetricsManager.get().init();
        // Publish the store.mode value as a JVM system property.
        System.setProperty(ConfigurationKeys.STORE_MODE, parameterParser.getStoreMode());
        /**
         * Wraps the netty ServerBootstrap and initializes the information
         * netty needs to start.
         */
        RpcServer rpcServer = new RpcServer(WORKING_THREADS);
        //server port
        rpcServer.setListenPort(parameterParser.getPort());
        // Initialize the UUID generator (seeded with the server node id).
        UUIDGenerator.init(parameterParser.getServerNode());
        //log store mode : file, db
        // Initialize the session manager for the chosen store mode.
        SessionHolder.init(parameterParser.getStoreMode());
        DefaultCoordinator coordinator = new DefaultCoordinator(rpcServer);
        // Initialize the coordinator (starts its scheduled tasks).
        coordinator.init();
        // The coordinator is installed as the handler for TM/RM requests.
        rpcServer.setHandler(coordinator);
        // register ShutdownHook
        ShutdownHook.getInstance().addDisposable(coordinator);
        ShutdownHook.getInstance().addDisposable(rpcServer);
        //127.0.0.1 and 0.0.0.0 are not valid here.
        if (NetUtil.isValidIp(parameterParser.getHost(), false)) {
            XID.setIpAddress(parameterParser.getHost());
        } else {
            XID.setIpAddress(NetUtil.getLocalIp());
        }
        XID.setPort(rpcServer.getListenPort());
        try {
            /**
             * Start the netty server.
             */
            rpcServer.init();
        } catch (Throwable e) {
            LOGGER.error("rpcServer init error:{}", e.getMessage(), e);
            System.exit(-1);
        }
        // init() blocks while the netty server runs (see
        // RpcServerBootstrap.start's closeFuture().sync()), so this exit is
        // only reached after shutdown.
        System.exit(0);
    }
}
在main方法中,首先创建RpcServer,在RpcServer构造器中,会初始化netty server的bossGroup和workerGroup;然后调用rpcServer.setHandler(coordinator)设置协调者作为netty server的handler,即由Coordinator负责处理RM和TM的请求;最后调用rpcServer.init()启动netty server。
RpcServer
/**
 * The TC-side netty server. Installs the server message listener and the
 * request handler, then delegates startup to the parent class.
 */
public class RpcServer extends AbstractRpcRemotingServer {

    public RpcServer(ThreadPoolExecutor messageExecutor) {
        super(messageExecutor, new NettyServerConfig());
    }

    /**
     * Initializes the server: wires the transaction message handler into the
     * default server message listener, installs the netty ServerHandler,
     * then runs the parent init (which starts the netty server).
     */
    public void init() {
        DefaultServerMessageListenerImpl defaultServerMessageListenerImpl =
            new DefaultServerMessageListenerImpl(getTransactionMessageHandler());
        defaultServerMessageListenerImpl.init();
        defaultServerMessageListenerImpl.setServerMessageSender(this);
        super.setServerMessageListener(defaultServerMessageListenerImpl);
        // Handler that processes client (TM/RM) requests.
        super.setChannelHandlers(new ServerHandler());
        // Parent init: scheduled tasks + netty server startup.
        super.init();
    }
}
AbstractRpcRemotingServer 关注其构造方法和初始化方法
/**
 * Base class for the TC netty server; owns the seata-wrapped netty
 * ServerBootstrap. The constructor and init() are the parts of interest.
 */
public abstract class AbstractRpcRemotingServer extends AbstractRpcRemoting implements ServerMessageSender {

    public AbstractRpcRemotingServer(final ThreadPoolExecutor messageExecutor, NettyServerConfig nettyServerConfig) {
        super(messageExecutor);
        // Create seata's wrapper around netty's ServerBootstrap.
        serverBootstrap = new RpcServerBootstrap(nettyServerConfig);
    }

    public void init() {
        // Parent init starts the scheduled tasks.
        super.init();
        // Start the netty server.
        serverBootstrap.start();
    }
}
RpcServerBootstrap
/**
 * Seata's wrapper around netty's ServerBootstrap. The constructor picks
 * epoll or NIO event loop groups depending on OS support; start() configures
 * the pipeline, binds the listen port, registers the server address in the
 * registry, then blocks until the server channel closes.
 */
public class RpcServerBootstrap implements RemotingServer {

    private static final Logger LOGGER = LoggerFactory.getLogger(RpcServerBootstrap.class);
    // Netty server startup bootstrap.
    private final ServerBootstrap serverBootstrap = new ServerBootstrap();
    // Worker group: handles I/O on accepted connections.
    private final EventLoopGroup eventLoopGroupWorker;
    // Boss group: accepts incoming connections.
    private final EventLoopGroup eventLoopGroupBoss;
    private final NettyServerConfig nettyServerConfig;
    // Handlers for client requests, appended to every channel pipeline.
    private ChannelHandler[] channelHandlers;
    private int listenPort;
    private final AtomicBoolean initialized = new AtomicBoolean(false);

    public RpcServerBootstrap(NettyServerConfig nettyServerConfig) {
        this.nettyServerConfig = nettyServerConfig;
        // Does the OS support epoll?
        if (NettyServerConfig.enableEpoll()) {
            // Epoll-based event loop groups.
            this.eventLoopGroupBoss = new EpollEventLoopGroup(nettyServerConfig.getBossThreadSize(),
                new NamedThreadFactory(nettyServerConfig.getBossThreadPrefix(), nettyServerConfig.getBossThreadSize()));
            this.eventLoopGroupWorker = new EpollEventLoopGroup(nettyServerConfig.getServerWorkerThreads(),
                new NamedThreadFactory(nettyServerConfig.getWorkerThreadPrefix(),
                    nettyServerConfig.getServerWorkerThreads()));
        } else {
            // NIO-based event loop groups.
            this.eventLoopGroupBoss = new NioEventLoopGroup(nettyServerConfig.getBossThreadSize(),
                new NamedThreadFactory(nettyServerConfig.getBossThreadPrefix(), nettyServerConfig.getBossThreadSize()));
            this.eventLoopGroupWorker = new NioEventLoopGroup(nettyServerConfig.getServerWorkerThreads(),
                new NamedThreadFactory(nettyServerConfig.getWorkerThreadPrefix(),
                    nettyServerConfig.getServerWorkerThreads()));
        }
        setListenPort(nettyServerConfig.getDefaultListenPort());
    }

    /**
     * Configures and starts the netty server, then blocks on the server
     * channel's close future.
     */
    public void start() {
        // Set the boss group and worker group.
        this.serverBootstrap.group(this.eventLoopGroupBoss, this.eventLoopGroupWorker)
            // Server channel class — determines the I/O model,
            // e.g. nio -> NioServerSocketChannel.
            .channel(nettyServerConfig.SERVER_CHANNEL_CLAZZ)
            // TCP options.
            .option(ChannelOption.SO_BACKLOG, nettyServerConfig.getSoBackLogSize())
            .option(ChannelOption.SO_REUSEADDR, true)
            .childOption(ChannelOption.SO_KEEPALIVE, true)
            .childOption(ChannelOption.TCP_NODELAY, true)
            .childOption(ChannelOption.SO_SNDBUF, nettyServerConfig.getServerSocketSendBufSize())
            .childOption(ChannelOption.SO_RCVBUF, nettyServerConfig.getServerSocketResvBufSize())
            .childOption(ChannelOption.WRITE_BUFFER_WATER_MARK,
                new WriteBufferWaterMark(nettyServerConfig.getWriteBufferLowWaterMark(),
                    nettyServerConfig.getWriteBufferHighWaterMark()))
            // Port to bind.
            .localAddress(new InetSocketAddress(listenPort))
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                public void initChannel(SocketChannel ch) {
                    ch.pipeline().addLast(new IdleStateHandler(nettyServerConfig.getChannelMaxReadIdleSeconds(), 0, 0))
                        .addLast(new ProtocolV1Decoder())
                        .addLast(new ProtocolV1Encoder());
                    if (null != channelHandlers) {
                        // Appends the configured handlers
                        // (Coordinator and ServerHandler).
                        addChannelPipelineLast(ch, channelHandlers);
                    }
                }
            });
        try {
            // Start the netty server (bind blocks until complete).
            ChannelFuture future = this.serverBootstrap.bind(listenPort).sync();
            LOGGER.info("Server started ... ");
            RegistryFactory.getInstance().register(new InetSocketAddress(XID.getIpAddress(), XID.getPort()));
            initialized.set(true);
            // Block until the server channel is closed.
            future.channel().closeFuture().sync();
        } catch (Exception exx) {
            throw new RuntimeException(exx);
        }
    }
}
到此,netty server就启动完成了。netty client和netty server是基于socket的,支持全双工通信,因此,client和server之间是可以双向通信的。
下一节,我们将会关注,针对RM和TM的请求,seata server都是如何处理的。