Spring Boot + ZooKeeper + Seata + Docker (Distributed Transactions)

Overview

Version

Seata 1.2.0

Project dependencies

pom.xml

<properties>
    <zkclient.version>0.11</zkclient.version>
    <seata.version>1.2.0</seata.version>
    <mysql.version>8.0.16</mysql.version>
</properties>

<dependencies>
    <dependency>
        <groupId>com.101tec</groupId>
        <artifactId>zkclient</artifactId>
        <version>${zkclient.version}</version>
    </dependency>
    <dependency>
        <groupId>mysql</groupId>
        <artifactId>mysql-connector-java</artifactId>
        <version>${mysql.version}</version>
    </dependency>
    <dependency>
        <groupId>io.seata</groupId>
        <artifactId>seata-all</artifactId>
        <version>${seata.version}</version>
    </dependency>
</dependencies>

Create a file named registry.conf under the resources directory:

registry {
  # file, nacos, eureka, redis, zk, consul, etcd3, sofa
  type = "zk"

  nacos {
    application = "seata-server"
    serverAddr = "192.168.226.128"
    namespace = ""
    username = ""
    password = ""
  }
  eureka {
    serviceUrl = "http://localhost:8761/eureka"
    weight = "1"
  }
  redis {
    serverAddr = "localhost:6379"
    db = "0"
    password = ""
    timeout = "0"
  }
  zk {
    serverAddr = "192.168.226.128:2181"
    sessionTimeout = 6000
    connectTimeout = 2000
    username = ""
    password = ""
  }
  consul {
    serverAddr = "127.0.0.1:8500"
  }
  etcd3 {
    serverAddr = "http://localhost:2379"
  }
  sofa {
    serverAddr = "127.0.0.1:9603"
    region = "DEFAULT_ZONE"
    datacenter = "DefaultDataCenter"
    group = "SEATA_GROUP"
    addressWaitTime = "3000"
  }
  file {
    name = "file.conf"
  }
}

config {
  # file, nacos, apollo, zk, consul, etcd3, springCloudConfig
  type = "zk"

  nacos {
    serverAddr = "192.168.226.128"
    namespace = ""
    group = "SEATA_GROUP"
    username = ""
    password = ""
  }
  consul {
    serverAddr = "127.0.0.1:8500"
  }
  apollo {
    appId = "seata-server"
    apolloMeta = "http://192.168.1.204:8801"
    namespace = "application"
  }
  zk {
    serverAddr = "192.168.226.128:2181"
    sessionTimeout = 6000
    connectTimeout = 2000
    username = ""
    password = ""
  }
  etcd3 {
    serverAddr = "http://localhost:2379"
  }
  file {
    name = "file.conf"
  }
}
  • Only the zk blocks need attention here; the remaining blocks are templates for the other supported registries and config centers.
  • If the type is file, create a file.conf under resources:
transport {
  # tcp, udt, unix-domain-socket
  type = "TCP"
  # NIO, NATIVE
  server = "NIO"
  # enable heartbeat
  heartbeat = true
  # the client batch send request enable
  enableClientBatchSendRequest = true
  # thread factory for netty
  threadFactory {
    bossThreadPrefix = "NettyBoss"
    workerThreadPrefix = "NettyServerNIOWorker"
    serverExecutorThread-prefix = "NettyServerBizHandler"
    shareBossWorker = false
    clientSelectorThreadPrefix = "NettyClientSelector"
    clientSelectorThreadSize = 1
    clientWorkerThreadPrefix = "NettyClientWorkerThread"
    # netty boss thread size, will not be used for UDT
    bossThreadSize = 1
    # auto, default, pin or 8
    workerThreadSize = "default"
  }
  shutdown {
    # seconds to wait when destroying the server
    wait = 3
  }
  serialization = "seata"
  compressor = "none"
}
service {
  # transaction service group mapping
  vgroupMapping.seata_tx_group = "default"
  # only supported when registry.type=file; do not set multiple addresses
  default.grouplist = "192.168.226.128:8091"
  # degrade, currently not supported
  enableDegrade = false
  # disable seata
  disableGlobalTransaction = false
}
client {
  rm {
    asyncCommitBufferLimit = 10000
    lock {
      retryInterval = 10
      retryTimes = 30
      retryPolicyBranchRollbackOnConflict = true
    }
    reportRetryCount = 5
    tableMetaCheckEnable = false
    reportSuccessEnable = false
  }
  tm {
    commitRetryCount = 5
    rollbackRetryCount = 5
  }
  undo {
    dataValidation = true
    logSerialization = "jackson"
    logTable = "undo_log"
  }
  log {
    exceptionRate = 100
  }
}

Configure the DataSource proxy

package com.stc.cloud.provider.config;

import com.alibaba.druid.pool.DruidDataSource;
import io.seata.rm.datasource.DataSourceProxy;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;

/**
 * <p>
 * DataSource configuration: wraps the Druid pool in Seata's DataSourceProxy
 * so SQL executed by the application goes through the resource manager.
 * </p>
 *
 * @author Tianchi Sun
 * @since 2020/5/28
 */
@Configuration
public class DatasourceConfig {

    @Bean(destroyMethod = "close", initMethod = "init")
    @ConfigurationProperties(prefix = "spring.datasource")
    public DruidDataSource druidDataSource() {
        return new DruidDataSource();
    }

    /**
     * The primary DataSource exposed to the application is the Seata proxy.
     *
     * @param druidDataSource the druid data source
     * @return the data source
     */
    @ConfigurationProperties(prefix = "spring.datasource")
    @Primary
    @Bean("dataSource")
    public DataSourceProxy dataSource(DruidDataSource druidDataSource) {
        return new DataSourceProxy(druidDataSource);
    }
}
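
The DruidDataSource bean above is populated from spring.datasource.* properties. A minimal application.yml sketch for reference; the host, schema and credentials are placeholders, adjust them to your environment:

spring:
  datasource:
    url: jdbc:mysql://192.168.226.128:3306/zl_platform?useUnicode=true&characterEncoding=utf8
    username: root
    password: root
    driver-class-name: com.mysql.cj.jdbc.Driver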

Configure the transaction service group (txServiceGroup)

package com.stc.cloud.provider.config;

import io.seata.spring.annotation.GlobalTransactionScanner;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
 * <p>
 * SeataConfiguration
 * </p>
 *
 * @author Tianchi Sun
 * @since 2020/5/28
 */
@Configuration
public class SeataConfiguration {

    @Value("${spring.application.name}")
    private String applicationName;

    /**
     * Registers the global transaction scanner; the second argument is the
     * transaction service group name.
     *
     * @return global transaction scanner
     */
    @Bean
    public GlobalTransactionScanner globalTransactionScanner() {
        return new GlobalTransactionScanner(applicationName,
                "seata_tx_group");
    }
}
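
With the scanner registered, a global transaction is opened by annotating a business method with @GlobalTransactional. The following is a minimal sketch, not part of the original project; OrderService and the commented-out collaborators are hypothetical:

package com.stc.cloud.provider.service;

import io.seata.spring.annotation.GlobalTransactional;
import org.springframework.stereotype.Service;

/**
 * Hypothetical service showing how a method joins a Seata global transaction.
 */
@Service
public class OrderService {

    /**
     * Every branch touched inside this method (the local insert through the
     * proxied DataSource and any remote calls to other Seata-enabled services)
     * commits or rolls back together.
     */
    @GlobalTransactional(name = "create-order", rollbackFor = Exception.class)
    public void createOrder(Long productId, int count) {
        // storageClient.deduct(productId, count);          // remote branch (hypothetical)
        // orderMapper.insert(new Order(productId, count)); // local branch (hypothetical)
    }
}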

Create a file named config.txt under the resources directory (it is read by the upload code below):

transport.type=TCP
transport.server=NIO
transport.heartbeat=true
transport.enableClientBatchSendRequest=false
transport.threadFactory.bossThreadPrefix=NettyBoss
transport.threadFactory.workerThreadPrefix=NettyServerNIOWorker
transport.threadFactory.serverExecutorThreadPrefix=NettyServerBizHandler
transport.threadFactory.shareBossWorker=false
transport.threadFactory.clientSelectorThreadPrefix=NettyClientSelector
transport.threadFactory.clientSelectorThreadSize=1
transport.threadFactory.clientWorkerThreadPrefix=NettyClientWorkerThread
transport.threadFactory.bossThreadSize=1
transport.threadFactory.workerThreadSize=default
transport.shutdown.wait=3
service.vgroupMapping.seata_tx_group=default
service.default.grouplist=127.0.0.1:8091
service.enableDegrade=false
service.disableGlobalTransaction=false
client.rm.asyncCommitBufferLimit=10000
client.rm.lock.retryInterval=10
client.rm.lock.retryTimes=30
client.rm.lock.retryPolicyBranchRollbackOnConflict=true
client.rm.reportRetryCount=5
client.rm.tableMetaCheckEnable=false
client.rm.sqlParserType=druid
client.rm.reportSuccessEnable=false
client.rm.sagaBranchRegisterEnable=false
client.tm.commitRetryCount=5
client.tm.rollbackRetryCount=5
store.mode=db
store.file.dir=file_store/data
store.file.maxBranchSessionSize=16384
store.file.maxGlobalSessionSize=512
store.file.fileWriteBufferCacheSize=16384
store.file.flushDiskMode=async
store.file.sessionReloadReadSize=100
store.db.datasource=druid
store.db.dbType=mysql
store.db.driverClassName=com.mysql.jdbc.Driver
store.db.url=jdbc:mysql://172.17.0.1:3306/seata
store.db.user=root
store.db.password=root
store.db.minConn=5
store.db.maxConn=30
store.db.globalTable=global_table
store.db.branchTable=branch_table
store.db.queryLimit=100
store.db.lockTable=lock_table
store.db.maxWait=5000
server.recovery.committingRetryPeriod=1000
server.recovery.asynCommittingRetryPeriod=1000
server.recovery.rollbackingRetryPeriod=1000
server.recovery.timeoutRetryPeriod=1000
server.maxCommitRetryTimeout=-1
server.maxRollbackRetryTimeout=-1
server.rollbackRetryTimeoutUnlockEnable=false
client.undo.dataValidation=true
client.undo.logSerialization=jackson
server.undo.logSaveDays=7
server.undo.logDeletePeriod=86400000
client.undo.logTable=undo_log
client.log.exceptionRate=100
transport.serialization=seata
transport.compressor=none
metrics.enabled=false
metrics.registryType=compact
metrics.exporterList=prometheus
metrics.exporterPrometheusPort=9898
  • Note: in service.vgroupMapping.seata_tx_group=default, the seata_tx_group part must match the group name passed to GlobalTransactionScanner in SeataConfiguration. See the sketch below.
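
For example, renaming the group to order_tx_group (a hypothetical name) requires changing both sides:

# config.txt
service.vgroupMapping.order_tx_group=default

and, in SeataConfiguration, new GlobalTransactionScanner(applicationName, "order_tx_group").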

Code that uploads the config.txt entries to ZooKeeper (must be run once):

package com.stc.seata.provider.config;

import com.baomidou.mybatisplus.core.toolkit.StringUtils;
import io.seata.config.zk.ZookeeperConfiguration;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;

/**
 * <p>
 * Uploads the Seata configuration entries from config.txt to ZooKeeper.
 * </p>
 *
 * @author Tianchi Sun
 * @since 2020/6/1
 */
public class AddConfig {

    public static void addConfig() throws IOException {
        ZookeeperConfiguration zookeeperConfiguration = new ZookeeperConfiguration();
        final long timeout = 2000;
        final String splitConfig = "=";
        InputStream in = AddConfig.class.getResourceAsStream("/config.txt");
        try (BufferedReader fb = new BufferedReader(new InputStreamReader(in))) {
            String s;
            while ((s = fb.readLine()) != null) {
                if (StringUtils.isNotBlank(s) && s.contains(splitConfig)) {
                    // Split on the first '=' only, so values containing '=' stay intact.
                    String[] pair = s.split(splitConfig, 2);
                    String dataId = pair[0];
                    String content = pair[1];
                    zookeeperConfiguration.removeConfig(dataId, timeout);
                    zookeeperConfiguration.putConfig(dataId, content, timeout);
                }
            }
        }
    }

    public static void main(String[] args) throws IOException {
        addConfig();
    }
}
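
After running it, the entries can be checked with the ZooKeeper CLI. A sketch, assuming Seata's default /seata root node in ZooKeeper:

# on the ZooKeeper host
zkCli.sh -server 192.168.226.128:2181
ls /seata
get /seata/service.vgroupMapping.seata_tx_group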

Configure the Seata server

Configuration file: registry.conf

registry {
  # file, nacos, eureka, redis, zk, consul, etcd3, sofa
  type = "zk"

  nacos {
    application = "seata-server"
    serverAddr = "172.17.0.1"
    namespace = ""
    cluster = "default"
    username = ""
    password = ""
  }
  eureka {
    serviceUrl = "http://localhost:8761/eureka"
    application = "default"
    weight = "1"
  }
  redis {
    serverAddr = "localhost:6379"
    db = 0
    password = ""
    cluster = "default"
    timeout = 0
  }
  zk {
    cluster = "default"
    serverAddr = "172.17.0.1:2181"
    sessionTimeout = 6000
    connectTimeout = 2000
    username = ""
    password = ""
  }
  consul {
    cluster = "default"
    serverAddr = "127.0.0.1:8500"
  }
  etcd3 {
    cluster = "default"
    serverAddr = "http://localhost:2379"
  }
  sofa {
    serverAddr = "127.0.0.1:9603"
    application = "default"
    region = "DEFAULT_ZONE"
    datacenter = "DefaultDataCenter"
    cluster = "default"
    group = "SEATA_GROUP"
    addressWaitTime = "3000"
  }
  file {
    name = "file:/root/seata-config/file.conf"
  }
}

config {
  # file, nacos, apollo, zk, consul, etcd3
  type = "zk"

  nacos {
    serverAddr = "172.17.0.1"
    namespace = ""
    group = "SEATA_GROUP"
    username = ""
    password = ""
  }
  consul {
    serverAddr = "127.0.0.1:8500"
  }
  apollo {
    appId = "seata-server"
    apolloMeta = "http://192.168.1.204:8801"
    namespace = "application"
  }
  zk {
    serverAddr = "172.17.0.1:2181"
    sessionTimeout = 6000
    connectTimeout = 2000
    username = ""
    password = ""
  }
  etcd3 {
    serverAddr = "http://localhost:2379"
  }
  file {
    name = "file:/root/seata-config/file.conf"
  }
}
  • If the type is file, you also need the file configuration file, file.conf:
(The content is identical to the client-side file.conf shown earlier: the transport, service and client blocks, with vgroupMapping.seata_tx_group = "default".)

Starting with Docker:

docker run --restart=always --network=host --name seata-server3 -p 8093:8093 -e SEATA_CONFIG_NAME=file:/root/seata-config/registry -e SEATA_PORT=8093 -v /home/tianchi/docker/seata/config:/root/seata-config seataio/seata-server:1.2.0
  • --restart=always: start the container automatically on boot
  • --network=host: host networking, i.e. the container uses the host's IP and ports directly
  • SEATA_CONFIG_NAME: location of the Seata configuration file, as seen inside the container
  • SEATA_PORT: the port Seata listens on, inside the container
  • -v /home/tianchi/docker/seata/config:/root/seata-config: volume mapping; the path before the colon is the host (Linux) directory holding registry.conf and file.conf, and the path after the colon is the location inside the container, which must match the file block in registry.conf:
file {
  name = "file:/root/seata-config/file.conf"
}

The two directories must stay consistent.

  • To start another instance, change --name, -p and SEATA_PORT, for example:
docker run --restart=always --network=host --name seata-server4 -p 8094:8094 -e SEATA_CONFIG_NAME=file:/root/seata-config/registry -e SEATA_PORT=8094 -v /home/tianchi/docker/seata/config:/root/seata-config seataio/seata-server:1.2.0
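
To confirm an instance started and registered, tail its logs, e.g.:

docker logs -f seata-server3
# expect a startup line indicating the server is listening on port 8093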

docker-compose

version: "2"
services:
  seata-server1:
    image: seataio/seata-server:1.2.0
    container_name: seata-server1
    hostname: seata-server
    volumes:
      - ../config:/root/seata-config
    ports:
      - "8091:8091"
    environment:
      - SEATA_IP=172.17.0.1
      - SEATA_CONFIG_NAME=file:/root/seata-config/registry
      - SEATA_PORT=8091
    restart: always
  seata-server2:
    image: seataio/seata-server:1.2.0
    container_name: seata-server2
    hostname: seata-server
    volumes:
      - ../config:/root/seata-config
    ports:
      - "8092:8092"
    environment:
      - SEATA_IP=172.17.0.1
      - SEATA_CONFIG_NAME=file:/root/seata-config/registry
      - SEATA_PORT=8092
    restart: always
  seata-server3:
    image: seataio/seata-server:1.2.0
    container_name: seata-server3
    hostname: seata-server
    volumes:
      - ../config:/root/seata-config
    ports:
      - "8093:8093"
    environment:
      - SEATA_IP=172.17.0.1
      - SEATA_CONFIG_NAME=file:/root/seata-config/registry
      - SEATA_PORT=8093
    restart: always
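
Run from the directory containing this file, with registry.conf and file.conf placed in ../config:

docker-compose up -d
docker-compose ps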

Plain (non-Docker) startup

  • Download the latest seata-server release
  • Edit the registry.conf configuration file:
(The content is identical to the client-side registry.conf shown at the top of the article: registry.type = "zk" and config.type = "zk", both pointing at 192.168.226.128:2181.)
  • Go to the seata/bin directory
  • Start on Windows:
seata-server.bat
  • Start on Linux:
sh seata-server.sh
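
Both scripts accept optional flags; a sketch (check your release's script for the exact set):

# -p port, -h the host/IP registered to the registry, -m store mode (file or db)
sh seata-server.sh -p 8091 -h 192.168.226.128 -m db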

Database scripts

DDL for the transaction log database

  • If config.txt sets store.mode=db, create the following tables in the database referenced by store.db.url=jdbc:mysql://172.17.0.1:3306/seata:
/*
 Navicat Premium Data Transfer

 Source Server         : Local 5.7
 Source Server Type    : MySQL
 Source Server Version : 50730
 Source Host           : 192.168.226.128:3306
 Source Schema         : seata

 Target Server Type    : MySQL
 Target Server Version : 50730
 File Encoding         : 65001

 Date: 24/06/2020 10:42:12
*/
SET NAMES utf8mb4;
SET FOREIGN_KEY_CHECKS = 0;
-- ----------------------------
-- Table structure for branch_table
-- ----------------------------
DROP TABLE IF EXISTS `branch_table`;
CREATE TABLE `branch_table`
(
`branch_id` bigint(20) NOT NULL,
`xid` varchar(128) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL,
`transaction_id` bigint(20) NULL DEFAULT NULL,
`resource_group_id` varchar(32) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL,
`resource_id` varchar(256) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL,
`branch_type` varchar(8) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL,
`status` tinyint(4) NULL DEFAULT NULL,
`client_id` varchar(64) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL,
`application_data` varchar(2000) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL,
`gmt_create` datetime(6) NULL DEFAULT NULL,
`gmt_modified` datetime(6) NULL DEFAULT NULL,
PRIMARY KEY (`branch_id`) USING BTREE,
INDEX `idx_xid`(`xid`) USING BTREE
) ENGINE = InnoDB CHARACTER SET = utf8 COLLATE = utf8_general_ci ROW_FORMAT = Dynamic;
-- ----------------------------
-- Table structure for global_table
-- ----------------------------
DROP TABLE IF EXISTS `global_table`;
CREATE TABLE `global_table`
(
`xid` varchar(128) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL,
`transaction_id` bigint(20) NULL DEFAULT NULL,
`status` tinyint(4) NOT NULL,
`application_id` varchar(32) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL,
`transaction_service_group` varchar(32) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL,
`transaction_name` varchar(128) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL,
`timeout` int(11) NULL DEFAULT NULL,
`begin_time` bigint(20) NULL DEFAULT NULL,
`application_data` varchar(2000) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL,
`gmt_create` datetime(0) NULL DEFAULT NULL,
`gmt_modified` datetime(0) NULL DEFAULT NULL,
PRIMARY KEY (`xid`) USING BTREE,
INDEX `idx_gmt_modified_status`(`gmt_modified`, `status`) USING BTREE,
INDEX `idx_transaction_id`(`transaction_id`) USING BTREE
) ENGINE = InnoDB CHARACTER SET = utf8 COLLATE = utf8_general_ci ROW_FORMAT = Dynamic;
-- ----------------------------
-- Table structure for lock_table
-- ----------------------------
DROP TABLE IF EXISTS `lock_table`;
CREATE TABLE `lock_table`
(
`row_key` varchar(128) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL,
`xid` varchar(96) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL,
`transaction_id` bigint(20) NULL DEFAULT NULL,
`branch_id` bigint(20) NOT NULL,
`resource_id` varchar(256) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL,
`table_name` varchar(32) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL,
`pk` varchar(36) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL,
`gmt_create` datetime(0) NULL DEFAULT NULL,
`gmt_modified` datetime(0) NULL DEFAULT NULL,
PRIMARY KEY (`row_key`) USING BTREE,
INDEX `idx_branch_id`(`branch_id`) USING BTREE
) ENGINE = InnoDB CHARACTER SET = utf8 COLLATE = utf8_general_ci ROW_FORMAT = Dynamic;
SET FOREIGN_KEY_CHECKS = 1;

Create the following table in every business database that participates in transactions:

/*
 Navicat Premium Data Transfer

 Source Server         : Local 5.7
 Source Server Type    : MySQL
 Source Server Version : 50730
 Source Host           : 192.168.226.128:3306
 Source Schema         : zl_platform

 Target Server Type    : MySQL
 Target Server Version : 50730
 File Encoding         : 65001

 Date: 24/06/2020 10:43:46
*/
SET NAMES utf8mb4;
SET FOREIGN_KEY_CHECKS = 0;
-- ----------------------------
-- Table structure for undo_log
-- ----------------------------
DROP TABLE IF EXISTS `undo_log`;
CREATE TABLE `undo_log`
(
`id` bigint(20) NOT NULL AUTO_INCREMENT,
`branch_id` bigint(20) NOT NULL,
`xid` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL,
`context` varchar(128) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL,
`rollback_info` longblob NOT NULL,
`log_status` int(11) NOT NULL,
`log_created` datetime(0) NOT NULL,
`log_modified` datetime(0) NOT NULL,
`ext` varchar(100) CHARACTER SET utf8 COLLATE utf8_general_ci NULL DEFAULT NULL,
PRIMARY KEY (`id`) USING BTREE,
UNIQUE INDEX `ux_undo_log`(`xid`, `branch_id`) USING BTREE
) ENGINE = InnoDB AUTO_INCREMENT = 1 CHARACTER SET = utf8 COLLATE = utf8_general_ci ROW_FORMAT = Dynamic;
SET FOREIGN_KEY_CHECKS = 1;
