前言
- 文中 ${} 形式的内容为占位符,请替换为你自己的配置值。
- 以下配置适用于快速构建测试环境。
- 部署完成后请自行验证数据是否真的完成了持久化。
- 创建docker network。
docker network create spring_cloud
MySQL 8.2
docker compose
version: '3'
services:
  mysql:
    # Pinned to the version documented in the heading above; the original used
    # the floating `mysql` tag, which may pull a different major version later.
    image: mysql:8.2
    container_name: mysql
    privileged: true
    command:
      # MySQL 8 defaults to caching_sha2_password, which many client tools
      # still do not support; fall back to the legacy mysql_native_password.
      - --default-authentication-plugin=mysql_native_password
      - --character-set-server=utf8mb4
      - --collation-server=utf8mb4_general_ci
    # Restart on exit, except when the container was explicitly stopped
    # (and it stays stopped across Docker daemon restarts).
    restart: unless-stopped
    environment:
      MYSQL_ROOT_PASSWORD: ${root密码}  # password for the root user
      MYSQL_USER: ${用户名}  # additional user to create
      MYSQL_PASSWORD: ${用户密码}  # password for that user
      TZ: Asia/Shanghai  # time zone
      LANG: C.UTF-8  # system character set
    ports:
      # Quoted to avoid YAML's sexagesimal/implicit-typing traps on port maps.
      - "33306:3306"
    volumes:
      - ./data:/var/lib/mysql
      - ./conf:/etc/mysql/conf.d
      - ./logs:/logs
      - /usr/share/zoneinfo/Asia/Shanghai:/etc/localtime
    networks:
      - spring_cloud
networks:
  spring_cloud:
    external: true
my.cnf 配置
目录路径:${当前docker-compose.yml所在目录}/conf/
###### [mysql] section (mysql command-line client) ######
[mysql]
# Default character set for the mysql client
default-character-set=utf8mb4
###### [mysqld] section (server) ######
[mysqld]
# Strict SQL mode plus the usual safety flags
sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
datadir = /var/lib/mysql
# NULL disables LOAD DATA / SELECT ... INTO OUTFILE file operations entirely
secure-file-priv= NULL
# MySQL 8 authentication plugin (legacy, for older client tools)
# NOTE(review): default_authentication_plugin is deprecated in recent MySQL 8.x
# releases — confirm it is still accepted by the server version actually pulled.
default_authentication_plugin=mysql_native_password
# Server-side character set (the built-in default would be latin1)
character-set-server=utf8mb4
collation-server=utf8mb4_general_ci
# Maximum number of simultaneous client connections
max_connections=65535
# Default storage engine for newly created tables
default-storage-engine=INNODB
###### [client] section (all client programs) ######
[client]
default-character-set=utf8mb4
# Custom config should go here
!includedir /etc/mysql/conf.d/
Nacos 2.2.0
docker compose
version: '3'
services:
  pig-register:
    image: nacos/nacos-server:v2.2.0
    container_name: nacos_server  # container named 'nacos_server'
    # Restart on exit unless the container was explicitly stopped
    # (the original comment said "always restart", which described a different policy).
    restart: unless-stopped
    volumes:  # map host directories into the container (same as -v on docker run)
      - "./logs:/home/nacos/logs"
      - "./data/:/home/nacos/data"
    environment:  # same as -e on docker run
      - PREFER_HOST_MODE=ip  # use hostname if supported, otherwise ip (default: ip)
      - MODE=standalone  # standalone (single-node) mode
      - SPRING_DATASOURCE_PLATFORM=mysql  # datasource platform: only mysql, or empty for no persistence
      # TODO fill in the MySQL connection info below
      - MYSQL_SERVICE_HOST=mysql  # NOTE: must NOT be `127.0.0.1` or `localhost`!
      - MYSQL_SERVICE_DB_NAME=${nacos 在mysql 中的数据库名称}
      - MYSQL_SERVICE_PORT=${mysql 端口号}
      - MYSQL_SERVICE_USER=${mysql 用户名}
      - MYSQL_SERVICE_PASSWORD=${mysql 密码}
      # JVM tuning parameters
      - JVM_XMS=128m  # -Xms, default: 2g
      - JVM_XMX=128m  # -Xmx, default: 2g
      - JVM_XMN=64m  # -Xmn, default: 1g
      - JVM_MS=32m  # -XX:MetaspaceSize, default: 128m
      - JVM_MMS=32m  # -XX:MaxMetaspaceSize, default: 320m
      - MYSQL_SERVICE_DB_PARAM=characterEncoding=utf8&connectTimeout=10000&socketTimeout=30000&autoReconnect=true&useSSL=false&serverTimezone=UTC
    ports:
      - "8848:8848"
      - "9848:9848"
      - "9849:9849"
    healthcheck:
      test: [ "CMD-SHELL", "echo 'ruok' | curl -s telnet://localhost:8848 || exit 1" ]
      retries: 10
    networks:
      - spring_cloud
networks:
  spring_cloud:
    external: true
Redis
docker compose
version: '3.0'
services:
  myredis:
    container_name: redis
    image: redis:6.0.6
    restart: always
    ports:
      # Quoted to avoid YAML implicit-typing traps on port mappings.
      - "36379:6379"
    privileged: true
    # Start with the mounted config and force AOF persistence on.
    command: redis-server /etc/redis/redis.conf --appendonly yes
    volumes:
      - ./data:/data
      - ./conf/redis.conf:/etc/redis/redis.conf
    networks:
      - spring_cloud
networks:
  spring_cloud:
    external: true
redis.conf
# Enable protected mode (remote access still works because a password is set below)
protected-mode yes
# Allow remote connections: bind to all interfaces instead of loopback only
#bind 127.0.0.1
bind 0.0.0.0
# Connection password (replace the placeholder)
requirepass ${自定义链接密码}
port 6379
# 0 = never close idle client connections
timeout 0
# RDB persistence: bgsave if >= 1 write happened within 900s
save 900 1
# ... >= 10 writes within 300s
save 300 10
# ... >= 10000 writes within 60s
save 60 10000
rdbcompression yes
dbfilename dump.rdb
# Working directory — matches the ./data volume mount in the compose file
dir /data
# AOF persistence, fsync once per second
appendonly yes
appendfsync everysec
RabbitMQ
docker compose
version: '3'
services:
  rabbitmq:
    image: rabbitmq:3.9.10-management-alpine
    container_name: rabbitmq
    ports:
      - "5672:5672"  # AMQP port
      - "15672:15672"  # management UI port
    volumes:
      - ./rabbitmq_data:/var/lib/rabbitmq  # data persistence directory
    environment:
      - RABBITMQ_DEFAULT_USER=${默认用户名}  # RabbitMQ default username
      - RABBITMQ_DEFAULT_PASS=${默认密码}  # RabbitMQ default password
    # Attach to the shared network like every other service in this stack,
    # so the microservices can reach RabbitMQ by its container hostname.
    networks:
      - spring_cloud
networks:
  spring_cloud:
    external: true
Kafka
docker compose
version: '3.0'
services:
  zookeeper:
    image: wurstmeister/zookeeper
    container_name: zookeeper
    ports:
      - "2181:2181"
    networks:
      - spring_cloud
  kafka:
    image: wurstmeister/kafka
    container_name: kafka
    volumes:
      # map the kafka data files out of the container
      - ./kafka:/kafka
      - /var/run/docker.sock:/var/run/docker.sock
      - /etc/localtime:/etc/localtime
    ports:
      - "9092:9092"
    environment:
      # BUGFIX: the advertised host must be the broker's own hostname ("kafka"),
      # not "zookeeper" — advertising zookeeper's name makes clients try to
      # connect to the wrong container for broker traffic.
      KAFKA_ADVERTISED_HOST_NAME: kafka
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_LOG_RETENTION_HOURS: 120
      KAFKA_MESSAGE_MAX_BYTES: 10000000
      KAFKA_REPLICA_FETCH_MAX_BYTES: 10000000
      KAFKA_GROUP_MAX_SESSION_TIMEOUT_MS: 60000
      KAFKA_NUM_PARTITIONS: 3
      KAFKA_DELETE_RETENTION_MS: 1000
    networks:
      - spring_cloud
  kafka-manager:
    image: sheepkiller/kafka-manager
    container_name: kafka-manager
    environment:
      # Include the client port so kafka-manager can reach ZooKeeper.
      ZK_HOSTS: zookeeper:2181
    ports:
      - "9009:9000"
    networks:
      - spring_cloud
networks:
  spring_cloud:
    external: true
Elasticsearch
获取容器中的配置文件
- 启动该容器
docker run -d --name elasticsearch_container \
-p 9200:9200 -p 9300:9300 \
-e "discovery.type=single-node" \
elasticsearch:8.11.3
- 将容器中的配置复制至宿主机
docker cp elasticsearch_container:/usr/share/elasticsearch/config ./config
- 将容器关闭并移除
docker stop elasticsearch_container && docker rm elasticsearch_container
- 修改elasticsearch.yml
以下配置将关闭xpack security 校验,正式环境确保配置正确的elasticsearch和域名证书。
cluster.name: "docker-cluster"
network.host: 0.0.0.0
#----------------------- BEGIN SECURITY AUTO CONFIGURATION -----------------------
#
# The following settings, TLS certificates, and keys have been automatically
# generated to configure Elasticsearch security features on 25-01-2024 01:55:49
#
# --------------------------------------------------------------------------------
# Security features are disabled for this test environment (see the note above).
xpack.security.enabled: false
xpack.security.enrollment.enabled: true
# BUGFIX: with security disabled, HTTP and transport TLS must be disabled too —
# the Kibana and Logstash configs in this document connect via plain
# http://elasticsearch:9200 and would fail against an HTTPS-only endpoint.
xpack.security.http.ssl:
  enabled: false
  keystore.path: certs/http.p12
# Encryption and mutual authentication between cluster nodes (off for single-node test)
xpack.security.transport.ssl:
  enabled: false
  verification_mode: certificate
  keystore.path: certs/transport.p12
  truststore.path: certs/transport.p12
#----------------------- END SECURITY AUTO CONFIGURATION -------------------------
docker compose
version: '3.8'
services:
  elasticsearch:
    image: elasticsearch:8.11.3
    container_name: elasticsearch
    restart: always
    privileged: true
    ports:
      - "9200:9200"  # HTTP API
      - "9300:9300"  # transport (node-to-node)
    environment:
      - discovery.type=single-node
      - TZ=Asia/Shanghai
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms1024m -Xmx1024m"
    volumes:
      - "./data:/usr/share/elasticsearch/data"
      - "./config:/usr/share/elasticsearch/config"
      - "./plugins:/usr/share/elasticsearch/plugins"
    # Allow the process to lock memory — required for bootstrap.memory_lock=true.
    ulimits:
      memlock:
        soft: -1
        hard: -1
    networks:
      - spring_cloud
networks:
  spring_cloud:
    external: true
logstash
docker compose
version: '3'
services:
  logstash:
    container_name: logstash
    # NOTE(review): 7.17.17 does not match the Elasticsearch 8.11.3 deployed
    # above — confirm cross-version compatibility or align the versions.
    image: docker.elastic.co/logstash/logstash:7.17.17
    ports:
      - "5044:5044"
    volumes:
      - ./config/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    networks:
      - spring_cloud
networks:
  spring_cloud:
    external: true
logstash.conf
配置文件目录:${当前docker-compose文件目录}/config/
# Receive JSON-lines events over TCP (e.g. from a logback/log4j TCP appender).
input {
  tcp {
    port => 5044
    codec => json_lines
  }
}
output {
  elasticsearch {
    # BUGFIX: `flush_size` and `batch_size` are not valid options of the
    # elasticsearch output plugin in Logstash 7.x (removed long ago); leaving
    # them in makes the pipeline fail to load with "Unknown setting".
    hosts => "elasticsearch:9200"
    # Daily index, e.g. gov-logs-2024.01.25
    index => "gov-logs-%{+YYYY.MM.dd}"
  }
}
# NOTE: `pipeline.workers` is a logstash.yml / pipelines.yml setting, not valid
# pipeline-config syntax — set it there instead, e.g.:
#   pipeline.workers: 4
kibana
docker compose
version: '3'
services:
  kibana:
    image: kibana:8.11.3
    container_name: kibana
    privileged: true
    ports:
      - "5601:5601"
    environment:
      # Elasticsearch endpoint (plain HTTP — security is disabled in this stack).
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
    volumes:
      - ./config:/usr/share/kibana/config
    networks:
      - spring_cloud
networks:
  spring_cloud:
    external: true
配置文件
配置文件在当前docker-compose.yml文件目录中的config文件夹中
- 在config目录下新建kibana.yml文件
#
# ** THIS IS AN AUTO-GENERATED FILE **
#
# Default Kibana configuration for docker target
server.host: "0.0.0.0"
server.shutdownTimeout: "5s"
elasticsearch.hosts: [ "http://elasticsearch:9200" ]  # Elasticsearch address; list multiple entries for a cluster
monitoring.ui.container.elasticsearch.enabled: true
- 在config目录下新建node.options文件
## Node command line options
## See `node --help` and `node --v8-options` for available options
## Please note you should specify one option per line
## max size of the V8 old-space heap in megabytes (kept commented out: use the default)
#--max-old-space-size=4096
## do not terminate process on unhandled promise rejection
--unhandled-rejections=warn
## restore < Node 16 default DNS lookup behavior
--dns-result-order=ipv4first
## enable OpenSSL 3 legacy provider
--openssl-legacy-provider
Sentinel-dashboard
version: '3'
services:
  sentinel-dashboard:
    container_name: sentinel-dashboard
    image: royalwang/sentinel-dashboard:1.8.4
    ports:
      - "8858:8858"
    privileged: true
    networks:
      - spring_cloud
networks:
  spring_cloud:
    external: true
Skywalking 链路追踪
version: '3.8'
services:
  oap:
    image: docker.io/apache/skywalking-oap-server:9.7.0-java17
    container_name: oap
    restart: always
    privileged: true
    ports:
      - "11800:11800"
      - "12800:12800"
    environment:
      - SW_CORE_RECORD_DATA_TTL=15
      - SW_CORE_METRICS_DATA_TTL=15
      - SW_STORAGE=elasticsearch
      - SW_STORAGE_ES_CLUSTER_NODES=elasticsearch:9200  # Elasticsearch address
      - SW_ENABLE_UPDATE_UI_TEMPLATE=true
      - TZ=Asia/Shanghai
    networks:
      - spring_cloud
  ui:
    image: docker.io/apache/skywalking-ui:9.7.0-java17
    container_name: ui
    privileged: true
    depends_on:
      - oap
    restart: always
    ports:
      - "8080:8080"
    environment:
      - SW_OAP_ADDRESS=http://oap:12800
      # NOTE(review): port 9412 on oap is not published or configured above —
      # confirm the zipkin receiver is enabled if this address is actually used.
      - SW_ZIPKIN_ADDRESS=http://oap:9412
    networks:
      - spring_cloud
networks:
  spring_cloud:
    external: true