安装配置
- 创建目录
mkdir -p /opt/lpg/{loki,promtail}
mkdir -p /opt/lpg/loki/{data,conf}
- 创建配置文件
loki-config.yaml
vim /opt/lpg/loki/conf/loki-config.yaml内容如下
auth_enabled: false

server:
  http_listen_port: 3100

# Shared settings used by all Loki components
common:
  path_prefix: /opt/lpg/loki
  storage:
    filesystem:
      chunks_directory: /opt/lpg/loki/chunks
      rules_directory: /opt/lpg/loki/rules
  replication_factor: 1
  ring:
    kvstore:
      store: inmemory

schema_config:
  configs:
    - from: 2020-10-24            # date this schema entry takes effect
      store: boltdb-shipper       # index store type
      object_store: filesystem    # chunk/object store type
      schema: v11                 # schema version
      index:                      # index settings
        prefix: index_            # index table prefix
        period: 24h               # index rotation period

# Storage backends
storage_config:
  boltdb_shipper:
    active_index_directory: /opt/lpg/loki/boltdb-shipper-active  # active index dir
    cache_location: /opt/lpg/loki/boltdb-shipper-cache           # cache dir
    cache_ttl: 24h                                               # cache expiry
    shared_store: filesystem                                     # shared store type
  filesystem:
    directory: /opt/lpg/loki/chunks   # chunk directory on disk

compactor:
  working_directory: /opt/lpg/loki/boltdb-shipper-compactor  # compactor work dir
  shared_store: filesystem

limits_config:
  reject_old_samples: true          # reject samples older than the max age
  reject_old_samples_max_age: 72h   # samples older than 72h are rejected
  max_entries_limit_per_query: 9999 # max rows returned per query
  ingestion_rate_mb: 32             # per-user ingestion rate limit (MB/s); too low makes clients error
  ingestion_burst_size_mb: 64       # per-user allowed ingestion burst size

chunk_store_config:
  max_look_back_period: 72h  # must be <= retention_period below to avoid querying deleted data

table_manager:
  retention_deletes_enabled: true  # enable retention-based deletion
  retention_period: 72h            # chunks older than 72h are deleted

ruler:
  alertmanager_url: http://localhost:9093
- 创建配置文件
promtail-config.yaml
vim /opt/lpg/promtail/promtail-config.yaml内容如下
server:
  http_listen_port: 9080  # open port 9080 in the cloud-server firewall
  grpc_listen_port: 0

positions:
  filename: /tmp/positions.yaml

# Loki push endpoint ("loki" resolves via the docker-compose network)
clients:
  - url: http://loki:3100/loki/api/v1/push

scrape_configs:
  - job_name: servername-file
    static_configs:
      - targets:
          - localhost
        labels:
          job: servername-file                  # label attached to every entry
          __path__: "/data/server/logs/*.log"   # glob of log files to collect
- 使用docker安装,创建
docker-compose.yml
vim /opt/lpg/docker-compose.yml
version: "3"

networks:
  loki:

services:
  # Loki — log storage and query backend
  loki:
    container_name: loki
    image: grafana/loki:2.6.1
    ports:
      - "3100:3100"
      - "9095:9095"
    volumes:
      - /opt/lpg/loki/data:/var/loki
      - /opt/lpg/loki/conf/loki-config.yaml:/etc/loki/local-config.yaml
    user: root  # avoids permission errors on the bind-mounted data dir
    command: ["-config.file=/etc/loki/local-config.yaml"]
    networks:
      - loki

  # Promtail — log collector
  promtail:
    # NOTE(review): 2.1.0 is much older than loki 2.6.1 — consider matching versions
    image: grafana/promtail:2.1.0
    container_name: promtail
    restart: always
    ports:
      - "9080:9080"
    volumes:
      - /opt/lpg/promtail/promtail-config.yaml:/opt/promtail-config.yaml
      - /data/server/logs/:/data/server/logs/  # host log dir mounted into the container
      - /etc/localtime:/etc/localtime:ro
    privileged: true  # needed to read host logs without permission errors
    deploy:
      resources:
        limits:
          memory: 2G
        reservations:
          memory: 200M
    logging:
      driver: json-file
      options:
        max-size: "200m"
        max-file: "4"
    command: ["-config.file=/opt/promtail-config.yaml"]
    networks:
      - loki

  # Grafana web UI — omit if you already run one
  grafana:
    image: grafana/grafana:9.5.18
    restart: always
    ports:
      - "3000:3000"
    container_name: grafana
    networks:
      - loki
- 最后一步启动
docker-compose -f docker-compose.yml up -d
打开登录Grafana
http://127.0.0.1:3000 默认账号密码: admin/admin
添加Loki数据源
- 查看日志
springboot集成
- JDK8版本
- maven
<dependency>
    <groupId>com.github.loki4j</groupId>
    <artifactId>loki-logback-appender-jdk8</artifactId>
    <version>1.5.1</version>
</dependency>
<dependency>
    <groupId>org.apache.httpcomponents</groupId>
    <artifactId>httpclient</artifactId>
    <version>4.5.14</version>
</dependency>
- logback-spring.xml
<?xml version="1.0" encoding="UTF-8"?>
<configuration debug="false">
    <springProperty scope="context" name="logPath" source="logback.path"/>
    <springProperty scope="context" name="logLevel" source="logback.level"/>
    <!-- Log file location; avoid relative paths in Logback config -->
    <property name="LOG_HOME" value="${logPath}" />
    <contextName>logback</contextName>

    <!-- Console output -->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <!-- %d date, %thread thread name, %-5level level padded to 5 chars, %msg message, %n newline -->
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] [%X{mdc_trace_id}] %-5level %logger{50}[%line] - %msg %n</pattern>
        </encoder>
    </appender>

    <!-- One rolled log file per day -->
    <appender name="FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- Output file name pattern -->
            <FileNamePattern>${LOG_HOME}/flow.%d{yyyy-MM-dd}.log</FileNamePattern>
            <!-- Days of history to keep -->
            <MaxHistory>15</MaxHistory>
        </rollingPolicy>
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <!-- Same pattern as the console appender -->
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] [%X{mdc_trace_id}] %-5level %logger{50}[%line] - %msg %n</pattern>
        </encoder>
        <!-- Size-based rollover, currently disabled -->
        <!--<triggeringPolicy class="ch.qos.logback.core.rolling.SizeBasedTriggeringPolicy">
        <MaxFileSize>10MB</MaxFileSize>
        </triggeringPolicy>-->
    </appender>

    <!-- Loki appender (loki4j) -->
    <appender name="LOKI" class="com.github.loki4j.logback.Loki4jAppender">
        <batchMaxItems>100</batchMaxItems>
        <batchTimeoutMs>10000</batchTimeoutMs>
        <verbose>true</verbose>
        <http class="com.github.loki4j.logback.ApacheHttpSender">
            <url>http://127.0.0.1:3100/loki/api/v1/push</url>
        </http>
        <format>
            <label>
                <pattern>app=my-app,host=${HOSTNAME},level=%level</pattern>
            </label>
            <message>
                <pattern>l=%level h=${HOSTNAME} c=%logger{20} t=%thread | %msg %ex %n</pattern>
            </message>
            <sortByTime>true</sortByTime>
        </format>
    </appender>

    <!-- Root log level; levels low-to-high: TRACE, DEBUG, INFO, WARN, ERROR, FATAL -->
    <root level="${logLevel}">
        <appender-ref ref="STDOUT" />
        <appender-ref ref="FILE" />
        <appender-ref ref="LOKI" />
    </root>
</configuration>
- JDK11以上版本
- maven
<dependency>
    <groupId>com.github.loki4j</groupId>
    <artifactId>loki-logback-appender</artifactId>
    <version>1.5.1</version>
</dependency>
- logback-spring.xml
......
......省略
<!-- Loki appender (loki4j, JDK 11+ artifact uses the default HTTP sender) -->
<appender name="LOKI" class="com.github.loki4j.logback.Loki4jAppender">
    <http>
        <url>http://127.0.0.1:3100/loki/api/v1/push</url>
    </http>
    <format>
        <label>
            <pattern>app=my-app,host=${HOSTNAME}</pattern>
        </label>
        <message>
            <pattern>%-5level [%.5(${HOSTNAME})] %.10thread %logger{20} | %msg %ex</pattern>
        </message>
    </format>
</appender>
......省略
......
结束