1. Add the Elastic package repository
Add the official Elastic package repository so that you can install the specific Filebeat version (7.17.13).
Import the Elastic public key
wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
Add the Elastic APT source
Create a source list file that contains the Elastic APT source. Note that the 7.x repository provides the Filebeat 7.17.13 package:
echo "deb https://artifacts.elastic.co/packages/7.x/apt stable main" | sudo tee -a /etc/apt/sources.list.d/elastic-7.x.list
2. Update APT and install Filebeat 7.17.13
Online installation
Update the APT package list:
sudo apt-get update
Next, install Filebeat version 7.17.13:
sudo apt-get install filebeat=7.17.13
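To prevent a later apt-get upgrade from replacing the pinned 7.17.13 package, you can optionally put it on hold (a standard APT mechanism, not specific to Filebeat):
sudo apt-mark hold filebeat
# release the hold again when you do want to upgrade:
sudo apt-mark unhold filebeat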
Manual installation
If Filebeat cannot be downloaded and installed from the repository, download the .deb package manually.
Download the Filebeat .deb package:
wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.17.13-amd64.deb
Install it with dpkg:
sudo dpkg -i filebeat-7.17.13-amd64.deb
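If dpkg reports unmet dependencies, they can usually be resolved afterwards with APT:
sudo apt-get install -f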
Verify that Filebeat was installed successfully:
filebeat version
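The installed package version can also be confirmed through dpkg:
dpkg -s filebeat | grep Version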
3. Configure Filebeat
After installation, the Filebeat configuration file is located at /etc/filebeat/filebeat.yml.
a. Configure Filebeat to collect logs
You can configure Filebeat to collect local logs. Find the filebeat.inputs section and define the log paths to collect:
filebeat.inputs:
- type: container
  stream: all
  enabled: true
  id: smart-home-log        # each input must have a unique ID
  paths:
    - /var/log/containers/smarthome*.log   # log file paths to monitor
  close_inactive: 5m        # close a file if it has not changed for 5 minutes
  scan_frequency: 10s       # scan for new files every 10 seconds
  # Multiline configuration
  multiline.pattern: '^\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3}\]'
  multiline.negate: true
  multiline.match: after
  # Additional pattern to handle stack traces
  # multiline.flush_pattern: '^\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\]' # Ensures a new timestamp starts a new event

output.logstash:
  hosts: ["127.0.0.1:5044"]   # replace 127.0.0.1 with the actual Logstash server address

#output.console:
#  enabled: true
#  pretty: true              # enable pretty if you want more readable console output
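Before restarting the service, the configuration and the connection to Logstash can be checked with Filebeat's built-in test commands:
sudo filebeat test config -c /etc/filebeat/filebeat.yml
sudo filebeat test output -c /etc/filebeat/filebeat.yml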
4. Start or restart Filebeat
After completing the configuration, restart the Filebeat service to apply the new settings:
sudo systemctl restart filebeat
sudo systemctl status filebeat
# Enable Filebeat to start automatically at boot
sudo systemctl enable filebeat
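If the service fails to start or no data reaches Logstash, the Filebeat service logs in the systemd journal are the first place to look:
sudo journalctl -u filebeat -f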
5. Logstash configuration
A matching Logstash pipeline, for example saved under /etc/logstash/conf.d/, could look like this:
input {
  beats {
    port => 5044    # port Logstash listens on for Beats connections
    ssl  => false
  }
}
filter {
  grok {
    match => { "message" => "\[%{TIMESTAMP_ISO8601:timestamp}\] %{LOGLEVEL:log_level} \[%{DATA:component}\] %{INT:pid} --- \[%{DATA:thread}\] %{JAVACLASS:class} : %{GREEDYDATA:log_message}" }
  }
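  # For reference, a (hypothetical) application log line that this pattern is meant to
  # match would look roughly like:
  #   [2024-05-01 10:23:45.123] INFO [smart-home] 12345 --- [main,9f86d0815a3b] com.example.DeviceService : Device registered
  # i.e. timestamp, log level, component, pid, "thread,trace_id" in brackets, Java class, message.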
  # Split the thread field (thread name + trace id) and the log file path
  mutate {
    split => { "thread" => "," }
    split => { "[log][file][path]" => "_" }
    add_field => {
      "thread_name"   => "%{[thread][0]}"
      "trace_id"      => "%{[thread][1]}"
      "log_file_name" => "%{[log][file][path][2]}"
    }
  }
  mutate {
    split => { "log_file_name" => "-" }
    add_field => {
      "index_name" => "%{[log_file_name][0]}-%{[log_file_name][1]}-%{[log_file_name][2]}"
    }
    remove_field => ["thread","ecs","log_file_name","tags","log","file","offset","host","agent","input","@version"]
  }
  # If trace_id could not be extracted (the split produced no second element), set it to empty
  if ![trace_id] or [trace_id] == "%{[thread][1]}" {
    mutate {
      replace => { "trace_id" => "" }
    }
  }
  # If log_message is empty, fall back to the raw message
  if ![log_message] or [log_message] == "-" {
    mutate {
      # copy the value of the message field into log_message
      add_field => { "log_message" => "%{message}" }
    }
  }
  # Parse the timestamp extracted by the grok filter above; yyyy-MM-dd HH:mm:ss.SSS is the expected format
  if [timestamp] and [timestamp] != "-" {
    date {
      match    => ["timestamp", "yyyy-MM-dd HH:mm:ss.SSS"]
      target   => "@timestamp"
      timezone => "+08:00"
    }
  }
}
output {
  elasticsearch {
    hosts => "127.0.0.1:9200"
    index => "%{index_name}-%{+YYYY.MM.dd}"
  }
}
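Assuming the pipeline above is saved as, say, /etc/logstash/conf.d/filebeat.conf (the filename is your choice) and Logstash was installed from the same APT repository, its syntax can be checked and the service restarted as follows:
sudo /usr/share/logstash/bin/logstash --config.test_and_exit -f /etc/logstash/conf.d/filebeat.conf
sudo systemctl restart logstash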
6. Elasticsearch configuration
Because Logstash writes to dynamically named indices (%{index_name}-YYYY.MM.dd), Elasticsearch must be allowed to auto-create them. In elasticsearch.yml, extend action.auto_create_index with the index name patterns used above (logname1* and logname2* are placeholders for your own patterns):
action.auto_create_index: .monitoring*,.watches,.triggered_watches,.watcher-history*,.ml*,logname1*,logname2*
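Alternatively, since action.auto_create_index is a dynamic cluster setting, it can be applied at runtime through the cluster settings API instead of editing elasticsearch.yml (the patterns below are the same placeholders as above):
curl -X PUT "http://127.0.0.1:9200/_cluster/settings" -H 'Content-Type: application/json' -d'
{
  "persistent": {
    "action.auto_create_index": ".monitoring*,.watches,.triggered_watches,.watcher-history*,.ml*,logname1*,logname2*"
  }
}'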