怎么让springboot的日志被es采集到
log4j2.yml
# Log4j2 configuration (YAML). Writes rolling log files whose names embed
# node / namespace / pod metadata so the downstream Logstash filter can
# recover Kubernetes context from the file path.
Configuration:
  # Log4j2's own internal status logging; consider "warn" in production.
  status: debug
  Properties:
    Property:
      - name: log.path
        value: /var/log/qiusuo
        # value: ./log
      - name: project.name
        value: gatewayservice
  Appenders:
    RollingFile:
      - name: ROLLING_FILE
        ignoreExceptions: false
        # "#"-separated segments: qiusuo#<project>#<node>#<namespace>#<pod>.log
        # The env: lookups are injected via the Deployment's downward API.
        fileName: "${log.path}/qiusuo#${project.name}#${env:MY_NODE_NAME}#${env:MY_POD_NAMESPACE}#${env:MY_POD_NAME}.log"
        filePattern: "${log.path}/qiusuo#${project.name}#${env:MY_NODE_NAME}#${env:MY_POD_NAMESPACE}#${env:MY_POD_NAME}#%d{yyyy-MM-dd}.log"
        append: true
        PatternLayout:
          # Matches the grok pattern used in logstash.conf — keep them in sync.
          pattern: "%d{yyyy-MM-dd HH:mm:ss}:${project.name} %p %t [%F:%L] - %m%n"
        Policies:
          TimeBasedTriggeringPolicy:
            modulate: true
            interval: 1
        DefaultRolloverStrategy:
          max: 100
  Loggers:
    Root:
      level: info
      AppenderRef:
        - ref: ROLLING_FILE
在 Spring Boot 的 application.yml 中添加
logging:
  # Point Spring Boot at the Log4j2 YAML config on the classpath.
  config: classpath:log4j2.yml
Springboot Deployment配置
apiVersion: apps/v1
kind: Deployment
metadata:
  name: qiusuo-gateway-deployment
  labels:
    app: qiusuo-gateway
spec:
  replicas: 2
  selector:
    matchLabels:
      app: qiusuo-gateway
  template:
    metadata:
      labels:
        app: qiusuo-gateway
    spec:
      containers:
        - name: qiusuo-gateway
          image: www.harbor.mobi/bcs_dev/es-web:latest
          imagePullPolicy: Always
          # Downward-API env vars; Log4j2 embeds them in the log file name so
          # Logstash can recover node/namespace/pod from the path.
          env:
            - name: MY_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: MY_POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
            - name: log-path
              mountPath: /var/log/qiusuo
          ports:
            - containerPort: 8040
      volumes:
        # hostPath so the node-level Filebeat DaemonSet can tail these files.
        - name: log-path
          hostPath:
            path: /var/log/qiusuo
---
apiVersion: v1
kind: Service
metadata:
  name: qiusuo-gateway-svc
spec:
  ports:
    - name: 8040-8040
      port: 8040
      protocol: TCP
      targetPort: 8040
  selector:
    app: qiusuo-gateway
  sessionAffinity: None
  type: ClusterIP
filebeat.yml
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: bcs-nms
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.inputs:
    # Input 1: container stdout/stderr collected by the kubelet.
    - type: container
      paths:
        - /var/log/containers/*.log
      fields:
        log-source: stdout
        key1: value1
      processors:
        - add_kubernetes_metadata:
            default_indexers.enabled: true
            default_matchers.enabled: true
            host: ${NODE_NAME}
            matchers:
              - logs_path:
                  logs_path: "/var/log/containers/"
    # Input 2: application log files written to the hostPath volume.
    - type: log
      paths:
        - /var/log/qiusuo/*.log
      fields:
        log-source: logfile
      # Join stack traces: any line NOT starting with a "YYYY-MM-DD" date is
      # appended to the previous event.
      multiline.pattern: '^[0-9]{4}-[0-9]{2}-[0-9]{2}'
      multiline.negate: true
      multiline.match: after
      processors:
        - add_kubernetes_metadata:
            default_indexers.enabled: true
            default_matchers.enabled: true
            host: ${NODE_NAME}
            matchers:
              - logs_path:
                  logs_path: "/var/log/qiusuo/"

    # To enable hints based autodiscover, remove `filebeat.inputs` configuration and uncomment this:
    #filebeat.autodiscover:
    #  providers:
    #    - type: kubernetes
    #      node: ${NODE_NAME}
    #      hints.enabled: true
    #      hints.default_config:
    #        type: container
    #        paths:
    #          - /var/log/containers/*${data.kubernetes.container.id}.log

    processors:
      - add_cloud_metadata:
      - add_host_metadata:

    cloud.id: ${ELASTIC_CLOUD_ID}
    cloud.auth: ${ELASTIC_CLOUD_AUTH}

    # Events are shipped to Logstash (which does the path parsing), not
    # directly to Elasticsearch.
    #output.elasticsearch:
    #  hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}']
    #  username: ${ELASTICSEARCH_USERNAME}
    #  password: ${ELASTICSEARCH_PASSWORD}
    output.logstash:
      hosts: ['logstash:5044']
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: bcs-nms
  labels:
    k8s-app: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: filebeat
          image: docker.elastic.co/beats/filebeat:7.9.3
          args: ["-c", "/etc/filebeat.yml", "-e"]
          env:
            - name: ELASTICSEARCH_HOST
              value: "10.96.123.133"
            - name: ELASTICSEARCH_PORT
              value: "9200"
            - name: ELASTICSEARCH_USERNAME
              value: elastic
            # NOTE(review): plaintext credential committed in the manifest —
            # prefer a Kubernetes Secret referenced via secretKeyRef.
            - name: ELASTICSEARCH_PASSWORD
              value: WL1z7p9HS45Xz6C943N0jkqk
            # Explicit empty strings (a bare `value:` parses as null).
            - name: ELASTIC_CLOUD_ID
              value: ""
            - name: ELASTIC_CLOUD_AUTH
              value: ""
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          securityContext:
            runAsUser: 0
            # If using Red Hat OpenShift uncomment this:
            #privileged: true
          resources:
            limits:
              memory: 200Mi
            requests:
              cpu: 100m
              memory: 100Mi
          volumeMounts:
            - name: config
              mountPath: /etc/filebeat.yml
              readOnly: true
              subPath: filebeat.yml
            - name: data
              mountPath: /usr/share/filebeat/data
            - name: varlibdockercontainers
              mountPath: /var/lib/docker/containers
              readOnly: true
            - name: varlog
              mountPath: /var/log
              readOnly: true
      volumes:
        - name: config
          configMap:
            defaultMode: 0640
            name: filebeat-config
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
        - name: varlog
          hostPath:
            path: /var/log
        # data folder stores a registry of read status for all files, so we
        # don't send everything again on a Filebeat pod restart
        - name: data
          hostPath:
            # When filebeat runs as non-root user, this directory needs to be
            # writable by group (g+w).
            path: /var/lib/filebeat-data
            type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
  # NOTE(review): ClusterRoleBinding is cluster-scoped; this namespace field
  # is ignored by the API server and can be removed.
  namespace: bcs-nms
subjects:
  - kind: ServiceAccount
    name: filebeat
    namespace: bcs-nms
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
  # Read access needed by the add_kubernetes_metadata processor.
  - apiGroups: [""]  # "" indicates the core API group
    resources:
      - namespaces
      - pods
    verbs:
      - get
      - watch
      - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: filebeat
namespace: bcs-nms
labels:
k8s-app: filebeat
---
logstash.yml es的https访问,需要配置cacert,具体见文档。
apiVersion: v1
kind: Pod
metadata:
  name: logstash-pod
  namespace: bcs-nms
  labels:
    app: logstash-pod
spec:
  containers:
    - image: logstash:6.5.4
      name: logstash-pod
      imagePullPolicy: IfNotPresent
      env:
        - name: ELASTICSEARCH_HOSTS
          value: https://elastic-es-http.logging.es.local:9200
      ports:
        - containerPort: 5044
      volumeMounts:
        # Pipeline definition from the logstash-configmap ConfigMap.
        - name: logstash-pipeline
          mountPath: /usr/share/logstash/pipeline/
        # CA certificate for the HTTPS connection to Elasticsearch
        # (referenced by `cacert` in logstash.conf).
        - name: elastic-crt
          mountPath: /usr/share/logstash/config/certificates/
      command:
        - logstash
  # Map the ES certificate's hostname to its ClusterIP so TLS hostname
  # verification succeeds inside the pod.
  hostAliases:
    - hostnames:
        - elastic-es-http.logging.es.local
      ip: 10.96.123.133
  volumes:
    - name: logstash-pipeline
      configMap:
        name: logstash-configmap
        items:
          - key: logstash.conf
            path: logstash.conf
    - name: elastic-crt
      secret:
        name: elastic-es-http-certs-public
        items:
          - key: ca.crt
            path: ca.crt
---
apiVersion: v1
kind: Service
metadata:
  name: logstash
  namespace: bcs-nms
spec:
  ports:
    - name: 5044-5044
      port: 5044
      protocol: TCP
      targetPort: 5044
  type: ClusterIP
  selector:
    app: logstash-pod
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-configmap
  namespace: bcs-nms
data:
  logstash.conf: |
    input {
      beats {
        port => 5044
      }
    }
    filter {
      if [fields][log-source] == "logfile" {
        # Parses the Log4j2 pattern:
        # "<yyyy-MM-dd HH:mm:ss>:<service> <level> <thread> [<file>:<line>] - <msg>"
        grok {
          match => { "message" => "%{TIMESTAMP_ISO8601:time}:%{WORD:service} %{LOGLEVEL:loglevel} %{DATA:thread_name} \[%{DATA:src_file}:%{NUMBER:line}\] - %{GREEDYDATA:msg}" }
        }
        mutate {
          add_field => { "tmp_path" => "%{[log][file][path]}" }
        }
        # File name layout: <dir>/qiusuo#<project>#<node>#<namespace>#<pod>.log
        # Array elements must use bracketed field-reference syntax
        # (%{[tmp_path][2]}), not %{tmp_path[2]}.
        mutate {
          split => ["tmp_path", "#"]
          add_field => { "kubernetes.node.name" => "%{[tmp_path][2]}" }
          add_field => { "kubernetes.namespace" => "%{[tmp_path][3]}" }
          add_field => { "tmp_podname" => "%{[tmp_path][4]}" }
        }
        mutate {
          split => ["tmp_podname", "."]
          add_field => { "kubernetes.pod.name" => "%{[tmp_podname][0]}" }
        }
        mutate {
          remove_field => [ "tmp_path", "tmp_podname" ]
        }
      }
    }
    output {
      elasticsearch {
        hosts => ["https://elastic-es-http.logging.es.local:9200"]
        index => "logstash-%{+YYYY.MM.dd}"
        # CA certificate for the HTTPS connection (mounted from the
        # elastic-es-http-certs-public secret).
        cacert => '/usr/share/logstash/config/certificates/ca.crt'
        user => 'elastic'
        password => 'WL1z7p9HS45Xz6C943N0jkqk'
        ssl => true
      }
      stdout { codec => rubydebug }
    }
因为 ES 启用了 HTTPS,所以配置时需要注意证书相关设置。最终效果: