We are going to install and configure Filebeat to collect log data from the Kubernetes cluster and ship it onward to Elasticsearch. Filebeat is a lightweight log-shipping agent, and it can also be configured with modules that parse and visualize the log formats of specific applications (databases, Nginx, and so on).
Like Metricbeat, Filebeat needs a configuration file that defines how to connect to Elasticsearch and Kibana, and how logs are to be collected and parsed.
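For reference, a minimal standalone filebeat.yml that ships straight to Elasticsearch and loads the Kibana dashboards would look roughly like the sketch below; the hosts are placeholders, and in this post we ship to Kafka instead (see the ConfigMap that follows).

filebeat.inputs:
- type: log                        # tail plain log files
  paths:
    - /var/log/*.log
output.elasticsearch:
  hosts: ["10.168.101.77:9200"]    # Elasticsearch HTTP endpoint (placeholder)
setup.kibana:
  host: "10.168.101.77:5601"       # used by `filebeat setup` to load dashboards (placeholder)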
The resource objects below are what we use for log collection here.
1. Filebeat configuration
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: kube-logging
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.config:
      inputs:
        path: ${path.config}/inputs.d/*.yml
        reload.enabled: false
      modules:
        path: ${path.config}/modules.d/*.yml
        reload.enabled: false
    #output.logstash:
    #  hosts: ["10.168.101.77:8080","10.168.101.78:8080","10.168.101.79:8080"]
    output.kafka:
      hosts: ["10.168.101.77:9092","10.168.101.78:9092","10.168.101.79:9092"]
      enabled: true
      topic: "dev"
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-inputs
  namespace: kube-logging
  labels:
    k8s-app: filebeat
data:
  kubernetes.yml: |-
    - type: docker
      containers.ids:
      - "*"
      processors:
        - add_kubernetes_metadata:
            in_cluster: true
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: kube-logging
  labels:
    k8s-app: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      containers:
      - name: filebeat
        image: elastic/filebeat:7.13.0
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        env:
        - name: ELASTICSEARCH_HOST
          value: 10.168.101.77
        - name: ELASTICSEARCH_PORT
          value: "9200"
        securityContext:
          runAsUser: 0
          # If using Red Hat OpenShift uncomment this:
          #privileged: true
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
        - name: inputs
          mountPath: /usr/share/filebeat/inputs.d
          readOnly: true
        - name: data
          mountPath: /usr/share/filebeat/data
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      volumes:
      - name: config
        configMap:
          defaultMode: 0600
          name: filebeat-config
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: inputs
        configMap:
          defaultMode: 0600
          name: filebeat-inputs
      # data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
      - name: data
        hostPath:
          path: /var/lib/filebeat-data
          type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
- kind: ServiceAccount
  name: filebeat
  namespace: kube-logging
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
  resources:
  - namespaces
  - pods
  - nodes
  verbs:
  - get
  - watch
  - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: kube-logging
  labels:
    k8s-app: filebeat
---
Apply the manifest and check that the DaemonSet Pods come up (note the deprecation warnings: on Kubernetes 1.22+ the RBAC objects must use rbac.authorization.k8s.io/v1 instead):
kubectl apply -f filebeat.yaml
configmap/filebeat-config unchanged
configmap/filebeat-inputs unchanged
daemonset.apps/filebeat configured
Warning: rbac.authorization.k8s.io/v1beta1 ClusterRoleBinding is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRoleBinding
clusterrolebinding.rbac.authorization.k8s.io/filebeat unchanged
Warning: rbac.authorization.k8s.io/v1beta1 ClusterRole is deprecated in v1.17+, unavailable in v1.22+; use rbac.authorization.k8s.io/v1 ClusterRole
clusterrole.rbac.authorization.k8s.io/filebeat unchanged
serviceaccount/filebeat unchanged
kubectl get pods -n kube-logging
NAME             READY   STATUS    RESTARTS   AGE
filebeat-58b8s   1/1     Running   0          91s
filebeat-5sj2c   1/1     Running   0          115s
filebeat-k7qdl   1/1     Running   0          104s
filebeat-xpgrs   1/1     Running   0          81s
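Before wiring up Logstash, it is worth confirming that events are actually landing in Kafka. A quick check with the console consumer that ships with Kafka, run from one of the brokers (the install path is an assumption; adjust it to your environment):

bin/kafka-console-consumer.sh \
  --bootstrap-server 10.168.101.77:9092 \
  --topic dev \
  --max-messages 5    # print a few of the JSON events published by Filebeat, then exit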
2. Logstash configuration
I am running a single-node Logstash here rather than deploying it on Kubernetes. For completeness, the corresponding Kubernetes YAML is shown first; my actual standalone configuration follows.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: logstash
  namespace: kube-logging
spec:
  replicas: 1
  selector:
    matchLabels:
      app: logstash
  template:
    metadata:
      labels:
        app: logstash
    spec:
      containers:
      - name: logstash
        image: elastic/logstash:7.13.0
        volumeMounts:
        - name: config
          mountPath: /opt/logstash/config/containers.conf
          subPath: containers.conf
        env:
        - name: "XPACK_MONITORING_ELASTICSEARCH_URL"
          value: "http://elasticsearch:9200"
        command:
        - "/bin/sh"
        - "-c"
        - "/opt/logstash/bin/logstash -f /opt/logstash/config/containers.conf"
      volumes:
      - name: config
        configMap:
          name: logstash-k8s-config
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: logstash
  name: logstash
  namespace: kube-logging
spec:
  ports:
  - port: 8080
    targetPort: 8080
    nodePort: 32003
  selector:
    app: logstash
  type: NodePort
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-k8s-config
  namespace: kube-logging
data:
  containers.conf: |
    input {
      beats {
        port => 8080  # port that Filebeat connects to
      }
    }
    filter {
      mutate {
        remove_field => ["agent","@version","container","[kubernetes][labels]","[kubernetes][node]","[kubernetes][pod][ip]","[kubernetes][pod][uid]","tag","log","stream","_score","_type"]
      }
      if [kubernetes][namespace] == "x" {  # "x" is a redacted placeholder; substitute your real namespace names
        mutate {
          add_field => {
            "[@metadata][target_index]" => "%{[kubernetes][container][name]}-%{+YYYY.MM.dd}"
            #"[@metadata][target_index]" => 'test'
          }
        }
      }
      else if [kubernetes][namespace] == "x" {
        mutate {
          add_field => {
            "[@metadata][target_index]" => "dev-%{[kubernetes][container][name]}-%{+YYYY.MM.dd}"
          }
        }
      }
      else if [kubernetes][namespace] == "x" {
        mutate {
          add_field => {
            "[@metadata][target_index]" => "dev-%{[kubernetes][container][name]}-%{+YYYY.MM.dd}"
          }
        }
      }
      else if [kubernetes][namespace] == "x" {
        mutate {
          add_field => {
            "[@metadata][target_index]" => "dev-%{[kubernetes][container][name]}-%{+YYYY.MM.dd}"
          }
        }
      }
      else {
        drop {}
      }
    }
    output {
      # stdout {
      #   codec => rubydebug
      # }
      elasticsearch {
        hosts => "elasticsearch:9200"
        index => "%{[@metadata][target_index]}"
      }
    }
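If you do deploy Logstash this way, apply the manifest and point Filebeat at the beats port instead of Kafka, i.e. re-enable the commented-out output.logstash section of filebeat-config. A sketch, assuming the standard in-cluster Service DNS name:

kubectl apply -f logstash.yaml
# In filebeat.yml, from inside the cluster:
#output.logstash:
#  hosts: ["logstash.kube-logging.svc.cluster.local:8080"]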
Since my Logstash is not deployed on Kubernetes, here is the configuration file I actually use:
cat container.conf
input {
  kafka {
    bootstrap_servers => "10.168.101.77:9092,10.168.101.78:9092,10.168.101.79:9092"
    topics => ["dev"]
    consumer_threads => 1
    codec => json
  }
}
filter {
  mutate {
    remove_field => ["agent","@version","container","[kubernetes][labels]","[kubernetes][node]","[kubernetes][pod][ip]","[kubernetes][pod][uid]","tag","log","stream","_score","_type"]
  }
  if [kubernetes][namespace] == "es-backend-dev" {
    mutate {
      add_field => {
        "[@metadata][target_index]" => "dev-%{[kubernetes][container][name]}-%{+YYYY.MM.dd}"
      }
    }
  }
  else if [kubernetes][namespace] == "es-frontend-dev" {
    mutate {
      add_field => {
        "[@metadata][target_index]" => "dev-%{[kubernetes][container][name]}-%{+YYYY.MM.dd}"
      }
    }
  }
  else if [kubernetes][namespace] == "es-influxdb-dev" {
    mutate {
      add_field => {
        "[@metadata][target_index]" => "dev-db%{[kubernetes][container][name]}-%{+YYYY.MM.dd}"
      }
    }
  }
  else if [kubernetes][namespace] == "es-kafka-dev" {
    mutate {
      add_field => {
        "[@metadata][target_index]" => "dev-db%{[kubernetes][container][name]}-%{+YYYY.MM.dd}"
      }
    }
  }
  else if [kubernetes][namespace] == "es-mongodb-dev" {
    mutate {
      add_field => {
        "[@metadata][target_index]" => "dev-db%{[kubernetes][container][name]}-%{+YYYY.MM.dd}"
      }
    }
  }
  else if [kubernetes][namespace] == "es-mqtt-dev" {
    mutate {
      add_field => {
        "[@metadata][target_index]" => "dev-db%{[kubernetes][container][name]}-%{+YYYY.MM.dd}"
      }
    }
  }
  else if [kubernetes][namespace] == "es-mysql-dev" {
    mutate {
      add_field => {
        "[@metadata][target_index]" => "dev-db%{[kubernetes][container][name]}-%{+YYYY.MM.dd}"
      }
    }
  }
  else if [kubernetes][namespace] == "es-redis-dev" {
    mutate {
      add_field => {
        "[@metadata][target_index]" => "dev-db%{[kubernetes][container][name]}-%{+YYYY.MM.dd}"
      }
    }
  }
  else if [kubernetes][namespace] == "websocket-dev" {
    mutate {
      add_field => {
        "[@metadata][target_index]" => "dev-db%{[kubernetes][container][name]}-%{+YYYY.MM.dd}"
      }
    }
  }
  else {
    drop {}
  }
}
output {
  elasticsearch {
    hosts => ["10.168.101.77:9200","10.168.101.78:9200","10.168.101.79:9200"]
    index => "%{[@metadata][target_index]}"
  }
}
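With the file in place, validate and start Logstash against it, then check that the daily dev-* indices are being created in Elasticsearch (paths assume a tarball install; adjust to your layout):

bin/logstash -f container.conf --config.test_and_exit     # validate the pipeline first
bin/logstash -f container.conf                            # then run it in the foreground
curl -s "http://10.168.101.77:9200/_cat/indices/dev-*?v"  # list the indices Logstash has created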