EFK
Elasticsearch Kibana Filebeat Logstash選型
efk名稱空間 | zookeeper kafka es kibana .. |
dev名稱空間 | filebeat |
數(shù)據(jù)存儲 | nfs /data/nfs-volume/ |
graph TD
a(Filebeat 日志收集)-->|logging topic|b(kafka集群)
c(zookeeper集群)-->|分布式協(xié)調(diào),調(diào)度|b
b-->d
subgraph logstash
d(接受input->過濾-filter->輸出output)
end
d-->e(elasticsearch集群 日志存儲與索引)
e-->f(Kibana 日志可視化管理)
節(jié)點數(shù) | |
---|---|
zookeeper | 1或3 |
kafka | 1或3 |
es | 1或3 |
二進制-ES
jdk8,未來版本要jdk11
# Stage the installation package
mkdir -p /opt/src
mv elasticsearch-7.10.0-linux-x86_64.tar.gz /opt/src
# Unpack and create the data directory
tar axf /opt/src/elasticsearch-7.10.0-linux-x86_64.tar.gz -C /opt/
ln -ns /opt/elasticsearch-7.10.0/ /opt/elasticsearch
mkdir -p /data/elasticsearch
# Create an unprivileged service account (no login shell, no home directory)
useradd -s /sbin/nologin -M es
chown -R es.es /opt/elasticsearch/*
chown -R es.es /data/elasticsearch
# File-descriptor and memlock limits for the es user
# (original lines were corrupted by stray ":wq" editor keystrokes; "unlimitd" typo fixed;
#  soft nofile added — raising only the hard limit does not raise the effective limit)
cat >/etc/security/limits.d/es.conf<<EOF
es hard nofile 65535
es soft nofile 65535
es soft fsize unlimited
es hard memlock unlimited
es soft memlock unlimited
EOF
# Kernel tuning required by Elasticsearch (mmap area count)
sysctl -w vm.max_map_count=262144
echo "vm.max_map_count=262144" >> /etc/sysctl.conf
elasticsearch.yml
# Minimal single-node elasticsearch.yml
# NOTE(review): ">>" appends — running this twice leaves duplicate keys; confirm intent
cat >>/opt/elasticsearch/config/elasticsearch.yml<<\EOF
cluster.initial_master_nodes: ["k8s-slave01"]
cluster.name: es.es.com
node.name: k8s-slave01
path.data: /data/elasticsearch/data
path.logs: /data/elasticsearch/logs
bootstrap.memory_lock: true # 在啟動時鎖定內(nèi)存,不用swap
network.host: 10.0.0.3
http.port: 9200
EOF
安全配置
# 1. Generate certificates that allow the nodes to communicate securely
bin/elasticsearch-certutil cert -out config/elastic-certificates.p12 -pass ""
# 2. Add to config/elasticsearch.yml on every node
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: elastic-certificates.p12
# 3. Sync the certificate to the other two nodes
#    (version fixed to 7.10.0 — the original said 7.1.0, inconsistent with this install)
cp ../elasticsearch-7.10.0-master/config/* config/
# 4. Cluster passwords: once the master node is running, set passwords for the built-in users
bin/elasticsearch-setup-passwords auto
# 5. Kibana settings — these belong in config/kibana.yml (not elasticsearch.yml)
elasticsearch.username: "elastic" # highest-privilege built-in user
elasticsearch.password: "xxxx"
curl --user elastic:xxxx localhost:9200/_cat/indices?v
jvm.options
# Heap sizing: keep Xms == Xmx; in production never exceed ~32G
# (beyond that the JVM loses compressed object pointers)
-Xms512m
-Xmx512m
啟動
# Start daemonized (-d) as the unprivileged es user
su -s /bin/bash -c "/opt/elasticsearch/bin/elasticsearch -d" es
k8s-ES單機
docker pull docker.elastic.co/elasticsearch/elasticsearch:7.10.0
# Single-node quick test (note: this example uses tag 7.5.2, not the 7.10.0 pulled above):
# docker run -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:7.5.2
# NFS-backed data directory
mkdir /data/nfs-volume/elasticsearch
# Label the node that should host EFK pods
kubectl label node k8s-slave02 efk=true
# Single-node setup: secret holding the "elastic" superuser password
kubectl create secret generic \
elasticsearch -n efk \
--from-literal=elastic-passwd=whileiselastic
# Single-node Elasticsearch: ConfigMap + Secret + StatefulSet + headless Service.
# Indentation restored — the pasted manifest had lost all leading whitespace.
cat >elasticsearch-single.yaml<<\EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: elasticsearch
  namespace: efk
data:
  elasticsearch.yml: |
    network.host: 0.0.0.0
    path.data: /data
    xpack.security.enabled: true
    http.port: 9200
---
apiVersion: v1
data:
  elastic-passwd: d2hpbGVpc2VsYXN0aWM=
kind: Secret
metadata:
  name: elasticsearch
  namespace: efk
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    name: elasticsearch
  name: elasticsearch
  namespace: efk
spec:
  replicas: 1
  serviceName: elasticsearch
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      name: elasticsearch
  template:
    metadata:
      labels:
        name: elasticsearch
    spec:
      # nodeSelector:
      #   efk: "true"  # pin to a labeled node; adjust per environment
      initContainers:
      # Kernel setting required by Elasticsearch
      - name: init
        image: alpine:3.6
        securityContext:
          privileged: true
        command: ["sh", "-c", "sysctl -w vm.max_map_count=262144"]
        imagePullPolicy: IfNotPresent
      # Pre-create the per-pod data dir on NFS, owned by uid 1000 (elasticsearch)
      - name: fix-permissions
        image: alpine:3.6
        securityContext:
          privileged: true
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        command: ["sh", "-c", "mkdir -p /mnt/$NAMESPACE/$POD_NAME && chown -R 1000:1000 /mnt/$NAMESPACE/$POD_NAME"]
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: es-data
          mountPath: /mnt
      containers:
      - name: elasticsearch
        securityContext:
          capabilities:
            add:
            - IPC_LOCK
            - SYS_RESOURCE
        image: docker.elastic.co/elasticsearch/elasticsearch:7.10.0
        command: ["bash", "-c", "ulimit -l unlimited && exec su elasticsearch docker-entrypoint.sh"]
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: ES_JAVA_OPTS
          value: "-Xms1g -Xmx1g"
        - name: discovery.type
          value: single-node
        - name: ELASTIC_PASSWORD
          valueFrom:
            secretKeyRef:
              name: elasticsearch
              key: elastic-passwd
        resources:
          limits:
            cpu: 1000m
            memory: 1.5Gi
          requests:
            cpu: 500m
            memory: 1Gi
        ports:
        - containerPort: 9200
          protocol: TCP
        - containerPort: 9300
          protocol: TCP
        volumeMounts:
        - name: es-config
          mountPath: "/usr/share/elasticsearch/config/elasticsearch.yml"
          subPath: elasticsearch.yml
        - name: es-data
          mountPath: "/data"
          subPathExpr: $(NAMESPACE)/$(POD_NAME)
      volumes:
      - name: es-config
        configMap:
          name: elasticsearch
          defaultMode: 0644
      - name: es-data
        nfs:
          server: 10.0.0.2
          path: /data/nfs-volume/elasticsearch/
---
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  namespace: efk
spec:
  ports:
  - name: server
    port: 9200
    protocol: TCP
    targetPort: 9200
  - name: cluster
    port: 9300
    protocol: TCP
    targetPort: 9300
  selector:
    name: elasticsearch
  clusterIP: None
EOF
# Verify — all three URL forms reach the same headless service (run from inside the cluster)
curl -u elastic:whileiselastic elasticsearch:9200
curl http://elastic:whileiselastic@elasticsearch:9200
curl http://elastic:whileiselastic@elasticsearch.efk.svc.cluster.local:9200
k8s-ES集群
# Generate certificates that allow the cluster nodes to communicate securely
bin/elasticsearch-certutil cert -out elastic-certificates.p12 -pass ""
# Secret carrying both the superuser password and the transport certificate
# (duplicate trailing "-n efk" removed — the namespace was already given)
kubectl create secret generic \
elasticsearch -n efk \
--from-literal=elastic-passwd=whileiselastic \
--from-file=elastic-cert=./elastic-certificates.p12
# Three-node Elasticsearch cluster: ConfigMap + StatefulSet + headless Service.
# Indentation restored — the pasted manifest had lost all leading whitespace.
cat >elasticsearch-cluster.yaml<<\EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: elasticsearch
  namespace: efk
data:
  elasticsearch.yml: |
    cluster.name: "${NAMESPACE}"
    node.name: "${POD_NAME}"
    network.host: 0.0.0.0
    discovery.seed_hosts: ["elasticsearch.efk.svc.cluster.local"]
    cluster.initial_master_nodes: ["elasticsearch-0","elasticsearch-1","elasticsearch-2"]
    bootstrap.memory_lock: true
    path.data: /data
    path.logs: /data
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    name: elasticsearch
  name: elasticsearch
  namespace: efk
spec:
  # Must match the three masters listed in cluster.initial_master_nodes above;
  # with replicas: 1 the cluster could never bootstrap a quorum.
  replicas: 3
  serviceName: elasticsearch
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      name: elasticsearch
  template:
    metadata:
      labels:
        name: elasticsearch
    spec:
      nodeSelector:
        elasticsearch: "true"  # pin to labeled nodes; adjust per environment
      initContainers:
      # Kernel setting required by Elasticsearch
      - name: init
        image: alpine:3.6
        securityContext:
          privileged: true
        command: ["sh", "-c", "sysctl -w vm.max_map_count=262144"]
        imagePullPolicy: IfNotPresent
      # Pre-create the per-pod data dir on NFS, owned by uid 1000 (elasticsearch).
      # Was "$NODE_NAME", which is never defined here — the data volume mounts
      # $(NAMESPACE)/$(POD_NAME), so the directory name must use POD_NAME.
      - name: fix-permissions
        image: alpine:3.6
        securityContext:
          privileged: true
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        command: ["sh", "-c", "mkdir -p /mnt/$NAMESPACE/$POD_NAME && chown -R 1000:1000 /mnt"]
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - name: elasticsearch-data
          mountPath: /mnt
      containers:
      - name: elasticsearch
        securityContext:
          capabilities:
            add:
            - IPC_LOCK
            - SYS_RESOURCE
        # 7.10.0 to match every other component in these notes (was 7.4.2)
        image: docker.elastic.co/elasticsearch/elasticsearch:7.10.0
        command: ["bash", "-c", "ulimit -l unlimited && exec su elasticsearch docker-entrypoint.sh"]
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: ES_JAVA_OPTS
          value: "-Xms512m -Xmx512m"
        resources:
          limits:
            cpu: 1000m
            memory: 1Gi
          requests:
            cpu: 500m
            memory: 800Mi
        ports:
        - containerPort: 9200
          protocol: TCP
        - containerPort: 9300
          protocol: TCP
        volumeMounts:
        - name: es-config
          mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
          subPath: elasticsearch.yml
        - name: elasticsearch-data
          mountPath: /data
          subPathExpr: $(NAMESPACE)/$(POD_NAME)
      volumes:
      - name: es-config
        configMap:
          name: elasticsearch
          defaultMode: 0644
      - name: elasticsearch-data
        nfs:
          server: 10.0.0.2
          path: /data/nfs-volume/elasticsearch/
---
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  namespace: efk
spec:
  ports:
  - name: server
    port: 9200
    protocol: TCP
    targetPort: 9200
  - name: cluster
    port: 9300
    protocol: TCP
    targetPort: 9300
  selector:
    name: elasticsearch
  clusterIP: None
EOF
# 驗證
curl -u elastic:whileiselastic -X GET "elasticsearch:9200/_cat/nodes?v&pretty"
# 待優(yōu)化集群發(fā)現(xiàn)
discovery.seed_providers
#以文件的方式提供主機列表惕鼓,可以動態(tài)修改,而不用重啟節(jié)點(容器化環(huán)境適用)
大規(guī)模集群使用方式
# By default every Elasticsearch node is master-eligible, stores data, and serves queries.
# NOTE(review): node.master / node.data / node.ingest are deprecated in 7.9+
# in favor of node.roles — confirm against the target version.
# Whether the node is eligible to become master
node.master:
# Whether the node stores data
node.data:
# ingest pre-processes documents (filter/transform) before indexing and search
node.ingest:
# Master-eligible + data + ingest (the default configuration)
node.master: true
node.data: true
node.ingest: true
# Dedicated master node
node.master: true
node.data: false
node.ingest: false
# Data node
node.master: false
node.data: true
node.ingest: false
# Coordinating/ingest node — point client code at these nodes
node.master: false
node.data: false
node.ingest: true
# Pure coordinating (query-only) node: for query-heavy clusters this
# shields the data nodes from aggregation-driven memory blowups
node.master: false
node.data: false
node.ingest: false
master節(jié)點:普通服務(wù)器即可(CPU 內(nèi)存 消耗一般)
data 節(jié)點:主要消耗磁盤,內(nèi)存
client | ingest 節(jié)點:普通服務(wù)器即可(如果要進行分組聚合操作的話输钩,建議這個節(jié)點內(nèi)存也分配多一點)
k8s-ES可視化管理
# 谷歌瀏覽器商店安裝 elasticsearch-head插件. 可視化,要配置ingress提供訪問
kubectl create svc clusterip elasticsearch --tcp=9200:9200
elasticsearch基本概念
索引
- 索引(index)是Elasticsearch對邏輯數(shù)據(jù)的邏輯存儲豺型,所以它可以分為更小的部分。
- 可以把索引看成關(guān)系型數(shù)據(jù)庫的表买乃,索引的結(jié)構(gòu)是為快速有效的全文索引準備的姻氨,特別是它不存儲原始值。
- Elasticsearch可以把索引存放在一臺機器或者分散在多臺服務(wù)器上剪验,每個索引有一或多個分片(shard)肴焊,每個分片可以有多個副本(replica)。
文檔
- 存儲在Elasticsearch中的主要實體叫文檔(document)功戚。用關(guān)系型數(shù)據(jù)庫來類比的話娶眷,一個文檔相當(dāng)于數(shù)據(jù)庫表中的一行記錄。
- Elasticsearch和MongoDB中的文檔類似啸臀,都可以有不同的結(jié)構(gòu)届宠,但Elasticsearch的文檔中,相同字段必須有相同類型乘粒。
- 文檔由多個字段組成豌注,每個字段可能多次出現(xiàn)在一個文檔里,這樣的字段叫多值字段(multivalued)灯萍。 每個字段的類型轧铁,可以是文本、數(shù)值竟稳、日期等属桦。字段類型也可以是復(fù)雜類型,一個字段包含其他子文檔或者數(shù) 組他爸。
映射
- 所有文檔寫進索引之前都會先進行分析聂宾,如何將輸入的文本分割為詞條、哪些詞條又會被過濾诊笤,這種行為叫做 映射(mapping)系谐。一般由用戶自己定義規(guī)則。
文檔類型
- 在Elasticsearch中,一個索引對象可以存儲很多不同用途的對象纪他。例如鄙煤,一個博客應(yīng)用程序可以保存文章和評 論。
- 每個文檔可以有不同的結(jié)構(gòu)茶袒。
- 不同的文檔類型不能為相同的屬性設(shè)置不同的類型梯刚。例如,在同一索引中的所有文檔類型中薪寓,一個叫title的字段必須具有相同的類型亡资。
RESTful API
- 在Elasticsearch中,提供了功能豐富的RESTful API的操作向叉,包括基本的CRUD锥腻、創(chuàng)建索引、刪除索引等操作母谎。
創(chuàng)建非結(jié)構(gòu)化索引
- 在Lucene中瘦黑,創(chuàng)建索引是需要定義字段名稱以及字段的類型的,在Elasticsearch中提供了非結(jié)構(gòu)化的索引奇唤,就是不需要創(chuàng)建索引結(jié)構(gòu)幸斥,即可寫入數(shù)據(jù)到索引中,實際上在Elasticsearch底層會進行結(jié)構(gòu)化操作冻记,此操作對用戶是透明的睡毒。
elasticsearch基本操作
創(chuàng)建空索引
PUT /haoke
{
"settings": {
"index": {
"number_of_shards": "2",
"number_of_replicas": "0"
}
}
}
k8s-kibana
docker pull docker.elastic.co/kibana/kibana:7.10.0
# Kibana: Service + Deployment + Ingress.
# Indentation restored — the pasted manifest had lost all leading whitespace.
cat >kibana.yaml<<EOF
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: efk
  labels:
    name: kibana
spec:
  ports:
  - port: 5601
    protocol: TCP
    targetPort: ui
  selector:
    name: kibana
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: efk
  labels:
    name: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      name: kibana
  template:
    metadata:
      labels:
        name: kibana
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      containers:
      - name: kibana
        image: docker.elastic.co/kibana/kibana:7.10.0
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        env:
        - name: ELASTICSEARCH_HOSTS
          value: http://elasticsearch:9200
        - name: SERVER_NAME
          value: kibana.zs.com
        - name: I18N_LOCALE
          value: zh-CN
        - name: ELASTICSEARCH_USERNAME
          value: elastic
        - name: ELASTICSEARCH_PASSWORD
          valueFrom:
            secretKeyRef:
              name: elasticsearch
              key: elastic-passwd
        # was "PATHA_DATA" — typo; Kibana maps PATH_DATA to path.data
        - name: PATH_DATA
          value: /data
        ports:
        - containerPort: 5601
          name: ui
          protocol: TCP
        # livenessProbe:
        #   httpGet:
        #     path: /api/status
        #     port: ui
        #   initialDelaySeconds: 5
        #   timeoutSeconds: 10
        # readinessProbe:
        #   httpGet:
        #     path: /api/status
        #     port: ui
        #   initialDelaySeconds: 5
        #   timeoutSeconds: 10
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kibana
  namespace: efk
spec:
  rules:
  - host: kibana.zs.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: kibana
            port:
              number: 5601
EOF
# 在stack management->Kibana(索引模式)-->創(chuàng)建 索引模式
k8s-filebeat
docker pull elastic/filebeat:7.10.0
收集pod中 nginx訪問日志
# fields_under_root: 如果值為true,那么fields存儲在輸出文檔的頂級位置
# fields 指定可選字段以將其他信息添加到輸出中
# multiline.pattern 合并行,通過正則匹配開頭
# nginx + filebeat sidecar shipping JSON access logs to kafka.
# Indentation restored — the pasted manifests had lost all leading whitespace —
# and the missing "---" between the ConfigMap and the Deployment added
# (without it the two documents merge into one invalid document).
apiVersion: v1
kind: Service
metadata:
  name: nginx-externalname
  namespace: dev
spec:
  # ExternalName alias so the dev namespace can reach kafka in efk
  type: ExternalName
  externalName: kafka.efk.svc.cluster.local
  ports:
  - name: kafka
    port: 9092
    targetPort: 9092
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
  namespace: dev
spec:
  type: ClusterIP
  ports:
  - name: http
    port: 80
    targetPort: 80
  clusterIP: None
  selector:
    name: nginx
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx
  namespace: dev
data:
  nginx.conf: |
    user nginx;
    worker_processes 1;
    error_log /var/log/nginx/error.log warn;
    pid /var/run/nginx.pid;
    events {
        worker_connections 1024;
    }
    http {
        include /etc/nginx/mime.types;
        default_type application/octet-stream;
        map $http_x_forwarded_for $clientRealIp {
            "" $remote_addr;
            ~^(?P<firstAddr>[0-9\.]+),?.*$ $firstAddr;
        }
        log_format nginx_log_json '{'
            '"accessip_list":"$proxy_add_x_forwarded_for",'
            '"client_ip":"$clientRealIp",'
            '"http_host":"$host",'
            '"@timestamp":"$time_iso8601",'
            '"method":"$request_method",'
            '"url":"$request_uri",'
            '"status":"$status",'
            '"http_referer":"$http_referer",'
            '"body_bytes_sent":"$body_bytes_sent",'
            '"request_time":"$request_time",'
            '"http_user_agent":"$http_user_agent",'
            '"total_bytes_sent":"$bytes_sent",'
            '"server_ip":"$server_addr"'
            '}';
        access_log /var/log/nginx/access.log nginx_log_json;
        sendfile on;
        #tcp_nopush on;
        keepalive_timeout 65;
        #gzip on;
        include /etc/nginx/conf.d/*.conf;
    }
  filebeat.yml: |
    filebeat.inputs:
    - type: log
      enabled: true
      fields_under_root: true
      fields:
        log_topic: k8s-dev-fb-nginx
      paths:
      - "/log/*.log"
      - "/log/*/*.log"
      scan_frequency: 120s
      max_bytes: 10485760
      multiline.type: pattern
      multiline.pattern: '^{'
      multiline.negate: true
      multiline.match: after
      # was "multilinde.max_lines" — typo, the option was silently ignored
      multiline.max_lines: 100
    output.kafka:
      enabled: true
      hosts: ["nginx-externalname.dev.svc.cluster.local:9092"]
      topic: "%{[log_topic]}"
      partition.round_robin:
        reachable_only: false
      required_acks: 1
      max_message_bytes: 1000000
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: dev
  name: nginx
  labels:
    name: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      name: nginx
  template:
    metadata:
      labels:
        name: nginx
    spec:
      containers:
      - name: nginx
        image: ikubernetes/myapp:v7
        imagePullPolicy: IfNotPresent
        ports:
        - name: nginx
          containerPort: 80
          protocol: TCP
        volumeMounts:
        - name: timezone
          mountPath: /etc/localtime
        - name: log
          mountPath: /var/log/nginx
        - name: conf
          mountPath: "/etc/nginx/nginx.conf"
          readOnly: true
          subPath: nginx.conf
      # Sidecar: ships the shared log volume to kafka
      - name: filebeat
        image: elastic/filebeat:7.10.0
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        volumeMounts:
        - name: log
          mountPath: /log
        - name: yml
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
      volumes:
      - name: timezone
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
      - name: log
        emptyDir: {}
      - name: yml
        configMap:
          defaultMode: 0644
          name: nginx
          items:
          - key: "filebeat.yml"
            path: "filebeat.yml"
      - name: conf
        configMap:
          defaultMode: 0644
          name: nginx
          items:
          - key: "nginx.conf"
            path: "nginx.conf"
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: nginx
  namespace: dev
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
spec:
  rules:
  - host: nginx.zs.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: nginx
            port:
              number: 80
k8s-logstash
docker pull docker.elastic.co/logstash/logstash:7.10.0
# Logstash: kafka -> filter -> Elasticsearch pipeline.
# Indentation restored — the pasted manifests had lost all leading whitespace.
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash
  namespace: efk
data:
  logstash.yml: |
    #http.host: "0.0.0.0"
    #xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch.efk.svc.cluster.local:9200" ]
    #xpack.management.elasticsearch.username: logstash_admin_user
    #xpack.management.elasticsearch.password: t0p.s3cr3t
    #xpack.monitoring.enabled: true
    #path.config: /usr/share/logstash/pipeline/logstash.conf
  logstash.conf: |
    input {
      kafka {
        bootstrap_servers => "kafka.efk.svc.cluster.local:9092"
        topics_pattern => "k8s-dev-fb-.*"
        group_id => "logstash1"
        codec => json {
          charset => "UTF-8"
        }
        add_field => { "[@metadata][myid]" => "nginxaccess-log" }
      }
    }
    filter {
      if [@metadata][myid] == "nginxaccess-log" {
        mutate {
          gsub => ["message", "\\x", "\\\x"]
        }
        if ( 'method":"HEAD' in [message] ) {
          drop {}
        }
        json {
          source => "message"
          remove_field => "prospector"
          remove_field => "beat"
          remove_field => "source"
          remove_field => "input"
          remove_field => "offset"
          remove_field => "fields"
          remove_field => "host"
          remove_field => "@version"
          remove_field => "message"
        }
      }
    }
    output {
      if [@metadata][myid] == "nginxaccess-log" {
        elasticsearch {
          hosts => [ "elasticsearch.efk.svc.cluster.local:9200" ]
          index => "k8s-dev-%{+YYYY.MM.dd}"
          user => "elastic"
          password => "${ELASTIC_PASSWORD}"
        }
      }
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: efk
  name: logstash
  labels:
    name: logstash
spec:
  replicas: 1
  selector:
    matchLabels:
      name: logstash
  template:
    metadata:
      labels:
        name: logstash
    spec:
      containers:
      - name: logstash
        image: docker.elastic.co/logstash/logstash:7.10.0
        imagePullPolicy: IfNotPresent
        env:
        # Substituted into logstash.conf via ${ELASTIC_PASSWORD}
        - name: ELASTIC_PASSWORD
          valueFrom:
            secretKeyRef:
              name: elasticsearch
              key: elastic-passwd
        volumeMounts:
        - name: yml
          mountPath: "/usr/share/logstash/config/logstash.yml"
          readOnly: true
          subPath: logstash.yml
        - name: conf
          mountPath: "/usr/share/logstash/pipeline/logstash.conf"
          readOnly: true
          subPath: logstash.conf
      volumes:
      - name: yml
        configMap:
          defaultMode: 0644
          name: logstash
          items:
          - key: "logstash.yml"
            path: "logstash.yml"
      - name: conf
        configMap:
          defaultMode: 0644
          name: logstash
          items:
          - key: "logstash.conf"
            path: "logstash.conf"