===============================================================================
1. Download the following packages, plugins, and environment prerequisites
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.10.0-x86_64.rpm
wget https://artifacts.elastic.co/downloads/kibana/kibana-7.10.0-x86_64.rpm
wget https://artifacts.elastic.co/downloads/logstash/logstash-7.10.0-x86_64.rpm
wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.10.0-x86_64.rpm
Cluster front-end plugin (elasticsearch-head):
https://github.com/mobz/elasticsearch-head.git
cerebro plugin:
https://github.com/lmenezes/cerebro/releases/download/v0.9.4/cerebro-0.9.4.zip
Monitoring plugin (Metricbeat):
curl -L -O https://artifacts.elastic.co/downloads/beats/metricbeat/metricbeat-7.10.0-x86_64.rpm
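Optionally, verify each download; Elastic publishes a .sha512 checksum file next to every artifact (shown here for the elasticsearch RPM, the other RPMs follow the same pattern):
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.10.0-x86_64.rpm.sha512
sha512sum -c elasticsearch-7.10.0-x86_64.rpm.sha512    # should report: OK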
Runtime prerequisites:
JDK 1.8 or later
(download it from the official site yourself)
Node.js / npm environment:
https://nodejs.org/dist/v12.18.3/node-v12.18.3-linux-x64.tar.xz
===============================================================================
Install JDK 1.8
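The JDK tarball itself needs to be unpacked first; a minimal sketch, assuming the Oracle tarball jdk-8u121-linux-x64.tar.gz has already been downloaded into the current directory:
tar -xzf jdk-8u121-linux-x64.tar.gz -C /usr/local/    # creates /usr/local/jdk1.8.0_121, matching JAVA_HOME below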
vim /etc/profile
export JAVA_HOME=/usr/local/jdk1.8.0_121
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin
source /etc/profile
java -version
Install Node.js (which provides npm)
wget https://nodejs.org/dist/v12.18.3/node-v12.18.3-linux-x64.tar.xz
tar -xvf node-v12.18.3-linux-x64.tar.xz -C .
mv node-v12.18.3-linux-x64 /usr/local/nodejs
ln -s /usr/local/nodejs/bin/node /usr/bin/node   # symlink so the node command works globally
ln -s /usr/local/nodejs/bin/npm /usr/bin/npm   # symlink so the npm command works globally
vim /etc/profile
export NODE_HOME=/usr/local/nodejs
export PATH=$NODE_HOME/bin:$PATH
source /etc/profile
node -v
npm -v
Set the npm registry mirror
npm config set registry https://registry.npm.taobao.org
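To confirm the mirror took effect:
npm config get registry    # should print the Taobao registry URL set above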
Install the elasticsearch-head plugin
yum install -y git
git --version
git clone https://github.com/mobz/elasticsearch-head.git
cd elasticsearch-head
npm install cnpm -g --registry=https://registry.npm.taobao.org   # npm installs are very slow, so first install cnpm pointed at the Taobao registry
ln -s /usr/local/nodejs/bin/cnpm /usr/local/bin/cnpm   # create a cnpm symlink, otherwise the command below will fail
cnpm install   # use cnpm to download and install the project's dependencies
vim _site/app.js   # edit app.js: search for localhost and replace it with the IP of the Elasticsearch server
cd node_modules/grunt/bin
nohup ./grunt server >nohup.out 2>&1 &
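A quick check that head is up (it serves on port 9100 by default when started via grunt server):
ss -lntp | grep 9100                     # grunt should be listening
curl -s http://127.0.0.1:9100/ | head    # should return the head front-end HTML; then browse to http://<head-host>:9100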
Turn off the firewall.
systemctl stop firewalld
systemctl disable firewalld
systemctl status firewalld
SELinux configuration
setenforce 0
sed -i 's/^SELINUX=.*$/SELINUX=disabled/' /etc/selinux/config
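Check the current mode:
getenforce    # Permissive now; Disabled after a reboot with the config change above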
vim /etc/sysctl.conf
vm.max_map_count=262144
sysctl -p
Increase the open-file and process limits
vim /etc/security/limits.conf
* soft nofile 655350
* hard nofile 655350
* soft nproc 40960
* hard nproc 40960
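The new limits apply to fresh login sessions; they can be confirmed with:
ulimit -n    # expect 655350
ulimit -u    # expect 40960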
Elasticsearch installation
rpm --install elasticsearch-7.10.0-x86_64.rpm   # note: install on all three ES nodes
rpm --install kibana-7.10.0-x86_64.rpm
rpm --install logstash-7.10.0-x86_64.rpm
rpm --install filebeat-7.10.0-x86_64.rpm
cat /etc/elasticsearch/elasticsearch.yml|grep -v "#"
# test-elk-01 (192.168.64.22)
cluster.name: test-es-cluster
node.name: test-elk-01
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.64.22
transport.tcp.port: 9300
http.port: 9200
discovery.seed_hosts: ["192.168.64.22:9300","192.168.64.23:9300","192.168.64.24:9300"]
cluster.initial_master_nodes: ["192.168.64.22:9300"]
node.master: true
node.data: true
node.ingest: false
node.ml: false
cluster.remote.connect: false
http.cors.enabled: true
http.cors.allow-origin: "*"
# test-elk-02 (192.168.64.23)
cluster.name: test-es-cluster
node.name: test-elk-02
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.64.23
transport.tcp.port: 9300
http.port: 9200
discovery.seed_hosts: ["192.168.64.22:9300","192.168.64.23:9300","192.168.64.24:9300"]
cluster.initial_master_nodes: ["192.168.64.22:9300"]
node.master: false
node.data: true
node.ingest: false
node.ml: false
cluster.remote.connect: false
http.cors.enabled: true
http.cors.allow-origin: "*"
# test-elk-03 (192.168.64.24)
cluster.name: test-es-cluster
node.name: test-elk-03
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.64.24
transport.tcp.port: 9300
http.port: 9200
discovery.seed_hosts: ["192.168.64.22:9300","192.168.64.23:9300","192.168.64.24:9300"]
cluster.initial_master_nodes: ["192.168.64.22:9300"]
node.master: false
node.data: true
node.ingest: false
node.ml: false
cluster.remote.connect: false
http.cors.enabled: true
http.cors.allow-origin: "*"
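With the three configs in place, start Elasticsearch on each node and check that they form one cluster (a minimal sketch, using the IPs configured above):
systemctl daemon-reload
systemctl enable elasticsearch
systemctl start elasticsearch
curl -s 'http://192.168.64.22:9200/_cluster/health?pretty'    # expect "number_of_nodes" : 3 and a green/yellow status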
[root@elk-66 conf.d]# curl -XGET 'http://192.168.64.66:9200/_cat/nodes?v'
ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
192.168.64.68 30 16 0 0.00 0.01 0.05 cdhstw - elk-68
192.168.64.66 38 37 1 0.10 0.15 0.13 cdhmstw * elk-66
192.168.64.67 62 15 0 0.00 0.03 0.08 cdhstw - elk-67
[root@elk-66 conf.d]# curl -XGET 'http://192.168.64.66:9200/_cat/nodes'
192.168.64.68 40 16 0 0.00 0.01 0.05 cdhstw - elk-68
192.168.64.66 21 37 4 0.08 0.14 0.13 cdhmstw * elk-66
192.168.64.67 15 15 0 0.00 0.02 0.08 cdhstw - elk-67
[root@elk-66 conf.d]# curl -i http://192.168.64.66:9200/_cluster/state/nodes?pretty
HTTP/1.1 200 OK
content-type: application/json; charset=UTF-8
content-length: 894
{
  "cluster_name" : "es-cluster",
  "cluster_uuid" : "LZso9rvUS3eeZdHs35KsZA",
  "nodes" : {
    "wNhKViCZRy2rJQfX11RNcg" : {
      "name" : "elk-68",
      "ephemeral_id" : "N2S8ggVPSMKEiPtCBug19A",
      "transport_address" : "192.168.64.68:9300",
      "attributes" : {
        "xpack.installed" : "true",
        "transform.node" : "true"
      }
    },
    "VSRB4svAStGm2Z1q90L00A" : {
      "name" : "elk-66",
      "ephemeral_id" : "ejQBrC-qTvCnjjML3dPYkQ",
      "transport_address" : "192.168.64.66:9300",
      "attributes" : {
        "xpack.installed" : "true",
        "transform.node" : "true"
      }
    },
    "rbi9mt6oSjCrO1luN6visQ" : {
      "name" : "elk-67",
      "ephemeral_id" : "P8uSRSxgT56rYSUuHP09hw",
      "transport_address" : "192.168.64.67:9300",
      "attributes" : {
        "xpack.installed" : "true",
        "transform.node" : "true"
      }
    }
  }
}
rpm --install metricbeat-7.10.0-x86_64.rpm   # note: installing it on the master node is enough
cd /usr/bin/
metricbeat modules list
metricbeat modules enable elasticsearch-xpack
[root@elk-66 metricbeat]# vim metricbeat.yml
output.elasticsearch:
  # Array of hosts to connect to.
  hosts: ["http://192.168.64.66:9200","http://192.168.64.67:9200","http://192.168.64.68:9200"]
[root@elk-66 modules.d]# cat elasticsearch-xpack.yml
# Module: elasticsearch
# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.10/metricbeat-module-elasticsearch.html
- module: elasticsearch
  xpack.enabled: true
  period: 10s
  hosts: ["http://192.168.64.66:9200","http://192.168.64.67:9200","http://192.168.64.68:9200"]
  #username: "user"
  #password: "secret"
sudo service metricbeat start
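Metricbeat ships test subcommands that can be used to validate the setup:
metricbeat test config    # checks metricbeat.yml syntax
metricbeat test output    # checks connectivity to the configured Elasticsearch hosts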
filebeat.yml configuration
filebeat.inputs:
# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input specific configurations.
- type: log
  paths:
    - /var/log/nginx/access.log
  fields:
    log_source: nginx-access
- type: log
  paths:
    - /var/log/nginx/error.log
  fields:
    log_source: nginx-error
[root@elk-66 conf.d]# pwd
/etc/logstash/conf.d
[root@elk-66 conf.d]# cat nginx_find_log.conf
input {
  beats {
    port => 5044
  }
}
filter {
  if [fields][log_source] == "nginx-access" {
    grok {
      match => {
        "message" => '%{IP:clientip}\s*%{DATA}\s*%{DATA}\s*\[%{HTTPDATE:requesttime}\]\s*"%{WORD:requesttype}.*?"\s*%{NUMBER:status:int}\s*%{NUMBER:bytes_read:int}\s*"%{DATA:requesturl}"\s*%{QS:ua}'
      }
      overwrite => ["message"]
    }
  }
  if [fields][log_source] == "nginx-error" {
    grok {
      match => {
        "message" => '(?<time>.*?)\s*\[%{LOGLEVEL:loglevel}\]\s*%{DATA}:\s*%{DATA:errorinfo},\s*%{WORD}:\s*%{IP:clientip},\s*%{WORD}:%{DATA:server},\s*%{WORD}:\s*%{QS:request},\s*%{WORD}:\s*%{QS:upstream},\s*%{WORD}:\s*"%{IP:hostip}",\s*%{WORD}:\s*%{QS:referrer}'
      }
      overwrite => ["message"]
    }
  }
}
output {
  if [fields][log_source] == "nginx-access" {
    elasticsearch {
      hosts => ["http://192.168.64.66:9200"]
      action => "index"
      index => "nginx-access-%{+YYYY.MM.dd}"
    }
  }
  if [fields][log_source] == "nginx-error" {
    elasticsearch {
      hosts => ["http://192.168.64.66:9200"]
      action => "index"
      index => "nginx-error-%{+YYYY.MM.dd}"
    }
  }
  stdout { codec => rubydebug }
}
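Before starting Logstash, the pipeline file can be syntax-checked with the RPM-installed binary:
/usr/share/logstash/bin/logstash --path.settings /etc/logstash -f /etc/logstash/conf.d/nginx_find_log.conf --config.test_and_exit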
Start/stop order:
Start: elasticsearch --- logstash --- filebeat --- kibana
Stop: kibana --- filebeat --- logstash --- elasticsearch
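With the systemd units installed by the RPMs, a sketch of that order is:
# start
systemctl start elasticsearch; systemctl start logstash; systemctl start filebeat; systemctl start kibana
# stop (reverse order)
systemctl stop kibana; systemctl stop filebeat; systemctl stop logstash; systemctl stop elasticsearch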
Set up htpasswd password login for Kibana via nginx
# install the httpd-tools package (provides htpasswd)
yum install -y httpd-tools
[root@elk-66 passwd]# htpasswd -c "/etc/nginx/passwd/kibana.passwd" "admin"
New password:
Re-type new password:
Adding password for user admin
Example nginx conf:
[root@elk-66 conf.d]# cat default.conf
server {
    server_name 192.168.64.66;   # normally a domain name goes here; enable the firewall and open only port 80. This is just a test/demo environment.
    #listen 80;
    auth_basic "Restricted Access";
    auth_basic_user_file /etc/nginx/passwd/kibana.passwd;
    location / {
        proxy_pass http://192.168.64.66:5601;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }
}
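After editing the vhost, validate and reload nginx:
nginx -t                  # test the configuration
systemctl reload nginx    # apply it without dropping connections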
The nginx JSON access-log format template is as follows:
log_format json '{"@timestamp":"$time_iso8601",'
'"host":"$server_addr",'
'"clientip":"$remote_addr",'
'"size":$body_bytes_sent,'
'"responsetime":$request_time,'
'"upstreamtime":"$upstream_response_time",'
'"upstreamhost":"$upstream_addr",'
'"http_host":"$host",'
'"url":"$uri",'
'"domain":"$host",'
'"xff":"$http_x_forwarded_for",'
'"referer":"$http_referer",'
'"remote_user":"$remote_user",'
'"request":"$request",'
'"http_user_agent":"$http_user_agent",'
'"requesturi":"$request_uri",'
'"status":"$status"}';
access_log /var/log/nginx/access.log json;
Collection plan: user access/error logs on nginx, jar application logs running in Docker, and Kafka topic message logs.
[root@elk-66 conf.d]# cat logstash-test.conf
input {
  beats {
    port => 5044
  }
}
filter {
  if [fields][log_source] == "nginx-access" {
    grok {
      match => {
        "message" => '%{IP:clientip}\s*%{DATA}\s*%{DATA}\s*\[%{HTTPDATE:requesttime}\]\s*"%{WORD:requesttype}.*?"\s*%{NUMBER:status:int}\s*%{NUMBER:bytes_read:int}\s*"%{DATA:requesturl}"\s*%{QS:ua}'
      }
      overwrite => ["message"]
    }
  }
  if [fields][log_source] == "nginx-error" {
    grok {
      match => {
        "message" => '(?<time>.*?)\s*\[%{LOGLEVEL:loglevel}\]\s*%{DATA}:\s*%{DATA:errorinfo},\s*%{WORD}:\s*%{IP:clientip},\s*%{WORD}:%{DATA:server},\s*%{WORD}:\s*%{QS:request},\s*%{WORD}:\s*%{QS:upstream},\s*%{WORD}:\s*"%{IP:hostip}",\s*%{WORD}:\s*%{QS:referrer}'
      }
      overwrite => ["message"]
    }
  }
}
output {
  if [fields][log_source] == "nginx-access" {
    elasticsearch {
      hosts => ["elk-66:9200","elk-67:9200","elk-68:9200"]
      action => "index"
      index => "nginx-access-%{+YYYY.MM.dd}"
    }
  }
  if [fields][log_source] == "nginx-error" {
    elasticsearch {
      hosts => ["elk-66:9200","elk-67:9200","elk-68:9200"]
      action => "index"
      index => "nginx-error-%{+YYYY.MM.dd}"
    }
  }
  if [log_source] == "docker-test-45" {
    elasticsearch {
      hosts => ["elk-66:9200","elk-67:9200","elk-68:9200"]
      index => "docker-test-45-%{+YYYY.MM.dd}"
    }
  }
  if [log_source] == "docker-test-46" {
    elasticsearch {
      hosts => ["elk-66:9200","elk-67:9200","elk-68:9200"]
      index => "docker-test-46-%{+YYYY.MM.dd}"
    }
  }
  stdout { codec => rubydebug }
}
[root@docker-test-45 filebeat]# cat filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /home/*/logs/*.log
  fields:
    log_source: docker-test-45
  fields_under_root: true
  multiline.pattern: ^\d{4}-\d{1,2}-\d{1,2}
  multiline.negate: true
  multiline.match: after
  scan_frequency: 5s
  close_inactive: 1h
  ignore_older: 24h
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1
output.logstash:
  hosts: ["192.168.64.66:5044"]
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
  - add_docker_metadata:
      host: "unix:///var/run/docker.sock"
      match_source: true
      match_source_index: 3
  - add_kubernetes_metadata: ~
logging.level: info
[root@elk-66 conf.d]# cat /etc/filebeat/filebeat.yml|grep -v "#"
filebeat.inputs:
- type: log
  paths:
    - /var/log/nginx/access.log
  fields:
    log_source: nginx-access
- type: log
  paths:
    - /var/log/nginx/error.log
  fields:
    log_source: nginx-error
  enabled: false
- type: filestream
  enabled: false
  paths:
    - /var/log/*.log
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1
setup.dashboards.enabled: false
setup.kibana:
  host: "192.168.64.66:5601"
output.logstash:
  hosts: ["192.168.64.66:5044"]
processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~
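As with Metricbeat, the Filebeat config and its Logstash output can be checked with the built-in test subcommands:
filebeat test config    # validate filebeat.yml
filebeat test output    # check the connection to Logstash on 192.168.64.66:5044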
cerebro configuration (conf/application.conf inside the unzipped cerebro directory):
cat application.conf
hosts = [
  {
    host = "http://192.168.64.66:9200"
    name = "es-cluster"
    headers-whitelist = [ "x-proxy-user", "x-proxy-roles", "X-Forwarded-For" ]
  }
]
nohup ./cerebro 2>&1 &
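cerebro listens on port 9000 by default; a quick reachability check (host assumed to be the node where cerebro was started):
curl -I http://192.168.64.66:9000    # an HTTP 200 response means cerebro is up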
Deleting indices (this also deletes the corresponding data)
curl -XDELETE http://localhost:9200/axt_examination_log-2021.07.30   # delete a single index
curl -XDELETE http://localhost:9200/axt_examination_log-2021.07.3{0..1}   # delete a contiguous range of indices (shell brace expansion)
curl -XDELETE http://localhost:9200/axt_examination_log-2021.07.30,axt_examination_log-2021.08.30   # delete multiple indices at once (comma-separated)
With these commands we can write the following cleanup script
# cat es_delete_index.sh
#!/bin/bash
# delete Java application indices older than 15 days
source /etc/profile
date=`date -d "15 days ago" +%Y.%m.%d`
log_name='
axt_resources_log
axt_user_log
axt_data_log
axt_crm_log
axt_statistics_log
axt_mhcz_log
axt_future_log
axt_examination_log
axt_usercenter_log
'
for i in $log_name
do
curl -XDELETE http://localhost:9200/$i-$date