ELK-7.10版本集群官方RPM部署 metricbeat/elasticsearch-head插件

image.png
===============================================================================
1. 下載好如下軟件包+插件+環(huán)境
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.10.0-x86_64.rpm
wget https://artifacts.elastic.co/downloads/kibana/kibana-7.10.0-x86_64.rpm
wget https://artifacts.elastic.co/downloads/logstash/logstash-7.10.0-x86_64.rpm
wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.10.0-x86_64.rpm
集群插件:
https://github.com/mobz/elasticsearch-head.git
cerebro插件:
http://github.com/lmenezes/cerebro/releases/download/v0.9.4/cerebro-0.9.4.zip
監(jiān)控插件:
curl -L -O https://artifacts.elastic.co/downloads/beats/metricbeat/metricbeat-7.10.0-x86_64.rpm
安裝環(huán)境
JDK1.8以上即可
自己官網(wǎng)下載
NPM環(huán)境
https://nodejs.org/dist/v12.18.3/node-v12.18.3-linux-x64.tar.xz
===============================================================================

安裝jdk1.8版本
vim /etc/profile
export JAVA_HOME=/usr/local/jdk1.8.0_121
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin
source /etc/profile
java -version

安裝npm最新版本
wget https://nodejs.org/dist/v12.18.3/node-v12.18.3-linux-x64.tar.xz
tar -xvf node-v12.18.3-linux-x64.tar.xz -C .
mv node-v12.18.3-linux-x64 /usr/local/nodejs
ln -s /usr/local/nodejs/bin/node /usr/bin/node #創(chuàng)建軟連接颁股,讓node命令全局生效
ln -s /usr/local/nodejs/bin/npm /usr/bin/npm #創(chuàng)建軟連接得哆,讓npm命令全局生效
vim /etc/profile
export NODE_HOME=/usr/local/nodejs
export PATH=$NODE_HOME/bin:$PATH
source /etc/profile
node -v
npm -v
設(shè)置鏡像
npm config set registry https://registry.npmmirror.com

安裝elasticsearch-head插件
yum install -y git
git --version
git clone https://github.com/mobz/elasticsearch-head.git
cd elasticsearch-head
npm install cnpm -g --registry=https://registry.npmmirror.com #因?yàn)閚pm安裝非常非常慢,所以在這里先安裝國內(nèi)鏡像源地址
ln -s /usr/local/nodejs/bin/cnpm /usr/local/bin/cnpm #創(chuàng)建cnpm軟鏈接,不然執(zhí)行下面執(zhí)行命令會報(bào)錯(cuò)
cnpm install #使用cnpm命令下載安裝項(xiàng)目所需要的插件
vim _site/app.js #修改app.js 搜索localhost滓侍,將localhost修改為安裝ElasticSearch服務(wù)器的ip

cd node_modules/grunt/bin
nohup ./grunt server >nohup.out  2>&1 &
image.png
關(guān)防火墻。
systemctl stop firewalld
systemctl disable firewalld
systemctl status firewalld

selinux配置
setenforce 0  
sed -i 's/^SELINUX=.*$/SELINUX=disabled/' /etc/selinux/config

vim /etc/sysctl.conf
vm.max_map_count=262144
sysctl -p

修改最大文件描述符
vim /etc/security/limits.conf
* soft nofile 655350
* hard nofile 655350
* soft nproc 40960
* hard nproc 40960

es安裝
rpm --install elasticsearch-7.10.0-x86_64.rpm  備注:es-3臺都裝
rpm --install kibana-7.10.0-x86_64.rpm
rpm --install logstash-7.10.0-x86_64.rpm
rpm --install filebeat-7.10.0-x86_64.rpm


cat /etc/elasticsearch/elasticsearch.yml|grep -v "#"

cluster.name: test-es-cluster
node.name: test-elk-01
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.64.22
transport.tcp.port: 9300
http.port: 9200
discovery.seed_hosts: ["192.168.64.22:9300","192.168.64.23:9300","192.168.64.24:9300"]
cluster.initial_master_nodes: ["test-elk-01"]
node.master: true
node.data: true
node.ingest: false
node.ml: false
cluster.remote.connect: false
http.cors.enabled: true
http.cors.allow-origin: "*"

cluster.name: test-es-cluster
node.name: test-elk-02
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.64.23
transport.tcp.port: 9300
http.port: 9200
discovery.seed_hosts: ["192.168.64.22:9300","192.168.64.23:9300","192.168.64.24:9300"]
cluster.initial_master_nodes: ["test-elk-01"]
node.master: false
node.data: true
node.ingest: false
node.ml: false
cluster.remote.connect: false
http.cors.enabled: true
http.cors.allow-origin: "*"

cluster.name: test-es-cluster
node.name: test-elk-03
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.64.24
transport.tcp.port: 9300
http.port: 9200
discovery.seed_hosts: ["192.168.64.22:9300","192.168.64.23:9300","192.168.64.24:9300"]
cluster.initial_master_nodes: ["test-elk-01"]
node.master: false
node.data: true
node.ingest: false
node.ml: false
cluster.remote.connect: false
http.cors.enabled: true
http.cors.allow-origin: "*"

[root@elk-66 conf.d]# curl -XGET 'http://192.168.64.66:9200/_cat/nodes?v'
ip            heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
192.168.64.68           30          16   0    0.00    0.01     0.05 cdhstw    -      elk-68
192.168.64.66           38          37   1    0.10    0.15     0.13 cdhmstw   *      elk-66
192.168.64.67           62          15   0    0.00    0.03     0.08 cdhstw    -      elk-67
[root@elk-66 conf.d]# curl -XGET 'http://192.168.64.66:9200/_cat/nodes'
192.168.64.68 40 16 0 0.00 0.01 0.05 cdhstw  - elk-68
192.168.64.66 21 37 4 0.08 0.14 0.13 cdhmstw * elk-66
192.168.64.67 15 15 0 0.00 0.02 0.08 cdhstw  - elk-67
[root@elk-66 conf.d]# curl -i http://192.168.64.66:9200/_cluster/state/nodes?pretty
HTTP/1.1 200 OK
content-type: application/json; charset=UTF-8
content-length: 894

{
  "cluster_name" : "es-cluster",
  "cluster_uuid" : "LZso9rvUS3eeZdHs35KsZA",
  "nodes" : {
    "wNhKViCZRy2rJQfX11RNcg" : {
      "name" : "elk-68",
      "ephemeral_id" : "N2S8ggVPSMKEiPtCBug19A",
      "transport_address" : "192.168.64.68:9300",
      "attributes" : {
        "xpack.installed" : "true",
        "transform.node" : "true"
      }
    },
    "VSRB4svAStGm2Z1q90L00A" : {
      "name" : "elk-66",
      "ephemeral_id" : "ejQBrC-qTvCnjjML3dPYkQ",
      "transport_address" : "192.168.64.66:9300",
      "attributes" : {
        "xpack.installed" : "true",
        "transform.node" : "true"
      }
    },
    "rbi9mt6oSjCrO1luN6visQ" : {
      "name" : "elk-67",
      "ephemeral_id" : "P8uSRSxgT56rYSUuHP09hw",
      "transport_address" : "192.168.64.67:9300",
      "attributes" : {
        "xpack.installed" : "true",
        "transform.node" : "true"
      }
    }
  }
}

rpm --install metricbeat-7.10.0-x86_64.rpm 備注:裝master主節(jié)點(diǎn)上即可
cd /usr/bin/
metricbeat modules list
metricbeat modules enable elasticsearch-xpack
[root@elk-66 metricbeat]# vim metricbeat.yml 
output.elasticsearch:
  # Array of hosts to connect to.
  hosts: ["http://192.168.64.66:9200","http://192.168.64.67:9200","http://192.168.64.68:9200"]
[root@elk-66 modules.d]# cat elasticsearch-xpack.yml 
# Module: elasticsearch
# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.10/metricbeat-module-elasticsearch.html

- module: elasticsearch
  xpack.enabled: true
  period: 10s
  hosts: ["http://192.168.64.66:9200","http://192.168.64.67:9200","http://192.168.64.68:9200"]
  #username: "user"
  #password: "secret"

sudo service metricbeat start
image.png
filebeat.yml 配置
filebeat.inputs:

# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input specific configurations.
- type: log
  paths:
   - /var/log/nginx/access.log
  fields:
   log_source: nginx-access
- type: log
  paths:
   - /var/log/nginx/error.log
  fields:
   log_source: nginx-error

[root@elk-66 conf.d]# pwd
/etc/logstash/conf.d
[root@elk-66 conf.d]# cat nginx_find_log.conf 
 input {
   beats {
     port => 5044
   }
 }
 filter {
   if [fields][log_source]=="nginx-access"{
     grok {
       match => {
         "message" => '%{IP:clientip}\s*%{DATA}\s*%{DATA}\s*\[%{HTTPDATE:requesttime}\]\s*"%{WORD:requesttype}.*?"\s*%{NUMBER:status:int}\s*%{NUMBER:bytes_read:int}\s*"%{DATA:requesturl}"\s*%{QS:ua}'
      }
       overwrite => ["message"]
     }
   }
   if [fields][log_source]=="nginx-error"{
     grok {
       match => {
         "message" => '(?<time>.*?)\s*\[%{LOGLEVEL:loglevel}\]\s*%{DATA}:\s*%{DATA:errorinfo},\s*%{WORD}:\s*%{IP:clientip},\s*%{WORD}:%{DATA:server},\s*%{WORD}:\s*%{QS:request},\s*%{WORD}:\s*%{QS:upstream},\s*%{WORD}:\s*"%{IP:hostip}",\s*%{WORD}:\s*%{QS:referrer}'
       }
       overwrite => ["message"]
     }
   }
 }
 output {
   if [fields][log_source]=="nginx-access"{
     elasticsearch {
       hosts => ["http://192.168.64.66:9200"]
       action => "index"
       index => "nginx-access-%{+YYYY.MM.dd}"
    }
   }
   if [fields][log_source]=="nginx-error"{
     elasticsearch {
       hosts => ["http://192.168.64.66:9200"]
       action => "index"
       index => "nginx-error-%{+YYYY.MM.dd}"
    }
   }
   stdout { codec => rubydebug }
 }
image.png

image.png

啟動停止的順序:
啟動:elasticsearch --- logstash --- filebeat --- kibana
停止:kibana --- filebeat --- logstash --- elasticsearch

nginx安裝htpasswd 密碼登錄
# 安裝工具包httpd-tools
yum install -y httpd-tools
[root@elk-66 passwd]# htpasswd -c "/etc/nginx/passwd/kibana.passwd" "admin"
New password: 
Re-type new password: 
Adding password for user admin

nginx的conf配置例如:
[root@elk-66 conf.d]# cat default.conf 
server {
        server_name 192.168.64.66; #這里一般是填寫域名 然后把防火墻開啟 只需呀放開80即可 但是我這里只是演示測試環(huán)境哈
        #listen 80;
        auth_basic "Restricted Access";
        auth_basic_user_file /etc/nginx/passwd/kibana.passwd;
        location / {
                proxy_pass http://192.168.64.66:5601;
                proxy_http_version 1.1;
                proxy_set_header Upgrade $http_upgrade;
                proxy_set_header Connection 'upgrade';
                proxy_set_header Host $host;
                proxy_cache_bypass $http_upgrade;
        }
}


nginx-json模板如下:
log_format json '{"@timestamp":"$time_iso8601",'
     '"host":"$server_addr",'
     '"clientip":"$remote_addr",'
     '"size":$body_bytes_sent,'
     '"responsetime":$request_time,'
     '"upstreamtime":"$upstream_response_time",'
     '"upstreamhost":"$upstream_addr",'
     '"http_host":"$host",'
     '"url":"$uri",'
     '"domain":"$host",'
     '"xff":"$http_x_forwarded_for",'
     '"referer":"$http_referer",'
     '"remote_user":"$remote_user",'
     '"request":"$request",'
     '"http_user_agent":"$http_user_agent",'
     '"requesturi":"$request_uri",'
     '"status":"$status"}';
access_log  /var/log/nginx/access.log  json;

image.png

image.png
收集策略 nginx上的用戶日志 docker上的jar日志 kafka日常消息topics日志 
[root@elk-66 conf.d]# cat logstash-test.conf 
 input {
   beats {
     port => 5044
   }
 }
 filter {
   if [fields][log_source]=="nginx-access"{
     grok {
       match => {
         "message" => '%{IP:clientip}\s*%{DATA}\s*%{DATA}\s*\[%{HTTPDATE:requesttime}\]\s*"%{WORD:requesttype}.*?"\s*%{NUMBER:status:int}\s*%{NUMBER:bytes_read:int}\s*"%{DATA:requesturl}"\s*%{QS:ua}'
      }
       overwrite => ["message"]
     }
   }
   if [fields][log_source]=="nginx-error"{
     grok {
       match => {
         "message" => '(?<time>.*?)\s*\[%{LOGLEVEL:loglevel}\]\s*%{DATA}:\s*%{DATA:errorinfo},\s*%{WORD}:\s*%{IP:clientip},\s*%{WORD}:%{DATA:server},\s*%{WORD}:\s*%{QS:request},\s*%{WORD}:\s*%{QS:upstream},\s*%{WORD}:\s*"%{IP:hostip}",\s*%{WORD}:\s*%{QS:referrer}'
       }
       overwrite => ["message"]
     }
   }
 }
 output {
   if [fields][log_source]=="nginx-access"{
     elasticsearch {
       hosts => ["elk-66:9200","elk-67:9200","elk-68:9200"]
       action => "index"
       index => "nginx-access-%{+YYYY.MM.dd}"
    }
   }
   if [fields][log_source]=="nginx-error"{
     elasticsearch {
       hosts => ["elk-66:9200","elk-67:9200","elk-68:9200"]
       action => "index"
       index => "nginx-error-%{+YYYY.MM.dd}"
    }
   }
   if [log_source] == "docker-test-45" {
        elasticsearch {
        hosts => ["elk-66:9200","elk-67:9200","elk-68:9200"]
                index => "docker-test-45-%{+YYYY.MM.dd}"
        }
   }
   if [log_source] == "docker-test-46" {
    elasticsearch {
        hosts => ["elk-66:9200","elk-67:9200","elk-68:9200"]
        index => "docker-test-46-%{+YYYY.MM.dd}"
    }
   }
   stdout { codec => rubydebug }
 }

[root@docker-test-45 filebeat]# cat filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /home/*/logs/*.log
  fields:
    log_source: docker-test-45
  fields_under_root: true
  multiline.pattern: ^\d{4}-\d{1,2}-\d{1,2}
  multiline.negate: true
  multiline.match: after
  scan_frequency: 5s
  close_inactive: 1h
  ignore_older: 24h
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1
output.logstash:
  hosts: ["192.168.64.66:5044"]
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
  - add_docker_metadata: 
      host: "unix:///var/run/docker.sock"                                               
      match_source: true                                                
      match_source_index: 3 
  - add_kubernetes_metadata: ~
logging.level: info

[root@elk-66 conf.d]# cat /etc/filebeat/filebeat.yml|grep -v "#"
filebeat.inputs:
- type: log
  paths:
   - /var/log/nginx/access.log
  fields:
   log_source: nginx-access
- type: log
  paths:
   - /var/log/nginx/error.log
  fields:
   log_source: nginx-error
  enabled: false
- type: filestream
  enabled: false
  paths:
    - /var/log/*.log
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1
setup.dashboards.enabled: false
setup.kibana:
  host: "192.168.64.66:5601"
output.logstash:
  hosts: ["192.168.64.66:5044"]
processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~

image.png

image.png

image.png
cat application.conf
hosts = [
  {
    host = "http://192.168.64.66:9200"
    name = "es-cluster"
    headers-whitelist = [ "x-proxy-user", "x-proxy-roles", "X-Forwarded-For" ]
  }
]

nohup ./cerebro 2>&1 &
image.png
刪除索引(也刪除了對應(yīng)的數(shù)據(jù))

curl -XDELETE http://localhost:9200/axt_examination_log-2021.07.30 #刪除單條索引
 
curl -XDELETE http://localhost:9200/axt_examination_log-2021.07.3{0..1} #刪除連續(xù)多條索引
 
curl -XDELETE http://localhost:9200/axt_examination_log-2021.07.30,axt_examination_log-2021.08.30 #刪除多條索引
有了這兩個(gè)命令可以開始編寫下面腳本

# cat es_delete_index.sh 
#!/bin/bash
 
#刪除java 15天前索引
 
source /etc/profile
 
date=`date -d "15 days ago" +%Y.%m.%d`
 
log_name='
axt_resources_log
axt_user_log
axt_data_log
axt_crm_log
axt_statistics_log
axt_mhcz_log
axt_future_log
axt_examination_log
axt_usercenter_log
'
 
for i in $log_name
do
  curl -XDELETE http://localhost:9200/$i-$date
最后編輯于
?著作權(quán)歸作者所有,轉(zhuǎn)載或內(nèi)容合作請聯(lián)系作者
  • 序言:七十年代末,一起剝皮案震驚了整個(gè)濱河市规哲,隨后出現(xiàn)的幾起案子,更是在濱河造成了極大的恐慌诽表,老刑警劉巖唉锌,帶你破解...
    沈念sama閱讀 212,080評論 6 493
  • 序言:濱河連續(xù)發(fā)生了三起死亡事件隅肥,死亡現(xiàn)場離奇詭異,居然都是意外死亡袄简,警方通過查閱死者的電腦和手機(jī)腥放,發(fā)現(xiàn)死者居然都...
    沈念sama閱讀 90,422評論 3 385
  • 文/潘曉璐 我一進(jìn)店門,熙熙樓的掌柜王于貴愁眉苦臉地迎上來痘番,“玉大人捉片,你說我怎么就攤上這事」眨” “怎么了伍纫?”我有些...
    開封第一講書人閱讀 157,630評論 0 348
  • 文/不壞的土叔 我叫張陵,是天一觀的道長昂芜。 經(jīng)常有香客問我莹规,道長,這世上最難降的妖魔是什么泌神? 我笑而不...
    開封第一講書人閱讀 56,554評論 1 284
  • 正文 為了忘掉前任良漱,我火速辦了婚禮,結(jié)果婚禮上欢际,老公的妹妹穿的比我還像新娘母市。我一直安慰自己,他們只是感情好损趋,可當(dāng)我...
    茶點(diǎn)故事閱讀 65,662評論 6 386
  • 文/花漫 我一把揭開白布患久。 她就那樣靜靜地躺著,像睡著了一般浑槽。 火紅的嫁衣襯著肌膚如雪蒋失。 梳的紋絲不亂的頭發(fā)上,一...
    開封第一講書人閱讀 49,856評論 1 290
  • 那天桐玻,我揣著相機(jī)與錄音篙挽,去河邊找鬼。 笑死镊靴,一個(gè)胖子當(dāng)著我的面吹牛铣卡,可吹牛的內(nèi)容都是我干的。 我是一名探鬼主播偏竟,決...
    沈念sama閱讀 39,014評論 3 408
  • 文/蒼蘭香墨 我猛地睜開眼煮落,長吁一口氣:“原來是場噩夢啊……” “哼!你這毒婦竟也來了苫耸?” 一聲冷哼從身側(cè)響起州邢,我...
    開封第一講書人閱讀 37,752評論 0 268
  • 序言:老撾萬榮一對情侶失蹤,失蹤者是張志新(化名)和其女友劉穎,沒想到半個(gè)月后量淌,有當(dāng)?shù)厝嗽跇淞掷锇l(fā)現(xiàn)了一具尸體骗村,經(jīng)...
    沈念sama閱讀 44,212評論 1 303
  • 正文 獨(dú)居荒郊野嶺守林人離奇死亡,尸身上長有42處帶血的膿包…… 初始之章·張勛 以下內(nèi)容為張勛視角 年9月15日...
    茶點(diǎn)故事閱讀 36,541評論 2 327
  • 正文 我和宋清朗相戀三年呀枢,在試婚紗的時(shí)候發(fā)現(xiàn)自己被綠了胚股。 大學(xué)時(shí)的朋友給我發(fā)了我未婚夫和他白月光在一起吃飯的照片。...
    茶點(diǎn)故事閱讀 38,687評論 1 341
  • 序言:一個(gè)原本活蹦亂跳的男人離奇死亡裙秋,死狀恐怖琅拌,靈堂內(nèi)的尸體忽然破棺而出,到底是詐尸還是另有隱情摘刑,我是刑警寧澤进宝,帶...
    沈念sama閱讀 34,347評論 4 331
  • 正文 年R本政府宣布,位于F島的核電站枷恕,受9級特大地震影響党晋,放射性物質(zhì)發(fā)生泄漏。R本人自食惡果不足惜徐块,卻給世界環(huán)境...
    茶點(diǎn)故事閱讀 39,973評論 3 315
  • 文/蒙蒙 一未玻、第九天 我趴在偏房一處隱蔽的房頂上張望。 院中可真熱鬧胡控,春花似錦扳剿、人聲如沸。這莊子的主人今日做“春日...
    開封第一講書人閱讀 30,777評論 0 21
  • 文/蒼蘭香墨 我抬頭看了看天上的太陽。三九已至癣猾,卻和暖如春敛劝,著一層夾襖步出監(jiān)牢的瞬間余爆,已是汗流浹背纷宇。 一陣腳步聲響...
    開封第一講書人閱讀 32,006評論 1 266
  • 我被黑心中介騙來泰國打工, 沒想到剛下飛機(jī)就差點(diǎn)兒被人妖公主榨干…… 1. 我叫王不留蛾方,地道東北人像捶。 一個(gè)月前我還...
    沈念sama閱讀 46,406評論 2 360
  • 正文 我出身青樓,卻偏偏與公主長得像桩砰,于是被迫代替她去往敵國和親拓春。 傳聞我的和親對象是個(gè)殘疾皇子,可洞房花燭夜當(dāng)晚...
    茶點(diǎn)故事閱讀 43,576評論 2 349

推薦閱讀更多精彩內(nèi)容