1.Flume的安裝
查看JAVA_HOME:
[root@bigdata112 ~]# echo $JAVA_HOME
/opt/module/jdk1.8.0_181
安裝Flume
[root@bigdata112 soft]# tar -zxvf apache-flume-1.8.0-bin.tar.gz -C /opt/module
改名:
[root@bigdata112 module]# mv apache-flume-1.8.0-bin/ flume1.8.0
[root@bigdata112 conf]# mv flume-env.sh.template flume-env.sh
flume-env.sh涉及修改項:
export JAVA_HOME=/opt/module/jdk1.8.0_181
到這里Flume就安裝完成了
2.測試案例(端口,文本,文件夾)
案例一:監(jiān)控端口數(shù)據(jù)
目標(biāo):Flume監(jiān)控一端Console,另一端Console發(fā)送消息,使被監(jiān)控端實時顯示。
分步實現(xiàn):
1)安裝telnet工具:
[root@bigdata112 ~]#yum -y install telnet
image.png
在flume目錄下創(chuàng)建一個目錄用于存放Flume的任務(wù)
[root@bigdata112 flume1.8.0]# mkdir jobconf
創(chuàng)建Flume Agent配置文件flume-telnet.conf
#定義Agent
a1.sources = r1
a1.sinks = k1
a1.channels = c1
#定義source
a1.sources.r1.type = netcat
a1.sources.r1.bind = bigdata112
a1.sources.r1.port = 44445
# 定義sink
a1.sinks.k1.type = logger
# 定義memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100
# 雙向鏈接
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
判斷44445端口是否被占用
netstat -tunlp | grep 44445
2)測試:啟動flume配置文件
[root@bigdata112 flume1.8.0]# bin/flume-ng agent \
--conf /opt/module/flume1.8.0/conf/ \
--name a1 \
--conf-file /opt/module/flume1.8.0/jobconf/flume-telnet.conf \
-Dflume.root.logger=INFO,console
使用telnet工具向本機(jī)的44445端口發(fā)送內(nèi)容
[root@bigdata112 ~]# telnet bigdata112 44445
Trying 192.168.226.112...
Connected to bigdata112.
Escape character is '^]'.
123
OK
結(jié)果:
2019-06-21 11:23:04,777 (SinkRunner-PollingRunner-DefaultSinkProcessor) [INFO - org.apache.flume.sink.LoggerSink.process(LoggerSink.java:95)] Event: { headers:{} body: 31 32 33 0D 123. }
案例二:實時讀取本地文件到HDFS
創(chuàng)建flume-hdfs.conf文件:
# 1 agent
a2.sources = r2
a2.sinks = k2
a2.channels = c2
# 2 source
a2.sources.r2.type = exec
a2.sources.r2.command = tail -F /opt/zouzou
a2.sources.r2.shell = /bin/bash -c
# 3 sink
a2.sinks.k2.type = hdfs
a2.sinks.k2.hdfs.path = hdfs://bigdata111:9000/flume/%Y%m%d/%H
#上傳文件的前綴
a2.sinks.k2.hdfs.filePrefix = logs-
#是否按照時間滾動文件夾
a2.sinks.k2.hdfs.round = true
#多少時間單位創(chuàng)建一個新的文件夾
a2.sinks.k2.hdfs.roundValue = 1
#重新定義時間單位
a2.sinks.k2.hdfs.roundUnit = hour
#是否使用本地時間戳
a2.sinks.k2.hdfs.useLocalTimeStamp = true
#積攢多少個Event才flush到HDFS一次
a2.sinks.k2.hdfs.batchSize = 1000
#設(shè)置文件類型,可支持壓縮
a2.sinks.k2.hdfs.fileType = DataStream
#多久生成一個新的文件
a2.sinks.k2.hdfs.rollInterval = 600
#設(shè)置每個文件的滾動大小
a2.sinks.k2.hdfs.rollSize = 134217700
#文件的滾動與Event數(shù)量無關(guān)
a2.sinks.k2.hdfs.rollCount = 0
#最小副本數(shù)
a2.sinks.k2.hdfs.minBlockReplicas = 1
# Use a channel which buffers events in memory
a2.channels.c2.type = memory
a2.channels.c2.capacity = 1000
a2.channels.c2.transactionCapacity = 100
# Bind the source and sink to the channel
a2.sources.r2.channels = c2
a2.sinks.k2.channel = c2
執(zhí)行監(jiān)控配置
/opt/module/flume1.8.0/bin/flume-ng agent \
--conf /opt/module/flume1.8.0/conf/ \
--name a2 \
--conf-file /opt/module/flume1.8.0/jobconf/flume-hdfs.conf
然后在/opt下對zouzou這個文件進(jìn)行編輯,編輯的內(nèi)容就會存到hdfs中:
[root@bigdata113 soft]# hdfs dfs -cat /flume/20190621/11/*
hello world
hdfs上出現(xiàn)了按年月日時 分區(qū)的目錄:
image.png
案例三:實時讀取目錄文件到HDFS
目標(biāo):使用flume監(jiān)聽整個目錄的文件
分步實現(xiàn):
1)創(chuàng)建配置文件flume-dir.conf
#1 Agent
a3.sources = r3
a3.sinks = k3
a3.channels = c3
#2 source
a3.sources.r3.type = spooldir
a3.sources.r3.spoolDir = /opt/module/flume1.8.0/upload
a3.sources.r3.fileSuffix = .COMPLETED
a3.sources.r3.fileHeader = true
#忽略所有以.tmp結(jié)尾的文件,不上傳
a3.sources.r3.ignorePattern = ([^ ]*\.tmp)
# 3 sink
a3.sinks.k3.type = hdfs
a3.sinks.k3.hdfs.path = hdfs://bigdata111:9000/flume/%H
#上傳文件的前綴
a3.sinks.k3.hdfs.filePrefix = upload-
#是否按照時間滾動文件夾
a3.sinks.k3.hdfs.round = true
#多少時間單位創(chuàng)建一個新的文件夾
a3.sinks.k3.hdfs.roundValue = 1
#重新定義時間單位
a3.sinks.k3.hdfs.roundUnit = hour
#是否使用本地時間戳
a3.sinks.k3.hdfs.useLocalTimeStamp = true
#積攢多少個Event才flush到HDFS一次
a3.sinks.k3.hdfs.batchSize = 100
#設(shè)置文件類型献联,可支持壓縮
a3.sinks.k3.hdfs.fileType = DataStream
#多久生成一個新的文件
a3.sinks.k3.hdfs.rollInterval = 600
#設(shè)置每個文件的滾動大小大概是128M
a3.sinks.k3.hdfs.rollSize = 134217700
#文件的滾動與Event數(shù)量無關(guān)
a3.sinks.k3.hdfs.rollCount = 0
#最小副本數(shù)
a3.sinks.k3.hdfs.minBlockReplicas = 1
# Use a channel which buffers events in memory
a3.channels.c3.type = memory
a3.channels.c3.capacity = 1000
a3.channels.c3.transactionCapacity = 100
# Bind the source and sink to the channel
a3.sources.r3.channels = c3
a3.sinks.k3.channel = c3
2)執(zhí)行測試:執(zhí)行如下腳本后,請向upload文件夾中添加文件試試
/opt/module/flume1.8.0/bin/flume-ng agent \
--conf /opt/module/flume1.8.0/conf/ \
--name a3 \
--conf-file /opt/module/flume1.8.0/jobconf/flume-dir.conf
[root@bigdata112 upload]# vi A
woshi A
aa
[root@bigdata112 upload]# ll
total 4
-rw-r--r--. 1 root root 19 Jun 21 12:32 A.COMPLETED
[root@bigdata112 upload]# hdfs dfs -cat /flume/12/*
woshi A
aa
image.png
尖叫提示: 在使用Spooling Directory Source時
- 不要在監(jiān)控目錄中創(chuàng)建并持續(xù)修改文件
- 上傳完成的文件會以.COMPLETED結(jié)尾
- 被監(jiān)控文件夾每500毫秒掃描一次文件變動