下載地址:
Zookeeper:
http://mirror.bit.edu.cn/apache/zookeeper/current/
Scala:
http://www.scala-lang.org/download/2.11.8.html
Kafka:
http://kafka.apache.org/downloads
一.Zookeeper部署
1.下載解壓zookeeper-3.4.6.tar.gz
[root@hadoop001 software]# tar -xvf zookeeper-3.4.6.tar.gz
[root@hadoop001 software]# mv zookeeper-3.4.6 zookeeper
[root@hadoop001 software]#
[root@hadoop001 software]# chown -R root:root zookeeper
2.修改配置
[root@hadoop001 software]# cd zookeeper/conf
[root@hadoop001 conf]# ll
total 12
-rw-rw-r--. 1 root root  535 Feb 20  2014 configuration.xsl
-rw-rw-r--. 1 root root 2161 Feb 20  2014 log4j.properties
-rw-rw-r--. 1 root root  922 Feb 20  2014 zoo_sample.cfg
[root@hadoop001 conf]# cp zoo_sample.cfg zoo.cfg
[root@hadoop001 conf]# vi zoo.cfg
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/opt/software/zookeeper/data
# the port at which the clients will connect
clientPort=2181
# the maximum number of client connections.
# increase this if you need to handle more clients
#maxClientCnxns=60
#
# Be sure to read the maintenance section of the
# administrator guide before turning on autopurge.
#
# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
#
# The number of snapshots to retain in dataDir
#autopurge.snapRetainCount=3
# Purge task interval in hours
# Set to "0" to disable auto purge feature
#autopurge.purgeInterval=1
server.1=hadoop001:2888:3888
server.2=hadoop002:2888:3888
server.3=hadoop003:2888:3888
~
"zoo.cfg" 36L, 1028C written
[root@hadoop001 conf]# cd ../
[root@hadoop001 zookeeper]# mkdir data
[root@hadoop001 zookeeper]# touch data/myid
[root@hadoop001 zookeeper]# echo 1 > data/myid
[root@hadoop001 zookeeper]#
3.hadoop002/003,也修改配置,就如下不同
[root@hadoop001 software]# scp -r zookeeper 192.168.137.141:/opt/software/
[root@hadoop001 software]# scp -r zookeeper 192.168.137.142:/opt/software/
[root@hadoop002 zookeeper]# echo 2 > data/myid
[root@hadoop003 zookeeper]# echo 3 > data/myid
###切記不可寫(xiě)成 echo 3>data/myid(省略空格):3> 會(huì)被 shell 解析為重定向文件描述符 3,導(dǎo)致無(wú)法將 3 寫(xiě)入 myid 文件;務(wù)必保留 > 前後的空格
4.啟動(dòng)Zookeeper集群
[root@hadoop001 bin]# ./zkServer.sh start
[root@hadoop002 bin]# ./zkServer.sh start
[root@hadoop003 bin]# ./zkServer.sh start
5.查看Zookeeper狀態(tài)
[root@hadoop001 bin]# ./zkServer.sh status
JMX enabled by default
Using config: /opt/software/zookeeper/bin/../conf/zoo.cfg
Mode: follower
[root@hadoop002 bin]# ./zkServer.sh status
JMX enabled by default
Using config: /opt/software/zookeeper/bin/../conf/zoo.cfg
Mode: leader
[root@hadoop002 bin]#
[root@hadoop003 bin]# ./zkServer.sh status
JMX enabled by default
Using config: /opt/software/zookeeper/bin/../conf/zoo.cfg
Mode: follower
[root@hadoop003 bin]#
6.進(jìn)入客戶端
[root@hadoop001 bin]# ./zkCli.sh
[zk: localhost:2181(CONNECTED) 0] ls /
[zookeeper, yarn-leader-election, hadoop-ha, rmstore]
[zk: localhost:2181(CONNECTED) 1]
[zk: localhost:2181(CONNECTED) 1] help
ZooKeeper -server host:port cmd args
stat path [watch]
set path data [version]
ls path [watch]
delquota [-n|-b] path
ls2 path [watch]
setAcl path acl
setquota -n|-b val path
history
redo cmdno
printwatches on|off
delete path [version]
sync path
listquota path
rmr path
get path [watch]
create [-s] [-e] path data acl
addauth scheme auth
quit
getAcl path
close
connect host:port
[zk: localhost:2181(CONNECTED) 2]
二.Kafka部署
1.解壓并配置Scala
[root@hadoop001 software]# tar -xzvf scala-2.11.8.tgz
[root@hadoop001 software]# chown -R root:root scala-2.11.8
[root@hadoop001 software]# ln -s scala-2.11.8 scala
#環(huán)境變量
[root@hadoop001 software]# vi /etc/profile
export SCALA_HOME=/opt/software/scala
export PATH=$SCALA_HOME/bin:$PATH
[root@hadoop001 software]# source /etc/profile
[root@hadoop001 software]# scala
Welcome to Scala 2.11.8 (Java HotSpot(TM) 64-Bit Server VM, Java 1.8.0_45).
Type in expressions for evaluation. Or try :help.
2.下載基于Scala 2.11的kafka版本為0.10.0.1
[root@hadoop001 software]# tar -xzvf kafka_2.11-0.10.0.1.tgz
[root@hadoop001 software]# ln -s kafka_2.11-0.10.0.1 kafka
[root@hadoop001 software]#
3.創(chuàng)建logs目錄和修改server.properties
[root@hadoop001 software]# cd kafka
[root@hadoop001 kafka]# mkdir logs
[root@hadoop001 kafka]# cd config/
[root@hadoop001 config]# vi server.properties
broker.id=1
port=9092
host.name=192.168.137.141
log.dirs=/opt/software/kafka/logs
zookeeper.connect=192.168.137.141:2181,192.168.137.142:2181,192.168.137.143:2181/kafka
4.環(huán)境變量
[root@hadoop001 config]# vi /etc/profile
export KAFKA_HOME=/opt/software/kafka
export PATH=$KAFKA_HOME/bin:$PATH
[root@hadoop001 config]# source /etc/profile
5.另外兩臺(tái)機(jī)器如上操作
6.啟動(dòng)/停止
[root@sht-sgmhadoopdn-01 kafka]# nohup kafka-server-start.sh config/server.properties &
[root@sht-sgmhadoopdn-02 kafka]# nohup kafka-server-start.sh config/server.properties &
[root@sht-sgmhadoopdn-03 kafka]# nohup kafka-server-start.sh config/server.properties &
###停止
bin/kafka-server-stop.sh
---------------------------------------------------------------------------------------------------------------------------------------------
7.模擬實(shí)驗(yàn)1
創(chuàng)建test topic
bin/kafka-topics.sh --create \
--zookeeper 192.168.137.141:2181,192.168.137.142:2181,192.168.137.143:2181/kafka \
--replication-factor 3 --partitions 3 --topic test
在一個(gè)終端,啟動(dòng)Producer,并向我們上面創(chuàng)建的名稱為 test 的Topic中生產(chǎn)消息,執(zhí)行如下腳本:
bin/kafka-console-producer.sh \
--broker-list 192.168.137.141:9092,192.168.137.142:9092,192.168.137.143:9092 --topic test
在另一個(gè)終端,啟動(dòng)Consumer,并訂閱我們上面創(chuàng)建的名稱為 test 的Topic中生產(chǎn)的消息,執(zhí)行如下腳本:
bin/kafka-console-consumer.sh \
--zookeeper 192.168.137.141:2181,192.168.137.142:2181,192.168.137.143:2181/kafka \
--from-beginning --topic test
可以在Producer終端上輸入字符串消息,就可以在Consumer終端上看到消費(fèi)者消費(fèi)的消息內(nèi)容。