51cto趙強HADOOP學習(七)

利用ZK實現(xiàn)Hadoop的HA

利用ZooKeeper實現(xiàn)Hadoop的高可用特性

image.png

配置信息

鏈接:https://pan.baidu.com/s/1geUjpSn 密碼:7yoc

一、安裝JDK(所有)

#mkdir tools
#mkdir training
#cd tools
#tar -zxvf jdk-8u144-linux-x64.tar.gz -C ~/training/
# cd ~/training/
# vi ~/.bash_profile
JAVA_HOME=/root/training/jdk1.8.0_144
export JAVA_HOME

PATH=$JAVA_HOME/bin:$PATH
export PATH

# source ~/.bash_profile
# vi /etc/hosts
192.168.56.11 hadoop11
192.168.56.12 hadoop12
192.168.56.13 hadoop13
192.168.56.21 hadoop21
192.168.56.22 hadoop22
192.168.56.23 hadoop23
192.168.56.24 hadoop24

hadoop11:

#cd
# ssh-keygen -t rsa

# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24

hadoop12:

#cd
# ssh-keygen -t rsa
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24

hadoop13:

#cd
# ssh-keygen -t rsa
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24

hadoop21:

#cd 
#ssh-keygen -t rsa
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24

hadoop22:

#cd 
#ssh-keygen -t rsa
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24

hadoop23:

#cd 
#ssh-keygen -t rsa
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24

hadoop24:

#cd 
#ssh-keygen -t rsa
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop11
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop12
# ssh-copy-id -i .ssh/id_rsa.pub root@hadoop13
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop21
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop22
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop23
#ssh-copy-id -i .ssh/id_rsa.pub root@hadoop24

hadoop11:

#pwd
/root
#cd tools
#tar -zxvf zookeeper-3.4.6.tar.gz -C ~/training
#cd ~/training
# cd zookeeper-3.4.6
#vi ~/.bash_profile
ZOOKEEPER_HOME=/root/training/zookeeper-3.4.6
export ZOOKEEPER_HOME

PATH=$ZOOKEEPER_HOME/bin:$PATH
export PATH
#source ~/.bash_profile
#mkdir data
#cd conf
#cp zoo_sample.cfg zoo.cfg
#vi zoo.cfg
dataDir=/root/training/zookeeper-3.4.6/data
server.1=hadoop11:2888:3888
server.2=hadoop12:2888:3888
server.3=hadoop13:2888:3888
#cd ../data
#echo 1 > myid
#cd ../..
#scp -r zookeeper-3.4.6/ root@hadoop12:/root/training/
#scp -r zookeeper-3.4.6/ root@hadoop13:/root/training/

hadoop12:

#cd training/zookeeper-3.4.6/data/
#echo 2 > myid
#cd ..
#vi ~/.bash_profile
ZOOKEEPER_HOME=/root/training/zookeeper-3.4.6
export ZOOKEEPER_HOME

PATH=$ZOOKEEPER_HOME/bin:$PATH
export PATH
#source ~/.bash_profile

hadoop13:

#cd training/zookeeper-3.4.6/data/
#echo 3 > myid
#cd ..
#vi ~/.bash_profile
ZOOKEEPER_HOME=/root/training/zookeeper-3.4.6
export ZOOKEEPER_HOME

PATH=$ZOOKEEPER_HOME/bin:$PATH
export PATH
#source ~/.bash_profile

hadoop21:

#cd tools
# tar -zxvf hadoop-2.4.1.tar.gz -C ~/training/
#cd ~/training
# cd hadoop-2.4.1/
# vi ~/.bash_profile
HADOOP_HOME=/root/training/hadoop-2.4.1
export HADOOP_HOME

PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
export PATH
#source ~/.bash_profile
#cd etc/hadoop/
#vi hadoop-env.sh
export JAVA_HOME=/root/training/jdk1.8.0_144
#mkdir ~/training/hadoop-2.4.1/tmm
#vi core-site.xml
<!-- 指定hdfs的nameservice為ns1 -->
<property>
    <name>fs.defaultFS</name>
    <value>hdfs://ns1</value>
</property>
<!-- 指定hadoop臨時目錄 -->
<property>
    <name>hadoop.tmp.dir</name>
    <value>/root/training/hadoop-2.4.1/tmm</value>
</property>
<!-- 指定zookeeper地址 -->
<property>
    <name>ha.zookeeper.quorum</name>                
        <value>hadoop11:2181,hadoop12:2181,hadoop13:2181</value>
</property>
#vi hdfs-site.xml
<configuration>                 
        <!-- 指定hdfs的nameservice為ns1,需要和core-site.xml中的保持一致 -->
        <property>
                <name>dfs.nameservices</name>
                <value>ns1</value>
        </property>

        <!-- ns1下面有兩個NameNode,分別是nn1,nn2 -->
        <property>
                <name>dfs.ha.namenodes.ns1</name>                         
                <value>nn1,nn2</value>
        </property>

        <!--nn1的RPC通信地址 -->
        <property>
                <name>dfs.namenode.rpc-address.ns1.nn1</name>
                <value>hadoop21:9000</value>
        </property>
        <!-- nn1的http通信地址 -->
        <property>                              
                <name>dfs.namenode.http-address.ns1.nn1</name>
                <value>hadoop21:50070</value>
        </property>

        <!-- nn2的RPC通信地址 -->
        <property>                              
                <name>dfs.namenode.rpc-address.ns1.nn2</name>
                <value>hadoop24:9000</value>
        </property>
        <!-- nn2的http通信地址 -->              
        <property>
                <name>dfs.namenode.http-address.ns1.nn2</name>
                <value>hadoop24:50070</value>
        </property>

        <!-- 指定NameNode的元數(shù)據(jù)在JournalNode上的存放位置 -->
        <property>
                <name>dfs.namenode.shared.edits.dir</name>
                <value>qjournal://hadoop21:8485;hadoop24:8485/ns1</value>
        </property>
        <!-- 指定JournalNode在本地磁盤存放數(shù)據(jù)的位置 -->
        <property>                      
                <name>dfs.journalnode.edits.dir</name>
                <value>/root/training/hadoop-2.4.1/journal</value>
        </property>

        <!--開啟NameNode失敗自動切換--> 
        <property>
                <name>dfs.ha.automatic-failover.enabled</name>
                <value>true</value>
        </property>
        <!--配置失敗自動切換實現(xiàn)方式--> 
        <property>
                <name>dfs.client.failover.proxy.provider.ns1</name>
                <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
        </property>
        <!--配置隔離機制方法,多個機制用換行分割,即每個機制佔用一行-->
        <property>
                <name>dfs.ha.fencing.methods</name>
                <value> 
                        sshfence
                        shell(/bin/true)
                </value>
        </property>
        <!--使用sshfence隔離機制時需要ssh免登陸 -->
        <property>
                <name>dfs.ha.fencing.ssh.private-key-files</name>
                <value>/root/.ssh/id_rsa</value>
        </property>
        <!--配置sshfence隔離機制超時時間 -->
        <property>
                <name>dfs.ha.fencing.ssh.connect-timeout</name>
                <value>30000</value>
        </property>
</configuration>
#cp  mapred-site.xml.template mapred-site.xml
#vi mapred-site.xml
<configuration>
        <property>
                <name>mapreduce.framework.name</name>
                <value>yarn</value>
        </property>
</configuration>
#vi yarn-site.xml
<configuration>
        <!--開啟RM高可靠-->
        <property>
                <name>yarn.resourcemanager.ha.enabled</name>
                <value>true</value>
        </property>
        <!--指定RM的cluster id-->
        <property>
                <name>yarn.resourcemanager.cluster-id</name>
                <value>yrc</value>
        </property>
        <!--指定RM的名字--> 
        <property>
                <name>yarn.resourcemanager.ha.rm-ids</name>
                <value>rm1,rm2</value>
        </property>
        <!--分別指定RM的地址--> 
        <property>
                <name>yarn.resourcemanager.hostname.rm1</name>
                <value>hadoop21</value>
        </property>
        <property>
                <name>yarn.resourcemanager.hostname.rm2</name>
                <value>hadoop24</value>
        </property>
        <!--指定zk集群地址--> 
        <property>
                <name>yarn.resourcemanager.zk-address</name>
                <value>hadoop11:2181,hadoop12:2181,hadoop13:2181</value>
        </property>
        <property>
                <name>yarn.nodemanager.aux-services</name>
                <value>mapreduce_shuffle</value>
        </property>
</configuration>
#vi slaves
hadoop22
hadoop23
#cd ../../..
#scp -r hadoop-2.4.1/ root@hadoop22:/root/training/
#scp -r hadoop-2.4.1/ root@hadoop23:/root/training/
#scp -r hadoop-2.4.1/ root@hadoop24:/root/training/

hadoop22:

#vi .bash_profile
HADOOP_HOME=/root/training/hadoop-2.4.1
export HADOOP_HOME

PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
export PATH
#source .bash_profile

hadoop23:

#vi .bash_profile
HADOOP_HOME=/root/training/hadoop-2.4.1
export HADOOP_HOME

PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
export PATH
#source .bash_profile

hadoop24:

#vi .bash_profile
HADOOP_HOME=/root/training/hadoop-2.4.1
export HADOOP_HOME

PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
export PATH
#source .bash_profile

啟動zookeeper集群

#zkServer.sh start

hadoop21:

#hadoop-daemon.sh start journalnode   //hadoop21,24
#hdfs namenode -format
#pwd
/root/training
#scp -r ~/training/hadoop-2.4.1/tmm root@hadoop24:/root/training/
#hdfs zkfc -formatZK
#start-all.sh
?著作權(quán)歸作者所有,轉(zhuǎn)載或內(nèi)容合作請聯(lián)系作者
  • 序言:七十年代末击费,一起剝皮案震驚了整個濱河市,隨后出現(xiàn)的幾起案子谆棱,更是在濱河造成了極大的恐慌批幌,老刑警劉巖,帶你破解...
    沈念sama閱讀 222,729評論 6 517
  • 序言:濱河連續(xù)發(fā)生了三起死亡事件皆警,死亡現(xiàn)場離奇詭異截粗,居然都是意外死亡,警方通過查閱死者的電腦和手機意推,發(fā)現(xiàn)死者居然都...
    沈念sama閱讀 95,226評論 3 399
  • 文/潘曉璐 我一進店門菊值,熙熙樓的掌柜王于貴愁眉苦臉地迎上來,“玉大人昵宇,你說我怎么就攤上這事儿子。” “怎么了蒋譬?”我有些...
    開封第一講書人閱讀 169,461評論 0 362
  • 文/不壞的土叔 我叫張陵犯助,是天一觀的道長儡毕。 經(jīng)常有香客問我,道長雷恃,這世上最難降的妖魔是什么费坊? 我笑而不...
    開封第一講書人閱讀 60,135評論 1 300
  • 正文 為了忘掉前任附井,我火速辦了婚禮,結(jié)果婚禮上把跨,老公的妹妹穿的比我還像新娘沼死。我一直安慰自己,他們只是感情好耸别,可當(dāng)我...
    茶點故事閱讀 69,130評論 6 398
  • 文/花漫 我一把揭開白布秀姐。 她就那樣靜靜地躺著,像睡著了一般省有。 火紅的嫁衣襯著肌膚如雪锥咸。 梳的紋絲不亂的頭發(fā)上,一...
    開封第一講書人閱讀 52,736評論 1 312
  • 那天,我揣著相機與錄音雪侥,去河邊找鬼精绎。 笑死,一個胖子當(dāng)著我的面吹牛旬牲,可吹牛的內(nèi)容都是我干的搁吓。 我是一名探鬼主播,決...
    沈念sama閱讀 41,179評論 3 422
  • 文/蒼蘭香墨 我猛地睜開眼擂橘,長吁一口氣:“原來是場噩夢啊……” “哼通贞!你這毒婦竟也來了恼五?” 一聲冷哼從身側(cè)響起,我...
    開封第一講書人閱讀 40,124評論 0 277
  • 序言:老撾萬榮一對情侶失蹤茎用,失蹤者是張志新(化名)和其女友劉穎,沒想到半個月后彤避,有當(dāng)?shù)厝嗽跇淞掷锇l(fā)現(xiàn)了一具尸體夯辖,經(jīng)...
    沈念sama閱讀 46,657評論 1 320
  • 正文 獨居荒郊野嶺守林人離奇死亡蒿褂,尸身上長有42處帶血的膿包…… 初始之章·張勛 以下內(nèi)容為張勛視角 年9月15日...
    茶點故事閱讀 38,723評論 3 342
  • 正文 我和宋清朗相戀三年啄栓,在試婚紗的時候發(fā)現(xiàn)自己被綠了。 大學(xué)時的朋友給我發(fā)了我未婚夫和他白月光在一起吃飯的照片近速。...
    茶點故事閱讀 40,872評論 1 353
  • 序言:一個原本活蹦亂跳的男人離奇死亡堪旧,死狀恐怖,靈堂內(nèi)的尸體忽然破棺而出析砸,到底是詐尸還是另有隱情爆袍,我是刑警寧澤,帶...
    沈念sama閱讀 36,533評論 5 351
  • 正文 年R本政府宣布,位于F島的核電站谆扎,受9級特大地震影響堂湖,放射性物質(zhì)發(fā)生泄漏。R本人自食惡果不足惜无蜂,卻給世界環(huán)境...
    茶點故事閱讀 42,213評論 3 336
  • 文/蒙蒙 一斥季、第九天 我趴在偏房一處隱蔽的房頂上張望累驮。 院中可真熱鬧舵揭,春花似錦午绳、人聲如沸。這莊子的主人今日做“春日...
    開封第一講書人閱讀 32,700評論 0 25
  • 文/蒼蘭香墨 我抬頭看了看天上的太陽。三九已至据忘,卻和暖如春妓笙,著一層夾襖步出監(jiān)牢的瞬間,已是汗流浹背。 一陣腳步聲響...
    開封第一講書人閱讀 33,819評論 1 274
  • 我被黑心中介騙來泰國打工辈赋, 沒想到剛下飛機就差點兒被人妖公主榨干…… 1. 我叫王不留膏燕,地道東北人。 一個月前我還...
    沈念sama閱讀 49,304評論 3 379
  • 正文 我出身青樓篷就,卻偏偏與公主長得像竭业,于是被迫代替她去往敵國和親及舍。 傳聞我的和親對象是個殘疾皇子,可洞房花燭夜當(dāng)晚...
    茶點故事閱讀 45,876評論 2 361

推薦閱讀更多精彩內(nèi)容