Add hosts entries
vim /etc/hosts
123.123.123.123 kylin1
123.123.123.124 kylin2
123.123.123.125 kylin3
Change the hostname
# change it from the command line; takes effect after reconnecting, but may be reset on reboot
hostnamectl set-hostname <hostname>
# also edit /etc/hostname so the name survives a reboot
vim /etc/hostname
Generate an SSH key pair
ssh-keygen -t rsa
Copy the public key to the other hosts
ssh-copy-id kylin1
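To let every node reach every other without a password, a minimal sketch (assuming root SSH access and that the key pair above exists on each machine):
# run on each of the three machines
for h in kylin1 kylin2 kylin3; do ssh-copy-id root@$h; done
# verify: this should print the hostname without a password prompt
ssh kylin2 hostname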
Install the JDK [on every machine]
JDK download page: https://www.oracle.com/java/technologies/javase/javase-jdk8-downloads.html
All software goes under /usr/local
Extract the JDK archive
and place it under /usr/local/java
tar -zxvf jdk-8u191-linux-x64.tar.gz
mkdir -p /usr/local/java
mv jdk1.8.0_191/ /usr/local/java/
Edit the environment variables
vim /etc/profile
export JAVA_HOME=/usr/local/java/jdk1.8.0_191
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin
Reload the profile and verify the installation
source /etc/profile
java -version
On success it prints:
java version "1.8.0_191"
Java(TM) SE Runtime Environment (build 1.8.0_191-b12)
Java HotSpot(TM) 64-Bit Server VM (build 25.191-b12, mixed mode)
Install Hadoop
Hadoop download: https://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/core/hadoop-3.1.4/hadoop-3.1.4.tar.gz
Other versions: https://mirrors.tuna.tsinghua.edu.cn/apache/hadoop/core
Hadoop official documentation
Extract the archive and move it to the chosen install directory
tar -zxvf hadoop-3.1.4.tar.gz
mkdir -p /usr/local/hadoop
mv hadoop-3.1.4/ /usr/local/hadoop/
Edit the configuration
The configuration files live in /usr/local/hadoop/hadoop-3.1.4/etc/hadoop/
core-site.xml
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://kylin1:9000</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/usr/local/data</value>
  </property>
</configuration>
hdfs-site.xml
<configuration>
  <!-- dfs.name.dir / dfs.data.dir are the deprecated names of
       dfs.namenode.name.dir / dfs.datanode.data.dir; both still work in 3.1.x -->
  <property>
    <name>dfs.name.dir</name>
    <value>/usr/local/data/namenode</value>
  </property>
  <property>
    <name>dfs.data.dir</name>
    <value>/usr/local/data/datanode</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>2</value>
  </property>
</configuration>
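These directories are created on demand, but pre-creating them avoids permission surprises (a sketch, assuming everything runs as root):
# on kylin1 (NameNode) and on kylin2/kylin3 (DataNodes)
mkdir -p /usr/local/data/namenode /usr/local/data/datanode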
mapred-site.xml
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>
yarn-site.xml
<configuration>
  <!-- Site specific YARN configuration properties -->
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>kylin1</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
</configuration>
hadoop-env.sh
# find the export JAVA_HOME= line and set your own path
export JAVA_HOME=/usr/local/java/jdk1.8.0_191
workers
# cluster worker nodes
kylin2
kylin3
Add environment variables
vim /etc/profile
#java
export JAVA_HOME=/usr/local/java/jdk1.8.0_191
export CLASSPATH=.:$JAVA_HOME/jre/lib/rt.jar:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
#hadoop
export HADOOP_HOME=/usr/local/hadoop/hadoop-3.1.4
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
#path
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
Reload the environment variables
source /etc/profile
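A quick sanity check that the variables took effect:
hadoop version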
Copy the configured hadoop directory to the same location on the other two nodes
scp -r hadoop root@kylin2:/usr/local/
scp -r hadoop root@kylin3:/usr/local/
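The workers also need the same /etc/hosts entries, the JDK, and the profile additions. One hedged way to push the profile, assuming root SSH and that the JDK is already installed there:
scp /etc/profile root@kylin2:/etc/profile
scp /etc/profile root@kylin3:/etc/profile
# then run `source /etc/profile` on each worker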
Format HDFS (first run only; reformatting wipes the existing namespace)
hdfs namenode -format
Go to the Hadoop install root
cd /usr/local/hadoop/hadoop-3.1.4
Run
./sbin/start-dfs.sh
Running as root you may hit errors like:
Starting namenodes on [kylin1]
ERROR: Attempting to operate on hdfs namenode as root
ERROR: but there is no HDFS_NAMENODE_USER defined. Aborting operation.
Starting datanodes
ERROR: Attempting to operate on hdfs datanode as root
ERROR: but there is no HDFS_DATANODE_USER defined. Aborting operation.
Starting secondary namenodes [iZ8vbdysgs8j4ptv1x9tyaZ]
ERROR: Attempting to operate on hdfs secondarynamenode as root
ERROR: but there is no HDFS_SECONDARYNAMENODE_USER defined. Aborting operation.
Add the following variables at the top of start-dfs.sh and stop-dfs.sh under /usr/local/hadoop/hadoop-3.1.4/sbin:
#!/usr/bin/env bash
HDFS_DATANODE_USER=root
HADOOP_SECURE_DN_USER=hdfs
HDFS_NAMENODE_USER=root
HDFS_SECONDARYNAMENODE_USER=root
Likewise, add the following at the top of start-yarn.sh and stop-yarn.sh:
#!/usr/bin/env bash
YARN_RESOURCEMANAGER_USER=root
HADOOP_SECURE_DN_USER=yarn
YARN_NODEMANAGER_USER=root
Start again; output like the following means it worked:
WARNING: HADOOP_SECURE_DN_USER has been replaced by HDFS_DATANODE_SECURE_USER. Using value of HADOOP_SECURE_DN_USER.
Starting namenodes on [kylin1]
Last login: Fri Mar 5 18:02:06 CST 2021 on pts/0
Last failed login: Fri Mar 5 18:03:46 CST 2021 from 172.26.76.123 on ssh:notty
There were 2 failed login attempts since the last successful login.
Starting datanodes
Last login: Fri Mar 5 18:03:57 CST 2021 on pts/0
Starting secondary namenodes [kylin1]
Last login: Fri Mar 5 18:03:59 CST 2021 on pts/0
Start YARN
sbin/start-yarn.sh
# success looks like
Starting resourcemanager
Last login: Fri Mar 5 18:04:02 CST 2021 on pts/0
Starting nodemanagers
Last login: Fri Mar 5 18:07:06 CST 2021 on pts/0
Check with jps on each node: the master (kylin1) should show NameNode, SecondaryNameNode, and ResourceManager; the workers should show DataNode and NodeManager.
After changing the node configuration, refresh with:
bin/hdfs dfsadmin -refreshNodes
bin/yarn rmadmin -refreshNodes
Done. By default the HDFS web UI is at http://kylin1:9870 and the YARN UI at http://kylin1:8088.
Install Hive
Install MySQL first (it backs the Hive metastore)
wget http://repo.mysql.com/mysql-community-release-el7-5.noarch.rpm
# (or, for MySQL 5.7: wget http://repo.mysql.com/mysql57-community-release-el7.rpm)
sudo rpm -ivh mysql-community-release-el7-5.noarch.rpm
sudo yum update
sudo yum install mysql-server
sudo systemctl start mysqld
Hive download pages
Note: pick a release that matches your Hadoop version; see https://hive.apache.org/downloads.html
https://mirrors.bfsu.edu.cn/apache/hive/
As before, create the Hive install directory, extract the archive, and move it into place
mkdir /usr/local/hive
tar -zxvf apache-hive-2.3.4-bin.tar.gz
mv apache-hive-2.3.4-bin /usr/local/hive/hive-2.3.4
Add environment variables
vim /etc/profile
#hive
export HIVE_HOME=/usr/local/hive/hive-2.3.4
#path
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HIVE_HOME/bin
source /etc/profile
Edit the configuration file; the template ships with every default property, so change just the ones below
cd hive-2.3.4/conf
mv hive-default.xml.template hive-site.xml
vim hive-site.xml
<property>
  <name>javax.jdo.option.ConnectionURL</name>
  <value>jdbc:mysql://kylin1:3306/hive_metadata?createDatabaseIfNotExist=true</value>
  <description>
    JDBC connect string for a JDBC metastore.
    To use SSL to encrypt/authenticate the connection, provide database-specific SSL flag in the connection URL.
    For example, jdbc:postgresql://myhost/db?ssl=true for postgres database.
  </description>
</property>
<property>
  <name>javax.jdo.option.ConnectionDriverName</name>
  <value>com.mysql.jdbc.Driver</value>
  <description>Driver class name for a JDBC metastore</description>
</property>
<property>
  <name>javax.jdo.option.ConnectionUserName</name>
  <value>hive</value>
  <description>Username to use against metastore database</description>
</property>
<property>
  <name>javax.jdo.option.ConnectionPassword</name>
  <value>hive</value>
  <description>password to use against metastore database</description>
</property>
<property>
  <name>hive.querylog.location</name>
  <value>/usr/local/hive/hive-2.3.4/tmp/hadoop</value>
  <description>Location of Hive run time structured log file</description>
</property>
<property>
  <name>hive.server2.logging.operation.log.location</name>
  <value>/usr/local/hive/hive-2.3.4/tmp/hadoop/operation_logs</value>
  <description>Top level directory where operation logs are stored if logging functionality is enabled</description>
</property>
<property>
  <name>hive.exec.local.scratchdir</name>
  <value>/usr/local/hive/hive-2.3.4/tmp/hadoop</value>
  <description>Local scratch space for Hive jobs</description>
</property>
<property>
  <name>hive.downloaded.resources.dir</name>
  <value>/usr/local/hive/hive-2.3.4/tmp/${hive.session.id}_resources</value>
  <description>Temporary local directory for added resources in the remote file system.</description>
</property>
<property>
  <name>hive.metastore.schema.verification</name>
  <value>false</value>
  <description>
    Enforce metastore schema version consistency.
    True: Verify that version information stored in is compatible with one from Hive jars. Also disable automatic
    schema migration attempt. Users are required to manually migrate schema after Hive upgrade which ensures
    proper metastore schema migration. (Default)
    False: Warn if the version information stored in metastore doesn't match with one from in Hive jars.
  </description>
</property>
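The scratch and log directories configured above are not always created automatically; pre-creating them is harmless (a sketch):
mkdir -p /usr/local/hive/hive-2.3.4/tmp/hadoop/operation_logs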
Link: https://pan.baidu.com/s/1K4O7K00khqlo9yi6VVS3jA  password: kkpp
Copy the downloaded mysql-connector-java.jar into /usr/local/hive/hive-2.3.4/lib/
Enter MySQL
mysql
create database if not exists hive_metadata;
grant all privileges on hive_metadata.* to 'hive'@'%' identified by 'hive';
grant all privileges on hive_metadata.* to 'hive'@'localhost' identified by 'hive';
grant all privileges on hive_metadata.* to 'hive'@'kylin1' identified by 'hive';
flush privileges;
use hive_metadata;
exit;
Initialize the metastore schema
schematool -dbType mysql -initSchema
You may hit an error here (typically a java.lang.NoSuchMethodError on com.google.common.base.Preconditions.checkArgument).
Cause:
Hadoop and Hive ship different versions of guava.jar.
The two copies live in:
- /usr/local/hive/hive-2.3.4/lib
- /usr/local/hadoop/hadoop-3.1.4/share/hadoop/common/lib
Fix:
Delete the lower-version jar and copy the higher-version one into its place.
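For example (a sketch; the exact guava version numbers depend on your downloads, so check both lib directories first):
rm /usr/local/hive/hive-2.3.4/lib/guava-14.0.1.jar
cp /usr/local/hadoop/hadoop-3.1.4/share/hadoop/common/lib/guava-27.0-jre.jar /usr/local/hive/hive-2.3.4/lib/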
Test Hive
# create a txt file with a few rows to load into Hive shortly
vim /usr/local/src/users.txt
1,浙江工商大學(xué)
2,杭州
3,I love
4,ZJGSU
5,加油哦
Start hive; a "hive>" prompt means it is working.
# create the users table; row format delimited fields terminated by ',' tells Hive
# the file we load next is comma-separated, which is why users.txt uses commas between fields
create table users(id int, name string) row format delimited fields terminated by ',';
# 導(dǎo)數(shù)據(jù)
load data local inpath '/usr/local/src/users.txt' into table users;
#查詢
select * from users;
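If the load worked, the query returns the five rows:
1  浙江工商大學(xué)
2  杭州
3  I love
4  ZJGSU
5  加油哦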
OK, Hive is done.
ZooKeeper
Installer package
Link: https://pan.baidu.com/s/1wkBVwD6qh_WLi8zPs5RbEg  password: vo2j
Official site: https://zookeeper.apache.org/
Create the zookeeper install directory under /usr/local
mkdir zookeeper
tar -zxvf zookeeper-3.4.10.tar.gz
mv zookeeper-3.4.10 /usr/local/zookeeper/
Configure the ZooKeeper environment variables in /etc/profile
#zookeeper
export ZOOKEEPER_HOME=/usr/local/zookeeper/zookeeper-3.4.10
#path
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HIVE_HOME/bin:$ZOOKEEPER_HOME/bin
source /etc/profile
Edit the configuration file
cd zookeeper-3.4.10/conf
mv zoo_sample.cfg zoo.cfg
vim zoo.cfg
dataDir=/usr/local/zookeeper/zookeeper-3.4.10/data
server.0=kylin1:2888:3888
server.1=kylin2:2888:3888
server.2=kylin3:2888:3888
Create the data directory and the myid file
cd ..
mkdir data
cd data
vim myid
0
Copy the configured zookeeper directory to both worker nodes
scp -r zookeeper root@kylin2:/usr/local/
scp -r zookeeper root@kylin3:/usr/local/
# on the two workers, change the 0 in myid to 1 and 2 respectively
# and add the zookeeper environment variables to /etc/profile there as in step 1, then source it
Start ZooKeeper
# on every machine
zkServer.sh start
Expected result: one of the three nodes is the leader and the other two are followers.
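To see which role each node took:
zkServer.sh status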
jps
# check that all three nodes show a QuorumPeerMain process
ZooKeeper setup complete.
Kafka
Kafka is written in Scala and Java, so install and configure Scala first.
Package link: https://pan.baidu.com/s/1iz6AbKr7CP3CjsqaWaZo_g  password: 8l4u
Extract it under /usr/local/scala
Edit the environment variables
vim /etc/profile
#scala
export SCALA_HOME=/usr/local/scala/scala-2.11.8
#path
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HIVE_HOME/bin:$ZOOKEEPER_HOME/bin:$SCALA_HOME/bin
source /etc/profile
Verify
scala -version
Scala code runner version 2.11.8 -- Copyright 2002-2016, LAMP/EPFL
Then repeat these steps on the other two nodes.
Create the Kafka install directory /usr/local/kafka
Link: https://pan.baidu.com/s/1Xb2plF4GVNCq9csDepH5Jg  password: wr7a
Extract the archive and move it to /usr/local/kafka
Environment variables
#kafka
export KAFKA_HOME=/usr/local/kafka/kafka-2.1.0
#path
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HIVE_HOME/bin:$ZOOKEEPER_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin
Edit the config file
vim kafka-2.1.0/config/server.properties
broker.id=0
listeners=PLAINTEXT://kylin1:9092
advertised.listeners=PLAINTEXT://kylin1:9092
zookeeper.connect=kylin1:2181,kylin2:2181,kylin3:2181
Copy the configured kafka directory from kylin1 to the other two nodes
scp -r kafka root@kylin2:/usr/local/
scp -r kafka root@kylin3:/usr/local/
# on the other two nodes, server.properties needs a few changes:
# broker.id: change to 1 and 2 respectively
# listeners: change the host to the node's own, i.e. PLAINTEXT://kylin2:9092 and PLAINTEXT://kylin3:9092
# advertised.listeners: same change, PLAINTEXT://kylin2:9092 and PLAINTEXT://kylin3:9092
# zookeeper.connect: leave as is
# and don't forget the kafka environment variables on those nodes
# start Kafka on all three nodes
[root@master local]# cd kafka/kafka-2.1.0/
[root@master kafka-2.1.0]# nohup kafka-server-start.sh /usr/local/kafka/kafka-2.1.0/config/server.properties &
# create the topic TestTopic on the master node
[root@master kafka-2.1.0]# kafka-topics.sh --zookeeper kylin1:2181,kylin2:2181,kylin3:2181 --topic TestTopic --replication-factor 1 --partitions 1 --create
# start a producer on the master node
[root@master kafka-2.1.0]# kafka-console-producer.sh --broker-list kylin1:9092,kylin2:9092,kylin3:9092 --topic TestTopic
# start a consumer on each of the other two nodes
[root@slave1 kafka-2.1.0]# kafka-console-consumer.sh --bootstrap-server kylin2:9092 --topic TestTopic --from-beginning
[root@slave2 kafka-2.1.0]# kafka-console-consumer.sh --bootstrap-server kylin3:9092 --topic TestTopic --from-beginning
# type anything at the producer prompt on the master node:
> hello world
# the same line shows up on both consumer nodes, i.e. the message was consumed
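You can also confirm the topic's replica placement (for Kafka 2.1 the --zookeeper form of kafka-topics.sh is still the current one):
kafka-topics.sh --zookeeper kylin1:2181,kylin2:2181,kylin3:2181 --topic TestTopic --describe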
Kafka done.
HBase
Package link: https://pan.baidu.com/s/1IVkmzyAqd9zFSW_Cts7t2Q  password: 7io2
Docs: https://www.w3cschool.cn/hbase_doc/hbase_doc-m3y62k51.html
As before, create the hbase install directory under /usr/local, extract the archive, and move it there
# environment variables
vim /etc/profile
#hbase
export HBASE_HOME=/usr/local/hbase/hbase-2.1.1
#path
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HIVE_HOME/bin:$ZOOKEEPER_HOME/bin:$HBASE_HOME/bin
source /etc/profile
Edit the config files
cd hbase-2.1.1/conf
vim hbase-env.sh
export JAVA_HOME=/usr/local/java/jdk1.8.0_191
export HBASE_LOG_DIR=${HBASE_HOME}/logs
export HBASE_MANAGES_ZK=false
vim hbase-site.xml
<configuration>
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://kylin1:9000/hbase</value>
  </property>
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>kylin1,kylin2,kylin3</value>
  </property>
  <property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>/usr/local/zookeeper/zookeeper-3.4.10/data</value>
  </property>
  <property>
    <name>hbase.tmp.dir</name>
    <value>/usr/local/hbase/data/tmp</value>
  </property>
  <property>
    <name>hbase.master</name>
    <value>kylin1:60000</value>
  </property>
  <property>
    <name>hbase.master.info.port</name>
    <value>16010</value>
  </property>
  <property>
    <name>hbase.regionserver.info.port</name>
    <value>16030</value>
  </property>
</configuration>
vim regionservers
kylin2
kylin3
Copy the files to the other nodes
# copy the whole configured hbase directory over
cd ../../..
scp -r hbase root@kylin2:/usr/local/
scp -r hbase root@kylin3:/usr/local/
# don't forget to add the environment variables to /etc/profile on the other two nodes and source it!
# on every node, manually create /usr/local/hbase/data/tmp (the hbase.tmp.dir value above) for temporary files, as shown below.
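For example, on each node:
mkdir -p /usr/local/hbase/data/tmp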
Note: ZooKeeper and Hadoop must already be running before you start HBase.
Start HBase
cd hbase/hbase-2.1.1
bin/start-hbase.sh
jps
# expected: HMaster on the master node, HRegionServer on the worker nodes
After startup the HMaster may be missing.
If so, run
cp $HBASE_HOME/lib/client-facing-thirdparty/htrace-core-3.1.0-incubating.jar $HBASE_HOME/lib/
then restart HBase and it should come up.
During the restart, ./bin/stop-hbase.sh may leave the HRegionServers running.
In that case stop them on each worker with
./bin/hbase-daemon.sh stop regionserver
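A quick functional check from the master, assuming all daemons are up:
hbase shell
status
# should report 1 active master and 2 servers
exit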
HBase done.
Kylin
Official docs: http://kylin.apache.org/cn/docs/install/index.html
Create the kylin install directory under /usr/local, extract the archive, and move it to /usr/local/kylin
Add environment variables
vim /etc/profile
#kylin
export KYLIN_HOME=/usr/local/kylin/kylin-3.1.1
#path
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HIVE_HOME/bin:$ZOOKEEPER_HOME/bin:$HBASE_HOME/bin:$KYLIN_HOME/bin
source /etc/profile
Enter the kylin-3.1.1 directory and run the dependency check
./bin/check-env.sh
If every check passes, continue.
啟動kylin
./bin/kylin.sh start
If it fails to start with a classpath error:
on CDH environments you may need to add /opt/cloudera/parcels/CDH/lib/hbase/lib/* to the classpath inside $HBASE_HOME/bin/hbase.
Source: http://92072234.wiz03.com/share/s/2i1O8Q1L1k042IDoOy3h7BgH2K4G6J2SoQv42Xc4b01xpCrj
Error: KeeperErrorCode = NoNode for /hbase/master
Fix: https://www.cnblogs.com/xyzai/p/12695116.html
Once the startup message appears, Kylin is running; by default the web UI is at http://kylin1:7070/kylin (login ADMIN / KYLIN).
References:
https://blog.csdn.net/pig2guang/article/details/85313410
https://blog.csdn.net/weixin_40521823/article/details/86666139
https://blog.csdn.net/k393393/article/details/92078626