# MongoDB version
3.2.16
Machine planning
192.168.0.94 mongo01
192.168.0.97 mongo02
192.168.0.98 mongo03
Role allocation
| host | mongos | config server | shard1 | shard2 | shard3 |
| --- | --- | --- | --- | --- | --- |
| mongo01 | 27017 | 27018 | 27001 | 27002 | 27003 (arbiter) |
| mongo02 | 27017 | 27018 | 27001 | 27002 (arbiter) | 27003 |
| mongo03 | 27017 | 27018 | 27001 (arbiter) | 27002 | 27003 |
Add /etc/hosts entries and set up mutual SSH trust between the three machines
Steps omitted; a minimal sketch follows.
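A possible sketch of this step, assuming the hostnames mongo01/mongo02/mongo03 resolve to the IPs planned above and that passwordless SSH is set up with ssh-copy-id (run on each of the three machines):
cat >> /etc/hosts <<EOF
192.168.0.94 mongo01
192.168.0.97 mongo02
192.168.0.98 mongo03
EOF
ssh-keygen -t rsa           # accept the defaults
ssh-copy-id root@mongo01    # repeat for mongo02 and mongo03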
Disable SELinux and the firewall
vi /etc/selinux/config
Change SELINUX=enforcing to SELINUX=disabled
systemctl stop firewalld.service
systemctl disable firewalld.service
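The SELinux change in /etc/selinux/config only takes effect after a reboot; to switch to permissive mode immediately without rebooting, the following can also be run:
setenforce 0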
Download and install MongoDB
wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-3.2.16.tgz
tar -xzvf mongodb-linux-x86_64-3.2.16.tgz -C /usr/local
cd /usr/local
mv mongodb-linux-x86_64-3.2.16 mongodb
vi /etc/profile
Append at the end:
export MONGODB_HOME=/usr/local/mongodb
export PATH=$MONGODB_HOME/bin:$PATH
source /etc/profile
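To confirm the binaries are now on the PATH, the versions can be checked:
mongod --version
mongo --version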
On each machine create six directories: conf, mongos, config, shard1, shard2, shard3. Because mongos stores no data, it only needs a log directory.
mkdir -p /usr/local/mongodb/conf
mkdir -p /usr/local/mongodb/mongos/log
mkdir -p /usr/local/mongodb/config/data
mkdir -p /usr/local/mongodb/config/log
mkdir -p /usr/local/mongodb/shard1/data
mkdir -p /usr/local/mongodb/shard1/log
mkdir -p /usr/local/mongodb/shard2/data
mkdir -p /usr/local/mongodb/shard2/log
mkdir -p /usr/local/mongodb/shard3/data
mkdir -p /usr/local/mongodb/shard3/log
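Equivalently, the same directories can be created with a single loop (a convenience sketch):
for d in conf mongos/log config/data config/log shard1/data shard1/log shard2/data shard2/log shard3/data shard3/log; do mkdir -p /usr/local/mongodb/$d; done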
Cluster deployment
Config server replica set
1. Deploy the config servers (run on all three machines; they form a replica set)
vi $MONGODB_HOME/conf/config.conf
# config file contents
pidfilepath = /usr/local/mongodb/config/log/configsrv.pid
dbpath = /usr/local/mongodb/config/data
logpath = /usr/local/mongodb/config/log/configsrv.log
logappend = true
bind_ip = 0.0.0.0
port = 27018
fork = true
configsvr = true
# replica set name
replSet=mongo
# maximum number of connections
maxConns=20000
2. Start the config server on all three machines
[root@mongo01 conf]# mongod -f config.conf
about to fork child process, waiting until server is ready for connections.
forked process: 1064
child process started successfully, parent exiting
3. Log in to any one of the nodes and initialize the replica set
[root@mongo01 conf]# mongo --port=27018
# define the replica set configuration document
> config = {
    _id : "mongo",
    members : [
        {_id : 1, host : "mongo01:27018"},
        {_id : 2, host : "mongo02:27018"},
        {_id : 3, host : "mongo03:27018"}
    ]
}
# initialize
> rs.initiate(config)
# _id : "mongo" must match the replSet value in config.conf
# the host of each member is that node's hostname plus port
rs.status() shows one primary and two secondaries:
mongo:SECONDARY> rs.status()
{
"set" : "mongo",
"date" : ISODate("2019-05-23T03:00:58.643Z"),
"myState" : 1,
"term" : NumberLong(1),
"configsvr" : true,
"heartbeatIntervalMillis" : NumberLong(2000),
"members" : [
{
"_id" : 1,
"name" : "mongo01:27018",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 1977,
"optime" : {
"ts" : Timestamp(1558580277, 2),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2019-05-23T02:57:57Z"),
"electionTime" : Timestamp(1558580277, 1),
"electionDate" : ISODate("2019-05-23T02:57:57Z"),
"configVersion" : 1,
"self" : true
},
{
"_id" : 2,
"name" : "mongo02:27018",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 192,
"optime" : {
"ts" : Timestamp(1558580277, 2),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2019-05-23T02:57:57Z"),
"lastHeartbeat" : ISODate("2019-05-23T03:00:57.292Z"),
"lastHeartbeatRecv" : ISODate("2019-05-23T03:00:57.356Z"),
"pingMs" : NumberLong(0),
"syncingTo" : "mongo01:27018",
"configVersion" : 1
},
{
"_id" : 3,
"name" : "mongo03:27018",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 192,
"optime" : {
"ts" : Timestamp(1558580277, 2),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2019-05-23T02:57:57Z"),
"lastHeartbeat" : ISODate("2019-05-23T03:00:57.302Z"),
"lastHeartbeatRecv" : ISODate("2019-05-23T03:00:57.356Z"),
"pingMs" : NumberLong(0),
"syncingTo" : "mongo01:27018",
"configVersion" : 1
}
],
"ok" : 1
}
# Failover test: kill the config server process on the PRIMARY node
[root@mongo001 mongodb]# ps -ef |grep mongo
root 1257 1 2 11:11 ? 00:00:01 bin/mongod -f conf/config.conf
root 1360 1228 0 11:12 pts/0 00:00:00 grep --color=auto mongo
[root@mongo001 mongodb]# kill -9 1257
[root@mongo001 mongodb]# ps -ef |grep mongo
root 1362 1228 0 11:12 pts/0 00:00:00 grep --color=auto mongo
The former PRIMARY's status then shows "stateStr" : "(not reachable/healthy)",
and the two remaining SECONDARY nodes automatically elect a new PRIMARY, leaving the set with one primary and one secondary.
After the config server on mongo01 is started again, that node automatically rejoins as a SECONDARY (a sketch of the restart follows).
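To bring the killed config server back (a sketch; the node should appear as SECONDARY when rs.status() is run again on port 27018):
mongod -f $MONGODB_HOME/conf/config.conf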
Shard replica sets
1. Configure the shard replica sets (all three machines)
Set up the first shard (shard1)
vi $MONGODB_HOME/conf/shard1.conf
# config file contents
pidfilepath = /usr/local/mongodb/shard1/log/shard1.pid
dbpath = /usr/local/mongodb/shard1/data
logpath = /usr/local/mongodb/shard1/log/shard1.log
logappend = true
bind_ip = 0.0.0.0
port = 27001
fork = true
# replica set name
replSet=shard1
shardsvr = true
# maximum number of connections
maxConns=20000
2. Start the shard1 service on all three machines
[root@mongo01 mongodb]# bin/mongod -f conf/shard1.conf
about to fork child process, waiting until server is ready for connections.
forked process: 1303
child process started successfully, parent exiting
3. Log in to any one server and initialize the replica set
[root@mongo01 conf]# mongo --port=27001
# define the replica set configuration; "arbiterOnly": true marks a member as an arbiter. Per the plan above, mongo03 is the arbiter for shard1
> config = {
    _id : "shard1",
    members : [
        {_id : 1, host : "mongo01:27001"},
        {_id : 2, host : "mongo02:27001"},
        {_id : 3, host : "mongo03:27001", arbiterOnly: true}
    ]
}
# initialize the replica set
> rs.initiate(config)
{ "ok" : 1 }
# check shard1 status: mongo01 is primary, mongo02 secondary, mongo03 arbiter:
shard1:SECONDARY> rs.status()
{
"set" : "shard1",
"date" : ISODate("2019-05-23T05:38:59.247Z"),
"myState" : 1,
"term" : NumberLong(1),
"heartbeatIntervalMillis" : NumberLong(2000),
"members" : [
{
"_id" : 1,
"name" : "mongo01:27001",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 6306,
"optime" : {
"ts" : Timestamp(1558589850, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2019-05-23T05:37:30Z"),
"infoMessage" : "could not find member to sync from",
"electionTime" : Timestamp(1558589849, 1),
"electionDate" : ISODate("2019-05-23T05:37:29Z"),
"configVersion" : 1,
"self" : true
},
{
"_id" : 2,
"name" : "mongo02:27001",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 101,
"optime" : {
"ts" : Timestamp(1558589850, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2019-05-23T05:37:30Z"),
"lastHeartbeat" : ISODate("2019-05-23T05:38:57.359Z"),
"lastHeartbeatRecv" : ISODate("2019-05-23T05:38:59.174Z"),
"pingMs" : NumberLong(0),
"syncingTo" : "mongo01:27001",
"configVersion" : 1
},
{
"_id" : 3,
"name" : "mongo03:27001",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER",
"uptime" : 101,
"lastHeartbeat" : ISODate("2019-05-23T05:38:57.371Z"),
"lastHeartbeatRecv" : ISODate("2019-05-23T05:38:55.160Z"),
"pingMs" : NumberLong(0),
"configVersion" : 1
}
],
"ok" : 1
}
# Configure shard2 and shard3 in the same way (a sketch for generating their config files follows)
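One way to generate shard2.conf and shard3.conf from shard1.conf (a convenience sketch; review the generated files before use, and run the mongod commands on all three machines):
cd $MONGODB_HOME/conf
sed -e 's/shard1/shard2/g' -e 's/27001/27002/g' shard1.conf > shard2.conf
sed -e 's/shard1/shard3/g' -e 's/27001/27003/g' shard1.conf > shard3.conf
mongod -f shard2.conf
mongod -f shard3.conf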
shard2:
> config = {
    _id : "shard2",
    members : [
        {_id : 1, host : "mongo01:27002"},
        {_id : 2, host : "mongo02:27002", arbiterOnly: true},
        {_id : 3, host : "mongo03:27002"}
    ]
}
> rs.initiate(config)
{ "ok" : 1 }
# check member states: mongo02 is the arbiter
shard2:PRIMARY> rs.status()
shard3:
> config = {
    _id : "shard3",
    members : [
        {_id : 1, host : "mongo01:27003", arbiterOnly: true},
        {_id : 2, host : "mongo02:27003"},
        {_id : 3, host : "mongo03:27003"}
    ]
}
> rs.initiate(config)
{ "ok" : 1 }
Because mongo01 is planned as the arbiter for shard3, the initialization above has to be run from mongo02 or mongo03.
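For example (assuming the shell is opened on mongo02): connect to port 27003, paste the shard3 config document shown above, then initialize:
mongo --port=27003
> config = { ... }          # the shard3 document from above
> rs.initiate(config)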
Configure the mongos routers (all three machines)
vi mongos.conf
pidfilepath = /usr/local/mongodb/mongos/log/mongos.pid
logpath = /usr/local/mongodb/mongos/log/mongos.log
logappend = true
bind_ip = 0.0.0.0
port = 27017
fork = true
# config servers to connect to; "mongo" is the config server replica set name
configdb = mongo/mongo01:27018,mongo02:27018,mongo03:27018
# maximum number of connections
maxConns=20000
Start the mongos service
[root@mongo01 conf]# mongos -f mongos.conf
about to fork child process, waiting until server is ready for connections.
forked process: 1687
child process started successfully, parent exiting
At this point the config servers, mongos routers, and shard servers are all running, but an application that connects to a mongos cannot use sharding yet: the shards still have to be registered and sharding enabled. Log in to mongos on any machine.
[root@mongo01 mongodb]# mongo --port=27017
mongos> use admin
# register the shard replica sets with the router
mongos> sh.addShard("shard1/mongo01:27001,mongo02:27001,mongo03:27001")
mongos> sh.addShard("shard2/mongo01:27002,mongo02:27002,mongo03:27002")
mongos> sh.addShard("shard3/mongo01:27003,mongo02:27003,mongo03:27003")
# check the sharding status
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5ce63adad79f94274c1e5793")
}
shards:
{ "_id" : "shard1", "host" : "shard1/mongo01:27001,mongo02:27001" }
{ "_id" : "shard2", "host" : "shard2/mongo01:27002,mongo03:27002" }
{ "_id" : "shard3", "host" : "shard3/mongo02:27003,mongo03:27003" }
active mongoses:
"3.2.16" : 3
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
Enable sharding
Set the chunk size. A 1 MB chunk size is used here only to make the experiment easy to observe; otherwise a huge volume of data would have to be inserted before any chunk splits occur.
mongos> use config
switched to db config
mongos> db.settings.save({_id:"chunksize",value:1})
WriteResult({ "nMatched" : 0, "nUpserted" : 1, "nModified" : 0, "_id" : "chunksize" })
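To verify the setting, the value can be read back from the config database:
mongos> db.settings.find({_id:"chunksize"})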
Test: enable sharding for a specific database and a specific collection.
mongos> use admin
switched to db admin
mongos> db.runCommand({enablesharding : "testdb"});
mongos> db.runCommand({shardcollection :"testdb.test1",key:{id:1}})
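The equivalent shell helpers can also be used instead of runCommand:
mongos> sh.enableSharding("testdb")
mongos> sh.shardCollection("testdb.test1", {id: 1})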
mongos> use testdb
mongos> for (var i = 1; i <= 100000; i++) db.test1.save({id:i,"test11":"testvalll"})
WriteResult({ "nInserted" : 1 })
mongod processes on all three machines are receiving writes, which indicates the data is being distributed across the shards.
Check the collection's sharding statistics
db.test1.stats();
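To see how the documents are spread across shard1/shard2/shard3, the following helpers can also be run from mongos (exact counts will vary):
mongos> db.test1.getShardDistribution()
mongos> sh.status()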