Test setup
Host: 192.168.3.110
mongos: 30000, 30001, 30002
config: 40000, 40001, 40002
shard1: 50001, 50002, 50003
shard2: 50004, 50005, 50006
shard3: 50007, 50008, 50009
[root@mysql-slave10 data]# mkdir -p /data/config/1/data
[root@mysql-slave10 data]# mkdir -p /data/config/2/data
[root@mysql-slave10 data]# mkdir -p /data/config/3/data
[root@mysql-slave10 data]# mkdir -p /data/config/1/log
[root@mysql-slave10 data]# mkdir -p /data/config/2/log
[root@mysql-slave10 data]# mkdir -p /data/config/3/log
[root@mysql-slave10 data]# chown -R mongodb:mongodb /data/config/
[root@mysql-slave10 shard]# mkdir -p /data/shard/1_1/{data,log}
[root@mysql-slave10 shard]# mkdir -p /data/shard/1_2/{data,log}
[root@mysql-slave10 shard]# mkdir -p /data/shard/2_1/{data,log}
[root@mysql-slave10 shard]# mkdir -p /data/shard/2_2/{data,log}
[root@mysql-slave10 shard]# mkdir -p /data/shard/3_1/{data,log}
[root@mysql-slave10 shard]# mkdir -p /data/shard/3_2/{data,log}
[root@mysql-slave10 data]# chown -R mongodb:mongodb /data/shard/
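
Note: the mongod startup commands in step 3 also use /data/shard/1, /data/shard/2 and /data/shard/3 for the first member of each replica set. Those directories are not created above, so add them the same way (commands inferred from the dbpath/logpath values below, not part of the original transcript):
mkdir -p /data/shard/{1,2,3}/{data,log}
chown -R mongodb:mongodb /data/shard/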

1) Start the config servers
/usr/local/mongodb/bin/mongod --configsvr --dbpath=/data/config/1/data --port 40000 --logpath=/data/config/1/log/config.log --logappend --fork
/usr/local/mongodb/bin/mongod --configsvr --dbpath=/data/config/2/data --port 40001 --logpath=/data/config/2/log/config.log --logappend --fork
/usr/local/mongodb/bin/mongod --configsvr --dbpath=/data/config/3/data --port 40002 --logpath=/data/config/3/log/config.log --logappend --fork
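
Before starting mongos it is worth confirming that all three config servers answer; a quick check (not part of the original transcript) is to ping each one:
/usr/local/mongodb/bin/mongo 192.168.3.110:40000/admin --eval "printjson(db.adminCommand({ping:1}))"
/usr/local/mongodb/bin/mongo 192.168.3.110:40001/admin --eval "printjson(db.adminCommand({ping:1}))"
/usr/local/mongodb/bin/mongo 192.168.3.110:40002/admin --eval "printjson(db.adminCommand({ping:1}))"
Each command should print { "ok" : 1 }.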
2) Start the mongos routers
[root@mysql-slave10 data]# hostname
mysql-slave10
/usr/local/mongodb/bin/mongos --configdb 192.168.3.110:40000,192.168.3.110:40001,192.168.3.110:40002 --port 30000 --logpath=/data/config/1/log/mongos.log --logappend --fork
/usr/local/mongodb/bin/mongos --configdb 192.168.3.110:40000,192.168.3.110:40001,192.168.3.110:40002 --port 30001 --logpath=/data/config/2/log/mongos.log --logappend --fork
/usr/local/mongodb/bin/mongos --configdb 192.168.3.110:40000,192.168.3.110:40001,192.168.3.110:40002 --port 30002 --logpath=/data/config/3/log/mongos.log --logappend --fork
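
Each mongos reads the cluster metadata from the config servers it was given. A quick sanity check (assumed, not shown in the original transcript) is to ask one of them for the sharding status; at this point the shards list will still be empty:
/usr/local/mongodb/bin/mongo 192.168.3.110:30000 --eval "sh.status()"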
3) Start the shard mongod instances (each shard is a replica set)

shard1:
/usr/local/mongodb/bin/mongod --shardsvr --replSet shard1 --port 50001 --dbpath=/data/shard/1/data --logpath=/data/shard/1/log/shard.log --nojournal --oplogSize 10 --fork --logappend
/usr/local/mongodb/bin/mongod --shardsvr --replSet shard1 --port 50002 --dbpath=/data/shard/1_1/data --logpath=/data/shard/1_1/log/shard.log --nojournal --oplogSize 10 --fork --logappend
/usr/local/mongodb/bin/mongod --shardsvr --replSet shard1 --port 50003 --dbpath=/data/shard/1_2/data --logpath=/data/shard/1_2/log/shard.log --nojournal --oplogSize 10 --fork --logappend

shard2:
/usr/local/mongodb/bin/mongod --shardsvr --replSet shard2 --port 50004 --dbpath=/data/shard/2/data --logpath=/data/shard/2/log/shard.log --nojournal --oplogSize 10 --fork --logappend
/usr/local/mongodb/bin/mongod --shardsvr --replSet shard2 --port 50005 --dbpath=/data/shard/2_1/data --logpath=/data/shard/2_1/log/shard.log --nojournal --oplogSize 10 --fork --logappend
/usr/local/mongodb/bin/mongod --shardsvr --replSet shard2 --port 50006 --dbpath=/data/shard/2_2/data --logpath=/data/shard/2_2/log/shard.log --nojournal --oplogSize 10 --fork --logappend

shard3:
/usr/local/mongodb/bin/mongod --shardsvr --replSet shard3 --port 50007 --dbpath=/data/shard/3/data --logpath=/data/shard/3/log/shard.log --nojournal --oplogSize 10 --fork --logappend
/usr/local/mongodb/bin/mongod --shardsvr --replSet shard3 --port 50008 --dbpath=/data/shard/3_1/data --logpath=/data/shard/3_1/log/shard.log --nojournal --oplogSize 10 --fork --logappend
/usr/local/mongodb/bin/mongod --shardsvr --replSet shard3 --port 50009 --dbpath=/data/shard/3_2/data --logpath=/data/shard/3_2/log/shard.log --nojournal --oplogSize 10 --fork --logappend
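
Since all nine shard mongod processes (plus three config servers and three mongos routers) run on a single host here, it is easy to miss a failed fork. A quick check that every port is listening (commands assumed, not in the original transcript):
ps -ef | grep mongo | grep -v grep
netstat -lntp | grep -E ':(3000[0-2]|4000[0-2]|5000[1-9])'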

4) Initialize the shard replica sets
Log in to any member of each replica set and run the initialization:

/usr/local/mongodb/bin/mongo 192.168.3.110:50001/admin
config={_id:"shard1",members:[
{_id:0,host:"192.168.3.110:50001","priority":2},
{_id:1,host:"192.168.3.110:50002"},
{_id:2,host:"192.168.3.110:50003",arbiterOnly:true}
]
}
> rs.initiate(config)
{ "ok" : 1 }

/usr/local/mongodb/bin/mongo 192.168.3.110:50004/admin
config={_id:"shard2",members:[
{_id:0,host:"192.168.3.110:50006",arbiterOnly:true},
{_id:1,host:"192.168.3.110:50005","priority":2},
{_id:2,host:"192.168.3.110:50004"}
]
}

> rs.initiate(config)
{ "ok" : 1 }

/usr/local/mongodb/bin/mongo 192.168.3.110:50007/admin
config={_id:"shard3",members:[
{_id:0,host:"192.168.3.110:50007"},
{_id:1,host:"192.168.3.110:50008",arbiterOnly:true},
{_id:2,host:"192.168.3.110:50009","priority":2}
]
}
> rs.initiate(config)
{ "ok" : 1 }
5) Add the shards to the cluster
Connect to any mongos, for example:
/usr/local/mongodb/bin/mongo 192.168.3.110:30000
use admin
db.runCommand({addshard:"shard1/192.168.3.110:50002,192.168.3.110:50001,192.168.3.110:50003"});
db.runCommand({addshard:"shard2/192.168.3.110:50004,192.168.3.110:50005,192.168.3.110:50006"});
db.runCommand({addshard:"shard3/192.168.3.110:50007,192.168.3.110:50008,192.168.3.110:50009"});

[mongodb@mysql-slave10 bin]$ /usr/local/mongodb/bin/mongo 192.168.3.110:30000
MongoDB shell version: 3.2.11
connecting to: 192.168.3.110:30000/test
mongos> use admin
switched to db admin
mongos> db.runCommand({addshard:"shard1/192.168.3.110:50002,192.168.3.110:50001,192.168.3.110:50003"});
{ "shardAdded" : "shard1", "ok" : 1 }
mongos> db.runCommand({addshard:"shard2/192.168.3.110:50004,192.168.3.110:50005,192.168.3.110:50006"});
{ "shardAdded" : "shard2", "ok" : 1 }
mongos> db.runCommand({addshard:"shard3/192.168.3.110:50007,192.168.3.110:50008,192.168.3.110:50009"});
{ "shardAdded" : "shard3", "ok" : 1 }
mongos>
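
The addshard command used above is what the sh.addShard() shell helper wraps; the following one-liner (equivalent form, not part of the original transcript) would add shard1 the same way:
mongos> sh.addShard("shard1/192.168.3.110:50001,192.168.3.110:50002,192.168.3.110:50003")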
6) List the shards
mongos> db.runCommand( { listshards : 1 } );
{
    "shards" : [
        {
            "_id" : "shard1",
            "host" : "shard1/192.168.3.110:50001,192.168.3.110:50002"
        },
        {
            "_id" : "shard2",
            "host" : "shard2/192.168.3.110:50004,192.168.3.110:50005"
        },
        {
            "_id" : "shard3",
            "host" : "shard3/192.168.3.110:50007,192.168.3.110:50009"
        }
    ],
    "ok" : 1
}

Test the sharding
mongos> db.runCommand({enablesharding:"testdb"}); # enable sharding on the testdb database
{ "ok" : 1 }
mongos> db.runCommand({shardcollection:"testdb.table1",key:{id:1}}); ## then shard the table1 collection, using the id field as the shard key. Choose a shard key that splits well into chunks, spreads the write load, and supports common queries
{ "collectionsharded" : "testdb.table1", "ok" : 1 }
mongos> use testdb;
switched to db testdb
mongos> for(var i=1;i<=100000;i++) db.table1.save({id:i,"test1":"testval1"});
WriteResult({ "nInserted" : 1 })
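
A note on the shard key comment above: a monotonically increasing key such as id concentrates new inserts on the chunk holding the current maximum, so writes are not spread evenly until the balancer catches up. If write distribution matters more than range queries, MongoDB 3.2 also supports hashed shard keys; a sketch using a hypothetical second collection:
mongos> sh.shardCollection("testdb.table2", { id: "hashed" })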

Check how the data is distributed
db.table1.stats();
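
Besides db.table1.stats(), the shell helper getShardDistribution() gives a per-shard summary of documents and chunks, which is usually the quickest way to see how the collection was split (run through mongos):
mongos> use testdb
mongos> db.table1.getShardDistribution()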
Inspect the sharding metadata
The sharded cluster's metadata is stored in the database named config on the config servers; it can be queried through mongos (after use config) or by connecting to a config server directly:
/usr/local/mongodb/bin/mongo 192.168.3.110:40000
1) Check the current metadata version
mongos> db.getCollection("version").findOne()
2) Check the configured chunk size
mongos> db.settings.find()
3) List the shards that make up the cluster
mongos> db.shards.find()
4) List the collections that have been sharded
mongos> db.collections.find()
5) Show the chunks each sharded collection has been split into and where they live (see the aggregation example after this list)
mongos> db.chunks.find()
6) Show the sharding state of each database
mongos> db.databases.find()
7) List the mongos instances
mongos> db.mongos.find()
8) Print the full sharding status of the cluster
mongos> printShardingStatus();
db.table1.getShardVersion();
sh.status();
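
For instance, to count how many chunks of testdb.table1 sit on each shard, an aggregation over the chunks collection (a sketch, run after use config) looks like this:
db.chunks.aggregate([
    { $match: { ns: "testdb.table1" } },                 // only chunks of this collection
    { $group: { _id: "$shard", chunks: { $sum: 1 } } }   // chunk count per shard
])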

[mongodb@mysql-slave10 bin]$ /usr/local/mongodb/bin/mongo 192.168.3.110:40000
MongoDB shell version: 3.2.11
connecting to: 192.168.3.110:40000/test
Server has startup warnings:
2016-12-12T07:37:38.973-0800 I CONTROL [initandlisten]
2016-12-12T07:37:38.973-0800 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'.
2016-12-12T07:37:38.973-0800 I CONTROL [initandlisten] ** We suggest setting it to 'never'
2016-12-12T07:37:38.973-0800 I CONTROL [initandlisten]
2016-12-12T07:37:38.973-0800 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'.
2016-12-12T07:37:38.973-0800 I CONTROL [initandlisten] ** We suggest setting it to 'never'
2016-12-12T07:37:38.973-0800 I CONTROL [initandlisten]
configsvr> show dbs
config 0.001GB
local 0.000GB
configsvr> use config
switched to db config
configsvr> db.getCollection("version").findOne()
{
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("584ec460f771e03d38bcf2e1")
}
configsvr> db.settings.find()
{ "_id" : "chunksize", "value" : NumberLong(64) }
configsvr> db.shards.find()
{ "_id" : "shard1", "host" : "shard1/192.168.3.110:50001,192.168.3.110:50002" }
{ "_id" : "shard2", "host" : "shard2/192.168.3.110:50004,192.168.3.110:50005" }
{ "_id" : "shard3", "host" : "shard3/192.168.3.110:50007,192.168.3.110:50009" }
configsvr> db.collections.find()
{ "_id" : "testdb.table1", "lastmodEpoch" : ObjectId("584ec8bff771e03d38bcf359"), "lastmod" : ISODate("1970-02-19T17:02:47.296Z"), "dropped" : false, "key" : { "id" : 1 }, "unique" : false }
configsvr> db.chunks.find()
{ "_id" : "testdb.table1-id_MinKey", "lastmod" : Timestamp(2, 0), "lastmodEpoch" : ObjectId("584ec8bff771e03d38bcf359"), "ns" : "testdb.table1", "min" : { "id" : { "$minKey" : 1 } }, "max" : { "id" : 2 }, "shard" : "shard2" }
{ "_id" : "testdb.table1-id_2.0", "lastmod" : Timestamp(3, 0), "lastmodEpoch" : ObjectId("584ec8bff771e03d38bcf359"), "ns" : "testdb.table1", "min" : { "id" : 2 }, "max" : { "id" : 20 }, "shard" : "shard3" }
{ "_id" : "testdb.table1-id_20.0", "lastmod" : Timestamp(3, 1), "lastmodEpoch" : ObjectId("584ec8bff771e03d38bcf359"), "ns" : "testdb.table1", "min" : { "id" : 20 }, "max" : { "id" : { "$maxKey" : 1 } }, "shard" : "shard1" }
configsvr> db.databases.find()
{ "_id" : "testdb", "primary" : "shard1", "partitioned" : true }
configsvr> db.mongos.find()
{ "_id" : "mysql-slave10:30000", "ping" : ISODate("2016-12-12T16:01:32.959Z"), "up" : NumberLong(1403), "waiting" : true, "mongoVersion" : "3.2.11" }
{ "_id" : "mysql-slave10:30001", "ping" : ISODate("2016-12-12T16:01:33.379Z"), "up" : NumberLong(1396), "waiting" : true, "mongoVersion" : "3.2.11" }
{ "_id" : "mysql-slave10:30002", "ping" : ISODate("2016-12-12T16:01:37.317Z"), "up" : NumberLong(1394), "waiting" : true, "mongoVersion" : "3.2.11" }

---------
[mongodb@mysql-slave10 bin]$ /usr/local/mongodb/bin/mongo 192.168.3.110:50001
MongoDB shell version: 3.2.11
connecting to: 192.168.3.110:50001/test
Server has startup warnings:
2016-12-12T07:38:43.936-0800 I CONTROL [initandlisten]
2016-12-12T07:38:43.936-0800 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'.
2016-12-12T07:38:43.936-0800 I CONTROL [initandlisten] ** We suggest setting it to 'never'
2016-12-12T07:38:43.936-0800 I CONTROL [initandlisten]
2016-12-12T07:38:43.936-0800 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'.
2016-12-12T07:38:43.936-0800 I CONTROL [initandlisten] ** We suggest setting it to 'never'
2016-12-12T07:38:43.936-0800 I CONTROL [initandlisten]
shard1:PRIMARY> show dbs;
local 0.005GB
testdb 0.003GB
shard1:PRIMARY> use testdb
switched to db testdb
shard1:PRIMARY> show tables;
table1
shard1:PRIMARY> db.table1.find().count()
99981
[mongodb@mysql-slave10 bin]$ /usr/local/mongodb/bin/mongo 192.168.3.110:50005
MongoDB shell version: 3.2.11
connecting to: 192.168.3.110:50005/test
Server has startup warnings:
2016-12-12T07:39:04.749-0800 I CONTROL [initandlisten]
2016-12-12T07:39:04.749-0800 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'.
2016-12-12T07:39:04.749-0800 I CONTROL [initandlisten] ** We suggest setting it to 'never'
2016-12-12T07:39:04.749-0800 I CONTROL [initandlisten]
2016-12-12T07:39:04.749-0800 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'.
2016-12-12T07:39:04.749-0800 I CONTROL [initandlisten] ** We suggest setting it to 'never'
2016-12-12T07:39:04.749-0800 I CONTROL [initandlisten]
shard2:PRIMARY> show dbs
local 0.000GB
testdb 0.000GB
shard2:PRIMARY> use testdb
switched to db testdb
shard2:PRIMARY> show tables;
table1
shard2:PRIMARY> db.table1.find().count()
1
shard2:PRIMARY> db.table1.find()
{ "_id" : ObjectId("584ec8ca2463dbba9dd3d488"), "id" : 1, "test1" : "testval1" }
shard2:PRIMARY> exit
bye
[mongodb@mysql-slave10 bin]$ /usr/local/mongodb/bin/mongo 192.168.3.110:50009
MongoDB shell version: 3.2.11
connecting to: 192.168.3.110:50009/test
Server has startup warnings:
2016-12-12T07:39:34.419-0800 I CONTROL [initandlisten]
2016-12-12T07:39:34.419-0800 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'.
2016-12-12T07:39:34.419-0800 I CONTROL [initandlisten] ** We suggest setting it to 'never'
2016-12-12T07:39:34.419-0800 I CONTROL [initandlisten]
2016-12-12T07:39:34.419-0800 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'.
2016-12-12T07:39:34.419-0800 I CONTROL [initandlisten] ** We suggest setting it to 'never'
2016-12-12T07:39:34.419-0800 I CONTROL [initandlisten]
shard3:PRIMARY> use testdb
switched to db testdb
shard3:PRIMARY> db.table1.find().count()
18
shard3:PRIMARY> db.table1.find()
{ "_id" : ObjectId("584ec8ca2463dbba9dd3d489"), "id" : 2, "test1" : "testval1" }
{ "_id" : ObjectId("584ec8ca2463dbba9dd3d48a"), "id" : 3, "test1" : "testval1" }
{ "_id" : ObjectId("584ec8ca2463dbba9dd3d48b"), "id" : 4, "test1" : "testval1" }
{ "_id" : ObjectId("584ec8ca2463dbba9dd3d48c"), "id" : 5, "test1" : "testval1" }
{ "_id" : ObjectId("584ec8ca2463dbba9dd3d48d"), "id" : 6, "test1" : "testval1" }
{ "_id" : ObjectId("584ec8ca2463dbba9dd3d48e"), "id" : 7, "test1" : "testval1" }
{ "_id" : ObjectId("584ec8ca2463dbba9dd3d48f"), "id" : 8, "test1" : "testval1" }
{ "_id" : ObjectId("584ec8ca2463dbba9dd3d490"), "id" : 9, "test1" : "testval1" }
{ "_id" : ObjectId("584ec8ca2463dbba9dd3d491"), "id" : 10, "test1" : "testval1" }
{ "_id" : ObjectId("584ec8ca2463dbba9dd3d492"), "id" : 11, "test1" : "testval1" }
{ "_id" : ObjectId("584ec8ca2463dbba9dd3d493"), "id" : 12, "test1" : "testval1" }
{ "_id" : ObjectId("584ec8ca2463dbba9dd3d494"), "id" : 13, "test1" : "testval1" }
{ "_id" : ObjectId("584ec8ca2463dbba9dd3d495"), "id" : 14, "test1" : "testval1" }
{ "_id" : ObjectId("584ec8ca2463dbba9dd3d496"), "id" : 15, "test1" : "testval1" }
{ "_id" : ObjectId("584ec8ca2463dbba9dd3d497"), "id" : 16, "test1" : "testval1" }
{ "_id" : ObjectId("584ec8ca2463dbba9dd3d498"), "id" : 17, "test1" : "testval1" }
{ "_id" : ObjectId("584ec8ca2463dbba9dd3d499"), "id" : 18, "test1" : "testval1" }
{ "_id" : ObjectId("584ec8ca2463dbba9dd3d49a"), "id" : 19, "test1" : "testval1" }
