1.    MongoDB Sharding + Replica Sets

A robust cluster design

Multiple config servers, multiple mongos servers, every shard a replica set, and the write concern (w) set correctly
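Setting w correctly means requiring acknowledgement from more than one replica set member for important writes. As a minimal sketch (the collection name here is hypothetical), a write that must be acknowledged by a majority of members could be issued from the shell like this:

> db.important.insert({ "k" : 1 }, { writeConcern : { w : "majority", wtimeout : 5000 } })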

Architecture diagram

Notes:

1.   This lab runs everything on one machine, starting the different mongod instances with different ports and dbpaths

2.   Nine mongod instances in total, grouped into three replica sets, shard1, shard2, and shard3, each with one primary and two secondaries

3.   The number of mongos processes is unrestricted. It is recommended to run a mongos on each application server, so that each application server talks to its own local mongos; if one server stops working, the other application servers can still talk to their own mongos instances

4.   This lab simulates two application servers (two mongos services)

5.   In production every shard should be a replica set, so that the failure of a single server does not take the shard down

Deploying the environment

Create the required directories

[root@Master cluster2]# mkdir -p shard{1,2,3}/node{1,2,3}
[root@Master cluster2]# mkdir -p shard{1,2,3}/logs
[root@Master cluster2]# ls shard*
shard1:
logs  node1  node2  node3

shard2:
logs  node1  node2  node3

shard3:
logs  node1  node2  node3
[root@Master cluster2]# mkdir -p config/logs
[root@Master cluster2]# mkdir -p config/node{1,2,3}
[root@Master cluster2]# ls config/
logs  node1  node2  node3
[root@Master cluster2]# mkdir -p mongos/logs

Start the config servers

Config server    dbpath                        logpath                                port
node1            /data/mongodb/config/node1    /data/mongodb/config/logs/node1.log    10000
node2            /data/mongodb/config/node2    /data/mongodb/config/logs/node2.log    20000
node3            /data/mongodb/config/node3    /data/mongodb/config/logs/node3.log    30000

# Start the three config servers as planned: the same as starting a single config server, just repeated three times

[root@Master cluster2]# mongod --dbpath config/node1 --logpath config/logs/node1.log --logappend --fork --port 10000
[root@Master cluster2]# mongod --dbpath config/node2 --logpath config/logs/node2.log --logappend --fork --port 20000
[root@Master cluster2]# mongod --dbpath config/node3 --logpath config/logs/node3.log --logappend --fork --port 30000
[root@Master cluster2]# ps -ef | grep mongod | grep -v grep
mongod  ...  /usr/bin/mongod -f /etc/mongod.conf
root    ...  mongod --dbpath config/node1 --logpath config/logs/node1.log --logappend --fork --port 10000
root    ...  mongod --dbpath config/node2 --logpath config/logs/node2.log --logappend --fork --port 20000
root    ...  mongod --dbpath config/node3 --logpath config/logs/node3.log --logappend --fork --port 30000

Start the routing service

Mongos server    dbpath    logpath                                port
mongos1          ——        /data/mongodb/mongos/logs/node1.log    40000
mongos2          ——        /data/mongodb/mongos/logs/node2.log    50000

# The number of mongos processes is not limited; usually one mongos runs on each application server

[root@Master cluster2]# mongos --port 40000 --configdb localhost:10000,localhost:20000,localhost:30000 --logpath mongos/logs/mongos1.log --logappend --fork
[root@Master cluster2]# mongos --port 50000 --configdb localhost:10000,localhost:20000,localhost:30000 --logpath mongos/logs/mongos2.log --logappend --fork
[root@Master cluster2]# ps -ef | grep mongos | grep -v grep
root    ...  mongos --port 40000 --configdb localhost:10000,localhost:20000,localhost:30000 --logpath mongos/logs/mongos1.log --logappend --fork
root    ...  mongos --port 50000 --configdb localhost:10000,localhost:20000,localhost:30000 --logpath mongos/logs/mongos2.log --logappend --fork

Configure the replica sets

As planned, configure and start the three replica sets shard1, shard2, and shard3

# shard1 is used below to illustrate the procedure

# Start three mongod processes

[root@Master cluster2]# mongod --replSet shard1 --dbpath shard1/node1 --logpath shard1/logs/node1.log --logappend --fork --port 10001
[root@Master cluster2]# mongod --replSet shard1 --dbpath shard1/node2 --logpath shard1/logs/node2.log --logappend --fork --port 10002
[root@Master cluster2]# mongod --replSet shard1 --dbpath shard1/node3 --logpath shard1/logs/node3.log --logappend --fork --port 10003

# Initialize the replica set shard1

[root@Master cluster2]# mongo --port 10001
MongoDB shell version: 3.0.
connecting to: 127.0.0.1:10001/test
> use admin
switched to db admin
> rsconf={"_id" : "shard1","members" : [{"_id" : 0, "host" : "localhost:10001"}]}
{
    "_id" : "shard1",
    "members" : [
        {
            "_id" : 0,
            "host" : "localhost:10001"
        }
    ]
}
> rs.initiate(rsconf)
{ "ok" : }
shard1:OTHER> rs.add("localhost:10002")
{ "ok" : }
shard1:PRIMARY> rs.add("localhost:10003")
{ "ok" : }
shard1:PRIMARY> rs.conf()
{
    "_id" : "shard1",
    "version" : 3,
    "members" : [
        {
            "_id" : 0,
            "host" : "localhost:10001",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : { },
            "slaveDelay" : 0,
            "votes" : 1
        },
        {
            "_id" : 1,
            "host" : "localhost:10002",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : { },
            "slaveDelay" : 0,
            "votes" : 1
        },
        {
            "_id" : 2,
            "host" : "localhost:10003",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : { },
            "slaveDelay" : 0,
            "votes" : 1
        }
    ],
    "settings" : {
        "chainingAllowed" : true,
        "heartbeatTimeoutSecs" : 10,
        "getLastErrorModes" : { },
        "getLastErrorDefaults" : {
            "w" : 1,
            "wtimeout" : 0
        }
    }
}
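After a replica set comes up, it is worth confirming that one PRIMARY and two SECONDARY members are present. A minimal check with the standard rs.status() helper, with the expected output sketched below assuming all members are healthy:

shard1:PRIMARY> rs.status().members.forEach(function(m){ print(m.name + " " + m.stateStr) })
localhost:10001 PRIMARY
localhost:10002 SECONDARY
localhost:10003 SECONDARY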

Configure shard2 and shard3 as replica sets in the same way as shard1

[root@Master cluster2]# mongod --replSet shard2 --dbpath shard2/node1 --logpath shard2/logs/node1.log --logappend --fork --port 20001
[root@Master cluster2]# mongod --replSet shard2 --dbpath shard2/node2 --logpath shard2/logs/node2.log --logappend --fork --port 20002
[root@Master cluster2]# mongod --replSet shard2 --dbpath shard2/node3 --logpath shard2/logs/node3.log --logappend --fork --port 20003
[root@Master cluster2]# mongo --port 20001
> use admin
> rsconf={"_id" : "shard2","members" : [{"_id" : 0, "host" : "localhost:20001"}]}
> rs.initiate(rsconf)
shard2:OTHER> rs.add("localhost:20002")
shard2:PRIMARY> rs.add("localhost:20003")
shard2:PRIMARY> rs.conf()
{
    "_id" : "shard2",
    "version" : 3,
    "members" : [
        {
            "_id" : 0,
            "host" : "localhost:20001",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : { },
            "slaveDelay" : 0,
            "votes" : 1
        },
        {
            "_id" : 1,
            "host" : "localhost:20002",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : { },
            "slaveDelay" : 0,
            "votes" : 1
        },
        {
            "_id" : 2,
            "host" : "localhost:20003",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : { },
            "slaveDelay" : 0,
            "votes" : 1
        }
    ],
    "settings" : {
        "chainingAllowed" : true,
        "heartbeatTimeoutSecs" : 10,
        "getLastErrorModes" : { },
        "getLastErrorDefaults" : {
            "w" : 1,
            "wtimeout" : 0
        }
    }
}

  

[root@Master cluster2]# mongod --replSet shard3 --dbpath shard3/node1 --logpath shard3/logs/node1.log --logappend --fork --port 30001
[root@Master cluster2]# mongod --replSet shard3 --dbpath shard3/node2 --logpath shard3/logs/node2.log --logappend --fork --port 30002
[root@Master cluster2]# mongod --replSet shard3 --dbpath shard3/node3 --logpath shard3/logs/node3.log --logappend --fork --port 30003
[root@Master cluster2]# mongo --port 30001
connecting to: 127.0.0.1:30001/test
> use admin
> rsconf={"_id" : "shard3","members" : [{"_id" : 0, "host" : "localhost:30001"}]}
> rs.initiate(rsconf)
shard3:OTHER> rs.add("localhost:30002")
shard3:PRIMARY> rs.add("localhost:30003")
shard3:PRIMARY> rs.conf()
{
    "_id" : "shard3",
    "version" : 3,
    "members" : [
        {
            "_id" : 0,
            "host" : "localhost:30001",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : { },
            "slaveDelay" : 0,
            "votes" : 1
        },
        {
            "_id" : 1,
            "host" : "localhost:30002",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : { },
            "slaveDelay" : 0,
            "votes" : 1
        },
        {
            "_id" : 2,
            "host" : "localhost:30003",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : { },
            "slaveDelay" : 0,
            "votes" : 1
        }
    ],
    "settings" : {
        "chainingAllowed" : true,
        "heartbeatTimeoutSecs" : 10,
        "getLastErrorModes" : { },
        "getLastErrorDefaults" : {
            "w" : 1,
            "wtimeout" : 0
        }
    }
}

Add the (replica set) shards

# Connect to mongos and switch to admin; you must connect to the router node here

[root@Master cluster2]# mongo --port 40000
MongoDB shell version: 3.0.
connecting to: 127.0.0.1:40000/test
mongos> use admin
switched to db admin
mongos> db.runCommand({"addShard":"shard1/localhost:10001"})
{ "shardAdded" : "shard1", "ok" : 1 }
mongos> db.runCommand({"addShard":"shard2/localhost:20001"})
{ "shardAdded" : "shard2", "ok" : 1 }
mongos> db.runCommand({"addShard":"shard3/localhost:30001"})
{ "shardAdded" : "shard3", "ok" : 1 }
mongos> db.runCommand({listshards:1})
{
"shards" : [
{
"_id" : "shard1",
"host" : "shard1/localhost:10001,localhost:10002,localhost:10003"
},
{
"_id" : "shard2",
"host" : "shard2/localhost:20001,localhost:20002,localhost:20003"
},
{
"_id" : "shard3",
"host" : "shard3/localhost:30001,localhost:30002,localhost:30003"
}
],
"ok" :
}
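The sh helper offers a shorthand for the addShard command. Note that naming a single member of each replica set is enough; as the listshards output above shows, mongos discovers the remaining members on its own. A sketch of the equivalent call:

mongos> sh.addShard("shard1/localhost:10001")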

Enable sharding on the database and collections

Enable sharding on a database with the command:

> db.runCommand( { enablesharding : "<database name>" } );

Running this command allows the database to span shards; if this step is skipped, the database will stay on a single shard.

Once database sharding is enabled, the database's different collections can be stored on different shards, but any single collection as a whole still lives on one shard. To shard an individual collection as well, some extra operations on the collection are needed.

# Example: enable sharding for the test database; connect to the mongos process

[root@Master cluster2]# mongo --port 40000
MongoDB shell version: 3.0.
connecting to: 127.0.0.1:40000/test
mongos> use admin
switched to db admin
mongos> db.runCommand({"enablesharding":"test"})
{ "ok" : 1 }

To shard an individual collection, give it a shard key with the following command:

> db.runCommand( { shardcollection : "<namespace>", key : { "<field name>" : 1 } });

Note:  a. The system automatically creates an index on the shard key of a sharded collection (the user may also create it in advance).

b. A sharded collection can have only one unique index, which must be on the shard key; other unique indexes are not allowed.
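To illustrate note a, the shard-key index can be created before sharding the collection; the collection and field below are hypothetical:

mongos> use test
mongos> db.mycoll.ensureIndex({ "uid" : 1 })
mongos> db.runCommand({ "shardcollection" : "test.mycoll", "key" : { "uid" : 1 } })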

# Shard the collection test.yujx

mongos> db.runCommand({"shardcollection":"test.yujx","key":{"_id":}})
{ "collectionsharded" : "test.yujx", "ok" : }

Generate test data

mongos> use test
switched to db test
mongos> for(var i=;i<=;i++) db.yujx.save({"id":i,"a":,"b":,"c":})
WriteResult({ "nInserted" : 1 })
mongos> db.yujx.count()

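For reference, a loader of the same shape with concrete (assumed) bounds and field values would look like this:

mongos> for (var i = 1; i <= 100000; i++) { db.yujx.save({ "id" : i, "a" : 1, "b" : 2, "c" : 3 }) }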
Check how the collection is sharded

mongos> db.yujx.stats()
{
"sharded" : true,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : ,
"capped" : false,
"ns" : "test.yujx",
"count" : ,
"numExtents" : ,
"size" : ,
"storageSize" : ,
"totalIndexSize" : ,
"indexSizes" : {
"_id_" :
},
"avgObjSize" : ,
"nindexes" : ,
"nchunks" : ,
"shards" : {
"shard1" : {
"ns" : "test.yujx",
"count" : ,
"size" : ,
"avgObjSize" : ,
"numExtents" : ,
"storageSize" : ,
"lastExtentSize" : ,
"paddingFactor" : ,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : ,
"capped" : false,
"nindexes" : ,
"totalIndexSize" : ,
"indexSizes" : {
"_id_" :
},
"ok" : ,
"$gleStats" : {
"lastOpTime" : Timestamp(, ),
"electionId" : ObjectId("55d15366716d7504d5d74c4c")
}
},
"shard2" : {
"ns" : "test.yujx",
"count" : ,
"size" : ,
"avgObjSize" : ,
"numExtents" : ,
"storageSize" : ,
"lastExtentSize" : ,
"paddingFactor" : ,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : ,
"capped" : false,
"nindexes" : ,
"totalIndexSize" : ,
"indexSizes" : {
"_id_" :
},
"ok" : ,
"$gleStats" : {
"lastOpTime" : Timestamp(, ),
"electionId" : ObjectId("55d1543eabed7d6d4a71d25e")
}
},
"shard3" : {
"ns" : "test.yujx",
"count" : ,
"size" : ,
"avgObjSize" : ,
"numExtents" : ,
"storageSize" : ,
"lastExtentSize" : ,
"paddingFactor" : ,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : ,
"capped" : false,
"nindexes" : ,
"totalIndexSize" : ,
"indexSizes" : {
"_id_" :
},
"ok" : ,
"$gleStats" : {
"lastOpTime" : Timestamp(, ),
"electionId" : ObjectId("55d155346f36550e3c5f062c")
}
}
},
"ok" :
}
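A more compact per-shard summary than stats() is available from the shell helper getShardDistribution(), which prints per-shard document and data-size figures along with the overall percentages:

mongos> db.yujx.getShardDistribution()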

Check how the database is sharded

mongos> db.printShardingStatus()
--- Sharding Status ---
sharding version: {
"_id" : ,
"minCompatibleVersion" : ,
"currentVersion" : ,
"clusterId" : ObjectId("55d152a35348652fbc726a10")
}
shards:
{ "_id" : "shard1", "host" : "shard1/localhost:10001,localhost:10002,localhost:10003" }
{ "_id" : "shard2", "host" : "shard2/localhost:20001,localhost:20002,localhost:20003" }
{ "_id" : "shard3", "host" : "shard3/localhost:30001,localhost:30002,localhost:30003" }
balancer:
Currently enabled: yes
Currently running: yes
Balancer lock taken at Sun Aug :: GMT- (PDT) by Master.Hadoop::::Balancer:
Failed balancer rounds in last attempts:
Migration Results for the last hours:
: Success
: Failed with error 'could not acquire collection lock for test.yujx to migrate chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for migrating chunk [{ : MinKey }, { : MaxKey }) in test.yujx is taken.', from shard1 to shard2
databases:
{ "_id" : "admin", "partitioned" : false, "primary" : "config" }
{ "_id" : "test", "partitioned" : true, "primary" : "shard1" }
test.yujx
shard key: { "_id" : 1 }
chunks:
shard1
shard2
shard3
{ "_id" : { "$minKey" : } } -->> { "_id" : ObjectId("55d157cca0c90140e33a9342") } on : shard3 Timestamp(, )
{ "_id" : ObjectId("55d157cca0c90140e33a9342") } -->> { "_id" : ObjectId("55d157cca0c90140e33a934a") } on : shard1 Timestamp(, )
{ "_id" : ObjectId("55d157cca0c90140e33a934a") } -->> { "_id" : { "$maxKey" : } } on : shard2 Timestamp(, )

# Alternatively, query the config database through mongos

mongos> use config
switched to db config
mongos> db.shards.find()
{ "_id" : "shard1", "host" : "shard1/localhost:10001,localhost:10002,localhost:10003" }
{ "_id" : "shard2", "host" : "shard2/localhost:20001,localhost:20002,localhost:20003" }
{ "_id" : "shard3", "host" : "shard3/localhost:30001,localhost:30002,localhost:30003" }
mongos> db.databases.find()
{ "_id" : "admin", "partitioned" : false, "primary" : "config" }
{ "_id" : "test", "partitioned" : true, "primary" : "shard1" }
mongos> db.chunks.find()
{ "_id" : "test.yujx-_id_MinKey", "lastmod" : Timestamp(, ), "lastmodEpoch" : ObjectId("55d15738679c4d5f9108eba0"), "ns" : "test.yujx", "min" : { "_id" : { "$minKey" : } }, "max" : { "_id" : ObjectId("55d157cca0c90140e33a9342") }, "shard" : "shard3" }
{ "_id" : "test.yujx-_id_ObjectId('55d157cca0c90140e33a9342')", "lastmod" : Timestamp(, ), "lastmodEpoch" : ObjectId("55d15738679c4d5f9108eba0"), "ns" : "test.yujx", "min" : { "_id" : ObjectId("55d157cca0c90140e33a9342") }, "max" : { "_id" : ObjectId("55d157cca0c90140e33a934a") }, "shard" : "shard1" }
{ "_id" : "test.yujx-_id_ObjectId('55d157cca0c90140e33a934a')", "lastmod" : Timestamp(, ), "lastmodEpoch" : ObjectId("55d15738679c4d5f9108eba0"), "ns" : "test.yujx", "min" : { "_id" : ObjectId("55d157cca0c90140e33a934a") }, "max" : { "_id" : { "$maxKey" : } }, "shard" : "shard2" }

Hashed sharding

MongoDB 2.4 and later support hash-based sharding

mongos> use admin
mongos> db.runCommand({"enablesharding":"mydb"})
mongos> db.runCommand({"shardcollection":"mydb.mycollection","key":{"_id":"hashed"}})
mongos> use mydb
switched to db mydb
mongos> for(i=;i<;i++){ db.mycollection.insert({"Uid":i,"Name":"zhanjindong2","Age":,"Date":new Date()}); }
WriteResult({ "nInserted" : 1 })
mongos> db.mycollection.stats()
{
"sharded" : true,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : ,
"capped" : false,
"ns" : "mydb.mycollection",
"count" : ,
"numExtents" : ,
"size" : ,
"storageSize" : ,
"totalIndexSize" : ,
"indexSizes" : {
"_id_" : ,
"_id_hashed" :
},
"avgObjSize" : ,
"nindexes" : ,
"nchunks" : ,
"shards" : {
"shard1" : {
"ns" : "mydb.mycollection",
"count" : ,
"size" : ,
"avgObjSize" : ,
"numExtents" : ,
"storageSize" : ,
"lastExtentSize" : ,
"paddingFactor" : ,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : ,
"capped" : false,
"nindexes" : ,
"totalIndexSize" : ,
"indexSizes" : {
"_id_" : ,
"_id_hashed" :
},
"ok" : ,
"$gleStats" : {
"lastOpTime" : Timestamp(, ),
"electionId" : ObjectId("55d15366716d7504d5d74c4c")
}
},
"shard2" : {
"ns" : "mydb.mycollection",
"count" : ,
"size" : ,
"avgObjSize" : ,
"numExtents" : ,
"storageSize" : ,
"lastExtentSize" : ,
"paddingFactor" : ,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : ,
"capped" : false,
"nindexes" : ,
"totalIndexSize" : ,
"indexSizes" : {
"_id_" : ,
"_id_hashed" :
},
"ok" : ,
"$gleStats" : {
"lastOpTime" : Timestamp(, ),
"electionId" : ObjectId("55d1543eabed7d6d4a71d25e")
}
},
"shard3" : {
"ns" : "mydb.mycollection",
"count" : ,
"size" : ,
"avgObjSize" : ,
"numExtents" : ,
"storageSize" : ,
"lastExtentSize" : ,
"paddingFactor" : ,
"paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : ,
"capped" : false,
"nindexes" : ,
"totalIndexSize" : ,
"indexSizes" : {
"_id_" : ,
"_id_hashed" :
},
"ok" : ,
"$gleStats" : {
"lastOpTime" : Timestamp(, ),
"electionId" : ObjectId("55d155346f36550e3c5f062c")
}
}
},
"ok" :
}
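For hashed shard keys, the shardcollection command also accepts an optional numInitialChunks value that pre-splits the collection across the shards at creation time instead of waiting for the balancer; a sketch against a hypothetical collection:

mongos> db.runCommand({ "shardcollection" : "mydb.mycollection2", "key" : { "_id" : "hashed" }, "numInitialChunks" : 6 })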

Single-point-of-failure analysis

Since this experiment was done as an introduction to MongoDB, and simulating every failure would take too much time, the failure scenarios are not listed one by one here. For an analysis of the failure scenarios, see:
http://blog.itpub.net/27000195/viewspace-1404402/
