# 13. MongoDB Configuration: Replica Set, Authentication & Authorization
# ==================================================================Install MongoDB

tar -zxvf ~/mongodb-linux-x86_64-rhel70-3.4.5.tgz -C /usr/local
mv /usr/local/mongodb-linux-x86_64-rhel70-3.4.5 /usr/local/mongodb-3.4.5
rm -r ~/mongodb-linux-x86_64-rhel70-3.4.5.tgz

# Environment variables
# ==================================================================node1 node2 node3

vi /etc/profile

# Add the following below the line "export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL"

export JAVA_HOME=/usr/java/jdk1.8.0_111
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export SCALA_HOME=/usr/local/scala-2.12.4
export KAFKA_HOME=/usr/local/kafka_2.12-0.10.2.1
export FLUME_HOME=/usr/local/flume-1.8.0
export SPARK_HOME=/usr/local/spark-2.3.0
export STORM_HOME=/usr/local/storm-1.1.0
export ERLANG_HOME=/usr/local/erlang
export RABBITMQ_HOME=/usr/local/rabbitmq_server-3.7.5
export MONGODB_HOME=/usr/local/mongodb-3.4.5

export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin:$FLUME_HOME/bin:$SPARK_HOME/bin:$SPARK_HOME/sbin:$STORM_HOME/bin:$ERLANG_HOME/bin:$RABBITMQ_HOME/ebin:$RABBITMQ_HOME/sbin:$MONGODB_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native

# ==================================================================node1

# Apply the environment variables
source /etc/profile

# Check the result
echo $MONGODB_HOME

mkdir -p $MONGODB_HOME/{conf,data,pids,logs}

vi $MONGODB_HOME/conf/mongod.conf

systemLog:
  destination: file
  logAppend: true
  # Log file location
  path: /usr/local/mongodb-3.4.5/logs/mongod.log
storage:
  # Data file location
  dbPath: /usr/local/mongodb-3.4.5/data
  journal:
    enabled: true
processManagement:
  # Run as a daemon (in the background)
  fork: true
  pidFilePath: /usr/local/mongodb-3.4.5/pids/mongod.pid
net:
  # Listening port
  port: 27017
replication:
  replSetName: replSet1
#security:
#  authorization: enabled
#  keyFile: /usr/local/mongodb-3.4.5/keyfile/mongodb.keyfile

scp -r $MONGODB_HOME node2:/usr/local/
scp -r $MONGODB_HOME node3:/usr/local/

# ==================================================================node2 node3

# Apply the environment variables
source /etc/profile

# Check the result
echo $MONGODB_HOME

shutdown -h now
# Take a VM snapshot: MongoDB replica set, before cluster setup

# Replica set configuration

# ==================================================================node1 node2 node3
mongod -f $MONGODB_HOME/conf/mongod.conf

# ==================================================================any one node
mongo --port 27017

# Add the replica set configuration
> cfg={_id:"replSet1", members:[{_id:1, host:'node1:27017'},{_id:2, host:'node2:27017'}, {_id:3, host:'node3:27017'}]};
> rs.initiate(cfg);
> rs.status();
> exit;
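# Once rs.initiate(cfg) succeeds, clients address the replica set by name and let the driver discover the primary. A minimal sketch of what that looks like from an application, assuming the Python driver pymongo (which is not part of the steps above):

```python
from pymongo import MongoClient

# List all members and name the replica set so the driver can discover the current primary.
client = MongoClient("mongodb://node1:27017,node2:27017,node3:27017/?replicaSet=replSet1")

# Force the initial connection, then report the discovered topology.
client.admin.command("ping")
print(client.primary)      # (host, port) of the elected primary
print(client.secondaries)  # set of (host, port) tuples for the secondaries
```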
# ==================================================================node1
mongo node1:27017/admin

replSet1:PRIMARY> rs.status();
replSet1:PRIMARY> use admin;
replSet1:PRIMARY> db.dropUser("admin");
replSet1:PRIMARY> db.createUser({user: "admin",pwd: "Mongo*123456",roles: [{role:"root",db:"admin"}]});
replSet1:PRIMARY> db.auth("admin", "Mongo*123456");
replSet1:PRIMARY> exit

# ==================================================================node2
mongo node2:27017/admin

replSet1:SECONDARY> rs.status();
replSet1:SECONDARY> exit;

# ==================================================================node3
mongo node3:27017/admin

replSet1:SECONDARY> rs.status();
replSet1:SECONDARY> exit;

# ==================================================================node1
mkdir -p $MONGODB_HOME/keyfile
openssl rand -base64 756 > $MONGODB_HOME/keyfile/mongodb.keyfile
chmod 600 $MONGODB_HOME/keyfile/mongodb.keyfile

# ==================================================================node2 node3
mkdir -p $MONGODB_HOME/keyfile

# ==================================================================node1
# Enabling keyFile implicitly enables auth, so connecting to the replica set now requires authentication.
# Copy the key file to the other nodes, then restart the mongod services.
scp -r $MONGODB_HOME/keyfile/mongodb.keyfile node2:$MONGODB_HOME/keyfile/
scp -r $MONGODB_HOME/keyfile/mongodb.keyfile node3:$MONGODB_HOME/keyfile/

# ==================================================================node2 node3
chmod 600 $MONGODB_HOME/keyfile/mongodb.keyfile

# ==================================================================node1 node2 node3
vi $MONGODB_HOME/conf/mongod.conf

security:
  authorization: enabled
  keyFile: /usr/local/mongodb-3.4.5/keyfile/mongodb.keyfile

# ==================================================================node1 node2 node3
# Restart secondaries before the primary: node3 first, then node2, then node1
mongod -f $MONGODB_HOME/conf/mongod.conf --shutdown
mongod -f $MONGODB_HOME/conf/mongod.conf

mongo node1:27017

> use admin;
> show dbs;
# This now fails with an "unauthorized" error
> exit;
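# With keyFile/auth enabled, the unauthenticated shell above is rejected, and application drivers must pass credentials as well. A hedged pymongo sketch using the admin user created earlier (pymongo itself is an assumption, not part of this walkthrough):

```python
from pymongo import MongoClient

# authSource is "admin" because that is the database the admin user was created in.
client = MongoClient(
    host=["node1:27017", "node2:27017", "node3:27017"],
    replicaSet="replSet1",
    username="admin",
    password="Mongo*123456",
    authSource="admin",
)

print(client.admin.command("ping"))  # succeeds only because credentials were supplied
print([d["name"] for d in client.admin.command("listDatabases")["databases"]])
```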
# ==================================================================node1
mongo node1:27017/admin -u admin -p Mongo*123456

replSet1:PRIMARY> rs.status();

# > use logs;
# > db.dropUser("loguser");
# > db.createUser({user: "loguser",pwd: "Mongo*123456",roles: [{role: "readWrite", db: "logs"}]});
# > db.auth("loguser", "Mongo*123456");
# > db.createUser({user: "loguser",pwd: "Mongo*123456",roles: [{role : "readWrite",db : "logs"},{role : "readWrite",db : "school"}]});
# > db.updateUser("loguser", {pwd: "Mongo*123456",roles: [{role : "readWrite",db : "logs"},{role : "readWrite",db : "school"}]})

> use school;
> db.dropUser("schooluser");
> db.createUser({user: "schooluser",pwd: "Mongo*123456",roles: [{role: "readWrite", db: "school"}]});
> db.auth("schooluser", "Mongo*123456");
> exit;

mongo node1:27017/school -u schooluser -p Mongo*123456

> use school;
> db.student.insert({name:"Tom", age:16});
> db.student.insert({name:"Jerry", age:15});
> db.student.insert({name:"Mary", age:9});
> db.student.find();
# ==================================================================node2
mongo node2:27017/school -u schooluser -p Mongo*123456

replSet1:SECONDARY> db.student.find();
Error: error: {
    "ok" : 0,
    "errmsg" : "not master and slaveOk=false",
    "code" : 13435,
    "codeName" : "NotMasterNoSlaveOk"
}

# This error occurs because MongoDB routes reads and writes to the primary by default and secondaries reject reads; allow reads on this secondary with:
replSet1:SECONDARY> db.getMongo().setSlaveOk();
replSet1:SECONDARY> db.student.find();
{ "_id" : ObjectId("59d98fde9740291fac4998fb"), "name" : "Tom", "age" : 16 }
{ "_id" : ObjectId("59d98fe69740291fac4998fc"), "name" : "Jerry", "age" : 15 }
{ "_id" : ObjectId("59d98fed9740291fac4998fd"), "name" : "Mary", "age" : 9 }
replSet1:SECONDARY> exit;
# ==================================================================node3
mongo node3:27017/school -u schooluser -p Mongo*123456

replSet1:SECONDARY> db.student.find();
Error: error: {
    "ok" : 0,
    "errmsg" : "not master and slaveOk=false",
    "code" : 13435,
    "codeName" : "NotMasterNoSlaveOk"
}

# Same as on node2: secondaries reject reads by default; allow reads on this secondary with:
replSet1:SECONDARY> db.getMongo().setSlaveOk();
replSet1:SECONDARY> db.student.find();
{ "_id" : ObjectId("59d98fde9740291fac4998fb"), "name" : "Tom", "age" : 16 }
{ "_id" : ObjectId("59d98fe69740291fac4998fc"), "name" : "Jerry", "age" : 15 }
{ "_id" : ObjectId("59d98fed9740291fac4998fd"), "name" : "Mary", "age" : 9 }
replSet1:SECONDARY> exit;

# ==================================================================node3 node2 node1
# Shut down secondaries before the primary: node3 first, then node2, then node1
mongod -f $MONGODB_HOME/conf/mongod.conf --shutdown

shutdown -h now
# Take a VM snapshot: MongoDB replica set with security authentication

# An alternative approach

# 13. MongoDB Configuration: Replica Set, Sharding, Authentication & Authorization

# ==================================================================Planning

# Port allocation:
mongos: 27017
config: 27000
shard1: 27001
shard2: 27002
shard3: 27003

# ==================================================================Install MongoDB

tar -zxvf ~/mongodb-linux-x86_64-rhel70-3.4.5.tgz -C /usr/local
mv /usr/local/mongodb-linux-x86_64-rhel70-3.4.5 /usr/local/mongodb-3.4.5
rm -r ~/mongodb-linux-x86_64-rhel70-3.4.5.tgz

# Environment variables
# ==================================================================node1 node2 node3

vi /etc/profile

# Add the following below the line "export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL"

export JAVA_HOME=/usr/java/jdk1.8.0_111
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export SCALA_HOME=/usr/local/scala-2.12.4
export KAFKA_HOME=/usr/local/kafka_2.12-0.10.2.1
export FLUME_HOME=/usr/local/flume-1.8.0
export SPARK_HOME=/usr/local/spark-2.3.0
export STORM_HOME=/usr/local/storm-1.1.0
export ERLANG_HOME=/usr/local/erlang
export RABBITMQ_HOME=/usr/local/rabbitmq_server-3.7.5
export MONGODB_HOME=/usr/local/mongodb-3.4.5

export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin:$FLUME_HOME/bin:$SPARK_HOME/bin:$SPARK_HOME/sbin:$STORM_HOME/bin:$ERLANG_HOME/bin:$RABBITMQ_HOME/ebin:$RABBITMQ_HOME/sbin:$MONGODB_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native

# ==================================================================node1

# Apply the environment variables
source /etc/profile

# Check the result
echo $MONGODB_HOME

# ==================================================================node1

mkdir -p $MONGODB_HOME/{conf,data,pids,logs}

vi $MONGODB_HOME/conf/mongod.conf

systemLog:
  destination: file
  logAppend: true
  # Log file location
  path: /usr/local/mongodb-3.4.5/logs/mongod.log
storage:
  # Data file location
  dbPath: /usr/local/mongodb-3.4.5/data
  journal:
    enabled: true
processManagement:
  # Run as a daemon (in the background)
  fork: true
  pidFilePath: /usr/local/mongodb-3.4.5/pids/mongod.pid
net:
  # Listening port
  port: 27017
replication:
  replSetName: replSet1
#security:
#  authorization: enabled
#  keyFile: /usr/local/mongodb-3.4.5/keyfile/mongodb.keyfile

scp -r $MONGODB_HOME node2:/usr/local/
scp -r $MONGODB_HOME node3:/usr/local/

# ==================================================================node2 node3

# Apply the environment variables
cd ~
source /etc/profile

# Check the result
echo $MONGODB_HOME

# On each machine create six directories: conf, mongos, config, shard1, shard2, shard3. Since mongos stores no data, it only needs a log directory.

mkdir -p $MONGODB_HOME/conf
mkdir -p $MONGODB_HOME/mongos/logs
mkdir -p $MONGODB_HOME/config/{data,logs}
mkdir -p $MONGODB_HOME/shard1/{data,logs}
mkdir -p $MONGODB_HOME/shard2/{data,logs}
mkdir -p $MONGODB_HOME/shard3/{data,logs}

# Config server configuration file
vi $MONGODB_HOME/conf/config.conf

pidfilepath = /usr/local/mongodb-3.4.5/config/logs/configsvr.pid
dbpath = /usr/local/mongodb-3.4.5/config/data
logpath = /usr/local/mongodb-3.4.5/config/logs/configsvr.log
logappend = true
bind_ip = 0.0.0.0
port = 27000
fork = true
configsvr = true
# Replica set name
replSet = configs
# Maximum number of connections
maxConns = 20000
# Security: key file authentication
# keyFile = /usr/local/mongodb-3.4.5/keyfile/keyFile.file
# Enable user authentication
# auth = true

# ==================================================================node1 node2 node3
# Set up the first shard replica set
vi $MONGODB_HOME/conf/shard1.conf

pidfilepath = /usr/local/mongodb-3.4.5/shard1/logs/shard1.pid
dbpath = /usr/local/mongodb-3.4.5/shard1/data
logpath = /usr/local/mongodb-3.4.5/shard1/logs/shard1.log
logappend = true
bind_ip = 0.0.0.0
port = 27001
fork = true
# Enable the HTTP web monitoring interface
httpinterface = true
rest = true
# Replica set name
replSet = shard1
shardsvr = true
# Maximum number of connections
maxConns = 20000
# Security: key file authentication
# keyFile = /usr/local/mongodb-3.4.5/keyfile/keyFile.file
# Enable user authentication
# auth = true

# ==================================================================node1 node2 node3
# Set up the second shard replica set
vi $MONGODB_HOME/conf/shard2.conf

pidfilepath = /usr/local/mongodb-3.4.5/shard2/logs/shard2.pid
dbpath = /usr/local/mongodb-3.4.5/shard2/data
logpath = /usr/local/mongodb-3.4.5/shard2/logs/shard2.log
logappend = true
bind_ip = 0.0.0.0
port = 27002
fork = true
# Enable the HTTP web monitoring interface
httpinterface = true
rest = true
# Replica set name
replSet = shard2
shardsvr = true
# Maximum number of connections
maxConns = 20000
# Security: key file authentication
# keyFile = /usr/local/mongodb-3.4.5/keyfile/keyFile.file
# Enable user authentication
# auth = true
# ==================================================================node1 node2 node3
# Set up the third shard replica set
vi $MONGODB_HOME/conf/shard3.conf

pidfilepath = /usr/local/mongodb-3.4.5/shard3/logs/shard3.pid
dbpath = /usr/local/mongodb-3.4.5/shard3/data
logpath = /usr/local/mongodb-3.4.5/shard3/logs/shard3.log
logappend = true
bind_ip = 0.0.0.0
port = 27003
fork = true
# Enable the HTTP web monitoring interface
httpinterface = true
rest = true
# Replica set name
replSet = shard3
shardsvr = true
# Maximum number of connections
maxConns = 20000
# Security: key file authentication
# keyFile = /usr/local/mongodb-3.4.5/keyfile/keyFile.file
# Enable user authentication
# auth = true

# ==================================================================node1 node2 node3
# Configure the mongos router
# Start the config servers and shard servers first, then start the mongos router instances (on all three machines)
vi $MONGODB_HOME/conf/mongos.conf

pidfilepath = /usr/local/mongodb-3.4.5/mongos/logs/mongos.pid
logpath = /usr/local/mongodb-3.4.5/mongos/logs/mongos.log
logappend = true
bind_ip = 0.0.0.0
port = 27017
fork = true
# Config servers that mongos talks to (must be 1 or 3); "configs" is the config server replica set name
configdb = configs/node1:27000,node2:27000,node3:27000
# Maximum number of connections
maxConns = 20000
# Security: key file authentication
# keyFile = /usr/local/mongodb-3.4.5/keyfile/keyFile.file
# ==================================================================node1
scp -r $MONGODB_HOME node2:/usr/local/
scp -r $MONGODB_HOME node3:/usr/local/

# Check the result
echo $MONGODB_HOME

shutdown -h now
# Take a VM snapshot: before the MongoDB cluster setup

# Startup

# Start the config servers
# ==================================================================node1 node2 node3
mongod -f $MONGODB_HOME/conf/config.conf

# ==================================================================any one node
# Log in to any one config server and initiate the config server replica set
mongo --port 27000

> config={_id:"configs",members:[{_id:1,host:"node1:27000"},{_id:2,host:"node2:27000"},{_id:3,host:"node3:27000"}]}
> rs.initiate(config)
# "_id":"configs" must match the replica set name (replSet) in the configuration file; each "host" in "members" is a node's hostname and port

# Start the shard1 servers
# ==================================================================node1 node2 node3
mongod -f $MONGODB_HOME/conf/shard1.conf

# ==================================================================any one node
# Log in to any one node and initiate the replica set
mongo --port 27001

# Define the replica set configuration; "arbiterOnly":true makes the third member an arbiter
> config={_id:"shard1",members:[{_id:1,host:"node1:27001"},{_id:2,host:"node2:27001"},{_id:3,host:"node3:27001",arbiterOnly:true}]}
> rs.initiate(config);

# Start the shard2 servers
# ==================================================================node1 node2 node3
mongod -f $MONGODB_HOME/conf/shard2.conf

# ==================================================================any one node
mongo --port 27002

> config={_id:"shard2",members:[{_id:1,host:"node1:27002",arbiterOnly:true},{_id:2,host:"node2:27002"},{_id:3,host:"node3:27002"}]}
> rs.initiate(config);

# Start the shard3 servers
# ==================================================================node1 node2 node3
mongod -f $MONGODB_HOME/conf/shard3.conf

# ==================================================================any one node
# Log in to any one node and initiate the replica set
mongo --port 27003

> config={_id:"shard3",members:[{_id:1,host:"node1:27003"},{_id:2,host:"node2:27003",arbiterOnly:true},{_id:3,host:"node3:27003"}]}
> rs.initiate(config);

# ==================================================================node1 node2 node3
# Start the mongos server on all three machines
mongos -f $MONGODB_HOME/conf/mongos.conf

# Enable sharding
# The config servers, mongos routers and shard servers are now running, but an application connecting to mongos cannot use sharding yet; the shards still have to be registered and sharding enabled.

# ==================================================================any one node
# Log in to any one mongos
mongo --port 27017

# Use the admin database
> use admin;

# Link the router to the shard replica sets
> sh.addShard("shard1/node1:27001,node2:27001,node3:27001")
> sh.addShard("shard2/node1:27002,node2:27002,node3:27002")
> sh.addShard("shard3/node1:27003,node2:27003,node3:27003")

# View the shard server configuration
> db.runCommand({listshards:1});

# View the cluster status
> sh.status();
# Test
# The config servers, mongos routers, shard servers and replica sets are now all wired together; the goal is that inserted data is sharded automatically. Connect to mongos and enable sharding for a specific database and collection.

# Enable sharding for the test database
> db.runCommand({enablesharding:"test"});

# Specify the collection to shard and its shard key
# A shard key must be specified; an index is created on it automatically and documents are distributed according to this key
> db.runCommand({shardcollection:"test.table1",key:{_id:"hashed"}});
# This shards the table1 collection of the test database, distributing documents across shard1, shard2 and shard3 by _id. It has to be set explicitly because not every MongoDB database and collection needs to be sharded.
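# The same commands can be issued from a driver instead of the mongo shell. A hedged pymongo sketch of enabling sharding with the hashed shard key used above (auth is not enabled yet at this point in the walkthrough):

```python
from pymongo import MongoClient

# Connect to the mongos routers (any one of them is enough).
client = MongoClient("mongodb://node1:27017,node2:27017,node3:27017/")

client.admin.command("enableSharding", "test")
client.admin.command("shardCollection", "test.table1", key={"_id": "hashed"})

# List the shards registered with the cluster.
print(client.admin.command("listShards"))
```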
# Use test
> use test;

# Insert test data
> for (var i = 1; i <= 5000; i++) db.table1.save({id:i,"field1":"testval"+i});
# WriteResult({ "nInserted" : 1 })

> db.table1.find().count();
> db.table1.find().limit(5);

# Check the sharding
> db.table1.stats();
# The data is spread over the 3 shards, so sharding works.

# By default reads and writes go to the primary and secondaries reject reads; to read from a secondary, enable it first:
repset:SECONDARY> db.getMongo().setSlaveOk();
repset:SECONDARY> show tables;

# Check which node is primary, secondary or arbiter on each shard
mongo --host node1 --port 27001
mongo --host node2 --port 27001
mongo --host node3 --port 27001

mongo --host node1 --port 27002
mongo --host node2 --port 27002
mongo --host node3 --port 27002

mongo --host node1 --port 27003
mongo --host node2 --port 27003
mongo --host node3 --port 27003

shutdown -h now
# Take a VM snapshot: MongoDB cluster without security authentication

# Enable security authentication

mongod -f $MONGODB_HOME/conf/config.conf

mongod -f $MONGODB_HOME/conf/shard1.conf

mongod -f $MONGODB_HOME/conf/shard2.conf

mongod -f $MONGODB_HOME/conf/shard3.conf

mongos -f $MONGODB_HOME/conf/mongos.conf

mongo --port 27017

# Add the user; be sure to use the admin database
> use admin;
> db.dropUser("admin");
> db.createUser({user: "admin",pwd: "Mongo*123",roles: [{role:"root",db:"admin"}]});
> db.auth("admin", "Mongo*123");
> exit;

# ==================================================================node1
# Generate the key file.
# With keyFile authentication, every mongod instance in the replica set uses the contents of the keyFile as a shared password; only mongod or mongos instances holding the correct key file can connect to the cluster. The key file content must be between 6 and 1024 characters, and on unix/linux the file owner must have at least read permission on it.
# The key file can be generated in any way, for example:
# generate it randomly or write it by hand; the key must be 6-1024 base64 characters; on unix the file permissions must be restricted, on windows this is not required
mkdir -p $MONGODB_HOME/keyfile
openssl rand -base64 756 > $MONGODB_HOME/keyfile/keyFile.file
chmod 600 $MONGODB_HOME/keyfile/keyFile.file
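# As noted above, the key file can be produced by any tool that yields 6-1024 base64 characters. A hypothetical Python equivalent of the openssl/chmod pair above (same path as in this walkthrough):

```python
import base64
import os
import secrets

KEYFILE = "/usr/local/mongodb-3.4.5/keyfile/keyFile.file"

# 756 random bytes encode to 1008 base64 characters, within MongoDB's 6-1024 limit.
key = base64.b64encode(secrets.token_bytes(756))

os.makedirs(os.path.dirname(KEYFILE), exist_ok=True)
with open(KEYFILE, "wb") as f:
    f.write(key)

# Equivalent of chmod 600: read/write for the owner only.
os.chmod(KEYFILE, 0o600)
```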
# ==================================================================node2
mkdir -p $MONGODB_HOME/keyfile

# ==================================================================node3
mkdir -p $MONGODB_HOME/keyfile

# ==================================================================node1
# The first command above generates the key file; the second uses chmod to restrict its permissions so only the file owner can read it
# Copy the key to the same location on every machine in the cluster
scp -r $MONGODB_HOME/keyfile/keyFile.file root@node2:$MONGODB_HOME/keyfile
scp -r $MONGODB_HOME/keyfile/keyFile.file root@node3:$MONGODB_HOME/keyfile

# ==================================================================node2
chmod 600 $MONGODB_HOME/keyfile/keyFile.file

# ==================================================================node3
chmod 600 $MONGODB_HOME/keyfile/keyFile.file

# ==================================================================node1
# On every machine, add the following settings to each mongod configuration file (note: all mongod instances, not mongos).
# Add them to the config server, shard1, shard2 and shard3 configuration files
# Configuration file content
vi $MONGODB_HOME/conf/config.conf
keyFile = /usr/local/mongodb-3.4.5/keyfile/keyFile.file
auth = true

vi $MONGODB_HOME/conf/shard1.conf

keyFile = /usr/local/mongodb-3.4.5/keyfile/keyFile.file
auth = true

vi $MONGODB_HOME/conf/shard2.conf

keyFile = /usr/local/mongodb-3.4.5/keyfile/keyFile.file
auth = true

vi $MONGODB_HOME/conf/shard3.conf

keyFile = /usr/local/mongodb-3.4.5/keyfile/keyFile.file
auth = true

vi $MONGODB_HOME/conf/mongos.conf

keyFile = /usr/local/mongodb-3.4.5/keyfile/keyFile.file

scp -r $MONGODB_HOME/conf/config.conf root@node2:$MONGODB_HOME/conf
scp -r $MONGODB_HOME/conf/shard1.conf root@node2:$MONGODB_HOME/conf
scp -r $MONGODB_HOME/conf/shard2.conf root@node2:$MONGODB_HOME/conf
scp -r $MONGODB_HOME/conf/shard3.conf root@node2:$MONGODB_HOME/conf
scp -r $MONGODB_HOME/conf/mongos.conf root@node2:$MONGODB_HOME/conf scp -r $MONGODB_HOME/conf/config.conf root@node3:$MONGODB_HOME/conf
scp -r $MONGODB_HOME/conf/shard1.conf root@node3:$MONGODB_HOME/conf
scp -r $MONGODB_HOME/conf/shard2.conf root@node3:$MONGODB_HOME/conf
scp -r $MONGODB_HOME/conf/shard3.conf root@node3:$MONGODB_HOME/conf
scp -r $MONGODB_HOME/conf/mongos.conf root@node3:$MONGODB_HOME/conf

# ==================================================================node3 node2 node1
# Shut down secondaries before the primary
mongod -f $MONGODB_HOME/conf/config.conf --shutdown
mongod -f $MONGODB_HOME/conf/shard1.conf --shutdown
mongod -f $MONGODB_HOME/conf/shard2.conf --shutdown
mongod -f $MONGODB_HOME/conf/shard3.conf --shutdown

# ==================================================================node1 node2 node3
# Startup order: config servers first, then the shards, and finally mongos
mongod -f $MONGODB_HOME/conf/config.conf
mongod -f $MONGODB_HOME/conf/shard1.conf
mongod -f $MONGODB_HOME/conf/shard2.conf
mongod -f $MONGODB_HOME/conf/shard3.conf
mongos -f $MONGODB_HOME/conf/mongos.conf

mongo --port 27017
> use admin;
> show dbs;
# This now fails with an "unauthorized" error
> exit;

# ==================================================================node1
mongo node1:27017/admin -u admin -p Mongo*123

> show dbs;
> db.runCommand({enablesharding:"logs"});
> db.runCommand({shardcollection:"logs.logList",key:{id:1}})

> use logs;
> db.dropUser("loguser");
> db.createUser({user:"loguser",pwd:"Mongo*123",roles:[{role:"readWrite",db:"logs"}]});
> db.auth("loguser","Mongo*123");
> db.logList.ensureIndex({key: -1});
# > db.logList.ensureIndex({DateTime: -1});
# > db.logList.ensureIndex({PrimaryData: -1});
> exit;

mongo node2:27017/logs -u loguser -p Mongo*123

...SECONDARY> db.logList.find().count();
Error: error: {
    "ok" : 0,
    "errmsg" : "not master and slaveOk=false",
    "code" : 13435,
    "codeName" : "NotMasterNoSlaveOk"
}
# This error occurs because MongoDB routes reads and writes to the primary by default and secondaries reject reads; allow reads on this secondary with:
> db.getMongo().setSlaveOk();
...SECONDARY> db.logList.find();

mongo node1:27017/logs -u loguser -p Mongo*123

> use logs;
> show tables;
> db.logList.find().count();

# Insert test data
> for (var i = 1; i <= 5000; i++) db.logList.save({id:i,"field1":"testval" + i});
# WriteResult({ "nInserted" : 1 })

> db.logList.find().count();
> db.logList.find().limit(5);

# Check the sharding
> db.logList.stats();
> db.logList.find().skip(0).limit(5);
> db.logList.remove({});
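# For reference, this is roughly how an application would reach the secured sharded cluster through the mongos routers as loguser (a pymongo sketch under the same assumptions as the earlier ones):

```python
from pymongo import MongoClient

# All three mongos routers as a seed list; loguser was created in the logs database.
client = MongoClient(
    host=["node1:27017", "node2:27017", "node3:27017"],
    username="loguser",
    password="Mongo*123",
    authSource="logs",
)

logs = client.logs
# Documents include the shard key field "id" defined for logs.logList above.
logs.logList.insert_one({"id": 1, "field1": "testval1"})
print(logs.logList.count_documents({"id": {"$lte": 1}}))
```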
shutdown -h now
# Take a VM snapshot: MongoDB cluster with security authentication
