# 如果之前没有安装jdk和zookeeper,安装了的请直接跳过

# https://www.oracle.com/technetwork/java/javase/downloads/java-archive-javase8-2177648.html

# ================================================================== Install JDK

mkdir -p /usr/java
tar -zxvf ~/jdk-8u111-linux-x64.tar.gz -C /usr/java
# The archive is a regular file, so -r (recursive) is not needed; -f avoids a
# prompt and a spurious error if the file is already gone.
rm -f ~/jdk-8u111-linux-x64.tar.gz

# http://archive.apache.org/dist/zookeeper/

# ================================================================== Install ZooKeeper
# A ZooKeeper cluster needs at least 3 servers; deploy ZooKeeper on every one of them.

tar -zxvf ~/zookeeper-3.4.12.tar.gz -C /usr/local
# Regular file: -r is unnecessary; use -f (consistent with the JDK step above).
rm -f ~/zookeeper-3.4.12.tar.gz

# https://www.scala-lang.org/download/2.12.4.html
# ================================================================== Install Scala

tar -zxvf ~/scala-2.12.4.tgz -C /usr/local
# BUG FIX: the original used a Unicode en-dash ("–r") instead of "-r"; rm would
# treat "–r" as a filename operand and fail. A tarball is a regular file, so -f
# is the correct flag anyway.
rm -f ~/scala-2.12.4.tgz

# http://archive.apache.org/dist/kafka/0.10.2.1/

# ================================================================== Install Kafka

tar -zxvf ~/kafka_2.12-0.10.2.1.tgz -C /usr/local
# BUG FIX: en-dash "–r" replaced with a real option; -f suits a regular file.
rm -f ~/kafka_2.12-0.10.2.1.tgz

# Environment variables
# ================================================================== node1 node2 node3

vi /etc/profile

# Add the lines below after "export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL"
export JAVA_HOME=/usr/java/jdk1.8.0_111
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export SCALA_HOME=/usr/local/scala-2.12.4
export KAFKA_HOME=/usr/local/kafka_2.12-0.10.2.1
# FIX: the PATH, CLASSPATH and HADOOP_INSTALL exports were fused onto the two
# lines above; each export must be its own statement for /etc/profile to work.
export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native

# ==================================================================node1

# Reload the profile so the new environment variables take effect
source /etc/profile # Check the configuration result
echo $KAFKA_HOME

# ==================================================================node2 node3

# Create the JDK install directory on the remaining nodes before copying from node1
mkdir -p /usr/java

# ================================================================== node1

# jdk

# zookeeper

# https://www.cnblogs.com/zcf5522/p/9753539.html

# scala — copy the Scala installation to the other nodes
scp -r $SCALA_HOME node2:/usr/local/
scp -r $SCALA_HOME node3:/usr/local/

# ================================================================== Configure Kafka
mkdir $KAFKA_HOME/logs

# Configure kafka server.properties (contents below; one entry per line)
vi $KAFKA_HOME/config/server.properties

broker.id=1
delete.topic.enable=true
auto.create.topics.enable=true
listeners=PLAINTEXT://node1:9092
#listeners=SASL_PLAINTEXT://node1:9092
log.dirs=/usr/local/kafka_2.12-0.10.2.1/logs
zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka

# ACL authorizer entry class
#authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
# This example uses SASL_PLAINTEXT
#security.inter.broker.protocol=SASL_PLAINTEXT
#sasl.mechanism.inter.broker.protocol=PLAIN
#sasl.enabled.mechanisms=PLAIN
# Make admin a super user for this example
#super.users=User:admin;User:kafka

# Copy the configured Kafka installation to the other nodes
scp -r $KAFKA_HOME node2:/usr/local/
scp -r $KAFKA_HOME node3:/usr/local/

# ==================================================================node2 node3

# Reload the profile so the new environment variables take effect
source /etc/profile # Check the configuration result
echo $KAFKA_HOME

# ==================================================================node2

# Edit server.properties on node2; only broker.id and listeners differ per node
vi $KAFKA_HOME/config/server.properties

broker.id=2
listeners=PLAINTEXT://node2:9092
#listeners=SASL_PLAINTEXT://node2:9092

# ==================================================================node3

# Edit server.properties on node3; only broker.id and listeners differ per node
vi $KAFKA_HOME/config/server.properties

broker.id=3
listeners=PLAINTEXT://node3:9092
#listeners=SASL_PLAINTEXT://node3:9092

# Start the Kafka cluster

# ================================================================== node1 node2 node3
# Start ZooKeeper on every node
zkServer.sh start
zkServer.sh status
# zkServer.sh stop

# ================================================================== node1
# The chroot path /kafka must be created in ZooKeeper by hand; connect to any
# ZooKeeper server and run:
zkCli.sh create /kafka ''

# ================================================================== node1 node2 node3
# Start Kafka
kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
# kafka-server-start.sh $KAFKA_HOME/config/server.properties &
# Verify the running processes
jps

# ================================================================== Kafka cluster test 1
# kafka-topics.sh --delete --zookeeper node1:2181,node2:2181,node3:2181/kafka --topic clustertopic1
# Create a topic
kafka-topics.sh --create --zookeeper node1:2181,node2:2181,node3:2181/kafka --replication-factor 3 --partitions 1 --topic clustertopic1
# List topics
kafka-topics.sh --list --zookeeper node1:2181,node2:2181,node3:2181/kafka
# Describe the topic
kafka-topics.sh --describe --zookeeper node1:2181,node2:2181,node3:2181/kafka --topic clustertopic1
# Produce messages
kafka-console-producer.sh --broker-list node1:9092,node2:9092,node3:9092 --topic clustertopic1
# Sample console input (commented out: a bare leading ">" would be parsed by the
# shell as a redirection and truncate a file named "This"):
# > This is a message
# > This is another message
# Consume messages
kafka-console-consumer.sh --bootstrap-server node1:9092,node2:9092,node3:9092 --from-beginning --topic clustertopic1
# Stop
kafka-server-stop.sh
zkServer.sh stop

# ================================================================== Kafka cluster test 2
# kafka-topics.sh --delete --zookeeper node1:2181,node2:2181,node3:2181/kafka --topic clustertopic2
# Create a topic (3 partitions this time)
kafka-topics.sh --create --zookeeper node1:2181,node2:2181,node3:2181/kafka --replication-factor 3 --partitions 3 --topic clustertopic2
# List topics
kafka-topics.sh --list --zookeeper node1:2181/kafka
# Describe the topic
kafka-topics.sh --describe --zookeeper node1:2181,node2:2181,node3:2181/kafka --topic clustertopic2
# Produce messages (create a producer on one server)
kafka-console-producer.sh --broker-list node1:9092,node2:9092,node3:9092 --topic clustertopic2
# > This is a message
# > This is another message
# Consume messages (start the consumer on another node)
kafka-console-consumer.sh --bootstrap-server node1:9092,node2:9092,node3:9092 --from-beginning --topic clustertopic2
# Stop
kafka-server-stop.sh
zkServer.sh stop
# FIX: "shutdown -h now" was fused onto the previous line; it is a separate command.
shutdown -h now
# Take a VM snapshot: kafka

# The configuration below is optional for now

# ================================================================== SASL/PLAIN authentication

# ================================================================== node1
# Configure the Kafka server side (every broker)
vi $KAFKA_HOME/config/server.properties

broker.id=1
delete.topic.enable=true
auto.create.topics.enable=true
#listeners=PLAINTEXT://node1:9092
listeners=SASL_PLAINTEXT://node1:9092
log.dirs=/usr/local/kafka_2.12-0.10.2.1/logs
zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka
# SASL mechanism
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.mechanism.inter.broker.protocol=PLAIN
sasl.enabled.mechanisms=PLAIN
# Class that performs authorization
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
# If no ACL entry is found, allow every operation.
allow.everyone.if.no.acl.found=true
# Make admin a super user
super.users=User:admin;User:kafka

# ================================================================== node2
vi $KAFKA_HOME/config/server.properties

broker.id=2
delete.topic.enable=true
auto.create.topics.enable=true
#listeners=PLAINTEXT://node2:9092
listeners=SASL_PLAINTEXT://node2:9092
log.dirs=/usr/local/kafka_2.12-0.10.2.1/logs
zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka
# SASL mechanism
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.mechanism.inter.broker.protocol=PLAIN
sasl.enabled.mechanisms=PLAIN
# Class that performs authorization
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
# If no ACL entry is found, allow every operation.
allow.everyone.if.no.acl.found=true
# Make admin a super user
super.users=User:admin;User:kafka

# ================================================================== node3
vi $KAFKA_HOME/config/server.properties

broker.id=3
delete.topic.enable=true
auto.create.topics.enable=true
#listeners=PLAINTEXT://node3:9092
listeners=SASL_PLAINTEXT://node3:9092
log.dirs=/usr/local/kafka_2.12-0.10.2.1/logs
zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka
# SASL mechanism
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.mechanism.inter.broker.protocol=PLAIN
sasl.enabled.mechanisms=PLAIN
# Class that performs authorization
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
# If no ACL entry is found, allow every operation.
allow.everyone.if.no.acl.found=true
# Make admin a super user
super.users=User:admin;User:kafka

# ================================================================== node1
# The user_<userName> entries must include the admin user
vi $KAFKA_HOME/config/kafka_server_jaas.conf

KafkaServer {
org.apache.kafka.common.security.plain.PlainLoginModule required
username="admin"
password="Admin*123456"
user_admin="Admin*123456"
user_kafka="Kafka*123456"
user_producer="Producer*123456"
user_consumer="Consumer*123456";
};

# This configuration defines 3 users (kafka, producer and consumer). In the
# KafkaServer section, username and password are what a broker uses to initiate
# connections to the other brokers; in this example the kafka user is used for
# inter-broker communication. user_<userName> defines the passwords for all
# clients that connect to and authenticate with the broker, including the other
# brokers. user_<userName> must include the admin user, otherwise an error is
# raised (the kafka user alone is not enough).

# The JAAS file is passed to each broker's JVM; add the following to kafka-server-start.sh
vi $KAFKA_HOME/bin/kafka-server-start.sh

if [ "x$KAFKA_OPTS" = "x" ]; then
export KAFKA_OPTS="-Djava.security.auth.login.config=/usr/local/kafka_2.12-0.10.2.1/config/kafka_server_jaas.conf"
fi

# Configure the Kafka client side: how producers/consumers work under the PLAIN mechanism
# kafka_client_jaas can be split into separate files such as producer_jaas.conf and consumer_jaas.conf
vi $KAFKA_HOME/config/kafka_client_jaas.conf

KafkaClient {
org.apache.kafka.common.security.plain.PlainLoginModule required
username="kafka"
password="Kafka*123456";
};

# vi $KAFKA_HOME/config/producer_jaas.conf
#KafkaClient {
# org.apache.kafka.common.security.plain.PlainLoginModule required
# username="producer"
# password="Producer*123456";
#};

#vi $KAFKA_HOME/config/consumer_jaas.conf
#KafkaClient {
# org.apache.kafka.common.security.plain.PlainLoginModule required
# username="consumer"
# password="Consumer*123456";
#};

# In the KafkaClient section, username and password configure the user the
# client uses to connect to the broker; here the client connects as the kafka user.

# Edit the producer and consumer configuration files (watch out for stray spaces)
vi $KAFKA_HOME/config/producer.properties

security.protocol=SASL_PLAINTEXT
sasl.mechanism=PLAIN

vi $KAFKA_HOME/config/consumer.properties

security.protocol=SASL_PLAINTEXT
sasl.mechanism=PLAIN

# Add the JVM parameter when starting the producer and the consumer
vi $KAFKA_HOME/bin/kafka-console-consumer.sh

if [ "x$KAFKA_OPTS" = "x" ]; then
export KAFKA_OPTS="-Djava.security.auth.login.config=/usr/local/kafka_2.12-0.10.2.1/config/kafka_client_jaas.conf"
fi

vi $KAFKA_HOME/bin/kafka-console-producer.sh

if [ "x$KAFKA_OPTS" = "x" ]; then
export KAFKA_OPTS="-Djava.security.auth.login.config=/usr/local/kafka_2.12-0.10.2.1/config/kafka_client_jaas.conf"
fi

# Alternative: point each tool at its own JAAS file instead
#vi $KAFKA_HOME/bin/kafka-console-producer.sh
#if [ "x$KAFKA_OPTS" = "x" ]; then
# export KAFKA_OPTS="-Djava.security.auth.login.config=/usr/local/kafka_2.12-0.10.2.1/config/producer_jaas.conf"
#fi
#vi $KAFKA_HOME/bin/kafka-console-consumer.sh
#if [ "x$KAFKA_OPTS" = "x" ]; then
# export KAFKA_OPTS="-Djava.security.auth.login.config=/usr/local/kafka_2.12-0.10.2.1/config/consumer_jaas.conf"
#fi

# ================================================================== node1
# Distribute the SASL configuration files and patched scripts to node2 and node3
scp -r $KAFKA_HOME/config/kafka_server_jaas.conf node2:$KAFKA_HOME/config/
scp -r $KAFKA_HOME/config/kafka_client_jaas.conf node2:$KAFKA_HOME/config/
scp -r $KAFKA_HOME/config/producer.properties node2:$KAFKA_HOME/config/
scp -r $KAFKA_HOME/config/consumer.properties node2:$KAFKA_HOME/config/
scp -r $KAFKA_HOME/bin/kafka-server-start.sh node2:$KAFKA_HOME/bin/
scp -r $KAFKA_HOME/bin/kafka-console-consumer.sh node2:$KAFKA_HOME/bin/
scp -r $KAFKA_HOME/bin/kafka-console-producer.sh node2:$KAFKA_HOME/bin/
scp -r $KAFKA_HOME/config/kafka_server_jaas.conf node3:$KAFKA_HOME/config/
scp -r $KAFKA_HOME/config/kafka_client_jaas.conf node3:$KAFKA_HOME/config/
scp -r $KAFKA_HOME/config/producer.properties node3:$KAFKA_HOME/config/
scp -r $KAFKA_HOME/config/consumer.properties node3:$KAFKA_HOME/config/
scp -r $KAFKA_HOME/bin/kafka-server-start.sh node3:$KAFKA_HOME/bin/
scp -r $KAFKA_HOME/bin/kafka-console-consumer.sh node3:$KAFKA_HOME/bin/
scp -r $KAFKA_HOME/bin/kafka-console-producer.sh node3:$KAFKA_HOME/bin/
# Start
# ================================================================== node1 node2 node3
zkServer.sh start
zkServer.sh status
kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties

# ================================================================== node1
# kafka-topics.sh --delete --zookeeper node1:2181/kafka --topic sasltopic1
# Create a topic
kafka-topics.sh --create --zookeeper node1:2181/kafka --replication-factor 1 --partitions 1 --topic sasltopic1
# List topics
kafka-topics.sh --list --zookeeper node1:2181/kafka
# Describe the topic
kafka-topics.sh --describe --zookeeper node1:2181/kafka --topic sasltopic1
# Grant the producer Write permission
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=node1:2181/kafka --add --allow-principal User:kafka --operation Write --topic sasltopic1
# Producer
kafka-console-producer.sh --broker-list node1:9092 --topic sasltopic1 --producer.config $KAFKA_HOME/config/producer.properties
# Sample console input (commented out so ">" is not parsed as a redirection):
# > This is a message
# > This is another message
# Consumer: grant Read on the topic
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=node1:2181/kafka --add --allow-principal User:kafka --operation Read --topic sasltopic1
# Consumer: grant Read on the consumer group
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=node1:2181/kafka --add --allow-principal User:kafka --operation Read --group sasltopic1group1
# Consumer
kafka-console-consumer.sh --bootstrap-server node2:9092 --topic sasltopic1 --from-beginning --consumer.config $KAFKA_HOME/config/consumer.properties
# Stop
kafka-server-stop.sh
zkServer.sh stop
# Start again for test 2
# ================================================================== node1 node2 node3
zkServer.sh start
zkServer.sh status
kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties

# ================================================================== node1
# kafka-topics.sh --delete --zookeeper node1:2181/kafka --topic sasltopic2
# Create a topic
kafka-topics.sh --create --zookeeper node1:2181,node2:2181,node3:2181/kafka --replication-factor 3 --partitions 3 --topic sasltopic2
# List topics
kafka-topics.sh --list --zookeeper node1:2181/kafka
# Describe the topic
kafka-topics.sh --describe --zookeeper node1:2181,node2:2181,node3:2181/kafka --topic sasltopic2
# Grant the producer Write permission
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka --add --allow-principal User:kafka --operation Write --topic sasltopic2
# Producer
kafka-console-producer.sh --broker-list node1:9092,node2:9092,node3:9092 --topic sasltopic2 --producer.config $KAFKA_HOME/config/producer.properties
# > This is a message
# > This is another message
# Consumer: grant Read on the topic
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka --add --allow-principal User:kafka --operation Read --topic sasltopic2
# Consumer: grant Read on the consumer group
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka --add --allow-principal User:kafka --operation Read --group sasltopic2group2
# Consumer
kafka-console-consumer.sh --bootstrap-server node1:9092,node2:9092,node3:9092 --topic sasltopic2 --from-beginning --consumer.config $KAFKA_HOME/config/consumer.properties
# Stop
kafka-server-stop.sh
zkServer.sh stop
# FIX: "shutdown -h now" was fused onto the previous line; it is a separate command.
shutdown -h now
# Take a VM snapshot: kafka_saslplain

# ================================================================== SASL authentication with multi-node ZooKeeper

# ================================================================== node1
vi $ZOOKEEPER_HOME/conf/zoo.cfg

authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
requireClientAuthScheme=sasl
jaasLoginRenew=3600000

# ================================================================== node2
vi $ZOOKEEPER_HOME/conf/zoo.cfg

authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
requireClientAuthScheme=sasl
jaasLoginRenew=3600000

# ================================================================== node3
vi $ZOOKEEPER_HOME/conf/zoo.cfg

authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
requireClientAuthScheme=sasl
jaasLoginRenew=3600000

# ================================================================== node1
vi $KAFKA_HOME/config/kafka_zoo_jaas.conf

ZKServer {
org.apache.kafka.common.security.plain.PlainLoginModule required
username="kafka"
password="Kafka*123456"
user_kafka="Kafka*123456";
};

vi $KAFKA_HOME/bin/zookeeper-server-start.sh

if [ "x$KAFKA_OPTS" = "x" ]; then
export KAFKA_OPTS="-Djava.security.auth.login.config=/usr/local/kafka_2.12-0.10.2.1/config/kafka_zoo_jaas.conf -Dzookeeper.sasl.serverconfig=ZKServer"
fi

scp -r $KAFKA_HOME/config/kafka_zoo_jaas.conf node2:$KAFKA_HOME/config/
scp -r $KAFKA_HOME/config/kafka_zoo_jaas.conf node3:$KAFKA_HOME/config/
scp -r $KAFKA_HOME/bin/zookeeper-server-start.sh node2:$KAFKA_HOME/bin/
scp -r $KAFKA_HOME/bin/zookeeper-server-start.sh node3:$KAFKA_HOME/bin/
# Start
# ================================================================== node1 node2 node3
zkServer.sh start
zkServer.sh status
kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties

# ================================================================== node1
# kafka-topics.sh --delete --zookeeper node1:2181/kafka --topic sasltopic3
# Create a topic
kafka-topics.sh --create --zookeeper node1:2181,node2:2181,node3:2181/kafka --replication-factor 3 --partitions 3 --topic sasltopic3
# List topics
kafka-topics.sh --list --zookeeper node1:2181/kafka
# Describe the topic
kafka-topics.sh --describe --zookeeper node1:2181,node2:2181,node3:2181/kafka --topic sasltopic3
# Grant the producer Write permission
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka --add --allow-principal User:kafka --operation Write --topic sasltopic3
# Producer
kafka-console-producer.sh --broker-list node1:9092,node2:9092,node3:9092 --topic sasltopic3 --producer.config $KAFKA_HOME/config/producer.properties
# > This is a message
# > This is another message
# Consumer: grant Read on the topic
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka --add --allow-principal User:kafka --operation Read --topic sasltopic3
# Consumer: grant Read on the consumer group
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=node1:2181,node2:2181,node3:2181/kafka --add --allow-principal User:kafka --operation Read --group sasltopic3group3
# Consumer
kafka-console-consumer.sh --bootstrap-server node1:9092,node2:9092,node3:9092 --topic sasltopic3 --from-beginning --consumer.config $KAFKA_HOME/config/consumer.properties
# Stop
kafka-server-stop.sh
zkServer.sh stop
# FIX: "shutdown -h now" was fused onto the previous line; it is a separate command.
shutdown -h now
# Take a VM snapshot: kafka_morezookeeper_saslplain

hadoop生态搭建(3节点)-08.kafka配置的更多相关文章

  1. hadoop生态搭建(3节点)

    软件:CentOS-7    VMware12    SSHSecureShellClient shell工具:Xshell 规划 vm网络配置 01.基础配置 02.ssh配置 03.zookeep ...

  2. hadoop生态搭建(3节点)-03.zookeeper配置

    # https://www.oracle.com/technetwork/java/javase/downloads/java-archive-javase8-2177648.html # ===== ...

  3. hadoop生态搭建(3节点)-04.hadoop配置

    如果之前没有安装jdk和zookeeper,安装了的请直接跳过 # https://www.oracle.com/technetwork/java/javase/downloads/java-arch ...

  4. hadoop生态搭建(3节点)-09.flume配置

    # http://archive.apache.org/dist/flume/1.8.0/# ===================================================== ...

  5. hadoop生态搭建(3节点)-10.spark配置

    # https://www.scala-lang.org/download/2.12.4.html# ================================================= ...

  6. hadoop生态搭建(3节点)-13.mongodb配置

    # 13.mongodb配置_副本集_认证授权# ==================================================================安装 mongod ...

  7. hadoop生态搭建(3节点)-15.Nginx_Keepalived_Tomcat配置

    # Nginx+Tomcat搭建高可用服务器名称 预装软件 IP地址Nginx服务器 Nginx1 192.168.6.131Nginx服务器 Nginx2 192.168.6.132 # ===== ...

  8. hadoop生态搭建(3节点)-12.rabbitmq配置

    # 安装 需要相关包# ==================================================================node1 node2 node3 yum ...

  9. hadoop生态搭建(3节点)-14.redis配置

    # ==================================================================规划node1 redis:7000 7001 192.168. ...

随机推荐

  1. webpack之傻瓜式教程及前端自动化入门

    原文地址:https://www.cnblogs.com/liqiyuan/p/6246870.html 接触webpack也有挺长一段时间了,公司的项目也是一直用着webpack在打包处理,但前几天 ...

  2. 修改mysql允许主机访问的权限

    开启mysql的远程访问权限 默认mysql的用户是没有远程访问的权限的,因此当程序跟数据库不在同一台服务器上时,我们需要开启mysql的远程访问权限. 主流的有两种方法,改表法和授权法. 相对而言, ...

  3. 工作好搭档(四):铷安居 H-C81 电脑桌

    引言:工欲善其事,必先利其器.码农十年,与电脑打了二十多年的交道,也配置了一些过得去的装备.资金有限,更希望所有的投入都在刀刃上.写工作好搭档系列,是晒考虑的原因.思路.经验和教训.欢迎并希望大伙能一 ...

  4. nfs 服务器

    1.创建共享目录 #mkdir /home/hellolinux/nfs 2.创建或修改/etc/exports文件 #vi /etc/exports home/hellolinux/nfs 192. ...

  5. NGUI UILabel文字宽度和 UITweener

    做个记录 方便别人和自己以后查找. NGUI UILabel   文字宽度 高度 mLabel.GetComponent<UILabel>().getLabWidth()   mLabel ...

  6. swagger使用二:swagger配置多个项目注释

    在项目中采用swagger测试接口,提供接口给其他人员都非常的方便. 在swagger默认配置中,默认只显示接口访问层中的注释,可是很多的参数说明都已经在实体层中了啊?(如下图)不可能再把实体层中的模 ...

  7. 国外优秀JavaScript资源推荐

    JavaScript的优秀资源          原文链接:http://code.tutsplus.com/articles/resources-for-staying-on-top-of-java ...

  8. 16、SpringBoot-CRUD错误处理机制(3)

    3).将自己指定的数据携带出去 出现错误以后,会来到/error请求,会被BasicErrorController 进行处理 响应出去的数据是由 getErrorAttributes 得到的( Abs ...

  9. shiro注解,初始化资源和权限,会话管理

     有具体问题的可以参考之前的关于shiro的博文,关于shiro的博文均是一次工程的内容  注解: 新建一个类: 此时需要有admin的权限才可以执行下面的代码 public class ShiroS ...

  10. Windows 10推送的锁屏壁纸保存方法

    Windows 10推送的锁屏壁纸保存方法 工作中使用的系统为Windows 10,锁屏时显示的壁纸很漂亮,并且每天都会更新,有几张特别喜欢,于是就想这些壁纸到底保存在哪里呢?经过一番摸索,终于搞明白 ...