I have recently been building distributed microservices with Spring Cloud and, along the way, put together some notes on integrating Kafka. Today I will walk through the Kafka cluster installation process in detail:

1. Create a kafka directory under the root directory (do this on service1, service2, and service3)

[root@localhost /]# mkdir kafka
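If you would rather not log in to each node separately, here is a rough sketch of creating the directory on the other two nodes over ssh from service1 (this assumes root ssh access to the IP addresses used later in this article):

[root@localhost /]# for h in 192.168.2.212 192.168.2.213; do ssh root@$h "mkdir /kafka"; done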

2. Upload kafka_2.9.2-0.8.1.1.tgz to the /software directory on server service1 via Xshell

3. Remote-copy /software/kafka_2.9.2-0.8.1.1.tgz from service1 to service2 and service3

[root@localhost software]# scp -r /software/kafka_2.9.2-0.8.1.1.tgz root@192.168.2.212:/software/

[root@localhost software]# scp -r /software/kafka_2.9.2-0.8.1.1.tgz root@192.168.2.213:/software/
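Optionally, verify the transfers by comparing checksums: run the command below on all three servers and make sure the hashes match (md5sum is part of a standard CentOS install):

[root@localhost software]# md5sum /software/kafka_2.9.2-0.8.1.1.tgz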

4. Copy /software/kafka_2.9.2-0.8.1.1.tgz to the /kafka/ directory (run on service1, service2, and service3)

[root@localhost software]# cp /software/kafka_2.9.2-0.8.1.1.tgz /kafka/

5. Extract kafka_2.9.2-0.8.1.1.tgz (run on service1, service2, and service3)

[root@localhost /]# cd /kafka/

[root@localhost kafka]# tar -zxvf kafka_2.9.2-0.8.1.1.tgz

6. Create the Kafka message log directory (on service1, service2, and service3); this is the directory that log.dirs points to in the configuration below

[root@localhost kafka]# mkdir kafkaLogs

7. Edit the Kafka configuration file (on service1, service2, and service3)

[root@localhost /]# cd /kafka/kafka_2.9.2-0.8.1.1/

[root@localhost kafka_2.9.2-0.8.1.1]# cd config/

[root@localhost config]# ls

consumer.properties  log4j.properties  producer.properties  server.properties  test-log4j.properties  tools-log4j.properties  zookeeper.properties

[root@localhost config]# vi server.properties

# Licensed to the Apache Software Foundation (ASF) under one or more

# contributor license agreements.  See the NOTICE file distributed with

# this work for additional information regarding copyright ownership.

# The ASF licenses this file to You under the Apache License, Version 2.0

# (the "License"); you may not use this file except in compliance with

# the License.  You may obtain a copy of the License at

#

#    http://www.apache.org/licenses/LICENSE-2.0

#

# Unless required by applicable law or agreed to in writing, software

# distributed under the License is distributed on an "AS IS" BASIS,

# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.

# See the License for the specific language governing permissions and

# limitations under the License.

# see kafka.server.KafkaConfig for additional details and defaults

############################# Server Basics #############################

# The id of the broker. This must be set to a unique integer for each broker.

broker.id=0  -- unique identifier for this broker; it must be different on every node in the cluster

############################# Socket Server Settings #############################

# The port the socket server listens on

port=19092  -- TCP port this broker exposes to clients; the default is 9092

# Hostname the broker will bind to. If not set, the server will bind to all interfaces

host.name=192.168.2.213  -- commented out by default; enable it and set it to this node's own IP. If the broker is reached through a hostname and DNS resolution fails, file handles leak. Do not underestimate the failure rate: Kafka's throughput is high enough that each partition of each topic can process over 100,000 messages per second, so even a one-in-ten-thousand DNS failure rate leaks roughly 10 file handles per second. The handles are soon exhausted, the Linux open-file limit is exceeded, and errors follow (see the note on raising the limit after this file). Configuring an IP address avoids DNS resolution entirely.

# Hostname the broker will advertise to producers and consumers. If not set, it uses the

# value for "host.name" if configured.  Otherwise, it will use the value returned from

# java.net.InetAddress.getCanonicalHostName().

#advertised.host.name=<hostname routable by clients>

# The port to publish to ZooKeeper for clients to use. If this is not set,

# it will publish the same port that the broker binds to.

#advertised.port=<port accessible by clients>

# The number of threads handling network requests

num.network.threads=2   -- number of threads handling network requests; usually left as-is

# The number of threads doing disk I/O

num.io.threads=8  -- number of threads doing disk I/O; this must be no smaller than the number of directories in log.dirs

# The send buffer (SO_SNDBUF) used by the socket server

socket.send.buffer.bytes=1048576  -- send buffer (SO_SNDBUF): outgoing messages are staged here and sent in one batch once enough data has accumulated

# The receive buffer (SO_RCVBUF) used by the socket server

socket.receive.buffer.bytes=1048576  -- receive buffer: incoming messages accumulate here before being written to disk

# The maximum size of a request that the socket server will accept (protection against OOM)

socket.request.max.bytes=104857600   -- maximum size of a single request sent to Kafka; this must not exceed the JVM heap size

############################# Log Basics #############################

# A comma separated list of directories under which to store log files

log.dirs=/kafka/kafkaLogs  -- multiple directories can be listed, separated by commas

# The default number of log partitions per topic. More partitions allow greater

# parallelism for consumption, but this will also result in more files across

# the brokers.

num.partitions=2  -- default number of partitions for a topic

############################# Log Flush Policy #############################

# Messages are immediately written to the filesystem but by default we only fsync() to sync

# the OS cache lazily. The following configurations control the flush of data to disk.

# There are a few important trade-offs here:

#    1. Durability: Unflushed data may be lost if you are not using replication.

#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.

#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.

# The settings below allow one to configure the flush policy to flush data after a period of time or

# every N messages (or both). This can be done globally and overridden on a per-topic basis.

# The number of messages to accept before forcing a flush of data to disk

#log.flush.interval.messages=10000

# The maximum amount of time a message can sit in a log before we force a flush

#log.flush.interval.ms=1000

############################# Log Retention Policy #############################

# The following configurations control the disposal of log segments. The policy can

# be set to delete segments after a period of time, or after a given size has accumulated.

# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens

# from the end of the log.

# The minimum age of a log file to be eligible for deletion

log.retention.hours=168

message.max.bytes=5048576   -- maximum size of a single message the broker will accept

default.replication.factor=2  -- default replication factor; with the default of 1 there is only a single copy of each message, which is not safe, so we set it to 2: if one replica of a partition fails, the other replica can continue serving it

replica.fetch.max.bytes=5048576 -- maximum number of bytes a replica fetches per partition; keep this at least as large as message.max.bytes so that the largest messages can still be replicated

# A size-based retention policy for logs. Segments are pruned from the log as long as the remaining

# segments don't drop below log.retention.bytes.

#log.retention.bytes=1073741824

# The maximum size of a log segment file. When this size is reached a new log segment will be created.

log.segment.bytes=536870912  -- maximum size of a log segment file; a new segment is rolled when this size is reached

# The interval at which log segments are checked to see if they can be deleted according

# to the retention policies

log.retention.check.interval.ms=60000

# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.

# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.

log.cleaner.enable=false  -- log compaction is not used

############################# Zookeeper #############################

# Zookeeper connection string (see zookeeper docs for details).

# This is a comma separated host:port pairs, each corresponding to a zk

# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".

# You can also append an optional chroot string to the urls to specify the

# root directory for all kafka znodes.

zookeeper.connect=192.168.2.211:2181,192.168.2.212:2181,192.168.2.213:2181   -- ZooKeeper connection string

# Timeout in ms for connecting to zookeeper

zookeeper.connection.timeout.ms=1000000
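Note that broker.id and host.name are the two values that must differ on every node (the listing above happens to show broker.id=0 alongside service3's IP). A minimal per-node summary, assuming broker IDs 0, 1, and 2 map to service1, service2, and service3; all other settings are identical on the three servers:

service1 (192.168.2.211): broker.id=0  host.name=192.168.2.211

service2 (192.168.2.212): broker.id=1  host.name=192.168.2.212

service3 (192.168.2.213): broker.id=2  host.name=192.168.2.213

As mentioned in the host.name note, leaked file handles eventually hit the Linux open-file limit. You can check the current limit and raise it for the running shell as follows (65536 is just a common choice; add a nofile entry to /etc/security/limits.conf to make the change permanent):

[root@localhost ~]# ulimit -n

[root@localhost ~]# ulimit -n 65536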

8. Start the Kafka service (run from the bin directory on each of the three nodes)

[root@localhost bin]# ./kafka-server-start.sh -daemon ../config/server.properties

[root@localhost bin]# jps

27413 Kafka

27450 Jps

17884 QuorumPeerMain
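QuorumPeerMain is the ZooKeeper process and Kafka is the broker we just started. To confirm that all three brokers registered with ZooKeeper, you can list the /brokers/ids znode; here is a sketch using the zkCli.sh shipped with ZooKeeper (adjust the path to your own ZooKeeper installation). With all three nodes up you should see [0, 1, 2]:

[root@localhost ~]# /path/to/zookeeper/bin/zkCli.sh -server 192.168.2.211:2181

[zk: 192.168.2.211:2181(CONNECTED) 0] ls /brokers/ids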

9. Verify the Kafka cluster by creating a test topic

[root@localhost bin]# ./kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 2 --partitions 1 --topic test

Created topic "test".
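To double-check how the partition and its replicas were placed, kafka-topics.sh also supports --describe; with --replication-factor 2 you should see two broker IDs in the Replicas column of the output:

[root@localhost bin]# ./kafka-topics.sh --describe --zookeeper localhost:2181 --topic test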

10. Start the console producer on service1

[root@localhost bin]# ./kafka-console-producer.sh --broker-list 192.168.2.211:9092 --topic test

SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".

SLF4J: Defaulting to no-operation (NOP) logger implementation

SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.

11. Start the console consumer on service2

[root@localhost bin]# ./kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning

SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".

SLF4J: Defaulting to no-operation (NOP) logger implementation

SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.

12. Send a message from the producer on service1: hello honghu

[root@localhost bin]# ./kafka-console-producer.sh --broker-list 192.168.2.211:9092 --topic test

SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".

SLF4J: Defaulting to no-operation (NOP) logger implementation

SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.

hello honghu

13. The consumer receives the message

[root@localhost bin]# ./kafka-console-consumer.sh --zookeeper localhost:2181 --topic test --from-beginning

SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".

SLF4J: Defaulting to no-operation (NOP) logger implementation

SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.

hello honghu
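Finally, when a broker has to be taken down cleanly, for example to change its configuration, the distribution ships a stop script next to the start script:

[root@localhost bin]# ./kafka-server-stop.sh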
