1. Broker configuration (server.properties):

broker.id=0
num.network.threads=9
num.io.threads=24
socket.send.buffer.bytes=102400
listeners=PLAINTEXT://:9092
port=9092  
host.name=
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/home/service/var/kafka
num.partitions=12
offsets.topic.replication.factor=2   
transaction.state.log.min.isr=1
log.retention.hours=72
log.retention.check.interval.ms=300000
zookeeper.connect=10.12.176.3:2181,10.12.172.32:2181,10.12.174.14:2181/security-kafka
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=3000
log.cleaner.enable=true
delete.topic.enable=true
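
A broker can be started with this file once it is saved as config/server.properties (a minimal sketch, assuming the standard Kafka distribution layout):

# Start the broker as a background daemon with the configuration above
bin/kafka-server-start.sh -daemon config/server.properties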

2. Adding replicas to topic cpd-app-down via partition reassignment:

1. Contents of addrep_cpd-app-down.json:
{"version":1, "partitions":[
{"topic":"cpd-app-down","partition":0,"replicas":[1,2]},
{"topic":"cpd-app-down","partition":1,"replicas":[2,3]},
{"topic":"cpd-app-down","partition":2,"replicas":[3,4]},
{"topic":"cpd-app-down","partition":3,"replicas":[4,5]},
{"topic":"cpd-app-down","partition":4,"replicas":[5,6]},
{"topic":"cpd-app-down","partition":5,"replicas":[6,0]},
{"topic":"cpd-app-down","partition":6,"replicas":[0,1]},
{"topic":"cpd-app-down","partition":7,"replicas":[1,2]},
{"topic":"cpd-app-down","partition":8,"replicas":[2,3]},
{"topic":"cpd-app-down","partition":9,"replicas":[3,4]},
{"topic":"cpd-app-down","partition":10,"replicas":[4,5]},
{"topic":"cpd-app-down","partition":11,"replicas":[5,6]},
{"topic":"cpd-app-down","partition":12,"replicas":[6,0]},
{"topic":"cpd-app-down","partition":13,"replicas":[0,1]}
]}

2. Run the reassignment:
sh kafka-reassign-partitions.sh --zookeeper 10.6.72.38:2181,10.6.72.8:2181 --reassignment-json-file ../config/addrep_cpd-app-down.json --execute
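
After --execute finishes, the status of the move can be checked with the same tool's --verify mode (a sketch, reusing the ZooKeeper quorum and JSON file from above):

# Report, per partition, whether the reassignment completed successfully
sh kafka-reassign-partitions.sh --zookeeper 10.6.72.38:2181,10.6.72.8:2181 --reassignment-json-file ../config/addrep_cpd-app-down.json --verify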

3. Broker configuration (server.properties) for the 10.32.x.x cluster:

broker.id=
listeners=PLAINTEXT://10.32.104.37:9092
num.network.threads=
num.io.threads=
socket.send.buffer.bytes=
socket.receive.buffer.bytes=
socket.request.max.bytes=
log.dirs=/var/data/kafka
num.partitions=
num.recovery.threads.per.data.dir=
log.retention.hours=
log.segment.bytes=
log.retention.check.interval.ms=
log.cleaner.enable=true
zookeeper.connect=10.32.106.42:,10.32.114.34:,10.32.104.37:
zookeeper.connection.timeout.ms=
delete.topic.enable=true
transaction.state.log.min.isr=
log.retention.hours=
default.replication.factor=
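
Once the brokers are up, their registration can be confirmed from ZooKeeper (a sketch, assuming the quorum listens on the default port 2181 used elsewhere in this post):

# List the broker IDs currently registered in ZooKeeper
bin/zookeeper-shell.sh 10.32.106.42:2181 ls /brokers/ids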

4. Migrating a topic to new brokers:

1. Create topics-to-move.json with the topic information:
{"topics":
[{"topic": "TestSing"}],
"version":1
}

2. Generate the reassignment configuration (JSON) for moving the topic to the new brokers:
sh bin/kafka-reassign-partitions.sh --zookeeper 10.32.106.42:2181 --topics-to-move-json-file topics-to-move.json --broker-list "3,4,5" --generate

3. Run the script with the generated JSON file to start the migration:
sh bin/kafka-reassign-partitions.sh --zookeeper 10.32.106.42:2181 --reassignment-json-file config/testsing.json --execute

5. Splitting the Kafka logs by file size (log4j.properties): switch kafkaAppender from DailyRollingFileAppender to RollingFileAppender with a size limit and a backup count.
#log4j.appender.kafkaAppender=org.apache.log4j.DailyRollingFileAppender
#log4j.appender.kafkaAppender.DatePattern='.'yyyy-MM-dd-HH
log4j.appender.kafkaAppender=org.apache.log4j.RollingFileAppender
log4j.appender.kafkaAppender.MaxFileSize=500MB
log4j.appender.kafkaAppender.MaxBackupIndex=5

The full log4j.properties after the change:

log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.kafkaAppender=org.apache.log4j.RollingFileAppender
log4j.appender.kafkaAppender.MaxFileSize=500MB
log4j.appender.kafkaAppender.MaxBackupIndex=5
log4j.appender.kafkaAppender.File=${kafka.logs.dir}/server.log
log4j.appender.kafkaAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.kafkaAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.stateChangeAppender=org.apache.log4j.RollingFileAppender
log4j.appender.stateChangeAppender.MaxFileSize=500MB
log4j.appender.stateChangeAppender.MaxBackupIndex=5
log4j.appender.stateChangeAppender.File=${kafka.logs.dir}/state-change.log
log4j.appender.stateChangeAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.stateChangeAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.requestAppender=org.apache.log4j.RollingFileAppender
log4j.appender.requestAppender.MaxFileSize=500MB
log4j.appender.requestAppender.MaxBackupIndex=5
log4j.appender.requestAppender.File=${kafka.logs.dir}/kafka-request.log
log4j.appender.requestAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.requestAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.cleanerAppender=org.apache.log4j.RollingFileAppender
log4j.appender.cleanerAppender.MaxFileSize=500MB
log4j.appender.cleanerAppender.MaxBackupIndex=5
log4j.appender.cleanerAppender.File=${kafka.logs.dir}/log-cleaner.log
log4j.appender.cleanerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.cleanerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.controllerAppender=org.apache.log4j.RollingFileAppender
log4j.appender.controllerAppender.MaxFileSize=500MB
log4j.appender.controllerAppender.MaxBackupIndex=5
log4j.appender.controllerAppender.File=${kafka.logs.dir}/controller.log
log4j.appender.controllerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.controllerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.appender.authorizerAppender=org.apache.log4j.RollingFileAppender
log4j.appender.authorizerAppender.MaxFileSize=500MB
log4j.appender.authorizerAppender.MaxBackupIndex=5
log4j.appender.authorizerAppender.File=${kafka.logs.dir}/kafka-authorizer.log
log4j.appender.authorizerAppender.layout=org.apache.log4j.PatternLayout
log4j.appender.authorizerAppender.layout.ConversionPattern=[%d] %p %m (%c)%n
log4j.logger.kafka=INFO, kafkaAppender
log4j.logger.kafka.network.RequestChannel$=WARN, requestAppender
log4j.additivity.kafka.network.RequestChannel$=false
log4j.logger.kafka.request.logger=WARN, requestAppender
log4j.additivity.kafka.request.logger=false
log4j.logger.kafka.controller=INFO, controllerAppender
log4j.additivity.kafka.controller=false
log4j.logger.kafka.log.LogCleaner=INFO, cleanerAppender
log4j.additivity.kafka.log.LogCleaner=false
log4j.logger.state.change.logger=INFO, stateChangeAppender
log4j.additivity.state.change.logger=false
log4j.logger.kafka.authorizer.logger=WARN, authorizerAppender
log4j.additivity.kafka.authorizer.logger=false
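
After a broker restart the new rolling policy can be checked directly in the log directory (a sketch; kafka.logs.dir normally resolves to the logs/ directory under the Kafka installation):

# server.log plus up to 5 rolled backups (server.log.1 ... server.log.5), each capped at 500MB
ls -lh $KAFKA_HOME/logs/server.log*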
