1. Create the topology and configure the KafkaSpout and Bolts

KafkaTopologyBasic.java:

    package org.mort.storm.kafka;

    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.storm.Config;
    import org.apache.storm.LocalCluster;
    import org.apache.storm.StormSubmitter;
    import org.apache.storm.generated.StormTopology;
    import org.apache.storm.kafka.spout.*;
    import org.apache.storm.topology.TopologyBuilder;
    import org.apache.storm.kafka.spout.KafkaSpoutRetryExponentialBackoff.TimeInterval;
    import org.apache.storm.tuple.Fields;
    import org.apache.storm.tuple.Values;

    import java.util.List;
    import java.util.concurrent.TimeUnit;

    import static org.apache.storm.kafka.spout.KafkaSpoutConfig.FirstPollOffsetStrategy.LATEST;

    /**
     * Consume Kafka data with Storm and build the topology (using TopologyBuilder).
     * Works together with SentenceBolt and PrinterBolt.
     */
    public class KafkaTopologyBasic {

        /**
         * JUST_VALUE_FUNC is the record translator for Kafka messages;
         * here it simply emits the record value as a single field.
         */
        private static Func<ConsumerRecord<String, String>, List<Object>> JUST_VALUE_FUNC =
            new Func<ConsumerRecord<String, String>, List<Object>>() {
                @Override
                public List<Object> apply(ConsumerRecord<String, String> record) {
                    return new Values(record.value());
                }
            };

        /**
         * KafkaSpout retry policy (exponential backoff).
         */
        protected KafkaSpoutRetryService newRetryService() {
            return new KafkaSpoutRetryExponentialBackoff(new TimeInterval(500L, TimeUnit.MICROSECONDS),
                TimeInterval.milliSeconds(2), Integer.MAX_VALUE, TimeInterval.seconds(10));
        }

        /**
         * KafkaSpout configuration.
         * The new KafkaSpout is configured through KafkaSpoutConfig, which defines the Kafka
         * connection, topic, retry policy, first-poll offset strategy and other parameters.
         */
        protected KafkaSpoutConfig<String, String> newKafkaSpoutConfig() {
            return KafkaSpoutConfig.builder("192.168.1.201:9092", "first")
                .setProp(ConsumerConfig.GROUP_ID_CONFIG, "kafkaSpoutTestGroup")
                .setProp(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, 200)
                .setRecordTranslator(JUST_VALUE_FUNC, new Fields("str"))
                .setRetry(newRetryService())
                .setOffsetCommitPeriodMs(10000)
                .setFirstPollOffsetStrategy(LATEST)
                .setMaxUncommittedOffsets(250)
                .build();
        }

        /**
         * Wire the spout, bolts and configuration together and build the topology
         * (using TopologyBuilder).
         */
        public StormTopology buildTopology() {
            TopologyBuilder builder = new TopologyBuilder();
            builder.setSpout("KafkaSpout", new KafkaSpout<String, String>(newKafkaSpoutConfig()), 1);
            builder.setBolt("SentenceBolt", new SentenceBolt(), 1).globalGrouping("KafkaSpout");
            builder.setBolt("PrinterBolt", new PrinterBolt(), 1).globalGrouping("SentenceBolt");
            return builder.createTopology();
        }

        public final static boolean isCluster = true;

        public static void main(String[] args) {
            // 1. Build the topology
            KafkaTopologyBasic kb = new KafkaTopologyBasic();
            StormTopology topology = kb.buildTopology();

            // 2. Create the configuration object
            Config conf = new Config();
            // Number of workers to start
            conf.setNumWorkers(4);

            // 3. Submit the topology
            if (isCluster) {
                try {
                    // Submit to the cluster
                    StormSubmitter.submitTopology("SentenceTopology", conf, topology);
                } catch (Exception e) {
                    e.printStackTrace();
                }
            } else {
                // Submit to a local cluster
                LocalCluster cluster = new LocalCluster();
                cluster.submitTopology("KafkaTopology", conf, topology);
                try {
                    // Wait for some time before exiting
                    System.out.println("Waiting to consume from kafka");
                    Thread.sleep(300000);
                } catch (Exception exception) {
                    System.out.println("Thread interrupted exception : " + exception);
                }
                // kill the KafkaTopology
                cluster.killTopology("KafkaTopology");
                // shut down the local test cluster
                cluster.shutdown();
            }
        }
    }

PrinterBolt.java:

    package org.mort.storm.kafka;

    import org.apache.storm.topology.BasicOutputCollector;
    import org.apache.storm.topology.OutputFieldsDeclarer;
    import org.apache.storm.topology.base.BaseBasicBolt;
    import org.apache.storm.tuple.Tuple;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    /**
     * Bolt that prints the received sentences.
     */
    public class PrinterBolt extends BaseBasicBolt {

        private static final long serialVersionUID = 1L;

        private static final Logger logger = LoggerFactory.getLogger(PrinterBolt.class);

        @Override
        public void execute(Tuple input, BasicOutputCollector collector) {
            // get the sentence from the tuple and print it
            String sentence = input.getString(0);
            logger.info("Received Sentence: " + sentence);
            System.out.println("Received Sentence: " + sentence);
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            // we don't emit anything
        }
    }

SentenceBolt.java:

    package org.mort.storm.kafka;

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.commons.lang.StringUtils;
    import org.apache.storm.topology.BasicOutputCollector;
    import org.apache.storm.topology.OutputFieldsDeclarer;
    import org.apache.storm.topology.base.BaseBasicBolt;
    import org.apache.storm.tuple.Fields;
    import org.apache.storm.tuple.Tuple;
    import org.apache.storm.tuple.Values;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    /**
     * Bolt that joins incoming words into a sentence. Each input tuple carries one word;
     * a word ending with "." marks the end of a sentence.
     */
    public class SentenceBolt extends BaseBasicBolt {

        private static final long serialVersionUID = 1L;

        private static final Logger logger = LoggerFactory.getLogger(SentenceBolt.class);

        private List<String> words = new ArrayList<String>();

        @Override
        public void execute(Tuple input, BasicOutputCollector collector) {
            // Get the word from the tuple
            String word = input.getString(0);
            if (StringUtils.isBlank(word)) {
                // ignore blank lines
                return;
            }
            logger.info("Received Word:" + word);
            System.out.println("Received Word:" + word);
            // add word to current list of words
            words.add(word);
            if (word.endsWith(".")) {
                // word ends with '.' which means this is the end of the sentence,
                // so the SentenceBolt publishes a sentence tuple
                collector.emit(new Values(StringUtils.join(words, ' ')));
                // and resets the words list.
                words.clear();
            }
        }

        @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("sentence"));
        }

    }

2. pom.xml setup

1) Configure maven-assembly-plugin to build a jar with dependencies

    <build>
        <plugins>
            <plugin>
                <artifactId>maven-assembly-plugin</artifactId>
                <configuration>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                    <archive>
                        <manifest>
                            <mainClass>org.mort.storm.kafka.KafkaTopologyBasic</mainClass>
                        </manifest>
                    </archive>
                </configuration>
            </plugin>
        </plugins>
    </build>

2) Add the dependencies

Note that the storm-core dependency must be declared with <scope>provided</scope> to avoid conflicts with the cluster's own Storm jars at runtime (remove the scope when running locally, add it back when packaging; a Maven-profile sketch for toggling this follows the dependency list below).

    <dependencies>
        <dependency> <!-- bridge: route SLF4J to Log4j2 -->
            <groupId>org.apache.logging.log4j</groupId>
            <artifactId>log4j-slf4j-impl</artifactId>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-api</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.logging.log4j</groupId>
            <artifactId>log4j-api</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.logging.log4j</groupId>
            <artifactId>log4j-core</artifactId>
        </dependency>
        <dependency>
            <groupId>com.google.guava</groupId>
            <artifactId>guava</artifactId>
            <version>26.0-jre</version>
        </dependency>
        <dependency>
            <groupId>org.apache.storm</groupId>
            <artifactId>storm-core</artifactId>
            <version>1.2.3</version>
            <scope>provided</scope>
        </dependency>
        <!-- storm-kafka client -->
        <dependency>
            <groupId>org.apache.storm</groupId>
            <artifactId>storm-kafka-client</artifactId>
            <version>1.2.3</version>
        </dependency>
        <!-- kafka client -->
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>2.0.1</version>
        </dependency>
    </dependencies>
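
A common way to follow the "remove locally, add back when packaging" advice without editing the pom by hand is a Maven profile that re-declares storm-core in compile scope for local runs. The snippet below is only a sketch: the profile id `local-run` is an arbitrary name, and the effective scope should be verified with `mvn help:effective-pom -Plocal-run`:

    <!-- hypothetical profile: activate with "mvn -Plocal-run ..." when running the LocalCluster path -->
    <profiles>
        <profile>
            <id>local-run</id>
            <dependencies>
                <!-- re-declare storm-core without <scope>provided</scope> so it lands on the runtime classpath -->
                <dependency>
                    <groupId>org.apache.storm</groupId>
                    <artifactId>storm-core</artifactId>
                    <version>1.2.3</version>
                    <scope>compile</scope>
                </dependency>
            </dependencies>
        </profile>
    </profiles>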

3. Run assembly:assembly to build the jar
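
For example, from the project root (the target/ path is the standard Maven output location for the artifact named in the run command below):

    # builds the fat jar via the assembly plugin configured above
    mvn clean assembly:assembly
    # result: target/storm-demo-1.0-SNAPSHOT-jar-with-dependencies.jar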

4. Copy the jar to the cluster and submit it

    bin/storm jar /opt/run/storm-demo-1.0-SNAPSHOT-jar-with-dependencies.jar org.mort.storm.kafka.KafkaTopologyBasic
    # usage: bin/storm jar [path to jar] [fully qualified main class]

Output:

    [root@hadoop201 apache-storm-1.2.3]# bin/storm jar /opt/run/storm-demo-1.0-SNAPSHOT-jar-with-dependencies.jar org.mort.storm.kafka.KafkaTopologyBasic
    SLF4J: Class path contains multiple SLF4J bindings.
    SLF4J: Found binding in [jar:file:/opt/module/apache-storm-1.2.3/lib/log4j-slf4j-impl-2.8.2.jar!/org/slf4j/impl/StaticLoggerBinder.class]
    SLF4J: Found binding in [jar:file:/opt/run/storm-demo-1.0-SNAPSHOT-jar-with-dependencies.jar!/org/slf4j/impl/StaticLoggerBinder.class]
    SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
    SLF4J: Actual binding is of type [org.apache.logging.slf4j.Log4jLoggerFactory]
    Running: /opt/module/jdk1.8.0_144/bin/java -client -Ddaemon.name= -Dstorm.options= -Dstorm.home=/opt/module/apache-storm-1.2.3 -Dstorm.log.dir=/opt/module/apache-storm-1.2.3/logs -Djava.library.path=/usr/local/lib:/opt/local/lib:/usr/lib -Dstorm.conf.file= -cp /opt/module/apache-storm-1.2.3/*:/opt/module/apache-storm-1.2.3/lib/*:/opt/module/apache-storm-1.2.3/extlib/*:/opt/run/storm-demo-1.0-SNAPSHOT-jar-with-dependencies.jar:/opt/module/apache-storm-1.2.3/conf:/opt/module/apache-storm-1.2.3/bin -Dstorm.jar=/opt/run/storm-demo-1.0-SNAPSHOT-jar-with-dependencies.jar -Dstorm.dependency.jars= -Dstorm.dependency.artifacts={} org.mort.storm.kafka.KafkaTopologyBasic
    SLF4J: Class path contains multiple SLF4J bindings.
    SLF4J: Found binding in [jar:file:/opt/module/apache-storm-1.2.3/lib/log4j-slf4j-impl-2.8.2.jar!/org/slf4j/impl/StaticLoggerBinder.class]
    SLF4J: Found binding in [jar:file:/opt/run/storm-demo-1.0-SNAPSHOT-jar-with-dependencies.jar!/org/slf4j/impl/StaticLoggerBinder.class]
    SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
    SLF4J: Actual binding is of type [org.apache.logging.slf4j.Log4jLoggerFactory]
    5481 [main] INFO o.a.s.k.s.KafkaSpoutConfig - Setting Kafka consumer property 'auto.offset.reset' to 'earliest' to ensure at-least-once processing
    5564 [main] INFO o.a.s.k.s.KafkaSpoutConfig - Setting Kafka consumer property 'enable.auto.commit' to 'false', because the spout does not support auto-commit
    7441 [main] WARN o.a.s.u.Utils - STORM-VERSION new 1.2.3 old null
    7658 [main] INFO o.a.s.StormSubmitter - Generated ZooKeeper secret payload for MD5-digest: -8420262939352556619:-8011743779888436007
    8316 [main] INFO o.a.s.u.NimbusClient - Found leader nimbus : hadoop201.com:6627
    8388 [main] INFO o.a.s.s.a.AuthUtils - Got AutoCreds []
    8426 [main] INFO o.a.s.u.NimbusClient - Found leader nimbus : hadoop201.com:6627
    8661 [main] INFO o.a.s.StormSubmitter - Uploading dependencies - jars...
    8661 [main] INFO o.a.s.StormSubmitter - Uploading dependencies - artifacts...
    8662 [main] INFO o.a.s.StormSubmitter - Dependency Blob keys - jars : [] / artifacts : []
    8751 [main] INFO o.a.s.StormSubmitter - Uploading topology jar /opt/run/storm-demo-1.0-SNAPSHOT-jar-with-dependencies.jar to assigned location: /opt/module/apache-storm-1.2.3/data/nimbus/inbox/stormjar-c0d5b00a-b07e-48f1-ac4d-871c5b3f635d.jar
    9815 [main] INFO o.a.s.StormSubmitter - Successfully uploaded topology jar to assigned location: /opt/module/apache-storm-1.2.3/data/nimbus/inbox/stormjar-c0d5b00a-b07e-48f1-ac4d-871c5b3f635d.jar
    9815 [main] INFO o.a.s.StormSubmitter - Submitting topology SentenceTopology in distributed mode with conf {"topology.workers":4,"storm.zookeeper.topology.auth.scheme":"digest","storm.zookeeper.topology.auth.payload":"-8420262939352556619:-8011743779888436007"}
    9815 [main] WARN o.a.s.u.Utils - STORM-VERSION new 1.2.3 old 1.2.3
    11935 [main] INFO o.a.s.StormSubmitter - Finished submitting topology: SentenceTopology
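
Because the spout uses the LATEST first-poll offset strategy, the topology only processes messages produced after it starts. A quick way to feed it, assuming Kafka's command-line tools are available on the broker host (broker address and topic name are the ones set in newKafkaSpoutConfig()), is the console producer; each line becomes one word tuple, and a word ending in "." closes the sentence:

    # hypothetical smoke test, run from the Kafka installation directory
    bin/kafka-console-producer.sh --broker-list 192.168.1.201:9092 --topic first
    >Hello
    >Storm
    >Kafka.

With this input, PrinterBolt should log "Received Sentence: Hello Storm Kafka." in the worker log.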

5. Check the running topology in the Storm UI
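
If the UI daemon is not already running it can be started on the Nimbus host; the port below assumes the default ui.port of 8080, and the host name is the Nimbus leader reported in the submission log:

    # start the Storm UI daemon, then open http://hadoop201.com:8080 to inspect SentenceTopology
    bin/storm ui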
