1 Requirement

The combination of Kafka, Storm, and HDFS is a framework stack commonly used for streaming data. The sections below implement the following requirement in code.

Requirement: using the techniques covered so far, have Kafka receive random sentences and feed them into Storm; use a Storm cluster to count how many times each word in the sentences occurs (word count), and write the results to HDFS.

1 pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>bigdata</groupId>
    <artifactId>homework</artifactId>
    <version>1.0-SNAPSHOT</version>

    <dependencies>
        <dependency>
            <groupId>org.apache.storm</groupId>
            <artifactId>storm-core</artifactId>
            <!-- Uncomment "provided" when submitting to a real cluster so that
                 storm-core is not packaged into the shaded jar. -->
            <!--<scope>provided</scope>-->
            <version>1.2.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.storm</groupId>
            <artifactId>storm-kafka-client</artifactId>
            <!--<scope>provided</scope>-->
            <version>1.2.2</version>
        </dependency>
        <!-- NOTE: the Kafka producer/consumer classes below also need kafka-clients on the
             classpath; add it explicitly if it is not pulled in transitively. -->
        <dependency>
            <groupId>org.apache.storm</groupId>
            <artifactId>storm-hdfs</artifactId>
            <!-- Version kept as in the original; ideally align it with storm-core (1.2.2). -->
            <version>1.0.2</version>
            <exclusions>
                <exclusion>
                    <groupId>io.confluent</groupId>
                    <artifactId>kafka-avro-serializer</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.12</version>
            <scope>test</scope>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>3.2.1</version>
                <configuration>
                    <createDependencyReducedPom>true</createDependencyReducedPom>
                </configuration>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <transformers>
                                <transformer
                                        implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
                                <transformer
                                        implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                                    <mainClass>storm.StormTopologyDriver</mainClass>
                                </transformer>
                            </transformers>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <!-- Raised from 1.7 to 1.8: the consumer code uses lambdas, which require Java 8. -->
                    <source>1.8</source>
                    <target>1.8</target>
                    <!-- Skips compilation of test sources. -->
                    <skip>true</skip>
                </configuration>
            </plugin>
        </plugins>
    </build>
</project>
2 PullWords.java
package kafka;

import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * Kafka consumer: pulls sentences from the topic.
 */
public class PullWords {

    private KafkaConsumer<String, String> consumer;
    private AtomicBoolean isAutoCommit;
    // Kafka topic; must match the topic the producer writes to.
    private final static String TOPIC = "wordCount";

    public PullWords() {
        isAutoCommit = new AtomicBoolean(false); // manual commit by default
        Properties props = new Properties();
        // bootstrap.servers must point to the Kafka brokers (port 9092 by default), not to ZooKeeper
        props.put("bootstrap.servers", "mini1:9092,mini2:9092,mini3:9092");
        props.put("group.id", "wordCount"); // consumers in the same group cooperate to consume the subscribed topic
        if (isAutoCommit.get()) {
            props.put("enable.auto.commit", "true");      // enable auto commit
            props.put("auto.commit.interval.ms", "1000"); // how often offsets are auto-committed
        } else {
            props.put("enable.auto.commit", "false");
        }
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList(TOPIC));
    }

    public void subscribe(String... topic) {
        consumer.subscribe(Arrays.asList(topic));
    }

    // Poll once and commit the offsets synchronously.
    public ConsumerRecords<String, String> pull() {
        ConsumerRecords<String, String> records = consumer.poll(100);
        consumer.commitSync();
        return records;
    }

    // Poll until at least one record has been received, then commit and return that batch.
    public ConsumerRecords<String, String> pullOneOrMore() {
        ConsumerRecords<String, String> records;
        List<String> values = new ArrayList<>();
        while (true) {
            records = consumer.poll(10);
            if (records != null) {
                records.forEach(e -> values.add(e.value()));
                if (values.size() >= 1) {
                    consumer.commitSync();
                    break;
                }
            }
        }
        return records;
    }

    public void close() {
        consumer.close();
    }
}
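
The post does not show how this consumer is exercised. As a usage example, here is a small hypothetical check (the class name and the print loop are illustrative, not part of the original) that prints whatever PullWords receives:

package kafka;

import org.apache.kafka.clients.consumer.ConsumerRecords;

// Hypothetical check: print a few batches pulled from the topic.
public class PullWordsCheck {
    public static void main(String[] args) {
        PullWords pullWords = new PullWords();
        try {
            for (int i = 0; i < 10; i++) {
                ConsumerRecords<String, String> records = pullWords.pullOneOrMore();
                records.forEach(r -> System.out.println(r.key() + " -> " + r.value()));
            }
        } finally {
            pullWords.close();
        }
    }
}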

3 PushWords.java

package kafka;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.util.Properties;
import java.util.concurrent.Future;

/**
 * Kafka producer: pushes sentences to the cluster.
 *
 * @Author hongzw@citycloud.com.cn
 * @Date 2019-02-16 7:08 PM
 */
public class PushWords {

    private Producer<String, String> producer;
    // Kafka topic; the Storm KafkaSpout must subscribe to this same topic.
    private final static String TOPIC = "words";

    public PushWords() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "storm01:9092,storm02:9092,storm03:9092");
        props.put("acks", "all");
        props.put("retries", 0); // do not retry failed requests
        props.put("batch.size", 16384);
        props.put("linger.ms", 1);
        props.put("buffer.memory", 33554432);
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        producer = new KafkaProducer<>(props);
    }

    // Send a sentence to the Kafka cluster; send() is asynchronous.
    public Future<RecordMetadata> push(String key, String words) {
        return producer.send(new ProducerRecord<>(TOPIC, key, words));
    }

    public void close() {
        producer.close();
    }
}
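
The requirement calls for random sentences to be pushed into Kafka, but the post does not include the driver that does so. Below is a minimal sketch of such a driver; the class name, the example sentences, and the loop/throttle values are made up for illustration. It simply picks a sentence at random and sends it through PushWords:

package kafka;

import java.util.Random;

// Hypothetical driver: feeds random sentences into Kafka via PushWords.
public class PushWordsDriver {

    private static final String[] SENTENCES = {
            "the quick brown fox jumps over the lazy dog",
            "storm counts words from kafka",
            "results are written to hdfs"
    };

    public static void main(String[] args) throws InterruptedException {
        PushWords pushWords = new PushWords();
        Random random = new Random();
        try {
            for (int i = 0; i < 100; i++) {
                String sentence = SENTENCES[random.nextInt(SENTENCES.length)];
                pushWords.push(String.valueOf(i), sentence); // key, sentence
                Thread.sleep(500); // throttle so the stream is easy to observe
            }
        } finally {
            pushWords.close();
        }
    }
}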

4 WordCount.java

package storm;

import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class WordCount extends BaseBasicBolt {

    Map<String, Integer> wordCountMap = new HashMap<>();

    @Override
    public void execute(Tuple tuple, BasicOutputCollector basicOutputCollector) {
        String word = tuple.getValueByField("word").toString();
        Integer count = Integer.valueOf(tuple.getValueByField("count").toString());
        Integer current = wordCountMap.get(word);
        if (current == null) {
            wordCountMap.put(word, count);
        } else {
            wordCountMap.put(word, current + count);
        }
        // Once the map holds more than 20 distinct words, flush the batch to the HDFS bolt.
        if (wordCountMap.size() > 20) {
            List<Object> list = new ArrayList<>();
            for (Map.Entry<String, Integer> entry : wordCountMap.entrySet()) {
                list.add(entry.getKey() + ":" + entry.getValue());
            }
            wordCountMap.clear(); // counts are reset after each batch is emitted
            if (list.size() > 0) {
                // Emit the whole batch as a single "total" field; the HdfsBolt writes its toString().
                basicOutputCollector.emit(new Values(list));
            }
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        outputFieldsDeclarer.declare(new Fields("total"));
    }
}

5 WordCountSplit.java

package storm;

import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;

public class WordCountSplit extends BaseBasicBolt {

    @Override
    public void execute(Tuple tuple, BasicOutputCollector collector) {
        // The KafkaSpout's default translator emits "topic, partition, offset, key, value";
        // the sentence sits in the "value" field.
        String[] words = tuple.getValueByField("value").toString().split(" ");
        for (String word : words) {
            collector.emit(new Values(word, 1));
        }
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("word", "count"));
    }
}

6 StormTopologyDriver.java

package storm;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.hdfs.bolt.HdfsBolt;
import org.apache.storm.hdfs.bolt.format.DefaultFileNameFormat;
import org.apache.storm.hdfs.bolt.format.DelimitedRecordFormat;
import org.apache.storm.hdfs.bolt.format.FileNameFormat;
import org.apache.storm.hdfs.bolt.format.RecordFormat;
import org.apache.storm.hdfs.bolt.rotation.FileRotationPolicy;
import org.apache.storm.hdfs.bolt.rotation.FileSizeRotationPolicy;
import org.apache.storm.hdfs.bolt.sync.CountSyncPolicy;
import org.apache.storm.hdfs.bolt.sync.SyncPolicy;
import org.apache.storm.kafka.spout.KafkaSpout;
import org.apache.storm.kafka.spout.KafkaSpoutConfig;
import org.apache.storm.topology.TopologyBuilder;

public class StormTopologyDriver {

    public static void main(String[] args) throws InvalidTopologyException, AuthorizationException, AlreadyAliveException {
        TopologyBuilder topologyBuilder = new TopologyBuilder();

        // The first argument is the Kafka bootstrap servers (brokers, port 9092 by default), not ZooKeeper.
        // NOTE: the topic here must match the one the producer writes to (TOPIC = "words" in PushWords).
        KafkaSpoutConfig.Builder<String, String> builder =
                new KafkaSpoutConfig.Builder<>("mini1:9092", "wordCount");
        builder.setProp("group.id", "wordCount");
        builder.setProp("enable.auto.commit", "true");
        builder.setProp("auto.commit.interval.ms", "1000");
        builder.setProp("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        builder.setProp("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        // kafkaSpout -> wordCountSplit -> wordCount -> hdfsBolt
        topologyBuilder.setSpout("kafkaSpout", new KafkaSpout<>(builder.build()));
        topologyBuilder.setBolt("wordCountSplit", new WordCountSplit()).shuffleGrouping("kafkaSpout");
        topologyBuilder.setBolt("wordCount", new WordCount()).shuffleGrouping("wordCountSplit");

        // Write the results to HDFS.
        // Field delimiter for the output records.
        RecordFormat format = new DelimitedRecordFormat().withFieldDelimiter(",");
        // Sync to HDFS every 5 tuples.
        SyncPolicy syncPolicy = new CountSyncPolicy(5);
        // Rotate the output file once it reaches 1 MB.
        FileRotationPolicy rotationPolicy = new FileSizeRotationPolicy(1.0f, FileSizeRotationPolicy.Units.MB);
        // Output directory on HDFS.
        FileNameFormat fileNameFormat = new DefaultFileNameFormat().withPath("/storm");
        HdfsBolt hdfsBolt = new HdfsBolt().withFsUrl("hdfs://mini1:9000").withFileNameFormat(fileNameFormat)
                .withRecordFormat(format).withRotationPolicy(rotationPolicy).withSyncPolicy(syncPolicy);
        topologyBuilder.setBolt("hdfsBolt", hdfsBolt).shuffleGrouping("wordCount");

        Config config = new Config();
        config.setNumWorkers(2);

        // Local mode:
        // LocalCluster localCluster = new LocalCluster();
        // localCluster.submitTopology("countWords", config, topologyBuilder.createTopology());

        // Cluster mode:
        StormSubmitter.submitTopology("countWords", config, topologyBuilder.createTopology());
    }
}
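
Once the topology is running, the results can be verified by reading back the files the HdfsBolt writes under /storm. The sketch below is a hypothetical verification helper using the Hadoop FileSystem API, assuming the Hadoop client classes pulled in by storm-hdfs are on the classpath; the class name is illustrative and not part of the original post.

package storm;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

import java.net.URI;

// Hypothetical check: list and print the word-count files the HdfsBolt wrote under /storm.
public class HdfsResultCheck {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(URI.create("hdfs://mini1:9000"), new Configuration());
        for (FileStatus status : fs.listStatus(new Path("/storm"))) {
            System.out.println("=== " + status.getPath() + " ===");
            try (FSDataInputStream in = fs.open(status.getPath())) {
                IOUtils.copyBytes(in, System.out, 4096, false); // keep System.out open
            }
        }
        fs.close();
    }
}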
