Goal: filter the messages read from Kafka, append a specified field to each tuple, and print the result.
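
The pipeline has four parts: SentenceSpout reads messages from a Kafka topic and emits them as tuples, FunctionBolt appends a gender field, MyFilter drops one specific combination of field values, and PrintFilter prints every field of the tuples that survive.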

SentenceSpout:

package Trident;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Values;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;

/**
 * Reads messages from Kafka and emits them as tuples.
 * @author BFD-593
 */
public class SentenceSpout extends BaseRichSpout {
    private SpoutOutputCollector collector;
    private ConsumerConnector consumer;

    @Override
    public void nextTuple() {
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put("helloworld", Integer.valueOf(1));
        StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
        StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());
        Map<String, List<KafkaStream<String, String>>> consumerMap =
                consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);
        KafkaStream<String, String> stream = consumerMap.get("helloworld").get(0);
        // Note: the blocking iterator keeps this call from ever returning,
        // which is acceptable only for a local demo like this one.
        ConsumerIterator<String, String> it = stream.iterator();
        while (it.hasNext()) {
            // Append two placeholder tokens so every message splits into three parts.
            String string = it.next().message() + " 1" + " 2";
            String[] parts = string.split(" ");
            String name = parts[0];
            String value = parts.length > 1 ? parts[1] : "";
            String value2 = parts.length > 2 ? parts[2] : "";
            this.collector.emit(new Values(name, value, value2));
        }
    }

    @Override
    public void open(Map map, TopologyContext context, SpoutOutputCollector collector) {
        this.collector = collector;
        Properties props = new Properties();
        // ZooKeeper address
        props.put("zookeeper.connect", "192.168.170.185:2181");
        // consumer group
        props.put("group.id", "testgroup");
        // ZooKeeper timeouts
        props.put("zookeeper.session.timeout.ms", "4000");
        props.put("zookeeper.sync.time.ms", "200");
        props.put("auto.commit.interval.ms", "1000");
        props.put("auto.offset.reset", "smallest");
        // serializer class
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        ConsumerConfig config = new ConsumerConfig(props);
        this.consumer = Consumer.createJavaConsumerConnector(config);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("name", "sentence", "sentence2"));
    }
}
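
To feed the spout while testing, you can push a few messages into the helloworld topic. Below is a minimal producer sketch using the standard Kafka producer client; the class name TestProducer and the broker address are assumptions (the post only gives the ZooKeeper host), so adjust them to your environment.

TestProducer (hypothetical test helper, not part of the original topology):

package Trident;

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class TestProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        // Broker address is assumed; the post only lists the ZooKeeper host.
        props.put("bootstrap.servers", "192.168.170.185:9092");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
        // "a" will be tagged 男 by FunctionBolt; anything else is tagged 女.
        producer.send(new ProducerRecord<String, String>("helloworld", "a"));
        producer.send(new ProducerRecord<String, String>("helloworld", "hello"));
        producer.close();
    }
}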

FunctionBolt:

package Trident;

import org.apache.storm.trident.operation.BaseFunction;
import org.apache.storm.trident.operation.TridentCollector;
import org.apache.storm.trident.tuple.TridentTuple;
import org.apache.storm.tuple.Values;

/**
 * Trident function operation: appends a "gender" field to the data emitted
 * by the spout. It does not replace the original tuple.
 * @author BFD-593
 */
public class FunctionBolt extends BaseFunction {
    @Override
    public void execute(TridentTuple tuple, TridentCollector collector) {
        String str = tuple.getStringByField("name");
        // "a".equals(str) avoids a NullPointerException when the field is null.
        if ("a".equals(str)) {
            collector.emit(new Values("男"));
        } else {
            collector.emit(new Values("女"));
        }
    }
}
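
Because the topology passes new Fields("name") as this function's input fields, only the name field is visible inside execute, while the emitted gender value is appended to the complete input tuple. A tuple ("a", "1", "2") therefore flows on as ("a", "1", "2", "男").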

MyFilter:

package Trident;

import java.util.Map;

import org.apache.storm.trident.operation.BaseFilter;
import org.apache.storm.trident.operation.TridentOperationContext;
import org.apache.storm.trident.tuple.TridentTuple;

/**
 * Trident filter operation: drops every tuple emitted by the spout whose
 * field 0 is "a" and whose field 1 is "b".
 * @author BFD-593
 */
public class MyFilter extends BaseFilter {
    private TridentOperationContext context;

    @Override
    public void prepare(Map conf, TridentOperationContext context) {
        super.prepare(conf, context);
        this.context = context;
    }

    @Override
    public boolean isKeep(TridentTuple tuple) {
        String name = tuple.getStringByField("name");
        String value = tuple.getStringByField("sentence");
        // Keep the tuple unless name is "a" and sentence is "b" at the same time.
        return (!"a".equals(name)) || (!"b".equals(value));
    }
}
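
For example, a tuple with name "a" and sentence "b" returns false and is dropped, while ("a", "c", ...) and ("x", "b", ...) both return true and pass through.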

PrintFilter:

package Trident;

import java.util.Iterator;
import java.util.Map;

import org.apache.storm.trident.operation.BaseFilter;
import org.apache.storm.trident.operation.TridentOperationContext;
import org.apache.storm.trident.tuple.TridentTuple;
import org.apache.storm.tuple.Fields;

/**
 * Pass-through filter that prints every field and its value for each tuple.
 * @author BFD-593
 */
public class PrintFilter extends BaseFilter {
    private TridentOperationContext context = null;

    @Override
    public void prepare(Map conf, TridentOperationContext context) {
        super.prepare(conf, context);
        this.context = context;
    }

    @Override
    public boolean isKeep(TridentTuple tuple) {
        Fields fields = tuple.getFields();
        Iterator<String> iterator = fields.iterator();
        StringBuilder str = new StringBuilder();
        while (iterator.hasNext()) {
            String next = iterator.next();
            Object value = tuple.getValueByField(next);
            str.append(next).append(":").append(value).append(",");
        }
        System.out.println(str);
        // Always keep the tuple; this filter exists only for its printing side effect.
        return true;
    }
}
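
Since the spout appends " 1 2" to every message, a Kafka message "a" reaches this filter as the tuple (a, 1, 2, 男) and is printed as name:a,sentence:1,sentence2:2,gender:男,.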

TopologyTrident:

package Trident;

import org.apache.kafka.common.utils.Utils;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.trident.TridentTopology;
import org.apache.storm.tuple.Fields;

/**
 * Wires up the Trident function and filter operations.
 * @author BFD-593
 */
public class TopologyTrident {
    public static void main(String[] args) {
        SentenceSpout spout = new SentenceSpout();
        TridentTopology topology = new TridentTopology();
        topology.newStream("spout", spout)
                .each(new Fields("name"), new FunctionBolt(), new Fields("gender"))
                .each(new Fields("name", "sentence"), new MyFilter())
                .each(new Fields("name", "sentence", "sentence2", "gender"), new PrintFilter());

        Config conf = new Config();
        LocalCluster clu = new LocalCluster();
        clu.submitTopology("mytopology", conf, topology.build());
        Utils.sleep(100000000);
        clu.killTopology("mytopology");
        clu.shutdown();
    }
}
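
End to end: a message "a b" on the topic becomes the tuple ("a", "b", "1"), gains gender "男", and is then dropped by MyFilter because name is "a" and sentence is "b". A message such as "c" becomes ("c", "1", "2"), gains gender "女", and is printed by PrintFilter.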

  
